build: 3906 (7eee341b) with Apple clang version 15.0.0 (clang-1500.3.9.4) for arm64-apple-darwin23.6.0
llama_model_loader: loaded meta data with 35 key-value pairs and 219 tensors from salamandra-2b-instruct_IQ2_M.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.size_label str = 2.3B
llama_model_loader: - kv 3: general.license str = apache-2.0
llama_model_loader: - kv 4: general.tags arr[str,1] = ["text-generation"]
llama_model_loader: - kv 5: general.languages arr[str,36] = ["bg", "ca", "code", "cs", "cy", "da"...
llama_model_loader: - kv 6: llama.block_count u32 = 24
llama_model_loader: - kv 7: llama.context_length u32 = 8192
llama_model_loader: - kv 8: llama.embedding_length u32 = 2048
llama_model_loader: - kv 9: llama.feed_forward_length u32 = 5440
llama_model_loader: - kv 10: llama.attention.head_count u32 = 16
llama_model_loader: - kv 11: llama.attention.head_count_kv u32 = 16
llama_model_loader: - kv 12: llama.rope.freq_base f32 = 10000.000000
llama_model_loader: - kv 13: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 14: general.file_type u32 = 29
llama_model_loader: - kv 15: llama.vocab_size u32 = 256000
llama_model_loader: - kv 16: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 17: tokenizer.ggml.add_space_prefix bool = true
llama_model_loader: - kv 18: tokenizer.ggml.model str = llama
llama_model_loader: - kv 19: tokenizer.ggml.pre str = default
llama_model_loader: - kv 20: tokenizer.ggml.tokens arr[str,256000] = ["<unk>", "<s>", "</s>", "<pad>", "<|...
llama_model_loader: - kv 21: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 22: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 24: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 25: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 26: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 27: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 28: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 29: tokenizer.chat_template str = {%- if not date_string is defined %}{...
llama_model_loader: - kv 30: general.quantization_version u32 = 2
llama_model_loader: - kv 31: quantize.imatrix.file str = imatrix/oscar/imatrix.dat
llama_model_loader: - kv 32: quantize.imatrix.dataset str = ./imatrix/oscar/imatrix-dataset.txt
llama_model_loader: - kv 33: quantize.imatrix.entries_count i32 = 168
llama_model_loader: - kv 34: quantize.imatrix.chunks_count i32 = 44176
llama_model_loader: - type f32: 49 tensors
llama_model_loader: - type iq4_nl: 24 tensors
llama_model_loader: - type iq3_s: 49 tensors
llama_model_loader: - type iq2_s: 96 tensors
llama_model_loader: - type bf16: 1 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 104
llm_load_vocab: token to piece cache size = 1.8842 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2048
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 16
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 5440
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = IQ2_M - 2.7 bpw
llm_load_print_meta: model params = 2.25 B
llm_load_print_meta: model size = 1.63 GiB (6.20 BPW)
llm_load_print_meta: general.name = n/a
llm_load_print_meta: BOS token = 1 '<s>'
llm_load_print_meta: EOS token = 2 '</s>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: PAD token = 0 '<unk>'
llm_load_print_meta: LF token = 145 '<0x0A>'
llm_load_print_meta: EOT token = 5 '<|im_end|>'
llm_load_print_meta: EOG token = 2 '</s>'
llm_load_print_meta: EOG token = 5 '<|im_end|>'
llm_load_print_meta: max token length = 72
llm_load_tensors: ggml ctx size = 0.20 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: Metal buffer size = 1666.03 MiB
llm_load_tensors: CPU buffer size = 214.84 MiB
.............................
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 128
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: Apple M3 Max
ggml_metal_init: picking default device: Apple M3 Max
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name: Apple M3 Max
ggml_metal_init: GPU family: MTLGPUFamilyApple9 (1009)
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3 (5001)
ggml_metal_init: simdgroup reduction support = true
ggml_metal_init: simdgroup matrix mul. support = true
ggml_metal_init: hasUnifiedMemory = true
ggml_metal_init: recommendedMaxWorkingSetSize = 42949.67 MB
llama_kv_cache_init: Metal KV buffer size = 1536.00 MiB
llama_new_context_with_model: KV self size = 1536.00 MiB, K (f16): 768.00 MiB, V (f16): 768.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.98 MiB
llama_new_context_with_model: Metal compute buffer size = 72.00 MiB
llama_new_context_with_model: CPU compute buffer size = 125.00 MiB
llama_new_context_with_model: graph nodes = 774
llama_new_context_with_model: graph splits = 3
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)

system_info: n_threads = 15 (n_threads_batch = 15) / 16 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 1 | SVE = 0 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 1 | LLAMAFILE = 1 |
perplexity: tokenizing the input ..
perplexity: tokenization took 2958.69 ms
perplexity: calculating perplexity over 134 chunks, n_ctx=8192, batch_size=512, n_seq=1
perplexity: 9.96 seconds per pass - ETA 22.22 minutes
[1]24.6873,[2]24.9338,[3]22.0731,[4]21.5617,[5]20.3352,[6]19.5244,[7]20.9527,[8]20.4266,[9]19.9018,[10]18.9376,[11]19.8449,[12]20.0418,[13]21.7210,[14]22.2197,[15]22.1832,[16]22.8980,[17]23.3093,[18]23.1485,[19]23.1590,[20]23.5955,[21]23.5301,[22]21.2644,[23]21.4668,[24]20.8670,[25]20.1188,[26]19.4641,[27]19.1656,[28]18.8924,[29]18.7991,[30]18.4629,[31]18.8051,[32]18.8515,[33]19.4711,[34]19.8213,[35]20.1886,[36]19.7944,[37]19.7345,[38]19.8108,[39]19.5440,[40]19.5595,[41]19.5283,[42]19.2267,[43]19.1196,[44]19.3165,[45]19.5565,[46]19.3342,[47]19.7107,[48]19.9107,[49]20.3577,[50]20.8280,[51]20.8886,[52]21.2299,[53]21.7101,[54]22.1793,[55]22.3834,[56]22.1513,[57]22.0584,[58]21.6444,[59]21.4662,[60]21.1954,[61]21.2462,[62]21.4794,[63]21.7730,[64]21.8675,[65]21.9175,[66]22.2055,[67]22.1659,[68]22.0154,[69]21.8073,[70]21.6691,[71]21.6765,[72]21.6068,[73]21.6309,[74]21.5618,[75]21.5749,[76]21.4933,[77]21.5708,[78]21.5741,[79]21.5906,[80]21.6349,[81]21.1919,[82]21.1488,[83]20.9681,[84]21.0413,[85]21.1279,[86]21.4274,[87]21.4890,[88]21.7182,[89]21.8058,[90]21.9993,[91]22.0906,[92]21.8472,[93]21.9383,[94]21.9085,[95]22.1222,[96]22.3960,[97]22.5114,[98]22.6622,[99]22.9017,[100]22.9584,[101]22.9937,[102]22.9430,[103]22.8901,[104]22.8680,[105]22.8335,[106]22.6443,[107]22.4466,[108]22.5295,[109]22.5529,[110]22.4183,[111]22.3709,[112]22.1711,[113]21.9629,[114]21.9476,[115]21.8984,[116]21.8964,[117]21.7396,[118]21.5493,[119]21.5328,[120]21.6170,[121]21.6406,[122]21.6783,[123]21.7342,[124]21.7686,[125]21.7701,[126]21.8124,[127]21.8536,[128]21.9658,[129]21.9526,[130]21.9195,[131]22.0023,[132]21.9718,[133]21.8911,[134]21.6684,
Final estimate: PPL = 21.6684 +/- 0.08942

llama_perf_context_print: load time = 1070.26 ms
llama_perf_context_print: prompt eval time = 1307831.03 ms / 1097728 tokens ( 1.19 ms per token, 839.35 tokens per second)
llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_perf_context_print: total time = 1348983.83 ms / 1097729 tokens
ggml_metal_free: deallocating