build: 3906 (7eee341b) with Apple clang version 15.0.0 (clang-1500.3.9.4) for arm64-apple-darwin23.6.0
llama_model_loader: loaded meta data with 35 key-value pairs and 219 tensors from salamandra-2b-instruct_IQ2_S.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.size_label str = 2.3B
llama_model_loader: - kv 3: general.license str = apache-2.0
llama_model_loader: - kv 4: general.tags arr[str,1] = ["text-generation"]
llama_model_loader: - kv 5: general.languages arr[str,36] = ["bg", "ca", "code", "cs", "cy", "da"...
llama_model_loader: - kv 6: llama.block_count u32 = 24
llama_model_loader: - kv 7: llama.context_length u32 = 8192
llama_model_loader: - kv 8: llama.embedding_length u32 = 2048
llama_model_loader: - kv 9: llama.feed_forward_length u32 = 5440
llama_model_loader: - kv 10: llama.attention.head_count u32 = 16
llama_model_loader: - kv 11: llama.attention.head_count_kv u32 = 16
llama_model_loader: - kv 12: llama.rope.freq_base f32 = 10000.000000
llama_model_loader: - kv 13: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 14: general.file_type u32 = 28
llama_model_loader: - kv 15: llama.vocab_size u32 = 256000
llama_model_loader: - kv 16: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 17: tokenizer.ggml.add_space_prefix bool = true
llama_model_loader: - kv 18: tokenizer.ggml.model str = llama
llama_model_loader: - kv 19: tokenizer.ggml.pre str = default
llama_model_loader: - kv 20: tokenizer.ggml.tokens arr[str,256000] = ["", "", "", "", "<|...
llama_model_loader: - kv 21: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 22: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 24: tokenizer.ggml.eos_token_id u32 = 2
llama_model_loader: - kv 25: tokenizer.ggml.unknown_token_id u32 = 0
llama_model_loader: - kv 26: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 27: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 28: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 29: tokenizer.chat_template str = {%- if not date_string is defined %}{...
llama_model_loader: - kv 30: general.quantization_version u32 = 2
llama_model_loader: - kv 31: quantize.imatrix.file str = imatrix/oscar/imatrix.dat
llama_model_loader: - kv 32: quantize.imatrix.dataset str = ./imatrix/oscar/imatrix-dataset.txt
llama_model_loader: - kv 33: quantize.imatrix.entries_count i32 = 168
llama_model_loader: - kv 34: quantize.imatrix.chunks_count i32 = 44176
llama_model_loader: - type f32: 49 tensors
llama_model_loader: - type iq2_xs: 96 tensors
llama_model_loader: - type iq4_nl: 24 tensors
llama_model_loader: - type iq3_s: 49 tensors
llama_model_loader: - type bf16: 1 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 104
llm_load_vocab: token to piece cache size = 1.8842 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2048
llm_load_print_meta: n_layer = 24
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 16
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 1
llm_load_print_meta: n_embd_k_gqa = 2048
llm_load_print_meta: n_embd_v_gqa = 2048
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 5440
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = ?B
llm_load_print_meta: model ftype = IQ2_S - 2.5 bpw
llm_load_print_meta: model params = 2.25 B
llm_load_print_meta: model size = 1.61 GiB (6.12 BPW)
llm_load_print_meta: general.name = n/a
llm_load_print_meta: BOS token = 1 ''
llm_load_print_meta: EOS token = 2 ''
llm_load_print_meta: UNK token = 0 ''
llm_load_print_meta: PAD token = 0 ''
llm_load_print_meta: LF token = 145 '<0x0A>'
llm_load_print_meta: EOT token = 5 '<|im_end|>'
llm_load_print_meta: EOG token = 2 ''
llm_load_print_meta: EOG token = 5 '<|im_end|>'
llm_load_print_meta: max token length = 72
llm_load_tensors: ggml ctx size = 0.20 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors: Metal buffer size = 1644.10 MiB
llm_load_tensors: CPU buffer size = 214.84 MiB
............................
llama_new_context_with_model: n_ctx = 8192
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 128
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: Apple M3 Max
ggml_metal_init: picking default device: Apple M3 Max
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name: Apple M3 Max
ggml_metal_init: GPU family: MTLGPUFamilyApple9 (1009)
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3 (5001)
ggml_metal_init: simdgroup reduction support = true
ggml_metal_init: simdgroup matrix mul. support = true
ggml_metal_init: hasUnifiedMemory = true
ggml_metal_init: recommendedMaxWorkingSetSize = 42949.67 MB
llama_kv_cache_init: Metal KV buffer size = 1536.00 MiB
llama_new_context_with_model: KV self size = 1536.00 MiB, K (f16): 768.00 MiB, V (f16): 768.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.98 MiB
llama_new_context_with_model: Metal compute buffer size = 72.00 MiB
llama_new_context_with_model: CPU compute buffer size = 125.00 MiB
llama_new_context_with_model: graph nodes = 774
llama_new_context_with_model: graph splits = 3
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)
system_info: n_threads = 15 (n_threads_batch = 15) / 16 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 1 | SVE = 0 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 1 | LLAMAFILE = 1 |
perplexity: tokenizing the input ..
perplexity: tokenization took 3608.25 ms
perplexity: calculating perplexity over 134 chunks, n_ctx=8192, batch_size=512, n_seq=1
perplexity: 13.67 seconds per pass - ETA 30.53 minutes
[1]29.6010,[2]29.5172,[3]25.7862,[4]25.0898,[5]23.4808,[6]22.3861,[7]24.0240,[8]23.4845,[9]22.8642,[10]21.8118,[11]22.9535,[12]23.1602,[13]25.2016,[14]25.7935,[15]25.7491,[16]26.5747,[17]27.1112,[18]26.9186,[19]26.8906,[20]27.4454,[21]27.3279,[22]24.9212,[23]25.1674,[24]24.4545,[25]23.6118,[26]22.8598,[27]22.5139,[28]22.1881,[29]22.0997,[30]21.6820,[31]22.0830,[32]22.0955,[33]22.8174,[34]23.1896,[35]23.5745,[36]23.0854,[37]22.9720,[38]23.0211,[39]22.6747,[40]22.6684,[41]22.6271,[42]22.2360,[43]22.0886,[44]22.3052,[45]22.5768,[46]22.3153,[47]22.7746,[48]23.0428,[49]23.6171,[50]24.1869,[51]24.2844,[52]24.7079,[53]25.2634,[54]25.8122,[55]26.0916,[56]25.8450,[57]25.7558,[58]25.2386,[59]25.0156,[60]24.6812,[61]24.7264,[62]25.0349,[63]25.4095,[64]25.5203,[65]25.5732,[66]25.9103,[67]25.8713,[68]25.6933,[69]25.4466,[70]25.2968,[71]25.3280,[72]25.2495,[73]25.2949,[74]25.2370,[75]25.2667,[76]25.1723,[77]25.2576,[78]25.2563,[79]25.2661,[80]25.3049,[81]24.7500,[82]24.7057,[83]24.5000,[84]24.5956,[85]24.7109,[86]25.0832,[87]25.1826,[88]25.4442,[89]25.5627,[90]25.7940,[91]25.9148,[92]25.6167,[93]25.7212,[94]25.6776,[95]25.9323,[96]26.2587,[97]26.4019,[98]26.5851,[99]26.8920,[100]26.9814,[101]27.0229,[102]26.9609,[103]26.8877,[104]26.8538,[105]26.7966,[106]26.5723,[107]26.3274,[108]26.4363,[109]26.4825,[110]26.3185,[111]26.2732,[112]26.0267,[113]25.7671,[114]25.7432,[115]25.6708,[116]25.6618,[117]25.4755,[118]25.2418,[119]25.2166,[120]25.3133,[121]25.3392,[122]25.3860,[123]25.4629,[124]25.4971,[125]25.5046,[126]25.5585,[127]25.6160,[128]25.7583,[129]25.7385,[130]25.6987,[131]25.7995,[132]25.7599,[133]25.6648,[134]25.3893,
Final estimate: PPL = 25.3893 +/- 0.10575

llama_perf_context_print: load time = 637.32 ms
llama_perf_context_print: prompt eval time = 1669379.99 ms / 1097728 tokens ( 1.52 ms per token, 657.57 tokens per second)
llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_perf_context_print: total time = 1728537.03 ms / 1097729 tokens
ggml_metal_free: deallocating
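The bracketed values [1] through [134] are running perplexity estimates, updated after each of the 134 chunks of n_ctx = 8192 tokens (134 × 8192 = 1,097,728 tokens, matching the prompt-eval count above); the final estimate is taken over all evaluated tokens. As a rough cross-check of what the tool reports, the sketch below recomputes perplexity as the exponential of the mean per-token negative log-likelihood. This is not the llama.cpp implementation: the helper name and the synthetic data are made up for illustration, and treating the "+/-" term as the standard error of the mean NLL propagated through exp() is an assumption.

```python
import math
import random


def perplexity(token_logprobs):
    """Perplexity from per-token natural-log probabilities.

    Sketch only: a real run scores each token against its preceding context
    within an 8192-token chunk; here we assume `token_logprobs` already
    holds those log-probabilities.
    """
    n = len(token_logprobs)
    nll = [-lp for lp in token_logprobs]      # per-token negative log-likelihood
    mean_nll = sum(nll) / n
    ppl = math.exp(mean_nll)                  # PPL = exp(mean NLL)

    # Assumed error bar: standard error of the mean NLL, pushed through exp()
    # via the delta method (se_ppl ~= ppl * se_nll).
    var_nll = sum((x - mean_nll) ** 2 for x in nll) / n
    se_ppl = ppl * math.sqrt(var_nll / n)
    return ppl, se_ppl


if __name__ == "__main__":
    # Toy stand-in for the 1,097,728 evaluated tokens: synthetic log-probs
    # centred roughly at -ln(25.4) ≈ -3.23 so the output lands near the
    # PPL seen in the log above.
    random.seed(0)
    fake_logprobs = [-abs(random.gauss(3.23, 1.0)) for _ in range(8192)]
    ppl, err = perplexity(fake_logprobs)
    print(f"Final estimate: PPL = {ppl:.4f} +/- {err:.5f}")
```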