|
llama_model_loader: loaded meta data with 31 key-value pairs and 435 tensors from Yi-Coder-9B-Chat-IMat-GGUF/Yi-Coder-9B-Chat.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Yi Coder 9B Chat
llama_model_loader: - kv 3: general.finetune str = Chat
llama_model_loader: - kv 4: general.basename str = Yi-Coder
llama_model_loader: - kv 5: general.size_label str = 9B
llama_model_loader: - kv 6: general.license str = apache-2.0
llama_model_loader: - kv 7: llama.block_count u32 = 48
llama_model_loader: - kv 8: llama.context_length u32 = 131072
llama_model_loader: - kv 9: llama.embedding_length u32 = 4096
llama_model_loader: - kv 10: llama.feed_forward_length u32 = 11008
llama_model_loader: - kv 11: llama.attention.head_count u32 = 32
llama_model_loader: - kv 12: llama.attention.head_count_kv u32 = 4
llama_model_loader: - kv 13: llama.rope.freq_base f32 = 10000000.000000
llama_model_loader: - kv 14: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 15: general.file_type u32 = 7
llama_model_loader: - kv 16: llama.vocab_size u32 = 64000
llama_model_loader: - kv 17: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 18: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 19: tokenizer.ggml.model str = llama
llama_model_loader: - kv 20: tokenizer.ggml.pre str = default
llama_model_loader: - kv 21: tokenizer.ggml.tokens arr[str,64000] = ["<unk>", "<|startoftext|>", "<|endof...
llama_model_loader: - kv 22: tokenizer.ggml.scores arr[f32,64000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 23: tokenizer.ggml.token_type arr[i32,64000] = [3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, ...
llama_model_loader: - kv 24: tokenizer.ggml.bos_token_id u32 = 1
llama_model_loader: - kv 25: tokenizer.ggml.eos_token_id u32 = 7
llama_model_loader: - kv 26: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 27: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 28: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 29: tokenizer.chat_template str = {% if messages[0]['role'] == 'system'...
llama_model_loader: - kv 30: general.quantization_version u32 = 2
llama_model_loader: - type f32: 97 tensors
llama_model_loader: - type q8_0: 338 tensors
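
The 31 key-value pairs and the tensor census above are plain GGUF metadata and can be inspected offline, without loading the model into llama.cpp. A minimal sketch using the `gguf` Python package from the llama.cpp repo (`pip install gguf`); `GGUFReader` with its `fields`/`tensors` attributes is that package's reader API, and the path is the one from this log:

```python
# Dump GGUF metadata keys and count tensors per quantization type.
from collections import Counter

from gguf import GGUFReader

reader = GGUFReader("Yi-Coder-9B-Chat-IMat-GGUF/Yi-Coder-9B-Chat.Q8_0.gguf.hardlink.gguf")

# Should list the same 31 keys the loader dumps above.
for name in reader.fields:
    print(name)

# Should match the census above: 97 f32 tensors, 338 q8_0 tensors.
print(Counter(t.tensor_type.name for t in reader.tensors))
```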
|
llm_load_vocab: special tokens cache size = 13
llm_load_vocab: token to piece cache size = 0.3834 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 64000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_layer = 48
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 8
llm_load_print_meta: n_embd_k_gqa = 512
llm_load_print_meta: n_embd_v_gqa = 512
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 11008
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 34B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 8.83 B
llm_load_print_meta: model size = 8.74 GiB (8.50 BPW)
llm_load_print_meta: general.name = Yi Coder 9B Chat
llm_load_print_meta: BOS token = 1 '<|startoftext|>'
llm_load_print_meta: EOS token = 7 '<|im_end|>'
llm_load_print_meta: UNK token = 0 '<unk>'
llm_load_print_meta: PAD token = 0 '<unk>'
llm_load_print_meta: LF token = 315 '<0x0A>'
llm_load_print_meta: EOT token = 2 '<|endoftext|>'
llm_load_print_meta: max token length = 48
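
Several of the printed values are derived rather than stored: the head dimension, the GQA ratio, and the per-layer KV width all follow arithmetically from the metadata dumped above, as does the bits-per-weight figure. A quick sanity check (pure arithmetic; the formulas are the standard grouped-query-attention bookkeeping, stated here as an assumption about what the loader computes):

```python
# Re-derive the printed attention geometry from the stored metadata.
n_embd, n_head, n_head_kv = 4096, 32, 4

n_embd_head = n_embd // n_head         # 128 -> n_embd_head_k / n_embd_head_v / n_rot
n_gqa       = n_head // n_head_kv      # 8   -> 8 query heads share each KV head
n_embd_kv   = n_embd_head * n_head_kv  # 512 -> n_embd_k_gqa / n_embd_v_gqa

# Bits per weight: 8.74 GiB spread over 8.83 B parameters.
bpw = 8.74 * 1024**3 * 8 / 8.83e9
print(n_embd_head, n_gqa, n_embd_kv, round(bpw, 2))  # 128 8 512 8.5
```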
|
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes
llm_load_tensors: ggml ctx size = 0.41 MiB
llm_load_tensors: offloading 48 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 49/49 layers to GPU
llm_load_tensors: CPU buffer size = 265.62 MiB
llm_load_tensors: CUDA0 buffer size = 8682.16 MiB
.................................................................................................
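
With all 49 layers offloaded, the 265.62 MiB left in the CPU buffer is consistent with the Q8_0 token-embedding table staying host-side (that it is the embedding table is an inference from the sizes, not something the log states). Q8_0 packs each block of 32 weights as 32 int8 values plus one f16 scale, i.e. 34 bytes per 32 weights, which is also exactly the 8.50 BPW reported earlier:

```python
# Q8_0 layout: 32 int8 weights + 1 f16 scale = 34 bytes per block of 32.
vocab, n_embd = 64000, 4096
size = vocab * n_embd // 32 * 34  # bytes for token_embd.weight
print(size / 1024**2)             # 265.625 -> matches the CPU buffer size
print(34 * 8 / 32)                # 8.5 bits per weight
```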
|
llama_new_context_with_model: n_ctx = 512
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 48.00 MiB
llama_new_context_with_model: KV self size = 48.00 MiB, K (f16): 24.00 MiB, V (f16): 24.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.24 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 133.00 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 9.01 MiB
llama_new_context_with_model: graph nodes = 1542
llama_new_context_with_model: graph splits = 2
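
The 48.00 MiB KV cache is the GQA geometry times the context length: each of the 48 layers stores n_ctx x 512 f16 values for K and the same again for V (the standard llama.cpp f16 KV layout, assumed here):

```python
# f16 KV cache: n_layer * n_ctx * n_embd_k_gqa * 2 bytes, once for K, once for V.
n_layer, n_ctx, n_embd_kv = 48, 512, 512
k_bytes = n_layer * n_ctx * n_embd_kv * 2
print(k_bytes / 1024**2)      # 24.0 MiB for K (and likewise for V)
print(2 * k_bytes / 1024**2)  # 48.0 MiB total, matching "KV self size"
```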
|
|
|
system_info: n_threads = 25 (n_threads_batch = 25) / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 | |
|
compute_imatrix: tokenizing the input ..
compute_imatrix: tokenization took 99.249 ms
compute_imatrix: computing over 146 chunks with batch_size 512
compute_imatrix: 0.88 seconds per pass - ETA 2.13 minutes
[1]9.8198,[2]6.5604,[3]6.5762,[4]7.0806,[5]6.9953,[6]7.1565,[7]5.9481,[8]6.7761,[9]6.6810,
save_imatrix: stored collected data after 10 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[10]7.2665,[11]7.3542,[12]6.7100,[13]7.1440,[14]7.8428,[15]8.1226,[16]8.6544,[17]9.1495,[18]9.2832,[19]9.4664,
save_imatrix: stored collected data after 20 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[20]9.8392,[21]9.3568,[22]9.2338,[23]9.3600,[24]9.3835,[25]9.3889,[26]9.1261,[27]9.4375,[28]9.6221,[29]9.9584,
save_imatrix: stored collected data after 30 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[30]10.0395,[31]10.2920,[32]10.5665,[33]10.5537,[34]10.3409,[35]9.9756,[36]9.4164,[37]8.9196,[38]8.8557,[39]8.7853,
save_imatrix: stored collected data after 40 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[40]8.7371,[41]8.4383,[42]8.1871,[43]8.0015,[44]7.7722,[45]7.5820,[46]7.5505,[47]7.6632,[48]7.8130,[49]7.9891,
save_imatrix: stored collected data after 50 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[50]8.1707,[51]8.5257,[52]8.8144,[53]9.0342,[54]9.1952,[55]9.2209,[56]9.1161,[57]9.2642,[58]9.3533,[59]9.4595,
save_imatrix: stored collected data after 60 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[60]9.3627,[61]9.2811,[62]9.3249,[63]9.5089,[64]9.6632,[65]9.7850,[66]9.8601,[67]9.9305,[68]10.0022,[69]10.0562,
save_imatrix: stored collected data after 70 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[70]9.9152,[71]9.8122,[72]9.7318,[73]9.6664,[74]9.7231,[75]9.7928,[76]9.7848,[77]9.8168,[78]9.8241,[79]9.7773,
save_imatrix: stored collected data after 80 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[80]9.7172,[81]9.6326,[82]9.6526,[83]9.6156,[84]9.5799,[85]9.5961,[86]9.5612,[87]9.4946,[88]9.4473,[89]9.4639,
save_imatrix: stored collected data after 90 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[90]9.4329,[91]9.4060,[92]9.3100,[93]9.2929,[94]9.3700,[95]9.3770,[96]9.3207,[97]9.3439,[98]9.3690,[99]9.3938,
save_imatrix: stored collected data after 100 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[100]9.2644,[101]9.3233,[102]9.3417,[103]9.3820,[104]9.4176,[105]9.4469,[106]9.3712,[107]9.3010,[108]9.2315,[109]9.1467,
save_imatrix: stored collected data after 110 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[110]9.0751,[111]9.0000,[112]8.9344,[113]8.8693,[114]8.8142,[115]8.8457,[116]8.9097,[117]9.0049,[118]9.0946,[119]9.1784,
save_imatrix: stored collected data after 120 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[120]9.3380,[121]9.4386,[122]9.4729,[123]9.4955,[124]9.4380,[125]9.4421,[126]9.4011,[127]9.3066,[128]9.2026,[129]9.1276,
save_imatrix: stored collected data after 130 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[130]9.1887,[131]9.1931,[132]9.2312,[133]9.2682,[134]9.3254,[135]9.3580,[136]9.3817,[137]9.3953,[138]9.3918,[139]9.3793,
save_imatrix: stored collected data after 140 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
[140]9.4587,[141]9.5294,[142]9.5985,[143]9.6776,[144]9.7522,[145]9.8222,[146]9.8724,
save_imatrix: stored collected data after 146 chunks in Yi-Coder-9B-Chat-IMat-GGUF/imatrix.dat
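
The bracketed figures are a running perplexity estimate over the calibration chunks (the value after chunk [146] matches the final estimate below), and save_imatrix checkpoints the accumulated activation statistics every 10 chunks plus once at the end, so an interrupted run still leaves a usable imatrix.dat. The chunk bookkeeping also pins down the token budget:

```python
# 146 chunks of 512 tokens each: the same 74,752 tokens the timings report below.
chunks, chunk_len = 146, 512
print(chunks * chunk_len)  # 74752
print(0.88 * chunks / 60)  # ~2.14 min, in line with the ~2.13 min ETA
```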
|
|
|
llama_print_timings: load time = 2483.50 ms
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: prompt eval time = 110783.96 ms / 74752 tokens ( 1.48 ms per token, 674.75 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 112968.42 ms / 74753 tokens
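
Sample and eval time are zero because imatrix only runs prompt evaluation (forward passes over the calibration text); nothing is sampled or generated, and the stray "1 runs" entries appear to be bookkeeping. The headline prompt-eval figures are internally consistent:

```python
# 74,752 tokens evaluated in 110,783.96 ms of prompt eval.
ms, tokens = 110783.96, 74752
print(ms / tokens)          # ~1.482 ms per token
print(tokens / (ms / 1e3))  # ~674.7 tokens per second
```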
|
|
|
Final estimate: PPL = 9.8724 +/- 0.15747 |
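
The final line is the perplexity of the model over the calibration text with a standard error: PPL = exp of the mean per-token negative log-likelihood, with the +/- term propagated from the NLL variance (a delta-method estimate; that this is exactly llama.cpp's formula is an assumption). A sketch of that reduction, with `nll` standing in for the per-token values, which are not present in this log:

```python
import numpy as np

def ppl_with_se(nll: np.ndarray) -> tuple[float, float]:
    """Perplexity and its standard error from per-token negative log-likelihoods."""
    mean = nll.mean()
    sem = nll.std(ddof=1) / np.sqrt(len(nll))  # standard error of the mean NLL
    ppl = float(np.exp(mean))
    return ppl, float(ppl * sem)  # delta method: d/dx exp(x) = exp(x)

# Over this run's 74,752 calibration tokens, the tool reports 9.8724 +/- 0.15747.
```

For context, this whole log is the output of llama.cpp's llama-imatrix tool; the invocation would have been along the lines of `llama-imatrix -m Yi-Coder-9B-Chat.Q8_0.gguf -f <calibration file> -ngl 99`, though the exact flags used are not shown in the log.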
|
|