build: 3906 (7eee341b) with Apple clang version 15.0.0 (clang-1500.3.9.4) for arm64-apple-darwin23.6.0
llama_model_loader: loaded meta data with 35 key-value pairs and 219 tensors from salamandra-2b-instruct_Q8_0.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv   0:                       general.architecture str              = llama
llama_model_loader: - kv   1:                               general.type str              = model
llama_model_loader: - kv   2:                         general.size_label str              = 2.3B
llama_model_loader: - kv   3:                            general.license str              = apache-2.0
llama_model_loader: - kv   4:                               general.tags arr[str,1]       = ["text-generation"]
llama_model_loader: - kv   5:                          general.languages arr[str,36]      = ["bg", "ca", "code", "cs", "cy", "da"...
llama_model_loader: - kv   6:                          llama.block_count u32              = 24
llama_model_loader: - kv   7:                       llama.context_length u32              = 8192
llama_model_loader: - kv   8:                     llama.embedding_length u32              = 2048
llama_model_loader: - kv   9:                  llama.feed_forward_length u32              = 5440
llama_model_loader: - kv  10:                 llama.attention.head_count u32              = 16
llama_model_loader: - kv  11:              llama.attention.head_count_kv u32              = 16
llama_model_loader: - kv  12:                       llama.rope.freq_base f32              = 10000.000000
llama_model_loader: - kv  13:     llama.attention.layer_norm_rms_epsilon f32              = 0.000010
llama_model_loader: - kv  14:                          general.file_type u32              = 7
llama_model_loader: - kv  15:                           llama.vocab_size u32              = 256000
llama_model_loader: - kv  16:                 llama.rope.dimension_count u32              = 128
llama_model_loader: - kv  17:            tokenizer.ggml.add_space_prefix bool             = true
llama_model_loader: - kv  18:                       tokenizer.ggml.model str              = llama
llama_model_loader: - kv  19:                         tokenizer.ggml.pre str              = default
llama_model_loader: - kv  20:                      tokenizer.ggml.tokens arr[str,256000]  = ["<unk>", "<s>", "</s>", "<pad>", "<|...
llama_model_loader: - kv  21:                      tokenizer.ggml.scores arr[f32,256000]  = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv  22:                  tokenizer.ggml.token_type arr[i32,256000]  = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv  23:                tokenizer.ggml.bos_token_id u32              = 1
llama_model_loader: - kv  24:                tokenizer.ggml.eos_token_id u32              = 2
llama_model_loader: - kv  25:            tokenizer.ggml.unknown_token_id u32              = 0
llama_model_loader: - kv  26:            tokenizer.ggml.padding_token_id u32              = 0
llama_model_loader: - kv  27:               tokenizer.ggml.add_bos_token bool             = true
llama_model_loader: - kv  28:               tokenizer.ggml.add_eos_token bool             = false
llama_model_loader: - kv  29:                    tokenizer.chat_template str              = {%- if not date_string is defined %}{...
llama_model_loader: - kv  30:               general.quantization_version u32              = 2
llama_model_loader: - kv  31:                      quantize.imatrix.file str              = imatrix/oscar/imatrix.dat
llama_model_loader: - kv  32:                   quantize.imatrix.dataset str              = ./imatrix/oscar/imatrix-dataset.txt
llama_model_loader: - kv  33:             quantize.imatrix.entries_count i32              = 168
llama_model_loader: - kv  34:              quantize.imatrix.chunks_count i32              = 44176
llama_model_loader: - type  f32:   49 tensors
llama_model_loader: - type q8_0:  169 tensors
llama_model_loader: - type bf16:    1 tensors
llm_load_vocab: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect
llm_load_vocab: special tokens cache size = 104
llm_load_vocab: token to piece cache size = 1.8842 MB
llm_load_print_meta: format           = GGUF V3 (latest)
llm_load_print_meta: arch             = llama
llm_load_print_meta: vocab type       = SPM
llm_load_print_meta: n_vocab          = 256000
llm_load_print_meta: n_merges         = 0
llm_load_print_meta: vocab_only       = 0
llm_load_print_meta: n_ctx_train      = 8192
llm_load_print_meta: n_embd           = 2048
llm_load_print_meta: n_layer          = 24
llm_load_print_meta: n_head           = 16
llm_load_print_meta: n_head_kv        = 16
llm_load_print_meta: n_rot            = 128
llm_load_print_meta: n_swa            = 0
llm_load_print_meta: n_embd_head_k    = 128
llm_load_print_meta: n_embd_head_v    = 128
llm_load_print_meta: n_gqa            = 1
llm_load_print_meta: n_embd_k_gqa     = 2048
llm_load_print_meta: n_embd_v_gqa     = 2048
llm_load_print_meta: f_norm_eps       = 0.0e+00
llm_load_print_meta: f_norm_rms_eps   = 1.0e-05
llm_load_print_meta: f_clamp_kqv      = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale    = 0.0e+00
llm_load_print_meta: n_ff             = 5440
llm_load_print_meta: n_expert         = 0
llm_load_print_meta: n_expert_used    = 0
llm_load_print_meta: causal attn      = 1
llm_load_print_meta: pooling type     = 0
llm_load_print_meta: rope type        = 0
llm_load_print_meta: rope scaling     = linear
llm_load_print_meta: freq_base_train  = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn  = 8192
llm_load_print_meta: rope_finetuned   = unknown
llm_load_print_meta: ssm_d_conv       = 0
llm_load_print_meta: ssm_d_inner      = 0
llm_load_print_meta: ssm_d_state      = 0
llm_load_print_meta: ssm_dt_rank      = 0
llm_load_print_meta: ssm_dt_b_c_rms   = 0
llm_load_print_meta: model type       = ?B
llm_load_print_meta: model ftype      = Q8_0
llm_load_print_meta: model params     = 2.25 B
llm_load_print_meta: model size       = 2.69 GiB (10.25 BPW) 
llm_load_print_meta: general.name     = n/a
llm_load_print_meta: BOS token        = 1 '<s>'
llm_load_print_meta: EOS token        = 2 '</s>'
llm_load_print_meta: UNK token        = 0 '<unk>'
llm_load_print_meta: PAD token        = 0 '<unk>'
llm_load_print_meta: LF token         = 145 '<0x0A>'
llm_load_print_meta: EOT token        = 5 '<|im_end|>'
llm_load_print_meta: EOG token        = 2 '</s>'
llm_load_print_meta: EOG token        = 5 '<|im_end|>'
llm_load_print_meta: max token length = 72
llm_load_tensors: ggml ctx size =    0.20 MiB
llm_load_tensors: offloading 24 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 25/25 layers to GPU
llm_load_tensors:      Metal buffer size =  2752.45 MiB
llm_load_tensors:        CPU buffer size =   531.25 MiB
..............................................
llama_new_context_with_model: n_ctx      = 8192
llama_new_context_with_model: n_batch    = 512
llama_new_context_with_model: n_ubatch   = 128
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base  = 10000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: Apple M3 Max
ggml_metal_init: picking default device: Apple M3 Max
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name:   Apple M3 Max
ggml_metal_init: GPU family: MTLGPUFamilyApple9  (1009)
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3  (5001)
ggml_metal_init: simdgroup reduction support   = true
ggml_metal_init: simdgroup matrix mul. support = true
ggml_metal_init: hasUnifiedMemory              = true
ggml_metal_init: recommendedMaxWorkingSetSize  = 42949.67 MB
llama_kv_cache_init:      Metal KV buffer size =  1536.00 MiB
llama_new_context_with_model: KV self size  = 1536.00 MiB, K (f16):  768.00 MiB, V (f16):  768.00 MiB
llama_new_context_with_model:        CPU  output buffer size =     0.98 MiB
llama_new_context_with_model:      Metal compute buffer size =    72.00 MiB
llama_new_context_with_model:        CPU compute buffer size =   125.00 MiB
llama_new_context_with_model: graph nodes  = 774
llama_new_context_with_model: graph splits = 3
common_init_from_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)

system_info: n_threads = 15 (n_threads_batch = 15) / 16 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 1 | SVE = 0 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 1 | LLAMAFILE = 1 | 
perplexity: tokenizing the input ..
perplexity: tokenization took 2890.81 ms
perplexity: calculating perplexity over 134 chunks, n_ctx=8192, batch_size=512, n_seq=1
perplexity: 9.66 seconds per pass - ETA 21.57 minutes
[1]17.0900,[2]17.2426,[3]15.6960,[4]15.5004,[5]14.8627,[6]14.4270,[7]15.3004,[8]14.7900,[9]14.5098,[10]13.8086,[11]14.4216,[12]14.4788,[13]15.5017,[14]15.7599,[15]15.7465,[16]16.2741,[17]16.5646,[18]16.4675,[19]16.5092,[20]16.8109,[21]16.8498,[22]14.8777,[23]15.0527,[24]14.6855,[25]14.1820,[26]13.7497,[27]13.5624,[28]13.3915,[29]13.3436,[30]13.1437,[31]13.3746,[32]13.4818,[33]13.9475,[34]14.2460,[35]14.5429,[36]14.3139,[37]14.3042,[38]14.3798,[39]14.2324,[40]14.2660,[41]14.2413,[42]14.0575,[43]14.0072,[44]14.1687,[45]14.3701,[46]14.2268,[47]14.4577,[48]14.5686,[49]14.8366,[50]15.1069,[51]15.1403,[52]15.3456,[53]15.6561,[54]15.9664,[55]16.0721,[56]15.9099,[57]15.8156,[58]15.5525,[59]15.4494,[60]15.2612,[61]15.3116,[62]15.4426,[63]15.6231,[64]15.6839,[65]15.7118,[66]15.8945,[67]15.8694,[68]15.7612,[69]15.6235,[70]15.5194,[71]15.5137,[72]15.4592,[73]15.4688,[74]15.4122,[75]15.3869,[76]15.3275,[77]15.3844,[78]15.3841,[79]15.3929,[80]15.4288,[81]15.1483,[82]15.1268,[83]14.9998,[84]15.0316,[85]15.0785,[86]15.2672,[87]15.2901,[88]15.4407,[89]15.4927,[90]15.6146,[91]15.6685,[92]15.5091,[93]15.5730,[94]15.5610,[95]15.6941,[96]15.8808,[97]15.9547,[98]16.0485,[99]16.1837,[100]16.2244,[101]16.2508,[102]16.2129,[103]16.1850,[104]16.1688,[105]16.1554,[106]16.0302,[107]15.9058,[108]15.9652,[109]15.9820,[110]15.8958,[111]15.8602,[112]15.7149,[113]15.5783,[114]15.5728,[115]15.5477,[116]15.5567,[117]15.4532,[118]15.3271,[119]15.3203,[120]15.3782,[121]15.3928,[122]15.4152,[123]15.4498,[124]15.4654,[125]15.4595,[126]15.4843,[127]15.5078,[128]15.5850,[129]15.5754,[130]15.5533,[131]15.6085,[132]15.5844,[133]15.5300,[134]15.3831,
Final estimate: PPL = 15.3831 +/- 0.06266

llama_perf_context_print:        load time =    1576.71 ms
llama_perf_context_print: prompt eval time = 1364068.65 ms / 1097728 tokens (    1.24 ms per token,   804.75 tokens per second)
llama_perf_context_print:        eval time =       0.00 ms /     1 runs   (    0.00 ms per token,      inf tokens per second)
llama_perf_context_print:       total time = 1400622.10 ms / 1097729 tokens
ggml_metal_free: deallocating
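
# Note: the invocation below is a hedged sketch of how a run like the one logged
# above is typically launched with llama.cpp's llama-perplexity tool. The flags
# mirror the settings visible in the log (n_ctx=8192, batch_size=512, full Metal
# offload); the model filename is taken from the log, but the evaluation text
# file name is an assumption, not something the log records.
./llama-perplexity -m salamandra-2b-instruct_Q8_0.gguf -f eval-text.txt -c 8192 -b 512 -ngl 99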