❯ build/bin/llama-perplexity -m gguf/makeself/Llama-3.3-70B-Instruct-Q2_K/Llama-3.3-70B-Instruct_Q2_K.gguf -f ../ai/jailbreaking/model_check_and_datasets/wikitext-2-raw/wiki.test.raw 2>&1 | tee -a Perplexity_Llama-3.3-70B-Instruct-Q2_K.txt
build: 3821 (70392f1f) with Apple clang version 15.0.0 (clang-1500.3.9.4) for arm64-apple-darwin23.6.0
llama_model_loader: loaded meta data with 28 key-value pairs and 724 tensors from gguf/makeself/Llama-3.3-70B-Instruct-Q2_K/Llama-3.3-70B-Instruct_Q2_K.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = 5825c9120fc701a0b7d9a30d61005f2a09466b74
llama_model_loader: - kv 3: general.finetune str = 5825c9120fc701a0b7d9a30d61005f2a09466b74
llama_model_loader: - kv 4: general.size_label str = 71B
llama_model_loader: - kv 5: llama.block_count u32 = 80
llama_model_loader: - kv 6: llama.context_length u32 = 131072
llama_model_loader: - kv 7: llama.embedding_length u32 = 8192
llama_model_loader: - kv 8: llama.feed_forward_length u32 = 28672
llama_model_loader: - kv 9: llama.attention.head_count u32 = 64
llama_model_loader: - kv 10: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 11: llama.rope.freq_base f32 = 500000.000000
llama_model_loader: - kv 12: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 13: llama.attention.key_length u32 = 128
llama_model_loader: - kv 14: llama.attention.value_length u32 = 128
llama_model_loader: - kv 15: general.file_type u32 = 10
llama_model_loader: - kv 16: llama.vocab_size u32 = 128256
llama_model_loader: - kv 17: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 18: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 19: tokenizer.ggml.pre str = llama-bpe
llama_model_loader: - kv 20: tokenizer.ggml.tokens arr[str,128256] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 21: tokenizer.ggml.token_type arr[i32,128256] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 22: tokenizer.ggml.merges arr[str,280147] = ["Ġ Ġ", "Ġ ĠĠĠ", "ĠĠ ĠĠ", "...
llama_model_loader: - kv 23: tokenizer.ggml.bos_token_id u32 = 128000
llama_model_loader: - kv 24: tokenizer.ggml.eos_token_id u32 = 128009
llama_model_loader: - kv 25: tokenizer.ggml.padding_token_id u32 = 128004
llama_model_loader: - kv 26: tokenizer.chat_template str = {{- bos_token }}\n{%- if custom_tools ...
llama_model_loader: - kv 27: general.quantization_version u32 = 2
llama_model_loader: - type f32: 162 tensors
llama_model_loader: - type q2_K: 321 tensors
llama_model_loader: - type q3_K: 160 tensors
llama_model_loader: - type q5_K: 80 tensors
llama_model_loader: - type q6_K: 1 tensors
llm_load_vocab: special tokens cache size = 256
llm_load_vocab: token to piece cache size = 0.7999 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 128256
llm_load_print_meta: n_merges = 280147
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 131072
llm_load_print_meta: n_embd = 8192
llm_load_print_meta: n_layer = 80
llm_load_print_meta: n_head = 64
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 8
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 28672
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 500000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 131072
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 70B
llm_load_print_meta: model ftype = Q2_K - Medium
llm_load_print_meta: model params = 70.55 B
llm_load_print_meta: model size = 24.56 GiB (2.99 BPW)
llm_load_print_meta: general.name = 5825c9120fc701a0b7d9a30d61005f2a09466b74
llm_load_print_meta: BOS token = 128000 '<|begin_of_text|>'
llm_load_print_meta: EOS token = 128009 '<|eot_id|>'
llm_load_print_meta: PAD token = 128004 '<|finetune_right_pad_id|>'
llm_load_print_meta: LF token = 128 'Ä'
llm_load_print_meta: EOT token = 128009 '<|eot_id|>'
llm_load_print_meta: EOM token = 128008 '<|eom_id|>'
llm_load_print_meta: EOG token = 128008 '<|eom_id|>'
llm_load_print_meta: EOG token = 128009 '<|eot_id|>'
llm_load_print_meta: max token length = 256
llm_load_tensors: ggml ctx size = 0.68 MiB
llm_load_tensors: offloading 80 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 81/81 layers to GPU
llm_load_tensors: CPU buffer size = 328.78 MiB
llm_load_tensors: Metal buffer size = 25145.77 MiB
..................................................................................................
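Quick cross-check (my own arithmetic, not part of the tool's output): the reported "model size = 24.56 GiB (2.99 BPW)" follows directly from the reported parameter count and average bits per weight.

# Hypothetical sanity check, not output of the run above:
# verify the reported model size against params * bits-per-weight.
params = 70.55e9   # "model params = 70.55 B" from llm_load_print_meta
bpw = 2.99         # average bits per weight for this Q2_K tensor mix
size_gib = params * bpw / 8 / 2**30
print(f"{size_gib:.2f} GiB")  # -> 24.56 GiB, matching the log
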
llama_new_context_with_model: n_ctx = 2048
llama_new_context_with_model: n_batch = 2048
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 500000.0
llama_new_context_with_model: freq_scale = 1
ggml_metal_init: allocating
ggml_metal_init: found device: Apple M1 Max
ggml_metal_init: picking default device: Apple M1 Max
ggml_metal_init: using embedded metal library
ggml_metal_init: GPU name: Apple M1 Max
ggml_metal_init: GPU family: MTLGPUFamilyApple7 (1007)
ggml_metal_init: GPU family: MTLGPUFamilyCommon3 (3003)
ggml_metal_init: GPU family: MTLGPUFamilyMetal3 (5001)
ggml_metal_init: simdgroup reduction support = true
ggml_metal_init: simdgroup matrix mul. support = true
ggml_metal_init: hasUnifiedMemory = true
ggml_metal_init: recommendedMaxWorkingSetSize = 51539.61 MB
llama_kv_cache_init: Metal KV buffer size = 640.00 MiB
llama_new_context_with_model: KV self size = 640.00 MiB, K (f16): 320.00 MiB, V (f16): 320.00 MiB
llama_new_context_with_model: CPU output buffer size = 1.96 MiB
llama_new_context_with_model: Metal compute buffer size = 324.00 MiB
llama_new_context_with_model: CPU compute buffer size = 20.01 MiB
llama_new_context_with_model: graph nodes = 2566
llama_new_context_with_model: graph splits = 2
llama_init_from_gpt_params: warming up the model with an empty run - please wait ... (--no-warmup to disable)

system_info: n_threads = 8 (n_threads_batch = 8) / 10 | AVX = 0 | AVX_VNNI = 0 | AVX2 = 0 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | AVX512_BF16 = 0 | FMA = 0 | NEON = 1 | SVE = 0 | ARM_FMA = 1 | F16C = 0 | FP16_VA = 1 | RISCV_VECT = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 0 | SSSE3 = 0 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
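Side note (my own arithmetic, not printed by the tool): the "KV self size = 640.00 MiB" above is consistent with the earlier metadata, assuming f16 cache entries for both K and V.

# Hypothetical cross-check of the KV cache size at n_ctx = 2048:
n_layer = 80        # llm_load_print_meta: n_layer
n_ctx = 2048        # llama_new_context_with_model: n_ctx
n_embd_kv = 1024    # llm_load_print_meta: n_embd_k_gqa (= n_embd_v_gqa)
f16_bytes = 2       # K and V are cached as f16 in this run
k_mib = n_layer * n_ctx * n_embd_kv * f16_bytes / 2**20
print(k_mib, 2 * k_mib)  # -> 320.0 MiB each for K and V, 640.0 MiB total
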
perplexity: tokenizing the input ..
perplexity: tokenization took 243.868 ms
perplexity: calculating perplexity over 564 chunks, n_ctx=512, batch_size=2048, n_seq=4
perplexity: 77.44 seconds per pass - ETA 3 hours 1.98 minutes
[1]4.5888,[2]5.3815,[3]5.2988,[4]5.5173,[5]5.5963,[6]5.8407,[7]6.0682,[8]6.3110,[9]6.7393,[10]6.8626,[11]6.9609,[12]7.1558,[13]7.5128,[14]7.3059,[15]7.2831,[16]7.1360,[17]7.1588,[18]7.3722,[19]7.2090,[20]7.0883,[21]7.1152,[22]6.8748,[23]6.6500,[24]6.5364,[25]6.3574,[26]6.3051,[27]6.2508,[28]6.1829,[29]6.2337,[30]6.2603,[31]6.2697,[32]6.2566,[33]6.3009,[34]6.3396,[35]6.4099,[36]6.4724,[37]6.4606,[38]6.4909,[39]6.4878,[40]6.5105,[41]6.5250,[42]6.4719,[43]6.5094,[44]6.4610,[45]6.5861,[46]6.6055,[47]6.5995,[48]6.5737,[49]6.5488,[50]6.5933,[51]6.6393,[52]6.6151,[53]6.7276,[54]6.7254,[55]6.7511,[56]6.7813,[57]6.7996,[58]6.8166,[59]6.7653,[60]6.8161,[61]6.8704,[62]6.9294,[63]6.9893,[64]7.0540,[65]7.0492,[66]7.0453,[67]7.0214,[68]7.0502,[69]7.0807,[70]7.0807,[71]7.0625,[72]7.0301,[73]7.0028,[74]7.0025,[75]6.9449,[76]6.8908,[77]6.8400,[78]6.8408,[79]6.8464,[80]6.8567,[81]6.8381,[82]6.8741,[83]6.8818,[84]6.8671,[85]6.8649,[86]6.8532,[87]6.9242,[88]6.9263,[89]6.9330,[90]6.9365,[91]6.9307,[92]6.9290,[93]6.9163,[94]6.9195,[95]6.9101,[96]6.9376,[97]6.9515,[98]6.9518,[99]6.9665,[100]6.9611,[101]6.9645,[102]6.9861,[103]7.0134,[104]7.0562,[105]7.0529,[106]7.1047,[107]7.1319,[108]7.1422,[109]7.1913,[110]7.2340,[111]7.2563,[112]7.2289,[113]7.2235,[114]7.2237,[115]7.2098,[116]7.2132,[117]7.2123,[118]7.1991,[119]7.1888,[120]7.1709,[121]7.1513,[122]7.1331,[123]7.1092,[124]7.0695,[125]7.0352,[126]7.0101,[127]6.9843,[128]6.9852,[129]6.9870,[130]6.9914,[131]7.0014,[132]6.9919,[133]6.9689,[134]6.9789,[135]6.9693,[136]6.9745,[137]6.9811,[138]7.0070,[139]7.0286,[140]7.0122,[141]6.9829,[142]6.9525,[143]6.9123,[144]6.8824,[145]6.8394,[146]6.8099,[147]6.7823,[148]6.7617,[149]6.7405,[150]6.7216,[151]6.6950,[152]6.6729,[153]6.6513,[154]6.6193,[155]6.5995,[156]6.5856,[157]6.5560,[158]6.5495,[159]6.5280,[160]6.5152,[161]6.5330,[162]6.5350,[163]6.5558,[164]6.5654,[165]6.5945,[166]6.6267,[167]6.6491,[168]6.6863,[169]6.7043,[170]6.7337,[171]6.7762,[172]6.7961,[173]6.7974,[174]6.7808,[175]6.7994,[176]6.8023,[177]6.8062,[178]6.8098,[179]6.8036,[180]6.8025,[181]6.8116,[182]6.8199,[183]6.8388,[184]6.8534,[185]6.8687,[186]6.8836,[187]6.9065,[188]6.9235,[189]6.9357,[190]6.9493,[191]6.9437,[192]6.9388,[193]6.9252,[194]6.9230,[195]6.9526,[196]6.9558,[197]6.9684,[198]6.9645,[199]6.9553,[200]6.9438,[201]6.9191,[202]6.9131,[203]6.8916,[204]6.8847,[205]6.8777,[206]6.8651,[207]6.8578,[208]6.8692,[209]6.8846,[210]6.8863,[211]6.8715,[212]6.8500,[213]6.8459,[214]6.8510,[215]6.8433,[216]6.8506,[217]6.8349,[218]6.8200,[219]6.8129,[220]6.8119,[221]6.7920,[222]6.7841,[223]6.7734,[224]6.7668,[225]6.7719,[226]6.7676,[227]6.7462,[228]6.7422,[229]6.7291,[230]6.7166,[231]6.7198,[232]6.7231,[233]6.7336,[234]6.7317,[235]6.7415,[236]6.7473,[237]6.7622,[238]6.7757,[239]6.7855,[240]6.7910,[241]6.7995,[242]6.8133,[243]6.8196,[244]6.8436,[245]6.8673,[246]6.8717,[247]6.8722,[248]6.8829,[249]6.8745,[250]6.8475,[251]6.8317,[252]6.8097,[253]6.7960,[254]6.7926,[255]6.7913,[256]6.7858,[257]6.7821,[258]6.7733,[259]6.7649,[260]6.7522,[261]6.7365,[262]6.7238,[263]6.7123,[264]6.6935,[265]6.6880,[266]6.6714,[267]6.6637,[268]6.6506,[269]6.6417,[270]6.6308,[271]6.6213,[272]6.6174,[273]6.5924,[274]6.5770,[275]6.5779,[276]6.5807,[277]6.5680,[278]6.5598,[279]6.5585,[280]6.5688,[281]6.5779,[282]6.5887,[283]6.5921,[284]6.5930,[285]6.6092,[286]6.6090,[287]6.6148,[288]6.6075,[289]6.6051,[290]6.6061,[291]6.6073,[292]6.6007,[293]6.6037,[294]6.6107,[295]6.6125,[296]6.6157,[297]6.6145,[298]6.6102,[299]6.6136,[300]6.6192,[301]6.6143,[302]6.6090,[303]6.6098,[304]6.6012,[305]6.5996,[306]6.6106,[307]6.6154,[308]6.6153,[309]6.6188,[310]6.6103,[311]6.6110,[312]6.6150,[313]6.6262,[314]6.6437,[315]6.6483,[316]6.6562,[317]6.6514,[318]6.6552,[319]6.6516,[320]6.6446,[321]6.6457,[322]6.6442,[323]6.6379,[324]6.6443,[325]6.6336,[326]6.6355,[327]6.6371,[328]6.6321,[329]6.6278,[330]6.6136,[331]6.6200,[332]6.6179,[333]6.6141,[334]6.6101,[335]6.5982,[336]6.5927,[337]6.5846,[338]6.5799,[339]6.5752,[340]6.5781,[341]6.5790,[342]6.5837,[343]6.5923,[344]6.6019,[345]6.6029,[346]6.6055,[347]6.6096,[348]6.6168,[349]6.6222,[350]6.6145,[351]6.6091,[352]6.6130,[353]6.6294,[354]6.6424,[355]6.6521,[356]6.6602,[357]6.6706,[358]6.6833,[359]6.6936,[360]6.6976,[361]6.6978,[362]6.7041,[363]6.7075,[364]6.7057,[365]6.7097,[366]6.7223,[367]6.7260,[368]6.7343,[369]6.7368,[370]6.7439,[371]6.7542,[372]6.7654,[373]6.7653,[374]6.7605,[375]6.7512,[376]6.7517,[377]6.7637,[378]6.7742,[379]6.7732,[380]6.7664,[381]6.7591,[382]6.7624,[383]6.7703,[384]6.7743,[385]6.7774,[386]6.7811,[387]6.7841,[388]6.7887,[389]6.7923,[390]6.7814,[391]6.7713,[392]6.7630,[393]6.7620,[394]6.7610,[395]6.7562,[396]6.7551,[397]6.7626,[398]6.7596,[399]6.7529,[400]6.7543,[401]6.7516,[402]6.7433,[403]6.7427,[404]6.7395,[405]6.7389,[406]6.7354,[407]6.7328,[408]6.7271,[409]6.7257,[410]6.7206,[411]6.7196,[412]6.7130,[413]6.7138,[414]6.7208,[415]6.7285,[416]6.7273,[417]6.7183,[418]6.7202,[419]6.7158,[420]6.7146,[421]6.7157,[422]6.7102,[423]6.7085,[424]6.7021,[425]6.6909,[426]6.6909,[427]6.6875,[428]6.6827,[429]6.6720,[430]6.6730,[431]6.6647,[432]6.6569,[433]6.6511,[434]6.6482,[435]6.6366,[436]6.6378,[437]6.6357,[438]6.6327,[439]6.6319,[440]6.6312,[441]6.6355,[442]6.6382,[443]6.6551,[444]6.6588,[445]6.6563,[446]6.6561,[447]6.6569,[448]6.6622,[449]6.6630,[450]6.6623,[451]6.6640,[452]6.6721,[453]6.6765,[454]6.6751,[455]6.6790,[456]6.6734,[457]6.6758,[458]6.6670,[459]6.6712,[460]6.6809,[461]6.6820,[462]6.6811,[463]6.6731,[464]6.6769,[465]6.6918,[466]6.6986,[467]6.6967,[468]6.7008,[469]6.6987,[470]6.6979,[471]6.6968,[472]6.6926,[473]6.6875,[474]6.6853,[475]6.6853,[476]6.6850,[477]6.6784,[478]6.6771,[479]6.6726,[480]6.6735,[481]6.6746,[482]6.6795,[483]6.6751,[484]6.6772,[485]6.6739,[486]6.6766,[487]6.6834,[488]6.6867,[489]6.6892,[490]6.6936,[491]6.6937,[492]6.6983,[493]6.7036,[494]6.7067,[495]6.7061,[496]6.7057,[497]6.7058,[498]6.7046,[499]6.7053,[500]6.7039,[501]6.7000,[502]6.7018,[503]6.7042,[504]6.7041,[505]6.7003,[506]6.7027,[507]6.7051,[508]6.7122,[509]6.7098,[510]6.7109,[511]6.7062,[512]6.7053,[513]6.7049,[514]6.7041,[515]6.7022,[516]6.7049,[517]6.7059,[518]6.7011,[519]6.7033,[520]6.7060,[521]6.7064,[522]6.7139,[523]6.7151,[524]6.7119,[525]6.7111,[526]6.7118,[527]6.7144,[528]6.7115,[529]6.7025,[530]6.6933,[531]6.6987,[532]6.6911,[533]6.6864,[534]6.6725,[535]6.6649,[536]6.6627,[537]6.6653,[538]6.6678,[539]6.6690,[540]6.6741,[541]6.6788,[542]6.6846,[543]6.6923,[544]6.6992,[545]6.6982,[546]6.7035,[547]6.7048,[548]6.6978,[549]6.6949,[550]6.6847,[551]6.6850,[552]6.6852,[553]6.6882,[554]6.6880,[555]6.6870,[556]6.6824,[557]6.6770,[558]6.6755,[559]6.6743,[560]6.6773,[561]6.6801,[562]6.6907,[563]6.6856,[564]6.6865,
Final estimate: PPL = 6.6865 +/- 0.04336

llama_perf_context_print: load time = 1846.20 ms
llama_perf_context_print: prompt eval time = 10450379.66 ms / 288768 tokens ( 36.19 ms per token, 27.63 tokens per second)
llama_perf_context_print: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_perf_context_print: total time = 10459401.32 ms / 288769 tokens
ggml_metal_free: deallocating
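For reference, the running [n]x.xxxx values are cumulative perplexity estimates over the first n 512-token chunks, and the final line is the estimate over all 564 chunks. A minimal sketch of the underlying definition (the standard exp-of-mean-negative-log-likelihood; llama.cpp's exact chunking and context-window handling may differ):

import math

def perplexity(token_logprobs):
    # Perplexity is exp of the mean negative log-likelihood
    # of the evaluated tokens.
    nll = -sum(token_logprobs) / len(token_logprobs)
    return math.exp(nll)

# The printed ETA also checks out (my own arithmetic): 564 chunks at
# n_seq = 4 means 141 passes, and 141 * 77.44 s ~= 181.98 min,
# i.e. the "ETA 3 hours 1.98 minutes" shown above.
print(564 / 4 * 77.44 / 60)  # -> ~181.98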