---
license: llama2
widget:
  - example_title: "ALMA-Cymraeg-13B"
    text: "### Saesneg:\nFor the first time, GPs no longer have to physically print, sign and hand a green paper prescription form to the patient or wait for it to be taken to the pharmacy. Instead, the prescription is sent electronically from the surgery via the IT system to the patient’s chosen pharmacy - even without the patient needing to visit the surgery to pick up a repeat prescription form."
    output:
      text: "\n### Cymraeg:\nAm y tro cyntaf, nid oes rhaid i feddygon teulu bellach argraffu, llofnodi a throsglwyddo ffurflen bresgripsiwn werdd i'r claf neu aros iddi gael ei chludo i'r fferyllfa. Yn lle hynny, caiff y presgripsiwn ei anfon yn electronig gan y practis drwy'r system TG at fferyllfa ddewisedig y claf - heb fod angen i'r claf ymweld â'r practis er mwyn casglu ffurflen bresgripsiwn ailadrodd."
pipeline_tag: text-generation
---

# BangorAI/ALMA-Cymraeg-13B-0.1-4.0bpw-exl2

A 4.0 bits-per-weight EXL2 quantisation of ALMA-Cymraeg-13B, an English-to-Welsh translation model, for use with [exllamav2](https://github.com/turboderp/exllamav2).
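
#### Download

To run the example below you need a local copy of the weights. A minimal sketch using `huggingface_hub` (an assumption; any download method works, as long as the files end up in the directory the example script expects):

```python
from huggingface_hub import snapshot_download

# Fetch the EXL2 weights into the directory used by the example script below
snapshot_download(
    repo_id="BangorAI/ALMA-Cymraeg-13B-0.1-4.0bpw-exl2",
    local_dir="./ALMA-Cymraeg-13B-0.1-4.0bpw-exl2",
)
```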

#### Example

```python
import os
import sys
import time

from exllamav2 import (
    ExLlamaV2,
    ExLlamaV2Config,
    ExLlamaV2Cache,
    ExLlamaV2Tokenizer,
)

from exllamav2.generator import (
    ExLlamaV2StreamingGenerator,
    ExLlamaV2Sampler
)


class ModelClass:
    """Simple container bundling the generator, tokenizer and underlying model."""
    def __init__(self, generator, tokenizer, model):
        self.generator = generator
        self.tokenizer = tokenizer
        self.model = model

DEBUG = bool(os.environ.get("DEBUG"))  # set the DEBUG environment variable to echo prompts

# Initialise the model and cache
def load_model(model_directory, max_seq_len=8192):
    """
    Loads a model from a directory and returns it wrapped with its generator and tokenizer.
    """
    config = ExLlamaV2Config()
    config.model_dir = model_directory
    config.max_seq_len = max_seq_len
    config.prepare()

    model = ExLlamaV2(config)
    print("Loading model: " + model_directory)

    # Lazy cache allocation lets load_autosplit spread the model across available GPUs
    cache = ExLlamaV2Cache(model, lazy=True, max_seq_len=max_seq_len)
    model.load_autosplit(cache)

    tokenizer = ExLlamaV2Tokenizer(config)
    generator = ExLlamaV2StreamingGenerator(model, cache, tokenizer)
    model = ModelClass(generator=generator, tokenizer=tokenizer, model=model)
    generator.warmup()
    return model

def generate_text(prompt, settings, max_new_tokens):
    """Streams a completion for `prompt`, printing it as it arrives, and returns the text."""
    sys.stdout.flush()
    input_ids = base_model.tokenizer.encode(prompt)
    generated_tokens = 0
    response_text = ""
    base_model.generator.set_stop_conditions(["\n"])
    base_model.generator.begin_stream(input_ids, settings)
    time_begin = time.time()

    # Stream tokens until EOS, a stop condition or the token budget is reached
    while True:
        chunk, eos, _ = base_model.generator.stream()
        generated_tokens += 1
        response_text += chunk
        print(chunk, end="")
        sys.stdout.flush()
        if eos or generated_tokens == max_new_tokens:
            break

    time_end = time.time()
    time_total = time_end - time_begin
    print(f"\nFull response in {time_total:.2f} seconds, {generated_tokens} tokens, {generated_tokens / time_total:.2f} tokens/second")
    return response_text

base_model = load_model("./ALMA-Cymraeg-13B-0.1-4.0bpw-exl2")

settings = ExLlamaV2Sampler.Settings()
settings.temperature = 0.15               # change as needed, e.g. 0.75
settings.top_k = 90                       # change as needed, e.g. 50
settings.top_p = 1.0                      # etc.
settings.token_repetition_penalty = 1.15  # etc.
max_new_tokens = 2000                     # etc.

# The instruction must stay in Welsh; it means "Translate the following English text into Welsh."
system_prompt = "Cyfieithwch y testun Saesneg canlynol i'r Gymraeg."

while True:
    user_input = input("Saesneg: ")

    # The Welsh section headers ("Saesneg" = English, "Cymraeg" = Welsh) are the
    # prompt format the model was trained on; do not translate them.
    prompt = f"{system_prompt}\n\n### Saesneg:\n{user_input}\n\n### Cymraeg:\n"
    if DEBUG:
        print(f"{prompt}\n\n")
    print("Cymraeg:")
    response = generate_text(prompt, settings, max_new_tokens)
    print("=" * 132)
```
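
If streaming output is not needed, exllamav2 also ships a simple one-shot generator. A minimal sketch, assuming the same exllamav2 release as the example above (the generator API has changed across versions, so treat this as illustrative); `generate_simple` returns the prompt followed by the completion:

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

config = ExLlamaV2Config()
config.model_dir = "./ALMA-Cymraeg-13B-0.1-4.0bpw-exl2"
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)
tokenizer = ExLlamaV2Tokenizer(config)

generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)

settings = ExLlamaV2Sampler.Settings()
settings.temperature = 0.15
settings.top_k = 90
settings.top_p = 1.0
settings.token_repetition_penalty = 1.15

# Same Welsh prompt format as the streaming example
system_prompt = "Cyfieithwch y testun Saesneg canlynol i'r Gymraeg."
text = "Good morning, how are you today?"  # hypothetical input
prompt = f"{system_prompt}\n\n### Saesneg:\n{text}\n\n### Cymraeg:\n"

output = generator.generate_simple(prompt, settings, 200)
print(output[len(prompt):])  # strip the echoed prompt, keep the Welsh translation
```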