---
license: llama2
widget:
- example_title: ALMA-Cymraeg-13B
  text: >-
    ### Saesneg:

    For the first time, GPs no longer have to physically print, sign and hand
    a green paper prescription form to the patient or wait for it to be taken
    to the pharmacy. Instead, the prescription is sent electronically from the
    surgery via the IT system to the patient’s chosen pharmacy - even without
    the patient needing to visit the surgery to pick up a repeat prescription
    form.
  output:
    text: >-
      ### Cymraeg:

      Am y tro cyntaf, nid oes rhaid i feddygon teulu bellach argraffu,
      llofnodi a throsglwyddo ffurflen bresgripsiwn werdd i'r claf neu aros
      iddi gael ei chludo i'r fferyllfa. Yn lle hynny, caiff y presgripsiwn ei
      anfon yn electronig gan y practis drwy'r system TG at fferyllfa
      ddewisedig y claf - heb fod angen i'r claf ymweld â'r practis er mwyn
      casglu ffurflen bresgripsiwn ailadrodd.
pipeline_tag: text-generation
---
# BangorAI/ALMA-Cymraeg-13B-0.1-4.0bpw-exl2
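This is a 4.0 bits-per-weight EXL2 quantisation of ALMA-Cymraeg-13B, an English-to-Welsh translation model, for use with the exllamav2 library. One way to fetch the weights, as a minimal sketch assuming `huggingface_hub` is installed (the `local_dir` matches the path the example script below expects):

```python
# Sketch of a download step, assuming: pip install huggingface_hub
from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="BangorAI/ALMA-Cymraeg-13B-0.1-4.0bpw-exl2",
    local_dir="./ALMA-Cymraeg-13B-0.1-4.0bpw-exl2",
)
```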
## Example
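The script below loads the quantised model with exllamav2's streaming generator and translates English input line by line, printing tokens as they are generated. It assumes exllamav2 is installed as a package (e.g. `pip install exllamav2`) rather than run from a source checkout.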
```python
import os
import sys
import time

from exllamav2 import (
    ExLlamaV2,
    ExLlamaV2Cache,
    ExLlamaV2Config,
    ExLlamaV2Tokenizer,
)
from exllamav2.generator import (
    ExLlamaV2Sampler,
    ExLlamaV2StreamingGenerator,
)
class ModelClass:
    """Bundles the generator, tokenizer and model into one object."""

    def __init__(self, generator, tokenizer, model):
        self.generator = generator
        self.tokenizer = tokenizer
        self.model = model


# Set the DEBUG environment variable to print the full prompt before generating.
DEBUG = bool(os.environ.get("DEBUG"))
# Initialise the model and cache
def load_model(model_directory, max_seq_len=8192):
    """Loads a model from a directory and returns the generator and tokenizer."""
    config = ExLlamaV2Config()
    config.model_dir = model_directory
    config.prepare()
    # Set the context length after prepare(), which resets it to the value
    # in the model's own config.json.
    config.max_seq_len = max_seq_len

    print("Loading model: " + model_directory)
    model = ExLlamaV2(config)
    cache = ExLlamaV2Cache(model, lazy=True, max_seq_len=max_seq_len)
    model.load_autosplit(cache)

    tokenizer = ExLlamaV2Tokenizer(config)
    generator = ExLlamaV2StreamingGenerator(model, cache, tokenizer)
    generator.warmup()
    return ModelClass(generator=generator, tokenizer=tokenizer, model=model)
def generate_text(prompt, settings, max_new_tokens):
    """Streams a completion for the prompt and returns the generated text."""
    sys.stdout.flush()
    input_ids = base_model.tokenizer.encode(prompt)
    base_model.generator.set_stop_conditions(["\n"])
    base_model.generator.begin_stream(input_ids, settings)

    response = ""
    generated_tokens = 0
    time_begin = time.time()
    while True:
        chunk, eos, _ = base_model.generator.stream()
        generated_tokens += 1
        response += chunk
        print(chunk, end="")
        sys.stdout.flush()
        if eos or generated_tokens == max_new_tokens:
            break

    time_total = time.time() - time_begin
    print(f"\nFull response in {time_total:.2f} seconds, {generated_tokens} tokens, "
          f"{generated_tokens / time_total:.2f} tokens/second")
    return response
base_model = load_model("./ALMA-Cymraeg-13B-0.1-4.0bpw-exl2")

settings = ExLlamaV2Sampler.Settings()
settings.temperature = 0.15               # adjust as needed, e.g. 0.75
settings.top_k = 90                       # adjust as needed, e.g. 50
settings.top_p = 1.0                      # likewise
settings.token_repetition_penalty = 1.15  # likewise
max_new_tokens = 2000                     # likewise

# The model's system prompt: "Translate the following English text into Welsh."
system_prompt = "Cyfieithwch y testun Saesneg canlynol i'r Gymraeg."

while True:
    user_input = input("Saesneg: ")  # prompt the user for English text
    prompt = f"{system_prompt}\n\n### Saesneg:\n{user_input}\n\n### Cymraeg:\n"
    if DEBUG:
        print(f"{prompt}\n\n")
    print("Cymraeg:")
    response = generate_text(prompt, settings, max_new_tokens)
    print("=" * 132)
```