mjbuehler committed
Commit 245e2c8
1 Parent(s): c1dea12

Update README.md

Files changed (1):
  1. README.md +61 -3
README.md CHANGED
@@ -3,12 +3,68 @@ library_name: transformers
tags: []
---

- # Model Card for Model ID
+ # Model Card for X-LoRA-Gemma-7b

<!-- Provide a quick summary of what the model is/does. -->

-
-
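Loading the model and tokenizer via the xlora utilities: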
+ ```
+ import torch
+ from xlora.xlora_utils import load_model
+ 
+ # Load the X-LoRA model and tokenizer (bfloat16, FlashAttention-2)
+ XLoRa_model_name = 'lamm-mit/x-lora-gemma-7b'
+ device = 'cuda:0'
+ 
+ model, tokenizer = load_model(model_name=XLoRa_model_name,
+                               device=device,
+                               use_flash_attention_2=True,
+                               dtype=torch.bfloat16,
+                               )
+ ```
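X-LoRA computes the adapter mixing weights (scalings) per token and per layer at inference time. A minimal sketch for inspecting them is below; the method names (`enable_scalings_logging`, `get_scalings_log`) follow the xlora package's documentation and should be verified against the installed version:
```
# Sketch: record and inspect X-LoRA's dynamic adapter scalings.
# Method names assumed from the xlora docs; verify for your xlora version.
model.enable_scalings_logging()          # start recording scalings during forward passes

# ... run a generation here (see generate_XLoRA_Gemma below) ...

scalings_log = model.get_scalings_log()  # per-step scaling tensors
print(len(scalings_log))
```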
+ 
+ ```
+ def generate_XLoRA_Gemma(system_prompt='You are a helpful assistant. You are familiar with materials science. ',
+                          prompt='What is spider silk in the context of bioinspired materials?',
+                          repetition_penalty=1., num_beams=1, num_return_sequences=1,
+                          top_p=0.9, top_k=256, temperature=.5, max_new_tokens=512, verbatim=False,
+                          eos_token=None, add_special_tokens=True, prepend_response='',
+                          ):
+     if eos_token is None:
+         eos_token = tokenizer.eos_token_id
+ 
+     # Build the chat-formatted prompt (Gemma's chat template has no system
+     # role, so the system prompt is prepended to the user message).
+     if system_prompt is None:
+         messages = [{"role": "user", "content": prompt}]
+     else:
+         messages = [{"role": "user", "content": system_prompt + prompt}]
+     txt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     txt = txt + prepend_response
+ 
+     inputs = tokenizer(txt, add_special_tokens=add_special_tokens, return_tensors='pt').to(device)
+     with torch.no_grad():
+         outputs = model.generate(input_ids=inputs["input_ids"],
+                                  attention_mask=inputs["attention_mask"],
+                                  max_new_tokens=max_new_tokens,
+                                  temperature=temperature,  # modulates the next-token probabilities
+                                  num_beams=num_beams,
+                                  top_k=top_k,
+                                  top_p=top_p,
+                                  num_return_sequences=num_return_sequences,
+                                  eos_token_id=eos_token,
+                                  pad_token_id=eos_token,
+                                  do_sample=True,
+                                  repetition_penalty=repetition_penalty,
+                                  )
+     # Decode only the newly generated tokens (strip the prompt).
+     return tokenizer.batch_decode(outputs[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+ ```
+ Then, use as follows:
+ ```
+ from IPython.display import display, Markdown
+ 
+ q = '''What is graphene?'''
+ res = generate_XLoRA_Gemma(system_prompt='You design materials.',
+                            prompt=q, max_new_tokens=1024, temperature=0.3, )
+ 
+ # generate_XLoRA_Gemma returns a list of decoded strings; render the first
+ display(Markdown(res[0]))
+ ```
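Responses can also be streamed token by token. A minimal sketch using transformers' `TextStreamer`, reusing the `model`, `tokenizer`, and `device` defined above:
```
from transformers import TextStreamer

# Print tokens to stdout as they are generated; skip echoing the prompt.
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

messages = [{"role": "user", "content": "What is graphene?"}]
txt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(txt, return_tensors='pt').to(device)

with torch.no_grad():
    _ = model.generate(**inputs, max_new_tokens=256, do_sample=True,
                       temperature=0.3, streamer=streamer)
```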
## Model Details

### Model Description
 
@@ -37,6 +93,8 @@ This is the model card of a 🤗 transformers model that has been pushed on the

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

+
+
### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->