Nikola299 committed
Commit
2fa835e
1 Parent(s): 0e7f609

Update app.py

Files changed (1):
  app.py: +30 -3
app.py CHANGED
@@ -1,9 +1,36 @@
 import streamlit as st
-from transformers import pipeline
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+
+torch.random.manual_seed(0)
+model = AutoModelForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct",trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
 
-pipe = pipeline('sentiment-analysis')
 text = st.text_area("Enter text....")
+messages = [
+    {"role": "system", "content": "You are a helpful AI assistant."},
+    {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
+    {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
+    {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"},
+]
+
+
+pipe = pipeline(
+    "text-generation",
+    model=model,
+    tokenizer=tokenizer,
+)
+
+generation_args = {
+    "max_new_tokens": 500,
+    "return_full_text": False,
+    "temperature": 0.0,
+    "do_sample": False,
+}
+
+output = pipe(messages, **generation_args)
 
 if text:
     out = pipe(text)
-    st.json(out)
+    st.write(out)
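
For reference, here is a minimal sketch (not the committed code) of how the updated app.py could route the Streamlit text box through the new Phi-3 pipeline: the committed version builds messages and generation_args but still calls pipe(text) on the raw string and never uses output. The sketch assumes the same model as the commit and the chat-message input format accepted by recent transformers text-generation pipelines; the user-turn content is a placeholder for the text-area input rather than the hard-coded example from the commit.

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

torch.random.manual_seed(0)

# Same model as in the commit; trust_remote_code is needed for Phi-3's custom code.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Generation settings copied from the commit.
generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,
    "temperature": 0.0,
    "do_sample": False,
}

text = st.text_area("Enter text....")

if text:
    # Assumption: the user's input replaces the hard-coded example turns,
    # and is wrapped in chat messages instead of being passed as a raw string.
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": text},
    ]
    out = pipe(messages, **generation_args)
    # With return_full_text=False, generated_text holds only the new tokens.
    st.write(out[0]["generated_text"])

In a Space, wrapping the model and tokenizer loading in a function decorated with st.cache_resource would also keep them from being reloaded on every Streamlit rerun.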