storresbusquets committed on
Commit
b45bcf7
·
1 Parent(s): a263611

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -26
app.py CHANGED
@@ -205,36 +205,41 @@ class GradioInference:
205
  # results["text"], max_length=150, min_length=30, do_sample=False
206
  # )
207
 
208
- ########################## PRUEBA CHATGPT #################################
209
- from langchain.chat_models import ChatOllama
210
- from langchain.chains.llm import LLMChain
211
- from langchain.prompts import PromptTemplate
212
- from langchain.chains.combine_documents.stuff import StuffDocumentsChain
213
- from langchain.document_loaders import WebBaseLoader
214
 
215
- # OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
216
- loader = WebBaseLoader("https://lilianweng.github.io/posts/2023-06-23-agent/")
217
-
218
- text = results["text"]
219
- # Define prompt
220
- prompt_template = """Write a concise summary of the following:
221
- "{text}"
222
- CONCISE SUMMARY:"""
223
- prompt = PromptTemplate.from_template(prompt_template)
224
-
225
- # Define LLM chain
226
- llm = ChatOllama(model_name="llama2:7b-chat")
227
- llm_chain = LLMChain(llm=llm, prompt=prompt)
228
 
229
- # Define StuffDocumentsChain
230
- stuff_chain = StuffDocumentsChain(
231
- llm_chain=llm_chain, document_variable_name="text"
 
 
 
 
 
 
 
 
 
232
  )
233
-
234
- docs = loader.load()
235
- sum = stuff_chain.run(docs)
236
 
237
- ########################## FIN PRUEBA CHATGPT #################################
 
 
 
 
 
 
 
 
 
 
 
 
 
238
 
239
  #### Prueba
240
  # WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip()))
 
205
  # results["text"], max_length=150, min_length=30, do_sample=False
206
  # )
207
 
208
+ ########################## PRUEBA LLAMA2 #################################
209
+ from langchain import HuggingFacePipeline, PromptTemplate, LLMChain
 
 
 
 
210
 
211
+ llm_model = "meta-llama/Llama-2-7b-chat-hf"
212
+
213
+ llm_tokenizer = AutoTokenizer.from_pretrained(llm_model)
 
 
 
 
 
 
 
 
 
 
214
 
215
+ pipeline = transformers.pipeline(
216
+ "text-generation", #task
217
+ model=llm_model,
218
+ tokenizer=llm_tokenizer,
219
+ torch_dtype=torch.bfloat16,
220
+ trust_remote_code=True,
221
+ device_map="auto",
222
+ max_length=1000,
223
+ do_sample=True,
224
+ top_k=10,
225
+ num_return_sequences=1,
226
+ eos_token_id=llm_tokenizer.eos_token_id
227
  )
 
 
 
228
 
229
+ template = """
230
+ Write a concise summary of the following text delimited by triple backquotes.
231
+ ```{text}```
232
+ CONCISE SUMMARY:
233
+ """
234
+
235
+ prompt = PromptTemplate(template=template, input_variables=["text"])
236
+
237
+ llm_chain = LLMChain(prompt=prompt, llm=HuggingFacePipeline(pipeline=pipeline))
238
+
239
+ text = results["text"]
240
+
241
+ sum = llm_chain.run(text)
242
+ ########################## FIN PRUEBA LLAMA2 #################################
243
 
244
  #### Prueba
245
  # WHITESPACE_HANDLER = lambda k: re.sub('\s+', ' ', re.sub('\n+', ' ', k.strip()))