from flask import Flask, render_template, request, jsonify
from transformers import pipeline, LlamaTokenizer, LlamaForCausalLM

import fitz  # PyMuPDF, used to extract the prompt text from a PDF

# Load the LLaMA model and tokenizer
model_name = "NousResearch/Llama-2-7b-chat-hf"  # Replace with the specific LLaMA model you want to use
tokenizer = LlamaTokenizer.from_pretrained(model_name)
model = LlamaForCausalLM.from_pretrained(model_name)

# Initialize the text generation pipeline
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
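
# Note (optional, untested sketch): a 7B model in full precision needs
# roughly 28 GB of RAM on CPU. Assuming torch and accelerate are installed,
# the model can instead be loaded in half precision across available GPUs:
#
#   import torch
#   model = LlamaForCausalLM.from_pretrained(
#       model_name, torch_dtype=torch.float16, device_map="auto"
#   )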

# Extract the full text of a PDF so it can be used as context for the LLM
def extract_text_from_pdf(pdf_path):
    document = fitz.open(pdf_path)
    text = ""
    for page_num in range(document.page_count):
        page = document.load_page(page_num)
        text += page.get_text()
    document.close()  # release the file handle once the text is extracted
    return text

# Load the hotel context from the PDF file
pdf_path = 'Landon_Hotel.pdf'
prompt = extract_text_from_pdf(pdf_path)

# Keep the instruction template separate from the PDF text: str.format() is
# applied only to the template below, so stray braces in the PDF text cannot
# break the formatting.
hotel_assistant_template = """
You are the hotel manager of Landon Hotel, named "Mr. Landon".
Your expertise is exclusively in providing information and advice about anything related to Landon Hotel.
This includes any general Landon Hotel related queries.
You do not provide information outside of this scope.
If a question is not about Landon Hotel, respond with, "I can't assist you with that, sorry!"
Question: {question}
Answer:
"""

def query_llm(question):
    # Prepend the PDF context, then insert the question into the template
    final_prompt = prompt + hotel_assistant_template.format(question=question)

    # max_new_tokens bounds only the generated text (max_length would count
    # the long PDF prompt and could leave no room for the answer), and
    # return_full_text=False strips the prompt from the pipeline output.
    response = llm_pipeline(
        final_prompt, max_new_tokens=150, do_sample=True, return_full_text=False
    )[0]['generated_text']

    return response.strip()
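
# Optional: sampling can be tuned with standard generation kwargs, e.g. (a
# hedged sketch; the values are illustrative, not tuned for this model):
#
#   llm_pipeline(final_prompt, max_new_tokens=150, do_sample=True,
#                temperature=0.7, top_p=0.9, return_full_text=False)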

app = Flask(__name__) 

@app.route("/") 
def index(): 
    return render_template("index.html") 

@app.route("/chatbot", methods=["POST"]) 
def chatbot(): 
    data = request.get_json() 
    question = data["question"] 
    response = query_llm(question) 
    return jsonify({"response": response}) 

if __name__ == "__main__": 
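    # debug=True is intended for local development only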
    app.run(debug=True)
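
# Example client call (a minimal sketch; assumes the server is running on
# Flask's default port 5000 and that the requests package is installed):
#
#   import requests
#   resp = requests.post(
#       "http://127.0.0.1:5000/chatbot",
#       json={"question": "What time is check-in at Landon Hotel?"},
#   )
#   print(resp.json()["response"])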