File size: 3,540 Bytes
49c5d55
 
 
 
 
 
 
 
 
fe76245
49c5d55
ddaa7c3
 
fe76245
49c5d55
fe76245
49c5d55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import os

from dotenv import load_dotenv
from huggingface_hub import login
from langchain.schema.output_parser import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint

# Load variables from a local .env file once, before any environment lookups.
load_dotenv()

# Authenticate with the Hugging Face Hub; raises KeyError if the token is unset.
login(token=os.environ["HUGGING_FACE_API_KEY"])
# NOTE(review): this disables TLS certificate verification for requests made
# via curl-backed clients — presumably a proxy workaround; confirm intended.
os.environ['CURL_CA_BUNDLE'] = ''

class Bot():
    """Route a user prompt to a Groq, Hugging Face, or Google chat model.

    The provider is chosen by membership of the model id in one of the
    per-provider lists built in ``__init__``; every call method returns the
    model's text reply, or an ``"Error: ..."`` string on failure.
    """

    def __init__(self):
        # Model ids grouped by provider; `response` dispatches on these.
        self.groq_models = ['gemma-7b-it', 'llama3-70b-8192',
                            'llama3-8b-8192', 'mixtral-8x7b-32768']
        self.hf_models = ["01-ai/Yi-1.5-34B-Chat", "google/gemma-1.1-2b-it",
                          "google/gemma-1.1-7b-it"]
        self.google_models = ["gemini-1.0-pro", "gemini-1.5-flash",
                              "gemini-1.5-pro"]
        # Full catalogue, derived from the provider lists so it can never
        # drift out of sync with them (same order as the old literal).
        self.models = self.google_models + self.hf_models + self.groq_models

    def call_groq(self, model, temp = 0.7, given_prompt = "Hi"):
        """Send `given_prompt` to a Groq-hosted `model` and return its reply.

        Returns the model's text, or an "Error: ..." string on any failure.
        """
        try:
            llm = ChatGroq(
                temperature=temp,
                model= model
            )

            system = "You are a helpful assistant."
            human = "{text}"
            prompt = ChatPromptTemplate.from_messages([("system", system), ("human", human)])

            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})

        except Exception as e:
            return f"Error: {str(e)}"

    def call_hf(self,model, temp = 0.7, given_prompt = "Hi"):
        """Send `given_prompt` to a Hugging Face endpoint `model` (repo id).

        Returns the model's text, or an "Error: ..." string on any failure.
        """
        try:
            llm = HuggingFaceEndpoint(
                repo_id=model,
                temperature=temp
                )

            chat = ChatHuggingFace(llm=llm, verbose=True)

            template = """
                You are a helpful assistant

                User: {query}

                Answer:
            """

            prompt = PromptTemplate(
                template=template,
                input_variables=["query"]
            )

            chain =prompt | chat | StrOutputParser()

            return chain.invoke({"query": given_prompt})

        except Exception as e:
            return f"Error: {str(e)}"

    def call_google(self,model, temp=0.7, given_prompt = "Hi"):
        """Send `given_prompt` to a Google Gemini `model` and return its reply.

        Returns the model's text, or an "Error: ..." string on any failure.
        """
        try:
            # BUG FIX: the kwarg was misspelled `temprature`, so the requested
            # temperature was never applied to the Google model.
            llm = ChatGoogleGenerativeAI(model=model, temperature=temp)
            # Only a human message is sent here (no system message) — some
            # Gemini models reject system roles; kept as in the original.
            human = "{text}"
            prompt = ChatPromptTemplate.from_messages([("human", human)])
            chain = prompt | llm | StrOutputParser()
            return chain.invoke({"text": given_prompt})
        except Exception as e:
            return f"Error: {str(e)}"

    def response(self, model, prompt="Hi", temprature = 0.7):
        """Dispatch `prompt` to whichever provider hosts `model`.

        Note: the `temprature` spelling is kept for backward compatibility
        with existing keyword callers.
        """
        if model in self.groq_models:
            res_show = self.call_groq(temp = temprature, given_prompt = prompt, model= model)
        elif model in self.hf_models:
            res_show = self.call_hf(given_prompt = prompt, temp = temprature, model = model)
        elif model in self.google_models:
            res_show = self.call_google(given_prompt = prompt, temp = temprature, model = model)
        else:
            return "Sorry! App not working properly"
        return res_show