Spaces:
Sleeping
Sleeping
Update helpers/foundation_models.py
Browse files- helpers/foundation_models.py +47 -0
helpers/foundation_models.py
CHANGED
@@ -147,3 +147,50 @@ def llama2_7b_ysa(prompt: str) -> str:
|
|
147 |
response: str = output[0]['generated_text']
|
148 |
|
149 |
return response
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
147 |
response: str = output[0]['generated_text']
|
148 |
|
149 |
return response
|
150 |
+
|
151 |
+
|
152 |
+
def llama2_7b_brk_letters(prompt: str) -> str:
    """
    Query the LLaMA 2 7B model hosted on a specific Hugging Face inference
    endpoint with a given prompt, and return the generated text.

    Args:
        prompt (str): The input text prompt sent to the LLaMA 2 7B model.

    Returns:
        str: The text generated by the model in response to the prompt.

    Raises:
        requests.HTTPError: If the endpoint responds with a non-2xx status.
        requests.Timeout: If the endpoint does not respond within the timeout.
    """
    # Endpoint URL of the LLaMA 2 7B model hosted on Hugging Face.
    API_URL = "https://hd1rl4q31aom5qwc.us-east-1.aws.endpoints.huggingface.cloud"

    # Headers to include in the HTTP request. Both the request body and the
    # expected response are JSON.
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }

    def query_llama2_7b_brk_letters(payload: dict) -> dict:
        """
        Send a POST request to the LLaMA 2 7B API endpoint with a given payload.

        Args:
            payload (dict): The data sent in the POST request, including the
                input prompt and any parameters for the model.

        Returns:
            dict: The JSON response from the API, parsed into a dictionary.
        """
        # A timeout prevents this call from hanging indefinitely when the
        # endpoint is slow or unreachable (the original request had none).
        response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
        # Fail loudly on HTTP errors instead of trying to parse an error
        # page as a model response (which surfaced as a confusing
        # JSONDecodeError/KeyError downstream).
        response.raise_for_status()
        return response.json()

    # The payload for the POST request: the prompt plus any model parameters.
    output = query_llama2_7b_brk_letters({
        "inputs": prompt,
        "parameters": {},
    })

    # The endpoint returns a list of generation objects; extract the text of
    # the first one. (Assumes the standard text-generation response shape —
    # confirm against the deployed endpoint.)
    response: str = output[0]['generated_text']
    return response
|