:wrench: Drop Reflection, add Nemotron, and make it the default model.
Files changed:
- README.md (+1, -1)
- app.py (+6, -3)
- readme-generator/generate.sh (+1, -1)
README.md
CHANGED
@@ -1977,7 +1977,7 @@ models:
 - ziniuli/Mistral-7B-ReMax-v0.1
 - zmzmxz/NeuralPipe-7B-slerp
 - Qwen/Qwen2.5-72B
-- mattshumer/Reflection-Llama-3.1-70B
+- nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
 ---

 # Overview
app.py
CHANGED
@@ -40,12 +40,14 @@ model_class_filter = {
 # we run a few other models here as well
 REFLECTION="mattshumer/Reflection-Llama-3.1-70B"
 QWEN25_72B="Qwen/Qwen2.5-72B"
+NEMOTRON="nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
 bigger_whitelisted_models = [
-    REFLECTION,
-    QWEN25_72B,
+    QWEN25_72B,
+    NEMOTRON
 ]
 # REFLECTION is in backup hosting
 model_class_from_model_id[REFLECTION] = 'llama31-70b-16k'
+model_class_from_model_id[NEMOTRON] = 'llama31-70b-16k'
 def build_model_choices():
     all_choices = []
     for model_class in model_cache:
@@ -72,6 +74,7 @@ def model_in_list(model):
 key=os.environ.get('RANDOM_SEED', 'kcOtfNHA+e')
 o = random.Random(f"{key}-{datetime.date.today().strftime('%Y-%m-%d')}")
 initial_model = o.choice(model_choices)[1]
+initial_model = NEMOTRON
 # this doesn't work in HF spaces because we're iframed :(
 # def initial_model(referer=None):
 #   return REFLECTION
@@ -105,7 +108,7 @@ def respond(message, history, model):
     history_openai_format.append({"role": "assistant", "content":assistant})
     history_openai_format.append({"role": "user", "content": message})

-    if model ==
+    if model == REFLECTION:
         history_openai_format = [
             {"role": "system", "content": REFLECTION_SYSTEM_PROMPT},
             *history_openai_format
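A note on the default-model change: before this commit, the Space derived its default from a PRNG seeded with a secret key plus the current date, so every visitor saw the same pick on a given day and it rotated daily; the new `initial_model = NEMOTRON` line simply overrides that pick. Here is a minimal sketch of the same pattern, assuming a hypothetical `(label, model_id)` list in place of the Space's real `model_choices` (the `"example-seed"` fallback is a placeholder, not the Space's real one):

```python
import datetime
import os
import random

# Hypothetical stand-ins for the Space's real model_choices list.
model_choices = [
    ("Qwen 2.5 72B", "Qwen/Qwen2.5-72B"),
    ("Nemotron 70B", "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"),
]

# Seeding a dedicated random.Random with secret-key + date makes the
# "random" default deterministic per day without touching the global
# random state.
key = os.environ.get("RANDOM_SEED", "example-seed")
rng = random.Random(f"{key}-{datetime.date.today().strftime('%Y-%m-%d')}")
initial_model = rng.choice(model_choices)[1]

# This commit then pins the result, making Nemotron the default:
initial_model = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
```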
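The `respond` hunk restores the guard around the Reflection-specific system prompt: only when the selected model is Reflection does the Space prepend `REFLECTION_SYSTEM_PROMPT` to the OpenAI-style message list. A sketch of that pattern follows; `build_messages` is an illustrative helper rather than a function in app.py, and the prompt string is made up:

```python
REFLECTION = "mattshumer/Reflection-Llama-3.1-70B"

# Placeholder text; the Space's actual REFLECTION_SYSTEM_PROMPT differs.
REFLECTION_SYSTEM_PROMPT = "Reason inside <thinking> tags, then answer."

def build_messages(message, history, model):
    """Assemble OpenAI-style messages from (user, assistant) history
    pairs, prepending a system prompt only for the model that needs one."""
    messages = []
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})
    if model == REFLECTION:
        messages = [
            {"role": "system", "content": REFLECTION_SYSTEM_PROMPT},
            *messages,
        ]
    return messages
```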
readme-generator/generate.sh
CHANGED
@@ -20,7 +20,7 @@ $(cat ../model-cache.json \
 console.log(yamlStr);"
 )
 - Qwen/Qwen2.5-72B
-- mattshumer/Reflection-Llama-3.1-70B
+- nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
 ---

 $(cat body.md)
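For orientation, generate.sh builds the README's YAML front matter by converting model-cache.json to a YAML list with an inline node snippet, then appending a few hand-pinned models (after this commit: Qwen2.5-72B and Nemotron). A rough Python equivalent, assuming model-cache.json maps model classes to lists of model ids (the real file's shape may differ):

```python
import json

# Hand-pinned extras appended after the generated list, mirroring the
# tail of readme-generator/generate.sh as of this commit.
EXTRA_MODELS = [
    "Qwen/Qwen2.5-72B",
    "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
]

with open("model-cache.json") as f:
    cache = json.load(f)  # assumed shape: {model_class: [model_id, ...]}

# Emit the front-matter models list, generated ids first, extras last.
print("models:")
for model_id in sorted({m for ids in cache.values() for m in ids}):
    print(f"- {model_id}")
for model_id in EXTRA_MODELS:
    print(f"- {model_id}")
print("---")
```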