# Reconstructed from a garbled diff-viewer scrape of a librechat.yaml change
# ("@@ -13,74 +13,75 @@ endpoints:"). This is the POST-change content of that
# hunk: the enclosing `endpoints:` key sits above this fragment, outside the
# visible hunk — NOTE(review): confirm surrounding indentation against the
# full file before merging.

    # # Should only be one or the other, either `supportedIds` or `excludedIds`
    # supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
    # # excludedIds: ["asst_excludedAssistantId"]

  # Custom OpenAI-compatible providers (added by this change).
  custom:
    # groq
    - name: "groq"
      apiKey: "user_provided"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default:
          [
            "llama3-70b-8192",
            "llama3-8b-8192",
            "llama2-70b-4096",
            "mixtral-8x7b-32768",
            "gemma-7b-it",
          ]
        fetch: true
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      summarize: false
      summaryModel: "mixtral-8x7b-32768"
      forcePrompt: false
      modelDisplayLabel: "groq"

    # Mistral AI API
    - name: "Mistral"
      apiKey: "user_provided"
      baseURL: "https://api.mistral.ai/v1"
      models:
        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
        fetch: true
      titleConvo: true
      titleMethod: "completion"
      titleModel: "open-mistral-7b"
      summarize: false
      summaryModel: "open-mistral-7b"
      forcePrompt: false
      modelDisplayLabel: "Mistral"
      # Mistral's API rejects these OpenAI-style parameters.
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Perplexity
    - name: "Perplexity"
      apiKey: "user_provided"
      baseURL: "https://api.perplexity.ai/"
      models:
        default:
          [
            "mistral-7b-instruct",
            "sonar-small-chat",
            "sonar-small-online",
            "sonar-medium-chat",
            "sonar-medium-online",
          ]
        fetch: false # fetching list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      forcePrompt: false
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"

    # OpenRouter
    - name: "OpenRouter"
      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
      apiKey: "user_provided"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["meta-llama/llama-3-70b-instruct"]
        fetch: true
      titleConvo: true
      titleModel: "meta-llama/llama-3-8b-instruct"
      # Recommended: Drop the stop parameter from the request as Openrouter models use a variety of stop tokens.
      dropParams: ["stop"]
      modelDisplayLabel: "OpenRouter"