Update librechat.yaml
librechat.yaml  (changed, +40 -8)
@@ -1,11 +1,43 @@
 # Configuration version (required)
-version: 1.0.
+version: 1.0.3

 # Cache settings: Set to true to enable caching
 cache: true

+# fileConfig:
+#   endpoints:
+#     assistants:
+#       fileLimit: 5
+#       fileSizeLimit: 10  # Maximum size for an individual file in MB
+#       totalSizeLimit: 50  # Maximum total size for all files in a single request in MB
+#       supportedMimeTypes:
+#         - "image/.*"
+#         - "application/pdf"
+#     openAI:
+#       disabled: true  # Disables file uploading to the OpenAI endpoint
+#     default:
+#       totalSizeLimit: 20
+#     YourCustomEndpointName:
+#       fileLimit: 2
+#       fileSizeLimit: 5
+#   serverFileSizeLimit: 100  # Global server file size limit in MB
+#   avatarSizeLimit: 2  # Limit for user avatar image size in MB
+# rateLimits:
+#   fileUploads:
+#     ipMax: 100
+#     ipWindowInMinutes: 60  # Rate limit window for file uploads per IP
+#     userMax: 50
+#     userWindowInMinutes: 60  # Rate limit window for file uploads per user
+
 # Definition of custom endpoints
 endpoints:
+  # assistants:
+  #   disableBuilder: false  # Disable Assistants Builder Interface by setting to `true`
+  #   pollIntervalMs: 750  # Polling interval for checking assistant updates
+  #   timeoutMs: 180000  # Timeout for assistant operations
+  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
+  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
+  #   # excludedIds: ["asst_excludedAssistantId"]
   custom:
     # Mistral AI API
     - name: "Mistral" # Unique name for the endpoint
@@ -17,7 +49,7 @@ endpoints:
       # Models configuration
       models:
         # List of default models to use. At least one value is required.
-        default: ["mistral-
+        default: ["mistral-tiny-latest", "mistral-small-latest", "mistral-medium-latest", "mistral-large-latest"]
         # Fetch option: Set to true to fetch models from API.
         fetch: true # Defaults to false.

@@ -27,26 +59,26 @@ endpoints:
       titleConvo: true # Set to true to enable title conversation

       # Title Method: Choose between "completion" or "functions".
-      titleMethod: "completion" # Defaults to "completion" if omitted.
+      # titleMethod: "completion" # Defaults to "completion" if omitted.

       # Title Model: Specify the model to use for titles.
       titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

       # Summarize setting: Set to true to enable summarization.
-      summarize: false
+      # summarize: false

       # Summary Model: Specify the model to use if summarization is enabled.
-      summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
+      # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

       # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
-      forcePrompt: false
+      # forcePrompt: false

       # The label displayed for the AI model in messages.
       modelDisplayLabel: "Mistral" # Default is "AI" when not set.

       # Add additional parameters to the request. Default params will be overwritten.
-      addParams:
-        safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
+      # addParams:
+      #   safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/

       # Drop Default params parameters from the request. See default params in guide linked below.
       # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error: