Commit 031c40e by Superchik
Parent: 14763ef

Update librechat.yaml

Files changed (1):
  1. librechat.yaml +3 -94

librechat.yaml CHANGED
@@ -1,5 +1,5 @@
 # Configuration version (required)
-version: 1.1.2
+version: 1.1.5

 # Cache settings: Set to true to enable caching
 cache: false
@@ -10,87 +10,6 @@ registration:
 # Definition of custom endpoints
 endpoints:
   custom:
-    # Mistral AI API
-    - name: "Mistral" # Unique name for the endpoint
-      # For `apiKey` and `baseURL`, you can use environment variables that you define.
-      # recommended environment variables:
-      apiKey: "${MISTRAL_API_KEY}"
-      baseURL: "https://api.mistral.ai/v1"
-
-      # Models configuration
-      models:
-        # List of default models to use. At least one value is required.
-        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
-        # Fetch option: Set to true to fetch models from API.
-        fetch: true # Defaults to false.
-
-      # Optional configurations
-
-      # Title Conversation setting
-      titleConvo: true # Set to true to enable title conversation
-
-      # Title Method: Choose between "completion" or "functions".
-      titleMethod: "completion" # Defaults to "completion" if omitted.
-
-      # Title Model: Specify the model to use for titles.
-      titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
-
-      # Summarize setting: Set to true to enable summarization.
-      summarize: false
-
-      # Summary Model: Specify the model to use if summarization is enabled.
-      summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.
-
-      # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
-      forcePrompt: false
-
-      # The label displayed for the AI model in messages.
-      modelDisplayLabel: "Mistral" # Default is "AI" when not set.
-
-      # Add additional parameters to the request. Default params will be overwritten.
-      addParams:
-        safe_mode: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
-
-      # Drop Default params parameters from the request. See default params in guide linked below.
-      dropParams: ["stop", "temperature", "top_p"]
-      # - stop # dropped since it's not recognized by Mistral AI API
-      # `temperature` and `top_p` are removed to allow Mistral AI API defaults to be used:
-      # - temperature
-      # - top_p
-
-    # OpenRouter.ai Example
-    - name: "OpenRouter"
-      # For `apiKey` and `baseURL`, you can use environment variables that you define.
-      # recommended environment variables:
-      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
-      apiKey: "${OPENROUTER_KEY}"
-      baseURL: "https://openrouter.ai/api/v1"
-      models:
-        default: ["nousresearch/nous-capybara-7b:free", "mistralai/mistral-7b-instruct:free", "huggingfaceh4/zephyr-7b-beta:free", "openchat/openchat-7b:free", "gryphe/mythomist-7b:free", "undi95/toppy-m-7b:free", "openrouter/cinematika-7b:free", "openrouter/auto", "nousresearch/nous-capybara-7b", "mistralai/mistral-7b-instruct", "huggingfaceh4/zephyr-7b-beta", "openchat/openchat-7b", "gryphe/mythomist-7b", "openrouter/cinematika-7b", "rwkv/rwkv-5-world-3b", "recursal/rwkv-5-3b-ai-town", "jondurbin/bagel-34b", "jebcarter/psyfighter-13b", "koboldai/psyfighter-13b-2", "neversleep/noromaid-mixtral-8x7b-instruct", "nousresearch/nous-hermes-llama2-13b", "meta-llama/codellama-34b-instruct", "phind/phind-codellama-34b", "intel/neural-chat-7b", "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", "nousresearch/nous-hermes-2-mixtral-8x7b-sft", "haotian-liu/llava-13b", "nousresearch/nous-hermes-2-vision-7b", "meta-llama/llama-2-13b-chat", "gryphe/mythomax-l2-13b", "nousresearch/nous-hermes-llama2-70b", "teknium/openhermes-2-mistral-7b", "teknium/openhermes-2.5-mistral-7b", "undi95/remm-slerp-l2-13b", "undi95/toppy-m-7b", "01-ai/yi-34b-chat", "01-ai/yi-34b", "01-ai/yi-6b", "togethercomputer/stripedhyena-nous-7b", "togethercomputer/stripedhyena-hessian-7b", "mistralai/mixtral-8x7b", "nousresearch/nous-hermes-yi-34b", "open-orca/mistral-7b-openorca", "openai/gpt-3.5-turbo", "openai/gpt-3.5-turbo-1106", "openai/gpt-3.5-turbo-16k", "openai/gpt-4-1106-preview", "openai/gpt-4", "openai/gpt-4-32k", "openai/gpt-4-vision-preview", "openai/gpt-3.5-turbo-instruct", "google/palm-2-chat-bison", "google/palm-2-codechat-bison", "google/palm-2-chat-bison-32k", "google/palm-2-codechat-bison-32k", "google/gemini-pro", "google/gemini-pro-vision", "perplexity/pplx-70b-online", "perplexity/pplx-7b-online", "perplexity/pplx-7b-chat", "perplexity/pplx-70b-chat", "meta-llama/llama-2-70b-chat", "nousresearch/nous-capybara-34b", "jondurbin/airoboros-l2-70b", "austism/chronos-hermes-13b", "migtissera/synthia-70b", "pygmalionai/mythalion-13b", "undi95/remm-slerp-l2-13b-6k", "xwin-lm/xwin-lm-70b", "gryphe/mythomax-l2-13b-8k", "alpindale/goliath-120b ", "lizpreciatior/lzlv-70b-fp16-hf", "neversleep/noromaid-20b", "mistralai/mixtral-8x7b-instruct", "cognitivecomputations/dolphin-mixtral-8x7b", "anthropic/claude-2", "anthropic/claude-2.0", "anthropic/claude-instant-v1", "mancer/weaver", "mistralai/mistral-tiny", "mistralai/mistral-small", "mistralai/mistral-medium"]
-        fetch: true
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
-      modelDisplayLabel: "OpenRouter"
-
-    - name: "Reverse Proxy"
-      # For `apiKey` and `baseURL`, you can use environment variables that you define.
-      # recommended environment variables:
-      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
-      apiKey: "user_provided"
-      baseURL: "user_provided"
-      models:
-        default: ["gpt-3.5-turbo"]
-        fetch: true
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
-      modelDisplayLabel: "AI"
-
     # Zapzatron_API
     - name: "Zapzatron_API"
       apiKey: "user_provided"
@@ -98,20 +17,10 @@ endpoints:
       models:
         default: [
           "gpt-4o",
-          "gpt-4o-2024-08-06",
-          "gpt-4o-2024-05-13",
           "gpt-4o-mini",
-          "gpt-4o-mini-2024-07-18",
-          "gpt-4",
-          "gpt-4-vision-preview",
-          "gpt-4-turbo-preview",
           "gpt-4-0125-preview",
-          "gpt-4-1106-preview",
-          "gpt-4-0613",
+          "gpt-4",
           "gpt-3.5-turbo",
-          "gpt-3.5-turbo-0125",
-          "gpt-3.5-turbo-1106",
-          "gpt-3.5-turbo-0613",
           "gemini-pro",
           "llama3-70b-8192",
           ]
@@ -121,7 +30,7 @@ endpoints:
       summarize: false
       summaryModel: "gpt-4o-mini"
       forcePrompt: false
-      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p", "stream"]
+      # dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p", "stream"]
       modelDisplayLabel: "Zapzatron_API"
       # iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/NagaAI.png"
 
 