File size: 2,852 Bytes
3772222 4e42ab2 3772222 f7e0ac1 d85390e c0f72fc d85390e bafd54e 9a367b9 d85390e 3772222 9b1b01f c0f72fc 3772222 e85ed25 bafd54e 9e75ffe 9b1b01f d85390e 30bd031 d85390e 30bd031 d85390e 9b1b01f 4f56dd1 a7a048d c0f72fc a7a048d bafd54e a7a048d 5ffec4a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 |
# Configuration version (required).
# Quoted defensively: version-like scalars can be re-typed by YAML parsers
# (e.g. an unquoted `1.10` loads as the float 1.1).
version: "1.1.5"
# Cache settings: set to true to enable caching
cache: true
# Definition of custom endpoints
# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 750 # Polling interval for checking assistant updates
  #   timeoutMs: 180000 # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  # NOTE(review): custom (OpenAI-compatible) endpoints must be listed under the
  # `custom:` key — a bare list directly under `endpoints:` does not match the
  # librechat.yaml schema. Verify against the LibreChat custom-endpoint docs.
  custom:
    # Groq
    - name: "groq"
      apiKey: "user_provided"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default:
          [
            "llama3-70b-8192",
            "llama3-8b-8192",
            "llama2-70b-4096",
            "mixtral-8x7b-32768",
            "gemma-7b-it",
          ]
        fetch: true
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      summarize: false
      summaryModel: "mixtral-8x7b-32768"
      forcePrompt: false
      modelDisplayLabel: "groq"
    # Mistral AI API
    - name: "Mistral"
      apiKey: "user_provided"
      baseURL: "https://api.mistral.ai/v1"
      models:
        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
        fetch: true
      titleConvo: true
      titleMethod: "completion"
      titleModel: "open-mistral-7b"
      summarize: false
      summaryModel: "open-mistral-7b"
      forcePrompt: false
      modelDisplayLabel: "Mistral"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
    # Perplexity
    - name: "Perplexity"
      apiKey: "user_provided"
      baseURL: "https://api.perplexity.ai/"
      models:
        default:
          [
            "mistral-7b-instruct",
            "sonar-small-chat",
            "sonar-small-online",
            "sonar-medium-chat",
            "sonar-medium-online",
          ]
        fetch: false # fetching the list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      forcePrompt: false
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"
    # OpenRouter
    - name: "OpenRouter"
      # Known issue: do not set `OPENROUTER_API_KEY` in the environment — it
      # overrides the `openAI` endpoint to use OpenRouter as well.
      apiKey: "user_provided"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["meta-llama/llama-3-70b-instruct"]
        fetch: true
      titleConvo: true
      titleModel: "meta-llama/llama-3-8b-instruct"
      # Recommended: drop the `stop` parameter from requests, as OpenRouter
      # models use a variety of stop tokens.
      dropParams: ["stop"]
      modelDisplayLabel: "OpenRouter"