# Configuration version (required)
version: 1.1.5

# Cache settings: set to true to enable caching
cache: true

# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable the Assistants Builder interface by setting to `true`
  #   pollIntervalMs: 750 # Polling interval for checking assistant updates
  #   timeoutMs: 180000 # Timeout for assistant operations
  #   # Use only one of `supportedIds` or `excludedIds`, not both
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  custom:
    # Groq
    - name: "groq"
      apiKey: "user_provided"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default:
          [
            "llama3-70b-8192",
            "llama3-8b-8192",
            "llama2-70b-4096",
            "mixtral-8x7b-32768",
            "gemma-7b-it",
          ]
        fetch: true
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      summarize: false
      summaryModel: "mixtral-8x7b-32768"
      forcePrompt: false
      modelDisplayLabel: "groq"

    # Mistral AI API
    - name: "Mistral"
      apiKey: "user_provided"
      baseURL: "https://api.mistral.ai/v1"
      models:
        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
        fetch: true
      titleConvo: true
      titleMethod: "completion"
      titleModel: "open-mistral-7b"
      summarize: false
      summaryModel: "open-mistral-7b"
      forcePrompt: false
      modelDisplayLabel: "Mistral"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Perplexity
    - name: "Perplexity"
      apiKey: "user_provided"
      baseURL: "https://api.perplexity.ai/"
      models:
        default:
          [
            "mistral-7b-instruct",
            "sonar-small-chat",
            "sonar-small-online",
            "sonar-medium-chat",
            "sonar-medium-online",
          ]
        fetch: false # Fetching the list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      forcePrompt: false
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"

    # OpenRouter
    # Known issue: do not set `OPENROUTER_API_KEY` in your environment, as it
    # will also override the `openAI` endpoint to use OpenRouter.
    - name: "OpenRouter"
      apiKey: "user_provided"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["meta-llama/llama-3-70b-instruct"]
        fetch: true
      titleConvo: true
      titleModel: "meta-llama/llama-3-8b-instruct"
      # Recommended: drop the `stop` parameter, as OpenRouter models use a
      # variety of stop tokens.
      dropParams: ["stop"]
      modelDisplayLabel: "OpenRouter"
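
# Note on apiKey: "user_provided" makes each user supply their own key in the
# UI. LibreChat can also read the key from an environment variable; a minimal
# sketch, assuming a GROQ_API_KEY variable is defined in your .env file:
#   apiKey: "${GROQ_API_KEY}"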
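
# Note on dropParams: the listed OpenAI parameters are stripped from outgoing
# requests, which avoids errors from providers that reject them. For example,
# a provider that rejects the `user` field could be handled like this
# (hypothetical parameter list; adjust to what your provider actually rejects):
#   dropParams: ["stop", "user"]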
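
# Any other OpenAI-compatible server can be added with the same keys used
# above. A minimal sketch with placeholder name, baseURL, and model ID, not a
# real deployment; uncomment under `custom:` and adjust before use:
#    - name: "LocalServer"
#      apiKey: "user_provided"
#      baseURL: "http://localhost:8000/v1"
#      models:
#        default: ["my-local-model"]
#        fetch: false
#      titleConvo: true
#      titleModel: "my-local-model"
#      modelDisplayLabel: "Local Server"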