# Configuration version (required)
version: 1.1.5

# Cache settings: Set to true to enable caching
cache: true

# User registration settings
registration:
  # Social login providers to enable on the login page
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
  # Restrict new account registration to these email domains
  allowedDomains:
  - "gmail.com"

# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 750  # Polling interval for checking assistant updates
  #   timeoutMs: 180000  # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  custom:
    #groq
    - name: "groq"
      apiKey: "${GROQ_API_KEY}"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default: [
          "gemma-7b-it"
          ]
        fetch: true
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      summarize: false
      summaryModel: "mixtral-8x7b-32768"
      forcePrompt: false
      modelDisplayLabel: "groq"
      
    # Mistral AI API
    - name: "Mistral"
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"
      models: 
        default: [
          "mistral-small-latest",
          "mistral-medium-latest",
          "mistral-large-latest"
          ]
        fetch: false
      titleConvo: true
      titleMethod: "completion"
      titleModel: "open-mistral-7b"
      summarize: false
      summaryModel: "open-mistral-7b"
      forcePrompt: false
      modelDisplayLabel: "Mistral"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Perplexity
    - name: "Perplexity"
      apiKey: "user_provided"
      baseURL: "https://api.perplexity.ai/"
      models:
        default: [
          "mistral-7b-instruct",
          "sonar-small-chat",
          "sonar-small-online",
          "sonar-medium-chat",
          "sonar-medium-online"
          ]
        fetch: false # fetching the list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      forcePrompt: false
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"

    # OpenRouter
    - name: 'OpenRouter'
      # Known issue: do not set `OPENROUTER_API_KEY`, as it will also override the `openAI` endpoint to use OpenRouter.
      apiKey: '${OPENROUTER_KEY}'
      baseURL: 'https://openrouter.ai/api/v1'
      models:
        default: ['meta-llama/llama-3-8b-instruct']
        fetch: true
      titleConvo: true
      titleModel: 'meta-llama/llama-3-8b-instruct'
      # Recommended: drop the `stop` parameter from the request, as OpenRouter models use a variety of stop tokens.
      dropParams: ['stop']
      modelDisplayLabel: 'OpenRouter'


config:
  env:
    ALLOW_REGISTRATION: "true"
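
# Note: the custom endpoints above read their API keys from environment variables.
# A minimal .env sketch using the variable names referenced in this file; the values
# below are placeholders, not real keys. The Perplexity endpoint uses "user_provided",
# so each user supplies their own key in the UI instead of via .env.
#
#   GROQ_API_KEY=your-groq-api-key
#   MISTRAL_API_KEY=your-mistral-api-key
#   OPENROUTER_KEY=your-openrouter-api-key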