Artin2009 committed
Commit 2a7bb88 · verified · 1 parent: c2cfa59

Update chain_app.py

Files changed (1): chain_app.py (+626, -629)
chain_app.py CHANGED
@@ -1955,655 +1955,652 @@ async def on_chat_start():
 @cl.on_message
 async def main(message: cl.Message):
     chat_profile = cl.user_session.get("chat_profile")
-    if chat_profile == 'neural-brain-AI':
-        completion = openai_client.chat.completions.create(
-            model="ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK",
-            messages=[
-                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
-                {"role": "user", "content": message.content}
-            ]
-        )
-        model_response = completion.choices[0].message.content
-        await cl.Message(
-            content=model_response
-        ).send()
-
-    elif chat_profile == "Dorna-AI":
-        result = hf_text_client.predict(
-            message=message.content,
-            request=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            param_3=512,
-            param_4=0.7,
-            param_5=0.95,
-            api_name="/chat"
-        )
-        model_response = result.strip("</s>")
-        await cl.Message(
-            content=model_response
-        ).send()
-    elif chat_profile == "gpt4-o-mini":
-        completion = openai_client.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=[
-                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
-                {"role": "user", "content": message.content}
-            ]
-        )
-        model_response = completion.choices[0].message.content
-        await cl.Message(
-            content=model_response
-        ).send()
-    # elif chat_profile == 'Image-Generation':
-    #     result = hf_image_client.predict(
-    #         prompt=message.content,
-    #         negative_prompt="",
-    #         seed=0,
-    #         randomize_seed=True,
-    #         width=512,
-    #         height=512,
-    #         guidance_scale=0,
-    #         num_inference_steps=2,
-    #         api_name="/infer"
-    #     )
-    #     image = cl.Image(path=result, name="result", display="inline")
-    #     await cl.Message(
-    #         content="This message has an image!",
-    #         elements=[image],
-    #     ).send()
-    elif chat_profile == 'GPT-4':
-        completion = openai_client.chat.completions.create(
-            model="gpt-4",
-            messages=[
-                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
-                {"role": "user", "content": message.content}
-            ]
-        )
-        model_response = completion.choices[0].message.content
-        await cl.Message(
-            content=model_response
-        ).send()
-
-    elif chat_profile == 'gpt-3.5-turbo':
-        completion = openai_client.chat.completions.create(
-            model="gpt-3.5-turbo",
-            messages=[
-                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
-                {"role": "user", "content": message.content}
-            ]
-        )
-        model_response = completion.choices[0].message.content
-        await cl.Message(
-            content=model_response
-        ).send()
-    elif chat_profile == 'GPT-3.5-turbo-0125':
-        completion = openai_client.chat.completions.create(
-            model="GPT-3.5-turbo-0125",
-            messages=[
-                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
-                {"role": "user", "content": message.content}
-            ]
-        )
-        model_response = completion.choices[0].message.content
-        await cl.Message(
-            content=model_response
-        ).send()
-    elif chat_profile == 'gpt-3.5-turbo-1106':
-        completion = openai_client.chat.completions.create(
-            model="gpt-3.5-turbo-1106",
-            messages=[
-                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
-                {"role": "user", "content": message.content}
-            ]
-        )
-        model_response = completion.choices[0].message.content
-        await cl.Message(
-            content=model_response
-        ).send()
-    # elif chat_profile == 'davinci-002':
-    #     completion = openai_client.chat.completions.create(
-    #         model="davinci-002",
-    #         messages=[
-    #             {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
-    #             {"role": "user", "content": message.content}
-    #         ]
-    #     )
-    #     model_response = completion.choices[0].message.content
-    #     await cl.Message(
-    #         content=model_response
-    #     ).send()
-
-    elif chat_profile == 'TTS':
-        response = openai_client.audio.speech.create(
-            model="tts-1",
-            voice="alloy",
-            input=message.content,
-        )
-
-        response.stream_to_file("output.mp3")
-
-        elements = [
-            cl.Audio(name="output.mp3", path="./output.mp3", display="inline"),
-        ]
-        await cl.Message(
-            content="Here it is the response!",
-            elements=elements,
-        ).send()
-
-    elif chat_profile == 'Qwen2-57B':
-        client = Client("Qwen/Qwen2-57b-a14b-instruct-demo", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-        ).send()
-
-    elif chat_profile == 'Qwen2-7B':
-        client = Client("Qwen/Qwen2-7b-instruct-demo", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-
-    elif chat_profile == 'Qwen2-1.5B':
-        client = Client("Qwen/Qwen2-1.5b-instruct-demo", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-
-    elif chat_profile == 'Qwen2-0.5B':
-        client = Client("Qwen/Qwen2-0.5B-Instruct", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-    elif chat_profile == 'Qwen1.5-110B':
-        client = Client("Qwen/Qwen1.5-110B-Chat-demo", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-
-    elif chat_profile == 'Qwen1.5-32B':
-        client = Client("Qwen/Qwen1.5-32B-Chat-demo", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-
-    elif chat_profile == 'Qwen1.5-2.7B':
-        client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
-        result = client.predict(
-            query=message.content,
-            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            api_name="/model_chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-
-    # elif chat_profile == 'Qwen-14B':
-    #     client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
-    #     result = client.predict(
-    #         query=message.content,
-    #         system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
-    #         api_name="/model_chat"
-    #     )
-    #     await cl.Message(
-    #         content=result[1][0][1]
-
-    #     ).send()
-
-    # elif chat_profile == 'Qwen-7B':
-    #     client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
-    #     result = client.predict(
-    #         query=message.content,
-    #         system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
-    #         api_name="/model_chat"
-    #     )
-    #     await cl.Message(
-    #         content=result[1][0][1]
-
-    #     ).send()
-
-    elif chat_profile == 'Llama-3.1-405B':
-        completion = groq_client.chat.completions.create(
-            model="llama-3.1-405b-reasoning",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-
-    elif chat_profile == 'Llama-3.1-70B':
-        completion = groq_client.chat.completions.create(
-            model="llama-3.1-70b-versatile",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Llama-3.1-8B':
-        completion = groq_client.chat.completions.create(
-            model="llama-3.1-8b-instant",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Llama-3-70B':
-        completion = groq_client.chat.completions.create(
-            model="llama3-70b-8192",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Llama-3-8B':
-        completion = groq_client.chat.completions.create(
-            model="llama3-8b-8192",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'gemma2-27B':
-        client = Client("gokaygokay/Gemma-2-llamacpp")
-        result = client.predict(
-            message=message.content,
-            model="gemma-2-27b-it-Q5_K_M.gguf",
-            system_message=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            max_tokens=2048,
-            temperature=0.7,
-            top_p=0.95,
-            top_k=40,
-            repeat_penalty=1.1,
-            api_name="/chat"
-        )
-        await cl.Message(
-            content=result
-        ).send()
-
-    elif chat_profile == 'gemma2-9B':
-        completion = groq_client.chat.completions.create(
-            model="gemma2-9b-it",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'gemma-7B':
-        completion = groq_client.chat.completions.create(
-            model="gemma-7b-it",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        # Iterate over each chunk
-        for chunk in completion:
-            # Retrieve the content from the current chunk
-            content = chunk.choices[0].delta.content
-
-            # Check if the content is not None before concatenating it
-            if content is not None:
-                complete_content += content
-
-        # Send the concatenated content as a message
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == "zephyr-7B":
-        result = hf_text_client.predict(
             message=message.content,
-            request=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
-            param_3=512,
-            param_4=0.7,
-            param_5=0.95,
             api_name="/chat"
-        )
-        model_response = result.strip("</s>")
-        await cl.Message(
-            content=model_response
-        ).send()
-
-    elif chat_profile == 'mixtral-8x7B':
-        completion = groq_client.chat.completions.create(
-            model="mixtral-8x7b-32768",
-            messages=[
-                {
-                    "role": "user",
-                    "content": message.content
-                }
-            ],
-            temperature=1,
-            max_tokens=1024,
-            top_p=1,
-            stream=True,
-            stop=None,
-        )
-
-        complete_content = ""
-
-        for chunk in completion:
-            content = chunk.choices[0].delta.content
-
-            if content is not None:
-                complete_content += content
-
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'mistral-nemo-12B':
-        client = Client("0x7o/Mistral-Nemo-Instruct", hf_token=hf_token)
-        result = client.predict(
-            message=message.content,
-            max_new_tokens=512,
-            temperature=0.7,
-            top_p=0.95,
-            api_name="/chat"
-        )
-        await cl.Message(
-            content=result[1][0][1]
-
-        ).send()
-
-    # elif chat_profile == 'Toka-353M':
-    #     output = query({
-    #         "inputs": message.content,
-    #     })
-    #     await cl.Message(
-    #         content=output[0]['sequence']
-    #     ).send()
-
-    elif chat_profile == 'Aya-23B':
-        stream = co.chat_stream(
-            model='c4ai-aya-23',
-            message=message.content,
-            temperature=0.3,
-            # chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
-            prompt_truncation='OFF',
-            connectors=[],
-        )
-
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Aya-35B':
-        stream = co.chat_stream(
-            model='c4ai-aya-23',
             message=message.content,
-            temperature=0.3,
-            # chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
-            prompt_truncation='OFF',
-            connectors=[],
-        )
-
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Command-R-Plus':
-        stream = co.chat_stream(
-            model='command-r-plus',
-            message=message.content,
-            temperature=0.3,
-            chat_history=[],
-            prompt_truncation='AUTO',
-        )
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Command-R':
-        stream = co.chat_stream(
-            model='command-r',
-            message=message.content,
-            temperature=0.3,
-            chat_history=[],
-            prompt_truncation='AUTO',
-        )
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Command':
-        stream = co.chat_stream(
-            model='command',
-            message=message.content,
-            temperature=0.3,
-            chat_history=[],
-            prompt_truncation='AUTO',
-        )
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Command-Light':
-        stream = co.chat_stream(
-            model='command-light',
-            message=message.content,
-            temperature=0.3,
-            chat_history=[],
-            prompt_truncation='AUTO',
-        )
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Command-Light-Nightly':
-        stream = co.chat_stream(
-            model='command-light-nightly',
-            message=message.content,
-            temperature=0.3,
-            chat_history=[],
-            prompt_truncation='AUTO',
-        )
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-
-    elif chat_profile == 'Command-Nightly':
-        stream = co.chat_stream(
-            model='command-light-nightly',
-            message=message.content,
-            temperature=0.3,
-            chat_history=[],
-            prompt_truncation='AUTO',
-        )
-        complete_content = ''
-        for event in stream:
-            if event.event_type == 'text-generation':
-                complete_content += event.text
-        await cl.Message(content=complete_content).send()
-    else:
         await cl.Message(
-            content="Try Sending your message again. or edit it.",
         ).send()
+    if chat_profile == 'neural-brain-AI':
+        completion = openai_client.chat.completions.create(
+            model="ft:gpt-3.5-turbo-1106:nb:aria1:9UWDrLJK",
+            messages=[
+                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
+                {"role": "user", "content": message.content}
+            ]
+        )
+        model_response = completion.choices[0].message.content
+        await cl.Message(
+            content=model_response
+        ).send()
+
+    elif chat_profile == "Dorna-AI":
+        result = hf_text_client.predict(
+            message=message.content,
+            request=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            param_3=512,
+            param_4=0.7,
+            param_5=0.95,
+            api_name="/chat"
+        )
+        model_response = result.strip("</s>")
+        await cl.Message(
+            content=model_response
+        ).send()
+    elif chat_profile == "gpt4-o-mini":
+        completion = openai_client.chat.completions.create(
+            model="gpt-4o-mini",
+            messages=[
+                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
+                {"role": "user", "content": message.content}
+            ]
+        )
+        model_response = completion.choices[0].message.content
+        await cl.Message(
+            content=model_response
+        ).send()
+    # elif chat_profile == 'Image-Generation':
+    #     result = hf_image_client.predict(
+    #         prompt=message.content,
+    #         negative_prompt="",
+    #         seed=0,
+    #         randomize_seed=True,
+    #         width=512,
+    #         height=512,
+    #         guidance_scale=0,
+    #         num_inference_steps=2,
+    #         api_name="/infer"
+    #     )
+    #     image = cl.Image(path=result, name="result", display="inline")
+    #     await cl.Message(
+    #         content="This message has an image!",
+    #         elements=[image],
+    #     ).send()
+    elif chat_profile == 'GPT-4':
+        completion = openai_client.chat.completions.create(
+            model="gpt-4",
+            messages=[
+                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
+                {"role": "user", "content": message.content}
+            ]
+        )
+        model_response = completion.choices[0].message.content
+        await cl.Message(
+            content=model_response
+        ).send()
+
+    elif chat_profile == 'gpt-3.5-turbo':
+        completion = openai_client.chat.completions.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
+                {"role": "user", "content": message.content}
+            ]
+        )
+        model_response = completion.choices[0].message.content
+        await cl.Message(
+            content=model_response
+        ).send()
+    elif chat_profile == 'GPT-3.5-turbo-0125':
+        completion = openai_client.chat.completions.create(
+            model="GPT-3.5-turbo-0125",
+            messages=[
+                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
+                {"role": "user", "content": message.content}
+            ]
+        )
+        model_response = completion.choices[0].message.content
+        await cl.Message(
+            content=model_response
+        ).send()
+    elif chat_profile == 'gpt-3.5-turbo-1106':
+        completion = openai_client.chat.completions.create(
+            model="gpt-3.5-turbo-1106",
+            messages=[
+                {"role": "system", "content": f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people"},
+                {"role": "user", "content": message.content}
+            ]
+        )
+        model_response = completion.choices[0].message.content
+        await cl.Message(
+            content=model_response
+        ).send()
+    # elif chat_profile == 'davinci-002':
+    #     completion = openai_client.chat.completions.create(
+    #         model="davinci-002",
+    #         messages=[
+    #             {"role": "system", "content": "You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust"},
+    #             {"role": "user", "content": message.content}
+    #         ]
+    #     )
+    #     model_response = completion.choices[0].message.content
+    #     await cl.Message(
+    #         content=model_response
+    #     ).send()
+
+    elif chat_profile == 'TTS':
+        response = openai_client.audio.speech.create(
+            model="tts-1",
+            voice="alloy",
+            input=message.content,
+        )
+
+        response.stream_to_file("output.mp3")
+
+        elements = [
+            cl.Audio(name="output.mp3", path="./output.mp3", display="inline"),
+        ]
+        await cl.Message(
+            content="Here it is the response!",
+            elements=elements,
+        ).send()
+
+    elif chat_profile == 'Qwen2-57B':
+        client = Client("Qwen/Qwen2-57b-a14b-instruct-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+        ).send()
+
+    elif chat_profile == 'Qwen2-7B':
+        client = Client("Qwen/Qwen2-7b-instruct-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    elif chat_profile == 'Qwen2-1.5B':
+        client = Client("Qwen/Qwen2-1.5b-instruct-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    elif chat_profile == 'Qwen2-0.5B':
+        client = Client("Qwen/Qwen2-0.5B-Instruct", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+    elif chat_profile == 'Qwen1.5-110B':
+        client = Client("Qwen/Qwen1.5-110B-Chat-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    elif chat_profile == 'Qwen1.5-32B':
+        client = Client("Qwen/Qwen1.5-32B-Chat-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    elif chat_profile == 'Qwen1.5-2.7B':
+        client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    # elif chat_profile == 'Qwen-14B':
+    #     client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
+    #     result = client.predict(
+    #         query=message.content,
+    #         system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+    #         api_name="/model_chat"
+    #     )
+    #     await cl.Message(
+    #         content=result[1][0][1]
+
+    #     ).send()
+
+    # elif chat_profile == 'Qwen-7B':
+    #     client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
+    #     result = client.predict(
+    #         query=message.content,
+    #         system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+    #         api_name="/model_chat"
+    #     )
+    #     await cl.Message(
+    #         content=result[1][0][1]
+
+    #     ).send()
+
+    elif chat_profile == 'Llama-3.1-405B':
+        completion = groq_client.chat.completions.create(
+            model="llama-3.1-405b-reasoning",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Llama-3.1-70B':
+        completion = groq_client.chat.completions.create(
+            model="llama-3.1-70b-versatile",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Llama-3.1-8B':
+        completion = groq_client.chat.completions.create(
+            model="llama-3.1-8b-instant",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Llama-3-70B':
+        completion = groq_client.chat.completions.create(
+            model="llama3-70b-8192",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Llama-3-8B':
+        completion = groq_client.chat.completions.create(
+            model="llama3-8b-8192",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'gemma2-27B':
+        client = Client("gokaygokay/Gemma-2-llamacpp")
+        result = client.predict(
             message=message.content,
+            model="gemma-2-27b-it-Q5_K_M.gguf",
+            system_message=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            max_tokens=2048,
+            temperature=0.7,
+            top_p=0.95,
+            top_k=40,
+            repeat_penalty=1.1,
             api_name="/chat"
+        )
+        await cl.Message(
+            content=result
+        ).send()
+
+    elif chat_profile == 'gemma2-9B':
+        completion = groq_client.chat.completions.create(
+            model="gemma2-9b-it",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'gemma-7B':
+        completion = groq_client.chat.completions.create(
+            model="gemma-7b-it",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        # Iterate over each chunk
+        for chunk in completion:
+            # Retrieve the content from the current chunk
+            content = chunk.choices[0].delta.content
+
+            # Check if the content is not None before concatenating it
+            if content is not None:
+                complete_content += content
+
+        # Send the concatenated content as a message
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == "zephyr-7B":
+        result = hf_text_client.predict(
             message=message.content,
+            request=f"You are neural nexus official chatbot, you are made by Artin Daneshvar and Sadra Noadoust and you are here to help people",
+            param_3=512,
+            param_4=0.7,
+            param_5=0.95,
+            api_name="/chat"
+        )
+        model_response = result.strip("</s>")
+        await cl.Message(
+            content=model_response
+        ).send()
+
+    elif chat_profile == 'mixtral-8x7B':
+        completion = groq_client.chat.completions.create(
+            model="mixtral-8x7b-32768",
+            messages=[
+                {
+                    "role": "user",
+                    "content": message.content
+                }
+            ],
+            temperature=1,
+            max_tokens=1024,
+            top_p=1,
+            stream=True,
+            stop=None,
+        )
+
+        complete_content = ""
+
+        for chunk in completion:
+            content = chunk.choices[0].delta.content
+
+            if content is not None:
+                complete_content += content
+
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'mistral-nemo-12B':
+        client = Client("0x7o/Mistral-Nemo-Instruct", hf_token=hf_token)
+        result = client.predict(
+            message=message.content,
+            max_new_tokens=512,
+            temperature=0.7,
+            top_p=0.95,
+            api_name="/chat"
+        )
         await cl.Message(
+            content=result[1][0][1]
+
         ).send()
+
+    # elif chat_profile == 'Toka-353M':
+    #     output = query({
+    #         "inputs": message.content,
+    #     })
+    #     await cl.Message(
+    #         content=output[0]['sequence']
+    #     ).send()
+
+    elif chat_profile == 'Aya-23B':
+        stream = co.chat_stream(
+            model='c4ai-aya-23',
+            message=message.content,
+            temperature=0.3,
+            # chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
+            prompt_truncation='OFF',
+            connectors=[],
+        )
+
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Aya-35B':
+        stream = co.chat_stream(
+            model='c4ai-aya-23',
+            message=message.content,
+            temperature=0.3,
+            # chat_history=[{"role": "User", "message": "Hello"}, {"role": "Chatbot", "message": "Hello! How can I help you today?"}, {"role": "User", "message": "Hi"}, {"role": "User", "message": "hello"}],
+            prompt_truncation='OFF',
+            connectors=[],
+        )
+
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Command-R-Plus':
+        stream = co.chat_stream(
+            model='command-r-plus',
+            message=message.content,
+            temperature=0.3,
+            chat_history=[],
+            prompt_truncation='AUTO',
+        )
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Command-R':
+        stream = co.chat_stream(
+            model='command-r',
+            message=message.content,
+            temperature=0.3,
+            chat_history=[],
+            prompt_truncation='AUTO',
+        )
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Command':
+        stream = co.chat_stream(
+            model='command',
+            message=message.content,
+            temperature=0.3,
+            chat_history=[],
+            prompt_truncation='AUTO',
+        )
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Command-Light':
+        stream = co.chat_stream(
+            model='command-light',
+            message=message.content,
+            temperature=0.3,
+            chat_history=[],
+            prompt_truncation='AUTO',
+        )
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Command-Light-Nightly':
+        stream = co.chat_stream(
+            model='command-light-nightly',
+            message=message.content,
+            temperature=0.3,
+            chat_history=[],
+            prompt_truncation='AUTO',
+        )
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()
+
+    elif chat_profile == 'Command-Nightly':
+        stream = co.chat_stream(
+            model='command-light-nightly',
+            message=message.content,
+            temperature=0.3,
+            chat_history=[],
+            prompt_truncation='AUTO',
+        )
+        complete_content = ''
+        for event in stream:
+            if event.event_type == 'text-generation':
+                complete_content += event.text
+        await cl.Message(content=complete_content).send()


 @cl.on_settings_update
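
Note on the pattern above: the eight Groq-backed branches (Llama-3.1-405B through gemma-7B, plus mixtral-8x7B) repeat the same stream-and-accumulate sequence with only the model string changing. A minimal sketch of how those branches could share one helper, assuming the same groq_client, chainlit, and chat parameters this file already uses; the helper name stream_groq_reply is illustrative and not part of the commit:

    import chainlit as cl

    async def stream_groq_reply(groq_client, model: str, user_content: str) -> None:
        # Request a streamed chat completion with the sampling settings
        # every Groq branch above uses.
        completion = groq_client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": user_content}],
            temperature=1,
            max_tokens=1024,
            top_p=1,
            stream=True,
            stop=None,
        )

        complete_content = ""
        for chunk in completion:
            # A chunk's delta content can be None (e.g. the final stop chunk),
            # so guard before concatenating.
            content = chunk.choices[0].delta.content
            if content is not None:
                complete_content += content

        # Send the accumulated text as a single Chainlit message.
        await cl.Message(content=complete_content).send()

Each elif would then reduce to a single call, e.g. await stream_groq_reply(groq_client, "llama3-8b-8192", message.content).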