mca183 committed on
Commit
ea6ab2a
1 Parent(s): 364533b

updated prompt; reduce `max_tokens`; just return the whole text

Files changed (1): app.py (+3 -6)
app.py CHANGED
@@ -11,7 +11,7 @@ co = cohere.Client(cohere_api_key)
 
 def generate_data(data_src):
 
-    command_prompt= f'''This is a sample dataset in csv below, and I want you to help me generate more data with different variations (at least 100 examples)
+    command_prompt= f'''This is a sample dataset in csv below, and I want you to help me generate more data in csv format with different variations (at least 100 examples).
 
     ```csv
     {data_src}
@@ -20,16 +20,13 @@ def generate_data(data_src):
     response = co.generate(
         model='command',
         prompt=command_prompt,
-        max_tokens=2606,
+        max_tokens=260,
         temperature=0.9,
         k=0,
         stop_sequences=[],
         return_likelihoods='NONE')
 
-    txt_response = response.generations[0].text
-    txt_split_1 = txt_response.split("```csv")
-    txt_split_2 = txt_split_1[1].split("```")
-    data_text = txt_split_2[0]
+    data_text = response.generations[0].text
 
     return data_text
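For reference, the sketch below shows roughly what `generate_data` looks like after this commit. It is not the verbatim file: the `co = cohere.Client(cohere_api_key)` setup comes from the hunk context, the placeholder API key is invented, and the closing `'''` of the prompt string falls outside the diff, so its exact form is assumed.

````python
import cohere

cohere_api_key = "YOUR_COHERE_API_KEY"  # placeholder; app.py loads the real key elsewhere
co = cohere.Client(cohere_api_key)

def generate_data(data_src):
    # The prompt now asks explicitly for csv output.
    command_prompt = f'''This is a sample dataset in csv below, and I want you to help me generate more data in csv format with different variations (at least 100 examples).

```csv
{data_src}
```'''

    response = co.generate(
        model='command',
        prompt=command_prompt,
        max_tokens=260,        # down from 2606
        temperature=0.9,
        k=0,
        stop_sequences=[],
        return_likelihoods='NONE')

    # Return the model's full reply; the old fence-splitting post-processing is gone.
    data_text = response.generations[0].text
    return data_text
````

Returning the whole generation also removes a failure mode of the old code: `txt_split_1[1]` raised an `IndexError` whenever the model replied without the expected csv code fence.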