Nanobit committed on
Commit
cb4f0e9
1 Parent(s): 4c0eddb

Lint prompters.py

Browse files
Files changed (1) hide show
  1. src/axolotl/prompters.py +73 -25
src/axolotl/prompters.py CHANGED
@@ -1,28 +1,37 @@
1
- import copy
 
2
  import dataclasses
3
  import logging
4
  from enum import auto, Enum
5
- from typing import List, Tuple, Any, Union, Generator
6
 
7
  IGNORE_TOKEN_ID = -100
8
 
9
 
10
  class PromptStyle(Enum):
11
- instruct = "instruct"
12
- chat = "chat"
 
 
 
 
13
 
14
 
15
  class AlpacaPrompter:
 
 
 
 
16
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
17
  system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
18
  prompt_style = None
19
 
20
- def __init__(self, prompt_style=PromptStyle.instruct.value):
21
- self.prompt_style = prompt_style if prompt_style else PromptStyle.instruct.value
22
  self.match_prompt_style()
23
 
24
  def match_prompt_style(self):
25
- if self.prompt_style == PromptStyle.instruct.value:
26
  self.prompt_input = (
27
  self.system_prompt
28
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
@@ -32,7 +41,7 @@ class AlpacaPrompter:
32
  + "### Instruction:\n{instruction}\n\n### Response:\n"
33
  )
34
  self.response_split = "### Response:"
35
- if self.prompt_style == PromptStyle.chat.value:
36
  self.prompt_input = (
37
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
38
  )
@@ -44,7 +53,7 @@ class AlpacaPrompter:
44
  def build_prompt(
45
  self,
46
  instruction: str,
47
- input: Union[None, str] = None,
48
  output: Union[None, str] = None,
49
  ) -> Generator[str, None, None]:
50
  # returns the full prompt from instruction and optional input
@@ -62,33 +71,60 @@ class AlpacaPrompter:
62
 
63
 
64
  class UnpromptedPrompter(AlpacaPrompter):
 
 
 
 
65
  system_prompt = ""
66
  system_no_input_prompt = ""
67
 
68
 
69
  class JeopardyPrompter(AlpacaPrompter):
 
 
 
 
70
  prompt_input = "Below is a Jeopardy clue paired with input providing the category of the clue. Write a concise response that best answers tbe clue given the category.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
71
 
72
 
73
  class MultipleChoiceExplainPrompter(AlpacaPrompter):
 
 
 
 
74
  system_prompt = (
75
  "Choose the answer that best answers the question. Explain your reasoning."
76
  )
77
 
78
 
79
  class MultipleChoiceConcisePrompter(AlpacaPrompter):
 
 
 
 
80
  prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
81
 
82
 
83
  class SummarizeTLDRPrompter(AlpacaPrompter):
 
 
 
 
84
  prompt_no_input = (
85
  "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
86
  )
87
 
88
 
89
  class CompletionPrompter:
 
 
 
 
90
  def build_prompt(
91
- self, instruction: str, input=None, output=None
 
 
 
92
  ) -> Generator[str, None, None]:
93
  yield instruction
94
 
@@ -97,14 +133,22 @@ class CompletionPrompter:
97
 
98
 
99
  class GPTeacherPrompter(AlpacaPrompter):
100
- ...
 
 
101
 
102
 
103
  class NomicGPT4AllPrompter(AlpacaPrompter):
104
- ...
 
 
105
 
106
 
107
  class ReflectAlpacaPrompter:
 
 
 
 
108
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
109
  system_no_input_prompt = "Below is an instruction that describes a task. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
110
 
@@ -120,7 +164,7 @@ class ReflectAlpacaPrompter:
120
  self.match_prompt_style()
121
 
122
  def match_prompt_style(self):
123
- if self.prompt_style == PromptStyle.instruct.value:
124
  self.prompt_input = (
125
  self.system_prompt
126
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
@@ -131,7 +175,7 @@ class ReflectAlpacaPrompter:
131
  )
132
  self.agent_label = "### Thought:\n{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
133
  self.response_split = "### Final Response:"
134
- if self.prompt_style == PromptStyle.chat.value:
135
  self.prompt_input = (
136
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
137
  )
@@ -143,10 +187,10 @@ class ReflectAlpacaPrompter:
143
  )
144
  self.response_split = "ASSISTANT:"
145
 
146
- def build_prompt(
147
  self,
148
  instruction: str,
149
- input: Union[None, str] = None,
150
  output: Union[None, str] = None,
151
  reflection: Union[None, str] = None,
152
  corrected: Union[None, str] = None,
@@ -176,7 +220,7 @@ class SeparatorStyle(Enum):
176
  DOLLY = auto()
177
 
178
 
179
- # TODO clean this 💩 up
180
  @dataclasses.dataclass
181
  class Conversation:
182
  """A class that keeps all conversation history."""
@@ -193,11 +237,11 @@ class Conversation:
193
  seps = [self.sep, self.sep2]
194
  preamble = self.system + seps[0]
195
  yield preamble
196
- for i, (role, message) in enumerate(self.messages):
197
  if message:
198
  yield (role + ":", " " + message)
199
  else:
200
- logging.warning("role with empty message: " + role)
201
  yield (role + ":",)
202
 
203
  def copy(self):
@@ -227,10 +271,14 @@ conv_vicuna_v1_1 = Conversation(
227
  )
228
 
229
 
230
- class ShareGPTPrompter:
 
 
 
 
231
  def __init__(self, prompt_style=None):
232
- if prompt_style != PromptStyle.chat.value:
233
- raise Exception(
234
  f"unsupported prompt_style for ShareGPTPrompter({prompt_style})"
235
  )
236
 
@@ -240,7 +288,7 @@ class ShareGPTPrompter:
240
  # self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
241
  # self.response_split = "ASSISTANT:"
242
 
243
- def build_prompt(self, source, *args, **kwargs) -> Generator[str, None, None]:
244
  # ignore the system prompt if provided
245
  if source[0]["from"] == "system":
246
  source.pop(0)
@@ -261,9 +309,9 @@ class ShareGPTPrompter:
261
  ):
262
  # Skip the first one if it is not from human
263
  source = source[1:]
264
- except IndexError as e:
265
  # sometimes there is a bing or system chat
266
- raise e
267
 
268
  conv.messages = []
269
  for j, sentence in enumerate(source):
 
1
+ """Module containing prompters"""
2
+
3
  import dataclasses
4
  import logging
5
  from enum import auto, Enum
6
+ from typing import List, Union, Generator
7
 
8
  IGNORE_TOKEN_ID = -100
9
 
10
 
11
class PromptStyle(Enum):
    """Enumeration of the supported prompt formatting styles."""

    # "instruct" renders Alpaca-style "### Instruction:" sections;
    # "chat" renders "USER: ... ASSISTANT:" turns.
    INSTRUCT = "instruct"
    CHAT = "chat"
18
 
19
 
20
  class AlpacaPrompter:
21
+ """
22
+ Base class for alpaca prompters
23
+ """
24
+
25
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n"
26
  system_no_input_prompt = "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
27
  prompt_style = None
28
 
29
+ def __init__(self, prompt_style=PromptStyle.INSTRUCT.value):
30
+ self.prompt_style = prompt_style if prompt_style else PromptStyle.INSTRUCT.value
31
  self.match_prompt_style()
32
 
33
  def match_prompt_style(self):
34
+ if self.prompt_style == PromptStyle.INSTRUCT.value:
35
  self.prompt_input = (
36
  self.system_prompt
37
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
 
41
  + "### Instruction:\n{instruction}\n\n### Response:\n"
42
  )
43
  self.response_split = "### Response:"
44
+ if self.prompt_style == PromptStyle.CHAT.value:
45
  self.prompt_input = (
46
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
47
  )
 
53
  def build_prompt(
54
  self,
55
  instruction: str,
56
+ input: Union[None, str] = None, # pylint: disable=redefined-builtin
57
  output: Union[None, str] = None,
58
  ) -> Generator[str, None, None]:
59
  # returns the full prompt from instruction and optional input
 
71
 
72
 
73
class UnpromptedPrompter(AlpacaPrompter):
    """Alpaca prompter variant that suppresses the system prompt entirely."""

    # Blank out both system preambles so only the instruction/input remains.
    system_prompt = ""
    system_no_input_prompt = ""
80
 
81
 
82
class JeopardyPrompter(AlpacaPrompter):
    """
    Prompter for Jeopardy clues: the instruction carries the clue and the
    input carries the clue's category.
    """

    # Fixed typo in the template text: "tbe clue" -> "the clue".
    prompt_input = "Below is a Jeopardy clue paired with input providing the category of the clue. Write a concise response that best answers the clue given the category.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
88
 
89
 
90
class MultipleChoiceExplainPrompter(AlpacaPrompter):
    """Alpaca prompter for multiple-choice questions that asks the model to justify its answer."""

    # NOTE(review): unlike the base class, this preamble has no trailing
    # "\n\n" separator — presumably intentional; confirm against callers.
    system_prompt = "Choose the answer that best answers the question. Explain your reasoning."
98
 
99
 
100
class MultipleChoiceConcisePrompter(AlpacaPrompter):
    """Alpaca prompter for multiple-choice questions answered concisely, rendered in chat (USER/ASSISTANT) form."""

    # Overrides the template directly rather than the system prompt.
    prompt_input = "Choose the answer that best answers the question. Be concise in your response.\n\nUSER: {instruction}\n{input}\nASSISTANT:\n"
106
 
107
 
108
class SummarizeTLDRPrompter(AlpacaPrompter):
    """Alpaca prompter that requests a TL;DR summary of an article, rendered in chat (USER/ASSISTANT) form."""

    # No-input template only: the article text travels in {instruction}.
    prompt_no_input = "USER: Summarize the following article as a TL;DR.\n{instruction}\nASSISTANT:"
116
 
117
 
118
class CompletionPrompter:
    """Prompter for plain text-completion data: the text is the prompt, verbatim."""

    def build_prompt(
        self,
        instruction: str,
        input=None,  # pylint: disable=redefined-builtin, unused-argument
        output=None,  # pylint: disable=unused-argument
    ) -> Generator[str, None, None]:
        """Yield *instruction* unchanged; *input* and *output* are accepted
        for interface compatibility with the other prompters and ignored."""
        yield from (instruction,)
130
 
 
133
 
134
 
135
class GPTeacherPrompter(AlpacaPrompter):
    """Named alias for the GPTeacher dataset; inherits Alpaca behavior unchanged."""
139
 
140
 
141
class NomicGPT4AllPrompter(AlpacaPrompter):
    """Named alias for the Nomic GPT4All dataset; inherits Alpaca behavior unchanged."""
145
 
146
 
147
  class ReflectAlpacaPrompter:
148
+ """
149
+ Prompter for ReflectAlpaca
150
+ """
151
+
152
  system_prompt = "Below is an instruction that describes a task, paired with an input that provides further context. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
153
  system_no_input_prompt = "Below is an instruction that describes a task. You, the Assistant, should generate a response as if it were an abstract for an academic or technical paper on the query along with a methodology. Then generate an Agent Reflection where you create a long form response as if from subject matter expert, be verbose, diligent, and creative in your application of knowledge, apply it through the lens of the response generated by the assistant. Look for flawed reasoning, faulty logic, or other mistakes in the method. Finally, generate a final response and method for the user with the Assistant abstract and Reflection analysis as augmentations to the generation\n\n"
154
 
 
164
  self.match_prompt_style()
165
 
166
  def match_prompt_style(self):
167
+ if self.prompt_style == PromptStyle.INSTRUCT.value:
168
  self.prompt_input = (
169
  self.system_prompt
170
  + "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
 
175
  )
176
  self.agent_label = "### Thought:\n{output}\n\n### Agent Reflection:\n{reflection}\n\n### Final Response:\n{corrected}"
177
  self.response_split = "### Final Response:"
178
+ if self.prompt_style == PromptStyle.CHAT.value:
179
  self.prompt_input = (
180
  self.system_prompt + "USER: {instruction}\n{input}\nASSISTANT:"
181
  )
 
187
  )
188
  self.response_split = "ASSISTANT:"
189
 
190
+ def build_prompt( # pylint: disable=too-many-arguments
191
  self,
192
  instruction: str,
193
+ input: Union[None, str] = None, # pylint: disable=redefined-builtin
194
  output: Union[None, str] = None,
195
  reflection: Union[None, str] = None,
196
  corrected: Union[None, str] = None,
 
220
  DOLLY = auto()
221
 
222
 
223
+ # TODO clean this 💩 up # pylint: disable=fixme
224
  @dataclasses.dataclass
225
  class Conversation:
226
  """A class that keeps all conversation history."""
 
237
  seps = [self.sep, self.sep2]
238
  preamble = self.system + seps[0]
239
  yield preamble
240
+ for _, (role, message) in enumerate(self.messages):
241
  if message:
242
  yield (role + ":", " " + message)
243
  else:
244
+ logging.warning(f"role with empty message: {role}")
245
  yield (role + ":",)
246
 
247
  def copy(self):
 
271
  )
272
 
273
 
274
+ class ShareGPTPrompter: # pylint: disable=too-few-public-methods
275
+ """
276
+ A prompter that generates prompts for the ShareGPT
277
+ """
278
+
279
  def __init__(self, prompt_style=None):
280
+ if prompt_style != PromptStyle.CHAT.value:
281
+ raise ValueError(
282
  f"unsupported prompt_style for ShareGPTPrompter({prompt_style})"
283
  )
284
 
 
288
  # self.prompt_no_input = self.system_no_input_prompt + "USER: {instruction}\nASSISTANT:"
289
  # self.response_split = "ASSISTANT:"
290
 
291
+ def build_prompt(self, source) -> Generator[str, None, None]:
292
  # ignore the system prompt if provided
293
  if source[0]["from"] == "system":
294
  source.pop(0)
 
309
  ):
310
  # Skip the first one if it is not from human
311
  source = source[1:]
312
+ except IndexError as err:
313
  # sometimes there is a bing or system chat
314
+ raise err
315
 
316
  conv.messages = []
317
  for j, sentence in enumerate(source):