srush committed on
Commit cd607b2
Parent: 68e49d2

Upload with huggingface_hub

Files changed (13):
  1. #chat.py# +76 -0
  2. #chatgpt.py# +78 -0
  3. #ner.py# +71 -0
  4. #qa.py# +23 -12
  5. app.py +3 -3
  6. bash.py +12 -6
  7. chat.py +12 -1
  8. gatsby.py +10 -4
  9. math_demo.py +11 -4
  10. ner.py +10 -4
  11. pal.py +7 -2
  12. qa.py +8 -4
  13. stats.py +9 -3
#chat.py# ADDED
@@ -0,0 +1,76 @@
+ # + tags=["hide_inp"]
+ desc = """
+ # ChatGPT
+
+ "ChatGPT"-like examples. Adapted from
+ [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s
+ version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).
+ """
+ # -
+
+
+ import warnings
+ from dataclasses import dataclass
+ from typing import List, Tuple
+ import minichain
+
+ # + tags=["hide_inp"]
+ warnings.filterwarnings("ignore")
+ # -
+
+
+ # Generic stateful Memory
+
+ MEMORY = 2
+
+ @dataclass
+ class State:
+     memory: List[Tuple[str, str]]
+     human_input: str = ""
+
+     def push(self, response: str) -> "State":
+         memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
+         return State(memory + [(self.human_input, response)])
+
+ # Chat prompt with memory
+
+ class ChatPrompt(minichain.TemplatePrompt):
+     template_file = "chatgpt.pmpt.tpl"
+     def parse(self, out: str, inp: State) -> State:
+         result = out.split("Assistant:")[-1]
+         return inp.push(result)
+
+ # class Human(minichain.Prompt):
+ #     def parse(self, out: str, inp: State) -> State:
+ #         return inp.human_input = out
+
+
+ with minichain.start_chain("chat") as backend:
+     prompt = ChatPrompt(backend.OpenAI())
+     state = State([])
+
+
+ examples = [
+     "I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.",
+     "ls ~",
+     "cd ~",
+     "{Please make a file jokes.txt inside and put some jokes inside}",
+     """echo -e "x=lambda y:y*5+3;print('Result:' + str(x(6)))" > run.py && python3 run.py""",
+     """echo -e "print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])" > run.py && python3 run.py""",
+     """echo -e "echo 'Hello from Docker" > entrypoint.sh && echo -e "FROM ubuntu:20.04\nCOPY entrypoint.sh entrypoint.sh\nENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image""",
+     "nvidia-smi"
+ ]
+
+ gradio = prompt.to_gradio(fields=["human_input"],
+                           initial_state=state,
+                           examples=examples,
+                           out_type="json",
+                           description=desc
+                           )
+ if __name__ == "__main__":
+     gradio.launch()
+
+ # for i in range(len(fake_human)):
+ #     human.chain(prompt)
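Note: `State.push` above is the whole memory policy: it appends the latest (human, assistant) exchange and drops the oldest one once MEMORY turns are stored. A minimal standalone sketch of that rolling window, independent of minichain:

    from dataclasses import dataclass
    from typing import List, Tuple

    MEMORY = 2  # keep at most two (human, assistant) exchanges

    @dataclass
    class State:
        memory: List[Tuple[str, str]]
        human_input: str = ""

        def push(self, response: str) -> "State":
            # Drop the oldest exchange once the window is full.
            memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
            return State(memory + [(self.human_input, response)])

    s = State([], "pwd").push("/home/user")
    s = State(s.memory, "ls ~").push("jokes.txt")
    s = State(s.memory, "cd ~").push("")
    print(s.memory)  # only the two most recent exchanges remain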
#chatgpt.py# ADDED
@@ -0,0 +1,78 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ # ChatGPT
+
+ "ChatGPT"-like examples. Adapted from
+ [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s
+ version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).
+ """
+ # -
+
+ import warnings
+ from dataclasses import dataclass, replace
+ from typing import List, Tuple
+ from IPython.display import Markdown, display
+ import minichain
+
+ # + tags=["hide_inp"]
+ warnings.filterwarnings("ignore")
+ # -
+
+
+ # Generic stateful Memory
+
+ MEMORY = 2
+
+ @dataclass
+ class State:
+     memory: List[Tuple[str, str]]
+     human_input: str = ""
+
+     def push(self, response: str) -> "State":
+         memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]
+         return State(memory + [(self.human_input, response)])
+
+ # Chat prompt with memory
+
+ class ChatPrompt(minichain.TemplatePrompt):
+     template_file = "chatgpt.pmpt.tpl"
+     def parse(self, out: str, inp: State) -> State:
+         result = out.split("Assistant:")[-1]
+         return inp.push(result)
+
+ class Human(minichain.Prompt):
+     def parse(self, out: str, inp: State) -> State:
+         # Record the mock human turn on a copy of the state.
+         return replace(inp, human_input=out)
+
+
+ fake_human = [
+     "I want you to act as a Linux terminal. I will type commands and you will reply with what the terminal should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. Do not write explanations. Do not type commands unless I instruct you to do so. When I need to tell you something in English I will do so by putting text inside curly brackets {like this}. My first command is pwd.",
+     "ls ~",
+     "cd ~",
+     "{Please make a file jokes.txt inside and put some jokes inside}",
+     """echo -e "x=lambda y:y*5+3;print('Result:' + str(x(6)))" > run.py && python3 run.py""",
+     """echo -e "print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])" > run.py && python3 run.py""",
+     """echo -e "echo 'Hello from Docker" > entrypoint.sh && echo -e "FROM ubuntu:20.04\nCOPY entrypoint.sh entrypoint.sh\nENTRYPOINT [\"/bin/sh\",\"entrypoint.sh\"]">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image""",
+     "nvidia-smi"
+ ]
+
+ with minichain.start_chain("chatgpt") as backend:
+     prompt = ChatPrompt(backend.OpenAI())
+     human = Human(backend.Mock(fake_human))
+     state = State([])
+     for i in range(len(fake_human)):
+         human.chain(prompt)
+         # display(Markdown(f'**Human:** <span style="color: blue">{t}</span>'))
+         # display(Markdown(f'**Assistant:** {state.memory[-1][1]}'))
+         # display(Markdown(f'--------------'))
+
+
+ # + tags=["hide_inp"]
+ ChatPrompt().show(State([("human 1", "output 1"), ("human 2", "output 2")], "cd ~"),
+                   "Text Assistant: Hello")
+ # -
+
+ # View the run log.
+
+ minichain.show_log("chatgpt.log")
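Note: `backend.Mock(fake_human)` replays the canned human turns through the chain instead of calling a live model for the human side. A rough sketch of the same scripted-driver idea in plain Python (the `llm` argument is a stand-in for the OpenAI-backed ChatPrompt, not minichain's API):

    MEMORY = 2

    def scripted_chat(fake_human, llm):
        """Replay canned human turns through a chat loop with windowed memory."""
        memory = []  # list of (human, assistant) pairs
        for turn in fake_human:
            response = llm(memory, turn)  # stand-in for the model call
            memory = (memory + [(turn, response)])[-MEMORY:]
            print(f"Human: {turn}\nAssistant: {response}\n")
        return memory

    scripted_chat(["pwd", "ls ~"], llm=lambda mem, turn: "<terminal output>")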
#ner.py# ADDED
@@ -0,0 +1,71 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ # NER
+
+ Notebook implementation of named entity recognition.
+ Adapted from [promptify](https://github.com/promptslab/Promptify/blob/main/promptify/prompts/nlp/templates/ner.jinja).
+ """
+ # -
+
+ import json
+
+ import minichain
+
+ # Prompt to extract NER tags as JSON
+
+ class NERPrompt(minichain.TemplatePrompt):
+     template_file = "ner.pmpt.tpl"
+
+     def parse(self, response, inp):
+         return json.loads(response)
+
+ # Use NER to ask a simple question.
+
+ class TeamPrompt(minichain.Prompt):
+     def prompt(self, inp):
+         return "Can you describe these basketball teams? " + \
+             " ".join([i["E"] for i in inp if i["T"] == "Team"])
+
+     def parse(self, response, inp):
+         return response
+
+ # Run the system.
+
+ with minichain.start_chain("ner") as backend:
+     ner_prompt = NERPrompt(backend.OpenAI())
+     team_prompt = TeamPrompt(backend.OpenAI())
+     prompt = ner_prompt.chain(team_prompt)
+     # results = prompt(
+     #     {"text_input": "An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.",
+     #      "labels": ["Team", "Date"],
+     #      "domain": "Sports"
+     #     }
+     # )
+     # print(results)
+
+ gradio = prompt.to_gradio(fields=["text_input", "labels", "domain"],
+                           examples=[["An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.", "Team, Date", "Sports"]],
+                           description=desc)
+
+
+ if __name__ == "__main__":
+     gradio.launch()
+
+
+ # View prompt examples.
+
+ # + tags=["hide_inp"]
+ # NERPrompt().show(
+ #     {
+ #         "input": "I went to New York",
+ #         "domain": "Travel",
+ #         "labels": ["City"]
+ #     },
+ #     '[{"T": "City", "E": "New York"}]',
+ # )
+ # # -
+
+ # # View log.
+
+ # minichain.show_log("ner.log")
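Note: this chain hinges on the JSON contract between the two prompts: `NERPrompt.parse` turns the model response into a list of {"T": type, "E": entity} records, and `TeamPrompt` keeps only the "Team" entries. A small runnable illustration of that handoff with a hard-coded model response:

    import json

    # Example model output in the shape NERPrompt.parse expects.
    response = '[{"T": "Team", "E": "76ers"}, {"T": "Team", "E": "Miami Heat"}, {"T": "Date", "E": "Monday"}]'
    entities = json.loads(response)

    # TeamPrompt builds its follow-up question from the "Team" entities only.
    question = "Can you describe these basketball teams? " + \
        " ".join(e["E"] for e in entities if e["T"] == "Team")
    print(question)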
#qa.py# CHANGED
@@ -1,10 +1,14 @@
- # # QA
+ # + tags=["hide_inp"]
+ desc = """
+ # QA
 
- # Questions answering with embeddings. Adapted from [OpenAI
- # Notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb).
+ Question answering with embeddings. Adapted from [OpenAI Notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb).
+ """
+ # -
 
  import datasets
  import numpy as np
+
  from minichain import EmbeddingPrompt, TemplatePrompt, show_log, start_chain
 
  # We use Hugging Face Datasets as the database by assigning
@@ -31,15 +35,22 @@ class QAPrompt(TemplatePrompt):
 
  with start_chain("qa") as backend:
-     question = "Who won the 2020 Summer Olympics men's high jump?"
      prompt = KNNPrompt(backend.OpenAIEmbed()).chain(QAPrompt(backend.OpenAI()))
-     result = prompt(question)
-     print(result)
 
- # + tags=["hide_inp"]
- QAPrompt().show(
-     {"question": "Who won the race?", "docs": ["doc1", "doc2", "doc3"]}, "Joe Bob"
- )
- # -
+     question = "Who won the 2020 Summer Olympics men's high jump?"
 
- show_log("qa.log")
+ gradio = prompt.to_gradio(fields=["query"],
+                           examples=[question],
+                           description=desc)
+ if __name__ == "__main__":
+     gradio.launch()
+
+ # # + tags=["hide_inp"]
+ # QAPrompt().show(
+ #     {"question": "Who won the race?", "docs": ["doc1", "doc2", "doc3"]}, "Joe Bob"
+ # )
+ # # -
 
+ # show_log("qa.log")
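Note: `KNNPrompt` is defined outside this hunk, but the embedding-retrieval step it stands for is ordinary nearest-neighbor search. A toy numpy sketch of that lookup (hypothetical data, not minichain's API):

    import numpy as np

    docs = ["high jump results", "swimming results", "rowing results"]
    vecs = np.random.randn(len(docs), 8)  # hypothetical document embeddings
    vecs /= np.linalg.norm(vecs, axis=1, keepdims=True)

    def knn(query_vec, k=2):
        # Cosine similarity is a dot product on unit-normalized vectors.
        scores = vecs @ (query_vec / np.linalg.norm(query_vec))
        return [docs[i] for i in np.argsort(-scores)[:k]]

    print(knn(np.random.randn(8)))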
app.py CHANGED
@@ -13,9 +13,9 @@ css = "#clean div.form {border: 0px} #response {border: 0px; background: #ffeec6
 
  with gr.Blocks(css=css) as demo:
      gr.HTML("<center> <img width='10%' style='display:inline; padding: 5px' src='https://user-images.githubusercontent.com/35882/218286642-67985b6f-d483-49be-825b-f62b72c469cd.png'> <h1 style='display:inline'> Mini-Chain </h1> <img width='10%' style='display:inline;padding: 5px' src='https://avatars.githubusercontent.com/u/25720743?s=200&v=4'> </center><br><center><a href='https://github.com/srush/minichain'>[code]</a> <a href='https://user-images.githubusercontent.com/35882/218286642-67985b6f-d483-49be-825b-f62b72c469cd.png'>[docs]</a></center>")
 
-     gr.TabbedInterface([chat, qa, gatsby, math_demo, ner, bash, pal, stats],
-                        ["Chat", "QA", "Book", "Math", "NER", "Bash", "PAL", "Stats"],
-                        css=css)
+     gr.TabbedInterface([math_demo, qa, chat, gatsby, ner, bash, pal, stats],
+                        ["Math", "QA", "Chat", "Book", "NER", "Bash", "PAL", "Stats"],
+                        css=css)
 
  demo.launch()
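Note: this change only reorders the tabs so the Math demo opens first; `gr.TabbedInterface` pairs each sub-demo with its label positionally. A minimal sketch of that pairing (toy interfaces in place of the imported demos):

    import gradio as gr

    math_demo = gr.Interface(lambda q: q, "text", "text")  # toy stand-in
    qa = gr.Interface(lambda q: q, "text", "text")         # toy stand-in

    demo = gr.TabbedInterface([math_demo, qa], ["Math", "QA"])
    demo.launch()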
bash.py CHANGED
@@ -1,6 +1,12 @@
- # Notebook to generate and run a bash command.
- # Adapted from LangChain
- # [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html)
+ # + tags=["hide_inp"]
+
+ desc = """
+ # Bash
+
+ Notebook to generate and run a bash command. Adapted from LangChain
+ [BashChain](https://langchain.readthedocs.io/en/latest/modules/chains/examples/llm_bash.html).
+ """
+ # -
 
  import minichain
 
@@ -38,9 +44,9 @@ with minichain.start_chain("bash") as backend:
  gradio = prompt.to_gradio(fields=["question"],
                            examples=['Go up one directory, and then into the minichain directory, '
                                      'and list the files in the directory'],
-                           out_type="markdown"
-
-                           )
+                           out_type="markdown",
+                           description=desc)
+
  if __name__ == "__main__":
      gradio.launch()
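Note: a BashChain-style demo ultimately shells out to run the generated command. A hedged sketch of that last step (the command here is hard-coded; in the demo it comes from the prompt chain):

    import subprocess

    def run_generated_bash(command: str) -> str:
        """Execute a model-generated bash command and capture its output."""
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        return result.stdout or result.stderr

    print(run_generated_bash("echo hello"))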
chat.py CHANGED
@@ -1,3 +1,13 @@
+ # + tags=["hide_inp"]
+
+ desc = """
+ # ChatGPT
+
+ "ChatGPT"-like examples. Adapted from
+ [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s
+ version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).
+ """
+ # -
 
 
  import warnings
@@ -55,7 +65,8 @@ examples = [
  gradio = prompt.to_gradio(fields=["human_input"],
                            initial_state=state,
                            examples=examples,
-                           out_type="json"
+                           out_type="json",
+                           description=desc
                            )
  if __name__ == "__main__":
      gradio.launch()
gatsby.py CHANGED
@@ -1,6 +1,11 @@
- # Questions answering with Hugging Face embeddings. Adapted from the
- # [LlamaIndex
- # example](https://github.com/jerryjliu/gpt_index/blob/main/examples/gatsby/TestGatsby.ipynb).
+ # + tags=["hide_inp"]
+ desc = """
+ # Book QA
+
+ Question answering with Hugging Face embeddings. Adapted from the [LlamaIndex
+ example](https://github.com/jerryjliu/gpt_index/blob/main/examples/gatsby/TestGatsby.ipynb).
+ """
+ # -
 
  import datasets
  import numpy as np
@@ -40,7 +45,8 @@ with start_chain("gatsby") as backend:
 
  gradio = prompt.to_gradio(fields=["query"],
                            examples=["What did Gatsby do before he met Daisy?"],
-                           keys={"HF_KEY"})
+                           keys={"HF_KEY"},
+                           description=desc)
  if __name__ == "__main__":
      gradio.launch()
math_demo.py CHANGED
@@ -1,5 +1,11 @@
- # Notebook to answer a math problem with code.
- # Adapted from Dust [maths-generate-code](https://dust.tt/spolu/a/d12ac33169)
+ # + tags=["hide_inp"]
+ desc = """
+ # Math
+
+ Notebook to answer a math problem with code.
+ Adapted from Dust [maths-generate-code](https://dust.tt/spolu/a/d12ac33169).
+ """
+ # -
 
  import minichain
 
@@ -19,8 +25,9 @@ with minichain.start_chain("math") as backend:
  # print(result)
 
  gradio = prompt.to_gradio(fields=["question"],
-                           examples=["What is the sum of the powers of 3 (3^i) that are smaller than 100?"],
-                           out_type="markdown"
+                           examples=["What is the sum of the powers of 3 (3^i) that are smaller than 100?"],
+                           out_type="markdown",
+                           description=desc
                            )
  if __name__ == "__main__":
      gradio.launch()
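Note: for the example question, the generated program just sums the powers of 3 below 100. A direct check of the expected answer (121 counting 3^0 = 1, or 120 if it is excluded):

    total, i = 0, 0
    while 3 ** i < 100:
        total += 3 ** i
        i += 1
    print(total)  # 1 + 3 + 9 + 27 + 81 = 121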
ner.py CHANGED
@@ -1,7 +1,12 @@
- # # NER
+ # + tags=["hide_inp"]
+
+ desc = """
+ # NER
 
- # Notebook implementation of named entity recognition.
- # Adapted from [promptify](https://github.com/promptslab/Promptify/blob/main/promptify/prompts/nlp/templates/ner.jinja).
+ Notebook implementation of named entity recognition.
+ Adapted from [promptify](https://github.com/promptslab/Promptify/blob/main/promptify/prompts/nlp/templates/ner.jinja).
+ """
+ # -
 
  import json
 
@@ -40,7 +45,8 @@ with minichain.start_chain("ner") as backend:
  # print(results)
 
  gradio = prompt.to_gradio(fields=["text_input", "labels", "domain"],
-                           examples=[["An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.", "Team, Date", "Sports"]])
+                           examples=[["An NBA playoff pairing a year ago, the 76ers (39-20) meet the Miami Heat (32-29) for the first time this season on Monday night at home.", "Team, Date", "Sports"]],
+                           description=desc)
 
 
  if __name__ == "__main__":
      gradio.launch()
pal.py CHANGED
@@ -1,4 +1,8 @@
- # Adapted from Prompt-aided Language Models [PAL](https://arxiv.org/pdf/2211.10435.pdf).
+ desc = """
+ # PAL
+
+ Adapted from Program-aided Language Models [PAL](https://arxiv.org/pdf/2211.10435.pdf).
+ """
 
  import minichain
 
@@ -28,7 +32,8 @@ question = "Melanie is a door-to-door saleswoman. She sold a third of her " \
      "how many did she start with?"
 
  gradio = prompt.to_gradio(fields=["question"],
-                           examples=[question])
+                           examples=[question],
+                           description=desc)
  if __name__ == "__main__":
      gradio.launch()
qa.py CHANGED
@@ -1,7 +1,10 @@
- # # QA
+ # + tags=["hide_inp"]
+ desc = """
+ # QA
 
- # Questions answering with embeddings. Adapted from [OpenAI
- # Notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb).
+ Question answering with embeddings. Adapted from [OpenAI Notebook](https://github.com/openai/openai-cookbook/blob/main/examples/Question_answering_using_embeddings.ipynb).
+ """
+ # -
 
  import datasets
  import numpy as np
@@ -37,7 +40,8 @@ with start_chain("qa") as backend:
  question = "Who won the 2020 Summer Olympics men's high jump?"
 
  gradio = prompt.to_gradio(fields=["query"],
-                           examples=[question])
+                           examples=[question],
+                           description=desc)
  if __name__ == "__main__":
      gradio.launch()
stats.py CHANGED
@@ -1,4 +1,9 @@
- # Information extraction from a typed data specification.
+
+ desc = """
+ # Typed Extraction
+
+ Information extraction from a typed data specification.
+ """
 
  import minichain
  from dataclasses import dataclass
@@ -40,8 +45,9 @@ with minichain.start_chain("stats") as backend:
 
  article = open("sixers.txt").read()
  gradio = prompt.to_gradio(fields=["passage"],
-                           examples=[article],
-                           out_type="json"
+                           examples=[article],
+                           out_type="json",
+                           description=desc
                            )
  if __name__ == "__main__":
      gradio.launch()
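Note: the "typed data specification" idea is to validate the model's JSON output against a dataclass schema. An illustrative sketch with a hypothetical PlayerStat type (the demo's actual schema lives outside this diff):

    import json
    from dataclasses import dataclass

    @dataclass
    class PlayerStat:  # hypothetical schema, for illustration only
        player: str
        points: int

    raw = '[{"player": "Embiid", "points": 35}, {"player": "Harden", "points": 21}]'
    stats = [PlayerStat(**d) for d in json.loads(raw)]
    print(stats[0])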