{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "1ca7b759",
"metadata": {},
"outputs": [],
"source": [
"!pip install -q git+https://github.com/srush/MiniChain\n",
"!git clone https://github.com/srush/MiniChain; cp -fr MiniChain/examples/* . "
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cc3f2a69",
"metadata": {
"lines_to_next_cell": 0,
"tags": [
"hide_inp"
]
},
"outputs": [],
"source": [
"desc = \"\"\"\n",
"### Chat\n",
"\n",
"A chat-like example for multi-turn chat with state. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/chat.ipynb)\n",
"\n",
"(Adapted from [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).)\n",
"\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1ca0a1eb",
"metadata": {
"lines_to_next_cell": 2
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "f4ac1417",
"metadata": {},
"outputs": [],
"source": [
"from dataclasses import dataclass, replace\n",
"from typing import List, Tuple\n",
"from minichain import OpenAI, prompt, show"
]
},
{
"cell_type": "markdown",
"id": "56c8ab3d",
"metadata": {},
"source": [
"Generic stateful Memory"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7c3ffeaa",
"metadata": {
"lines_to_next_cell": 1
},
"outputs": [],
"source": [
"MEMORY = 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c309a94f",
"metadata": {
"lines_to_next_cell": 1
},
"outputs": [],
"source": [
"@dataclass\n",
"class State:\n",
" memory: List[Tuple[str, str]]\n",
" human_input: str = \"\"\n",
"\n",
" def push(self, response: str) -> \"State\":\n",
" memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]\n",
" return State(memory + [(self.human_input, response)])"
]
},
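  {
   "cell_type": "markdown",
   "id": "2f6b0c41",
   "metadata": {},
   "source": [
    "A quick sanity check of the memory window (an illustrative sketch added here, not part of the original app; it only exercises the `State` dataclass above and makes no API calls):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8a3d9e57",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Push three turns through the MEMORY=2 window; the oldest pair is dropped.\n",
    "s = State([])\n",
    "for turn in [\"ls ~\", \"cd ~\", \"pwd\"]:\n",
    "    s = replace(s, human_input=turn).push(f\"<reply to {turn}>\")\n",
    "assert len(s.memory) <= MEMORY\n",
    "print(s.memory)  # only the two most recent (human, assistant) pairs remain"
   ]
  },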
{
"cell_type": "markdown",
"id": "4c9a82f1",
"metadata": {},
"source": [
"Chat prompt with memory"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "279179dd",
"metadata": {},
"outputs": [],
"source": [
"@prompt(OpenAI(), template_file=\"chat.pmpt.tpl\")\n",
"def chat_prompt(model, state: State) -> State:\n",
" out = model(state)\n",
" result = out.split(\"Assistant:\")[-1]\n",
" return state.push(result)"
]
},
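  {
   "cell_type": "markdown",
   "id": "6d1e4f20",
   "metadata": {},
   "source": [
    "To see the parsing step in isolation, here is a small sketch on a made-up completion string (the raw text is invented for illustration; no model call is made):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0b7c2a85",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The completion may echo earlier turns of the transcript, so only the\n",
    "# text after the final \"Assistant:\" marker is treated as the new reply.\n",
    "raw = \"Human: ls ~\\nAssistant: Desktop  Documents  Downloads\"\n",
    "print(raw.split(\"Assistant:\")[-1].strip())"
   ]
  },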
{
"cell_type": "code",
"execution_count": null,
"id": "2d94b22d",
"metadata": {},
"outputs": [],
"source": [
"examples = [\n",
" \"ls ~\",\n",
" \"cd ~\",\n",
" \"{Please make a file jokes.txt inside and put some jokes inside}\",\n",
" \"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\",\n",
" \"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\",\n",
" \"\"\"echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\\nCOPY entrypoint.sh entrypoint.sh\\nENTRYPOINT [\\\"/bin/sh\\\",\\\"entrypoint.sh\\\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\"\"\",\n",
" \"nvidia-smi\"\n",
"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d77406cf",
"metadata": {},
"outputs": [],
"source": [
"gradio = show(lambda command, state: chat_prompt(replace(state, human_input=command)),\n",
" initial_state=State([]),\n",
" subprompts=[chat_prompt],\n",
" examples=examples,\n",
" out_type=\"json\",\n",
" description=desc,\n",
")\n",
"if __name__ == \"__main__\":\n",
" gradio.launch()"
]
},
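  {
   "cell_type": "markdown",
   "id": "4e9a7b13",
   "metadata": {},
   "source": [
    "When running interactively in Colab rather than as a script, the interface can be launched from the notebook directly. `share=True` is Gradio's standard option for a public URL; it is assumed here to be useful only if the inline view does not render. An `OPENAI_API_KEY` environment variable must be set before chatting."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c8f3d92",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Uncomment to launch from the notebook (requires OPENAI_API_KEY):\n",
    "# gradio.launch(share=True)"
   ]
  },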
{
"cell_type": "code",
"execution_count": null,
"id": "bd255c7b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"jupytext": {
"cell_metadata_filter": "tags,-all",
"main_language": "python",
"notebook_metadata_filter": "-all"
}
},
"nbformat": 4,
"nbformat_minor": 5
}