{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "62e745e2", "metadata": {}, "outputs": [], "source": [ "# %pip (not !pip) installs into the running kernel's environment.\n", "%pip install -q git+https://github.com/srush/MiniChain\n", "# git clone takes a plain URL -- the pip-style git+ prefix is not valid here.\n", "!git clone https://github.com/srush/MiniChain; cp -fr MiniChain/examples/* . " ] }, { "cell_type": "code", "execution_count": null, "id": "8b7656d3", "metadata": { "lines_to_next_cell": 0, "tags": [ "hide_inp" ] }, "outputs": [], "source": [ "desc = \"\"\"\n", "### Chat\n", "\n", "A chat-like example for multi-turn chat with state. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/chat.ipynb)\n", "\n", "(Adapted from [LangChain](https://langchain.readthedocs.io/en/latest/modules/memory/examples/chatgpt_clone.html)'s version of this [blog post](https://www.engraved.blog/building-a-virtual-machine-inside/).)\n", "\n", "\"\"\"" ] }, { "cell_type": "code", "execution_count": null, "id": "4ad7d54a", "metadata": {}, "outputs": [], "source": [ "from dataclasses import dataclass, replace\n", "from typing import List, Tuple\n", "from minichain import OpenAI, prompt, show" ] }, { "cell_type": "markdown", "id": "46da3feb", "metadata": {}, "source": [ "Generic stateful Memory" ] }, { "cell_type": "code", "execution_count": null, "id": "3f3fdeaa", "metadata": { "lines_to_next_cell": 1 }, "outputs": [], "source": [ "# Number of past (human, assistant) exchanges retained in chat memory.\n", "MEMORY = 2" ] }, { "cell_type": "code", "execution_count": null, "id": "7bb9b6f9", "metadata": { "lines_to_next_cell": 1 }, "outputs": [], "source": [ "@dataclass\n", "class State:\n", "    \"\"\"Chat state: a bounded memory of (human, assistant) exchanges plus the pending human input.\"\"\"\n", "\n", "    memory: List[Tuple[str, str]]\n", "    human_input: str = \"\"\n", "\n", "    def push(self, response: str) -> \"State\":\n", "        \"\"\"Return a new State with (human_input, response) appended, evicting the oldest exchange once MEMORY entries exist.\"\"\"\n", "        memory = self.memory if len(self.memory) < MEMORY else self.memory[1:]\n", "        return State(memory + [(self.human_input, response)])" ] }, {
"cell_type": "markdown", "id": "06bfc54f", "metadata": {}, "source": [ "Chat prompt with memory" ] }, { "cell_type": "code", "execution_count": null, "id": "abc6322d", "metadata": {}, "outputs": [], "source": [ "# One chat turn: render `state` through chat.pmpt.tpl, call the model, keep\n", "# only the text after the last \"Assistant:\" marker, and fold the reply into\n", "# a new State via State.push.\n", "@prompt(OpenAI(), template_file=\"chat.pmpt.tpl\")\n", "def chat_prompt(model, state: State) -> State:\n", " out = model(state)\n", " result = out.split(\"Assistant:\")[-1]\n", " return state.push(result)" ] }, { "cell_type": "code", "execution_count": null, "id": "3e2d2251", "metadata": {}, "outputs": [], "source": [ "# Demo inputs for the UI: shell-style commands for the simulated machine.\n", "examples = [\n", " \"ls ~\",\n", " \"cd ~\",\n", " \"{Please make a file jokes.txt inside and put some jokes inside}\",\n", " \"\"\"echo -e \"x=lambda y:y*5+3;print('Result:' + str(x(6)))\" > run.py && python3 run.py\"\"\",\n", " \"\"\"echo -e \"print(list(filter(lambda x: all(x%d for d in range(2,x)),range(2,3**10)))[:10])\" > run.py && python3 run.py\"\"\",\n", " \"\"\"echo -e \"echo 'Hello from Docker\" > entrypoint.sh && echo -e \"FROM ubuntu:20.04\\nCOPY entrypoint.sh entrypoint.sh\\nENTRYPOINT [\\\"/bin/sh\\\",\\\"entrypoint.sh\\\"]\">Dockerfile && docker build . -t my_docker_image && docker run -t my_docker_image\"\"\",\n", " \"nvidia-smi\"\n", "]" ] }, { "cell_type": "code", "execution_count": null, "id": "da9746f9", "metadata": {}, "outputs": [], "source": [ "# Build the Gradio demo: each submitted command is copied into the current\n", "# State (via dataclasses.replace) and run through chat_prompt.\n", "gradio = show(lambda command, state: chat_prompt(replace(state, human_input=command)),\n", " initial_state=State([]),\n", " subprompts=[chat_prompt],\n", " examples=examples,\n", " out_type=\"json\",\n", " description=desc,\n", ")\n", "if __name__ == \"__main__\":\n", " gradio.launch()" ] }, { "cell_type": "code", "execution_count": null, "id": "db848300", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "jupytext": { "cell_metadata_filter": "tags,-all", "main_language": "python", "notebook_metadata_filter": "-all" } }, "nbformat": 4, "nbformat_minor": 5 }