# + tags=["hide_inp"]
desc = """
### Backtrack on Failure

Chain that backtracks on failure. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/srush/MiniChain/blob/master/examples/backtrack.ipynb)
"""
# -

# $

from minichain import prompt, Mock, show, OpenAI
import minichain


@prompt(Mock(["dog", "blue", "cat"]), stream=True)
def prompt_generation(model):
    """Stream a candidate word from the (mocked) model, yielding the growing text."""
    out = ""
    for token in model.stream(""):
        out += token
        yield out
    yield out


@prompt(Mock(["No", "Yes"]),
        # NOTE: fixed template typo — "'yes' is {{query}}" -> "'yes' if {{query}}"
        template="Answer 'yes' if {{query}} is a color. Answer:",
        stream=False)
def prompt_validation(model, x):
    """Accept `x` only if the model says it is a color; otherwise backtrack."""
    out = model(dict(query=x))
    if out.strip().lower().startswith("yes"):
        return x
    # fail(1) tells MiniChain to backtrack one prompt and re-run generation.
    return model.fail(1)


def run():
    """Generate a candidate, then validate it (retrying on failure)."""
    x = prompt_generation()
    return prompt_validation(x)

# $

# The two marker comments above delimit the code excerpt shown in the UI;
# read it with a context manager so the file handle is closed promptly.
with open("backtrack.py", "r") as _src:
    _code = _src.read().split("$")[1].strip().strip("#").strip()

gradio = show(run,
              examples=[],
              subprompts=[prompt_generation, prompt_validation],
              code=_code,
              out_type="markdown",
              )

if __name__ == "__main__":
    gradio.queue().launch()