import time
import random
import streamlit as st
from example_prompts import EXAMPLE_PROMPTS
# Banner shown at the top of the app: warns users that this demo currently
# runs a smaller BLOOM checkpoint (6b3), not the full 176B model.
HEADER = """
# WARNING: This app uses BLOOM-6b3 as a backend generation model. We are currently working on making it work with BLOOM-176
"""

# Markdown rendered in the Streamlit sidebar: project description plus a small
# table of contents. NOTE(review): the anchor slugs (e.g. #what-is--petals---)
# must match the slugs Streamlit generates for the headings below — keep them
# in sync if the headings change.
SIDE_BAR_TEXT = """
# *PETALS: A Collaborative Inference of Large Models*
A BigScience initiative.
- [Introduction](#introduction)
* [What is *PETALS* ?](#what-is--petals---)
* [Generation parameters](#generation-parameters)
# Introduction
This Space is an interactive Space of *PETALS* paper (Submitted in EMNLP 2022) that aims to run BLOOM-176 in a distributed manner for efficient and cost-effective inference and fine-tuning.
## What is *PETALS* ?
With the release of BLOOM-176B and OPT-175B, everyone can download pretrained models of this scale. Still, using these models requires supercomputer-grade hardware, which is unavailable to many researchers.
PETALS proposes to run BLOOM-176 in a distributed manner. The model is run on multiple computers from different users. Each user can benefit from the large model's inference by running a script similar to the one on this Space or from this Google Colab link: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1FEu0Dt_MjiwvdIz1SmIr9QfDDvNAJdZ-#scrollTo=O0WwC_IqofNH)
## Generation parameters
"""
def write_incremental(text, place_holder, delay=0.05):
    """
    Write `text` into a Streamlit widget one character at a time
    (a "typewriter" effect).

    Adapted from: https://discuss.streamlit.io/t/display-several-pieces-of-strings-incrementally-on-the-same-line/9279

    Args:
        text (str): full text to display.
        place_holder: a Streamlit placeholder (e.g. `st.empty()`) whose
            `.markdown()` output is overwritten on every step.
        delay (float): pause in seconds between characters.
    """
    for i in range(len(text) + 1):
        # Markdown collapses single newlines, so render them as <br>
        # (allowed because unsafe_allow_html=True) to preserve line breaks.
        # Fix: the original literal was split across a physical newline,
        # which is a Python syntax error.
        place_holder.markdown("### %s " % text[0:i].replace("\n", "<br>"), unsafe_allow_html=True)
        time.sleep(delay)
def i_am_feeling_lucky():
    """
    Return a prompt chosen uniformly at random from EXAMPLE_PROMPTS.
    """
    # random.choice is the idiomatic equivalent of indexing with
    # randint(0, len(seq) - 1).
    return random.choice(EXAMPLE_PROMPTS)