import streamlit as st
from transformers import pipeline
import requests
from bs4 import BeautifulSoup

SCITE_API_KEY = st.secrets["SCITE_API_KEY"]


def remove_html(x):
    # Strip HTML markup from a snippet, keeping only the text content.
    soup = BeautifulSoup(x, 'html.parser')
    text = soup.get_text()
    return text


def search(term, limit=25):
    # Query the scite.ai citation search API and return the concatenated
    # citation snippets plus (doi, citations, title) tuples for each hit.
    search_url = f"https://api.scite.ai/search?mode=citations&term={term}&limit={limit}&offset=0&user_slug=domenic-rosati-keW5&compute_aggregations=false"
    req = requests.get(
        search_url,
        headers={
            'Authorization': f'Bearer {SCITE_API_KEY}'
        }
    )
    hits = req.json()['hits']
    return (
        remove_html('\n'.join(['\n'.join([cite['snippet'] for cite in doc['citations']]) for doc in hits])),
        [(doc['doi'], doc['citations'], doc['title']) for doc in hits]
    )


def find_source(text, docs):
    # Locate the document whose first citation snippet contains the answer text
    # and return the citation statement together with its source metadata.
    for doc in docs:
        if text in remove_html(doc[1][0]['snippet']):
            new_text = text
            # Expand the answer to the full sentence it appears in.
            for snip in remove_html(doc[1][0]['snippet']).split('.'):
                if text in snip:
                    new_text = snip
            return {
                'citation_statement': doc[1][0]['snippet'].replace('', '').replace('', ''),
                'text': new_text,
                'from': doc[1][0]['source'],
                'supporting': doc[1][0]['target'],
                'source_title': doc[2],
                'source_link': f"https://scite.ai/reports/{doc[0]}"
            }
    return {
        'citation_statement': '',
        'text': text,
        'from': '',
        'supporting': '',
        'source_title': '',
        'source_link': ''
    }


@st.experimental_singleton
def init_models():
    # Load the biomedical extractive QA model once and cache it across reruns.
    question_answerer = pipeline(
        "question-answering",
        model='sultan/BioM-ELECTRA-Large-SQuAD2-BioASQ8B'
    )
    return question_answerer


qa_model = init_models()


def card(title, context, score, link):
    return st.markdown(f"""