# NOTE: scraped Hugging Face Spaces page header (non-code residue removed):
#   file size: 1,579 bytes; commit 5cb07ef
import gradio as gr
import torch
from paper_rec import recommender, etl
from gradio.inputs import Textbox
def recommend(txt):
    """Recommend recent AI/ML papers based on a free-text sample of user tastes.

    Parameters
    ----------
    txt : str
        Titles and/or abstracts of papers the user likes.

    Returns
    -------
    list[dict]
        Up to ``top_n`` unique papers, each with ``id``, ``title``,
        ``authors`` and ``abstract`` keys; or a ``{"msg": ...}`` dict
        when the input text is empty/whitespace.
    """
    if not txt.strip():
        return {"msg": "no recommendations available for the input text."}
    top_n = 10
    # model user preferences: split the raw text into sentences for embedding
    sentences = etl.get_sentences_from_txt(txt)
    rec = recommender.Recommender()
    # loading data and model from HF
    rec.load_data()
    rec.load_model()
    # compute user embedding
    user_embedding = torch.from_numpy(rec.embedding(sentences))
    # get recommendations based on user preferences
    recs = rec.recommend(user_embedding, top_k=100)
    # deduplicate by paper id, keeping ranking order; cap at top_n
    recs_output = []
    seen_paper = set()
    for p in recs:
        if p["id"] not in seen_paper:
            recs_output.append({
                "id": p["id"],
                "title": p["title"],
                # fixed: was a duplicate "abstract" key, which silently
                # dropped the authors field from every recommendation
                "authors": p["authors"],
                "abstract": p["abstract"],
            })
            seen_paper.add(p["id"])
        if len(recs_output) >= top_n:
            break
    # report top-n
    return recs_output
def inputs():
    """Placeholder hook; intentionally a no-op (returns ``None``)."""
    return None
# --- Gradio UI wiring (runs at import time) ---
title = "Interactive demo: paper-rec"
description = "Demo that recommends you what recent papers in AI/ML to read next based on what you like."
iface = gr.Interface(fn=recommend,
                     inputs=[Textbox(lines=10, placeholder="Titles and abstracts from papers you like", default="", label="Sample of what I like <3")],
                     outputs="json",
                     # fixed: title/description were defined but never
                     # passed to the Interface, so the page showed neither
                     title=title,
                     description=description,
                     layout='vertical'
                     )
iface.launch()