shrivarshan committed
Commit: 1bafbdd
Parent(s): 4925bad
Update app.py
app.py CHANGED
@@ -2,13 +2,13 @@
 import streamlit as st
 import requests
 from transformers import pipeline
-import spacy
+#import spacy
 
 # Initialize the summarizer pipeline using Hugging Face Transformers
 summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
 
 # Load spaCy model
-nlp = spacy.load("en_core_web_sm")
+#nlp = spacy.load("en_core_web_sm")
 
 # Function to perform search using Google Custom Search API
 def perform_search(query):
@@ -39,17 +39,17 @@ def rank_sources(results):
 
 # Function to extract related topics using spaCy
 def extract_related_topics(query_list):
-    combined_query = " ".join(query_list)
-    doc = nlp(combined_query)
+    #combined_query = " ".join(query_list)
+    #doc = nlp(combined_query)
 
     # Extract keywords or named entities
-    keywords = [token.text for token in doc if token.is_alpha and not token.is_stop]
-    entities = [ent.text for ent in doc.ents]
+    #keywords = [token.text for token in doc if token.is_alpha and not token.is_stop]
+    #entities = [ent.text for ent in doc.ents]
 
     # Combine and deduplicate keywords and entities
-    related_topics = list(set(keywords + entities))
-    related_topics.insert(0,"Deep Learning")
-    return
+    #related_topics = list(set(keywords + entities))
+    #related_topics.insert(0,"Deep Learning")
+    return ["Machine","AI","GenAI"] # Limit to 3 related topics
 
 # Function to display search results and summaries
 def display_results(query):