"""Gradio app: sentiment classification (DistilBERT/SST-2) + spaCy syntax analysis."""

import subprocess
import sys

import gradio as gr
import spacy
from transformers import pipeline

# Load the small English spaCy model, downloading it on first run if missing.
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    # check=True: fail loudly here instead of crashing on the re-load below
    # with a confusing "model not found" error if the download fails.
    subprocess.run(
        [sys.executable, "-m", "spacy", "download", "en_core_web_sm"],
        check=True,
    )
    nlp = spacy.load("en_core_web_sm")

# Hugging Face sentiment classifier (binary POSITIVE/NEGATIVE, SST-2 fine-tune).
classifier = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)


def classify_text(text_content):
    """Classify *text_content* and return rows of (label, confidence score)."""
    results = classifier(text_content)
    return [(result["label"], result["score"]) for result in results]


def analyze_syntax(text_content):
    """Tokenize *text_content* with spaCy and return per-token syntax rows.

    Each row is [token text, coarse POS tag, lemma, dependency relation],
    matching the Dataframe headers declared in the interface below.
    """
    doc = nlp(text_content)
    # Row-lists (not dicts) so the Gradio Dataframe renders them reliably.
    return [
        [token.text, token.pos_, token.lemma_, token.dep_]
        for token in doc
    ]


def classify_and_analyze(text_content):
    """Run both analyses; returns (classification rows, syntax rows)."""
    classifications = classify_text(text_content)
    syntax_analysis = analyze_syntax(text_content)
    return classifications, syntax_analysis


# Two-output interface: classification table + token-level syntax table.
# NOTE: gr.outputs.* was removed in Gradio 4.x; use gr.Dataframe directly.
iface = gr.Interface(
    fn=classify_and_analyze,
    inputs="text",
    outputs=[
        gr.Dataframe(headers=["Label", "Confidence"], label="Classification Results"),
        gr.Dataframe(
            headers=["Token", "Part of Speech", "Lemma", "Dependency"],
            label="Syntax Analysis",
        ),
    ],
    title="Text Classification and Syntax Analysis Tool",
    description="Analyze text classification and syntax with Hugging Face Transformers and SpaCy.",
)

if __name__ == "__main__":
    iface.launch()