import torch
import gradio as gr
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification


class ThreatAnalyzer:

    def __init__(self):
        # Sentiment-analysis checkpoint (SST-2) repurposed here as a lightweight
        # text scorer; its score is combined with the keyword heuristics below.
        model_name = 'distilbert-base-uncased-finetuned-sst-2-english'
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name)

    def analyze_threat(self, input_text):
        """
        Comprehensive threat analysis with recommendations
        """
        # Model-based score: probability of the classifier's positive class,
        # used here as a rough threat score.
        inputs = self.tokenizer(input_text, return_tensors="pt", truncation=True, padding=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
        threat_score = predictions[0][1].item()

        # Keyword heuristics for common threat categories.
        threat_patterns = {
            "phishing": ["verify", "urgent", "suspend", "account", "click here"],
            "social_engineering": ["tech support", "password", "credentials", "urgent"],
            "malware": ["download", "patch", "update", "critical"],
            "impersonation": ["admin", "support", "verify identity"]
        }

        detected_patterns = []
        for threat_type, keywords in threat_patterns.items():
            if any(keyword.lower() in input_text.lower() for keyword in keywords):
                detected_patterns.append(threat_type)

        recommendations = []
        if threat_score > 0.7:
            recommendations.extend([
                "🚨 High Risk: Do NOT interact with this message",
                "Immediately report to IT security",
                "Do not click any links or download attachments"
            ])
        elif threat_score > 0.4:
            recommendations.extend([
                "⚠️ Potential Threat Detected",
                "Verify the source independently",
                "Contact sender through official channels"
            ])

        if "phishing" in detected_patterns:
            recommendations.append("Phishing Indicator: Check sender's email address carefully")
        if "social_engineering" in detected_patterns:
            recommendations.append("Social Engineering Alert: Never share personal credentials")
        if "malware" in detected_patterns:
            recommendations.append("Malware Risk: Scan with updated antivirus before opening")

        return {
            "threat_score": threat_score,
            "threat_level": self._classify_threat_level(threat_score),
            "detected_patterns": detected_patterns,
            "recommendations": recommendations
        }

    def _classify_threat_level(self, score):
        """Classify threat level"""
        if score > 0.8: return "CRITICAL THREAT"
        if score > 0.6: return "HIGH THREAT"
        if score > 0.4: return "MODERATE THREAT"
        if score > 0.2: return "LOW THREAT"
        return "NO SIGNIFICANT THREAT"


def launch_threat_analysis_demo():
    """Create Gradio interface for threat analysis"""
    analyzer = ThreatAnalyzer()

    def analyze_input(input_text):
        try:
            result = analyzer.analyze_threat(input_text)

            output = f"""
🔍 Threat Analysis Results:

Threat Score: {result['threat_score']:.2%}
Threat Level: {result['threat_level']}

Detected Threat Patterns:
{', '.join(result['detected_patterns']) or 'No specific patterns identified'}

Recommendations:
{chr(10).join('• ' + rec for rec in result['recommendations'])}

💡 Always verify suspicious communications
"""
            return output
        except Exception as e:
            return f"Error in analysis: {str(e)}"

    with gr.Blocks() as demo:
        gr.Markdown("# Cybersecurity Threat Analysis")

        input_text = gr.Textbox(label="Enter suspicious text or message")
        output_text = gr.Textbox(label="Threat Analysis Results")

        analyze_btn = gr.Button("Analyze Threat")
        analyze_btn.click(fn=analyze_input, inputs=input_text, outputs=output_text)

    return demo


if __name__ == "__main__":
    demo = launch_threat_analysis_demo()
    demo.launch(debug=True, share=True)
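

# A minimal sketch of calling the analyzer directly, without the Gradio UI
# (illustrative only; the numeric score and threat level depend on the
# underlying sentiment model, so treat the values as examples, not guarantees):
#
#   analyzer = ThreatAnalyzer()
#   result = analyzer.analyze_threat(
#       "URGENT: your account will be suspended, click here to verify"
#   )
#   print(result["threat_level"])        # level depends on the model's score
#   print(result["detected_patterns"])   # ["phishing", "social_engineering"]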