from flask import Flask, request, render_template, redirect, url_for
from transformers import AutoTokenizer, AutoModel
import torch
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

app = Flask(__name__)
# Dictionary to store programs and their courses
programs = {}

# Default model name
current_model_name = 'sentence-transformers/all-mpnet-base-v2'
# Function to load the tokenizer and model dynamically
def load_model_and_tokenizer(model_name):
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModel.from_pretrained(model_name)
        return tokenizer, model, None
    except Exception as e:
        return None, None, str(e)

# Load the initial model and tokenizer; fail fast if the default model cannot be loaded
tokenizer, model, error = load_model_and_tokenizer(current_model_name)
if error:
    raise RuntimeError(f"Could not load the default model '{current_model_name}': {error}")
def mean_pooling(token_embeddings, mask):
    """Applies mean pooling to token embeddings, ignoring padding positions via the attention mask."""
    mask = mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    sum_embeddings = torch.sum(token_embeddings * mask, dim=1)
    sum_mask = torch.clamp(mask.sum(dim=1), min=1e-9)  # Avoid division by zero
    return sum_embeddings / sum_mask
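# Shape sketch for mean_pooling (illustrative only): with 2 sentences of 4 tokens each and a
# 768-dimensional model, token_embeddings is (2, 4, 768), mask is (2, 4), and the result is a
# (2, 768) tensor of sentence embeddings averaged over non-padding tokens only.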
def compute_plo_embeddings():
    """Computes embeddings for the predefined PLOs."""
    tokens = tokenizer(plos, padding=True, truncation=True, return_tensors='pt')
    mask = tokens['attention_mask']
    with torch.no_grad():
        outputs = model(**tokens)
    return mean_pooling(outputs.last_hidden_state, mask)
# Predefined Program Learning Outcomes (PLOs)
plos = [
    "Analyze a complex computing problem and apply principles of computing and other relevant disciplines to identify solutions.",
    "Design, implement, and evaluate a computing-based solution to meet a given set of computing requirements.",
    "Communicate effectively in a variety of professional contexts.",
    "Recognize professional responsibilities and make informed judgments in computing practice based on legal and ethical principles.",
    "Function effectively as a member or leader of a team engaged in activities appropriate to the program’s discipline.",
    "Support the delivery, use, and management of information systems within an information systems environment."
]
# Compute PLO embeddings (once at startup)
plo_embeddings = compute_plo_embeddings()
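# plo_embeddings has shape (len(plos), hidden_size) and is recomputed in set_model() whenever a
# different model is loaded, so similarity scores always come from the currently active model.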
def get_similarity(input_sentence):
    """Calculates the similarity between an input sentence and the predefined PLOs."""
    tokens = tokenizer(input_sentence, padding=True, truncation=True, return_tensors='pt')
    mask = tokens['attention_mask']
    with torch.no_grad():
        outputs = model(**tokens)
    input_embedding = mean_pooling(outputs.last_hidden_state, mask)
    similarities = torch.nn.functional.cosine_similarity(input_embedding, plo_embeddings)
    return similarities
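# Note on broadcasting (illustrative): input_embedding is (1, hidden_size) and plo_embeddings is
# (len(plos), hidden_size); cosine_similarity broadcasts over the batch dimension and returns one
# score per PLO, i.e. a tensor of 6 values for the list above.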
# Flask routes (route paths assumed; chosen to match the url_for() calls and template names)
@app.route('/')
def index():
    """Home page displaying current programs and model status."""
    return render_template('index.html', programs=programs, model_name=current_model_name)
@app.route('/set_model', methods=['POST'])
def set_model():
    """Allows users to dynamically change the model."""
    global tokenizer, model, plo_embeddings, current_model_name
    model_name = request.form['model_name']
    new_tokenizer, new_model, error = load_model_and_tokenizer(model_name)
    if error:
        # Keep the previously loaded model if the requested one fails to load
        return render_template('index.html', programs=programs, model_name=current_model_name,
                               message=f"Error loading model: {error}")
    # Update the globals and recompute the PLO embeddings with the new model
    tokenizer, model = new_tokenizer, new_model
    current_model_name = model_name
    plo_embeddings = compute_plo_embeddings()
    return redirect(url_for('index'))
@app.route('/add_program', methods=['GET', 'POST'])
def add_program():
    """Adds a new program."""
    if request.method == 'POST':
        program_name = request.form['program_name']
        if program_name not in programs:
            programs[program_name] = {}  # Initialize an empty dictionary for courses
        return redirect(url_for('index'))
    return render_template('addprogram.html')
@app.route('/create_course', methods=['GET', 'POST'])
def create_course():
    """Creates a new course under a specific program."""
    if request.method == 'POST':
        program_name = request.form['program']
        course_name = request.form['course_name']
        # One outcome per line; drop empty lines and surrounding whitespace
        outcomes = [o.strip() for o in request.form['course_outcomes'].split('\n') if o.strip()]
        if program_name in programs:
            programs[program_name][course_name] = outcomes  # Add course to the selected program
        return redirect(url_for('index'))
    return render_template('addcourse.html', programs=programs)
@app.route('/match_outcomes', methods=['POST'])
def match_outcomes():
    """Matches course outcomes against the predefined PLOs."""
    course_name = request.form['course']
    course_outcomes = request.form['course_outcomes'].split('\n')
    results = []
    for co in course_outcomes:
        co = co.strip()
        if co:  # Skip empty lines
            similarities = get_similarity(co)
            top_matches_indices = similarities.topk(3).indices.tolist()
            results.append({
                'course_outcome': co,
                'course_name': course_name,
                'best_matches': top_matches_indices
            })
    return render_template('result.html', course_name=course_name, results=results)
if __name__ == '__main__':
    app.run(debug=True)
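
# Quick offline check (a sketch, not part of the app): with this file saved as app.py and the
# model available locally, the matching logic can be exercised without the web UI. The example
# outcome text below is made up purely for illustration.
#
#   >>> from app import get_similarity, plos
#   >>> scores = get_similarity("Design and evaluate a database-backed web application.")
#   >>> [plos[i] for i in scores.topk(3).indices.tolist()]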