import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


class CodingAgent:
    """A thin wrapper around a causal language model for coding tasks."""

    def __init__(self, model_path):
        # Prefer a CUDA GPU when one is available; otherwise fall back to CPU.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = AutoModelForCausalLM.from_pretrained(model_path).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
    def generate_code(self, prompt, max_new_tokens=512, temperature=0.7, top_k=50, top_p=0.95):
        # Tokenize the prompt and move the input tensors to the model's device.
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                # max_new_tokens caps only the generated continuation; the
                # original max_length would count the prompt against the
                # budget and silently truncate output for long prompts.
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_k=top_k,
                top_p=top_p,
                do_sample=True,
                num_return_sequences=1,
                # Avoid a warning for models that define no pad token.
                pad_token_id=self.tokenizer.eos_token_id,
            )
        # Note: the decoded text includes the prompt followed by the completion.
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
    def answer_coding_question(self, question):
        prompt = f"As a coding assistant, please answer the following question:\n\nQuestion: {question}\n\nAnswer:"
        return self.generate_code(prompt)

    def explain_code(self, code):
        prompt = f"Please explain the following code:\n\n```python\n{code}\n```\n\nExplanation:"
        return self.generate_code(prompt)

    def suggest_improvements(self, code):
        prompt = f"Please suggest improvements for the following code:\n\n```python\n{code}\n```\n\nSuggestions:"
        return self.generate_code(prompt)
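

# Example usage: a minimal sketch of driving the agent from the command line.
# The checkpoint name below is only a placeholder — substitute whichever
# causal LM checkpoint you actually intend to run.
if __name__ == "__main__":
    agent = CodingAgent("bigcode/starcoderbase-1b")  # placeholder model path
    print(agent.answer_coding_question("How do I reverse a list in Python?"))
    print(agent.explain_code("squares = [x * x for x in range(10)]"))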