pip install transformers huggingface_hub gradio torch

from huggingface_hub import login

# Log in with your Hugging Face token (replace "your_token" with your actual token)
login("your_token")

from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-multi")

# Prompt for code generation
text = "def bubble_sort(list_elements):"

# Tokenize the prompt
input_ids = tokenizer(text, return_tensors="pt").input_ids

# Generate code from the prompt
generated_ids = model.generate(
    input_ids,
    max_length=200,                       # Maximum total length (prompt + completion); adjust as needed
    num_return_sequences=1,               # Number of generated sequences to return
    pad_token_id=tokenizer.eos_token_id   # CodeGen has no pad token, so reuse EOS for padding
)

# Decode the generated tokens back to text
generated_code = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
print(generated_code)

from huggingface_hub import HfApi

# Replace with your Hugging Face username and repo name
repo_name = "your-username/codegen-350M-multi-bubble-sort"

# Initialize an API client for the Hugging Face Hub
api = HfApi()

# Create the repo on the Hub (no-op if it already exists)
api.create_repo(repo_name, exist_ok=True)

# Push the model and tokenizer to the Hugging Face Hub
model.push_to_hub(repo_name)
tokenizer.push_to_hub(repo_name)

print(f"Model and tokenizer pushed to Hugging Face Hub under: {repo_name}")
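As a quick sanity check after the push, you can reload both artifacts straight from your new repo. This is a minimal sketch that assumes the push above completed and reuses the repo_name variable; the *_check names are illustrative only.

# Reload the artifacts from the Hub to verify the upload (assumes repo_name from above)
tokenizer_check = AutoTokenizer.from_pretrained(repo_name)
model_check = AutoModelForCausalLM.from_pretrained(repo_name)
print("Reloaded", model_check.config.model_type, "from", repo_name)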
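The pip install above pulls in gradio, but the script never uses it. If a web demo was the intent, here is one hedged sketch of how the generation step could be wrapped in a Gradio interface; the generate_code helper, the interface layout, and the title are assumptions, not part of the original walkthrough.

import gradio as gr

# Hypothetical helper: wraps the generation step above so Gradio can call it
def generate_code(prompt):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    generated_ids = model.generate(
        input_ids,
        max_length=200,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)

# Simple text-in, code-out interface around the model
demo = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(lines=3, label="Function signature or prompt"),
    outputs=gr.Code(language="python", label="Generated code"),
    title="CodeGen-350M code completion",
)

demo.launch()

Running this launches a local web UI; typing a prompt such as "def bubble_sort(list_elements):" returns the model's completion, the same as the scripted example above.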