Ali-C137's picture
Upload app.py
facb321 verified
raw
history blame
4.28 kB
# -*- coding: utf-8 -*-
"""Untitled2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1iZpCUgC5T_ASnlDgMYm1n4RH8BZsm7sx
"""
# !pip install gradio
def estimate_training_cost(number_of_parameters, number_of_tokens, gpu_throughput=312e12, utilization_rate=0.5, overhead=1.10, cost_per_gpu_hour=1.85):
    """
    Estimate the training cost of a large language model.

    Uses the standard approximation of ~6 FLOPs per parameter per token
    (forward + backward pass) over the whole training corpus.

    Args:
    - number_of_parameters (float): The number of parameters in the model (> 0).
    - number_of_tokens (float): The number of tokens to train on (> 0).
    - gpu_throughput (float, optional): Peak GPU throughput in FLOPs/sec.
      Default is 312 TFLOPs/sec (A100 tensor-core peak).
    - utilization_rate (float, optional): Achieved fraction of peak throughput
      (0 < utilization_rate <= 1). Default is 0.5 (50%).
    - overhead (float, optional): Multiplier for overhead and additional costs
      (1 + overhead percentage). Default is 1.10 (10% overhead).
    - cost_per_gpu_hour (float, optional): Cost per GPU-hour in dollars.
      Default is $1.85/hour.

    Returns:
    - float: The estimated total cost of training the model, in dollars.

    Raises:
    - ValueError: If utilization_rate is outside (0, 1] or any count/throughput
      argument is not positive.
    """
    # The docstring always promised 0 < utilization_rate <= 1; enforce it so a
    # bad slider/input raises a clear ValueError instead of ZeroDivisionError
    # (or silently inflating the estimate).
    if not 0 < utilization_rate <= 1:
        raise ValueError("utilization_rate must be in (0, 1]")
    if number_of_parameters <= 0 or number_of_tokens <= 0:
        raise ValueError("number_of_parameters and number_of_tokens must be positive")
    if gpu_throughput <= 0:
        raise ValueError("gpu_throughput must be positive")
    # Calculate the total number of FLOPs required for training
    total_flops = 6 * number_of_parameters * number_of_tokens
    # Hours required at *peak* throughput (3600 seconds per hour)
    gpu_hours = total_flops / (gpu_throughput * 3600)
    # Scale up because real-world utilization is below peak
    adjusted_gpu_hours = gpu_hours / utilization_rate
    # Account for the overhead (checkpointing, restarts, evaluation, ...)
    actual_gpu_hours = adjusted_gpu_hours * overhead
    # Convert GPU-hours into dollars
    total_cost = actual_gpu_hours * cost_per_gpu_hour
    return total_cost
# Example usage:
# A model with 70 billion parameters trained on 2 trillion tokens, using the
# default throughput, utilization, overhead, and price assumptions.
total_cost = estimate_training_cost(number_of_parameters=70e9, number_of_tokens=2e12)
# The original bare `total_cost` expression was a Colab display artifact and a
# no-op in a plain script; print the value so the example actually shows output.
print(f"Example: training a 70B model on 2T tokens costs about ${total_cost:,.2f}")
import gradio as gr
# Assume the function estimate_training_cost is already defined as per the previous discussion.
def gradio_interface(number_of_parameters, number_of_tokens, utilization_rate, overhead, cost_per_gpu_hour):
    """
    Gradio-facing wrapper around estimate_training_cost.

    Receives the raw widget values (textboxes give strings, sliders give
    numbers), scales parameter and token counts from billions/trillions to
    absolute units, and returns the estimated cost as a formatted string.
    """
    # Textbox widgets hand us strings; coerce and rescale to absolute counts.
    params = float(number_of_parameters) * 1e9   # billions  -> parameters
    tokens = float(number_of_tokens) * 1e12      # trillions -> tokens
    estimated = estimate_training_cost(
        params,
        tokens,
        utilization_rate=float(utilization_rate),
        overhead=float(overhead),
        cost_per_gpu_hour=float(cost_per_gpu_hour),
    )
    # Format for display in the output textbox.
    return f"The estimated training cost is ${estimated:,.2f}"
# Define the title and description for the Gradio app.
# Both values are raw HTML fragments; gr.Interface renders them above the
# input widgets (title) and beneath it (description).
title = "<h2 style='text-align: center;'>LLM Training Cost Calculator</h2>"
description = "<p style='text-align: center;'>Estimate the cost of training large language models (LLM). This tool helps you to calculate the cost based on model parameters and tokens. We plan to extend this calculator to include the cost of fine-tuning models using strategies like LoRA or QLoRA. Stay tuned for updates where you'll be able to input the model ID from the Hugging Face Hub, select the fine-tuning strategy, and specify quantization details if QLoRA is chosen.</p>"
# Create the Gradio interface with title and description.
# Widgets are named individually; their order in `inputs` must match the
# positional parameters of gradio_interface.
params_input = gr.Textbox(label="Number of Parameters (in billions)", value="70")
tokens_input = gr.Textbox(label="Number of Tokens (in trillions)", value="2")
utilization_input = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="GPU Utilization Rate")
overhead_input = gr.Slider(minimum=1.0, maximum=2.0, step=0.01, value=1.10, label="Overhead (1 + overhead percentage)")
cost_input = gr.Textbox(label="Cost per GPU Hour ($)", value="1.85")
result_output = gr.Textbox(label="Estimated Training Cost")

iface = gr.Interface(
    fn=gradio_interface,
    inputs=[params_input, tokens_input, utilization_input, overhead_input, cost_input],
    outputs=[result_output],
    title=title,
    description=description,
    article="<p style='text-align: center;'>Developed with ❤️ by Elfilali Ali</p>",
)
# Run the interface
iface.launch()