import os

from datasets import load_dataset, Dataset

# Hub token read from the environment (presumably meant for authenticated
# access, e.g. load_dataset(..., token=TOKEN), if the dataset is gated).
TOKEN = os.environ.get("DEBUG")

requests_dataset = load_dataset("EnergyStarAI/requests_debug", split="test")
def normalize_task(task):
    # Assumes a convention for how task names are written and referenced
    # elsewhere: space-separated words are joined with underscores and lowercased.
    return '_'.join(task.split()).lower()
requests_dset = requests_dataset.to_pandas()

# Print one "model,normalized_task" line per request still marked PENDING.
# (to_dict(..., index=False) requires pandas >= 2.0.)
for model, task in requests_dset[['model', 'task']].loc[requests_dset['status'] == 'PENDING'].to_dict(orient='split', index=False)['data']:
    print("%s,%s" % (model, normalize_task(task)))