meg-huggingface committed
Commit c5729e2
1 Parent(s): 258cdcb

Changing names to AIEnergyScore

Files changed (6)
  1. Dockerfile +1 -1
  2. create_results.py +17 -13
  3. entrypoint.sh +2 -2
  4. failed_run.py +3 -3
  5. parse_requests.py +4 -6
  6. pause_space.py +1 -1
Dockerfile CHANGED
@@ -76,4 +76,4 @@ RUN --mount=type=secret,id=DEBUG,mode=0444,required=true \
     git init && \
     git remote add origin $(cat /run/secrets/DEBUG)
 
-CMD ["/entrypoint.sh"]
+ENTRYPOINT ["/entrypoint.sh"]
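
Note on the CMD to ENTRYPOINT switch: an ENTRYPOINT always runs when the container starts, and any arguments given to docker run are appended to it, whereas a CMD is replaced wholesale by a command supplied on the docker run line. A quick sketch for checking what the built image is configured to execute, via docker inspect with a Go-template format string; the image tag used here is a made-up placeholder:

# Sketch: read the configured entrypoint of a locally built image.
# The tag "ai-energy-score:latest" is an assumption; substitute your own tag.
import json
import subprocess

def image_entrypoint(tag="ai-energy-score:latest"):
    out = subprocess.run(
        ["docker", "inspect", "--format", "{{json .Config.Entrypoint}}", tag],
        capture_output=True, text=True, check=True,
    )
    return json.loads(out.stdout)

print(image_entrypoint())  # expected after this change: ["/entrypoint.sh"]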
create_results.py CHANGED
@@ -10,28 +10,32 @@ api = HfApi(token=TOKEN)
 
 out_dir = sys.argv[1]
 
-#Uploading results
+# Uploading results
 api.upload_folder(
     folder_path=out_dir,
     repo_id="AIEnergyScore/results_debug",
     repo_type="dataset",
 )
 
-#Updating requests
-requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+# Updating requests
+requests = load_dataset("AIEnergyScore/requests_debug", split="test",
+                        token=TOKEN)
 requests_dset = requests.to_pandas()
 
-models_ran=[]
+models_ran = []
 for f in os.scandir(out_dir):
-    if f.is_dir():
-        for s in os.scandir(f):
-            if s.is_dir() and s.name not in ['hooks','info','objects','refs','logs']:
-                for m in os.scandir(s):
-                    models_ran.append(s.name+'/' + m.name)
-
+    if f.is_dir():
+        for s in os.scandir(f):
+            if s.is_dir() and s.name not in ['hooks', 'info', 'objects', 'refs',
+                                             'logs']:
+                for m in os.scandir(s):
+                    models_ran.append(s.name + '/' + m.name)
+
 print("Models ran are: " + str(models_ran))
 
-requests_dset.loc[requests_dset["model"].isin(models_ran), ['status']] = "COMPLETED"
-updated_dset =Dataset.from_pandas(requests_dset)
-updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)
+requests_dset.loc[
+    requests_dset["model"].isin(models_ran), ['status']] = "COMPLETED"
+updated_dset = Dataset.from_pandas(requests_dset)
+updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test",
+                         token=TOKEN)
 print("Updated model status")
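
The nested os.scandir walk looks inside every top-level directory of the output folder, skips git bookkeeping directories (hooks, info, objects, refs, logs), and records each remaining entry as a "<parent>/<name>" string. A self-contained sketch of what that loop yields, run against a throwaway directory tree with made-up names instead of a real runs folder:

# Sketch: exercises the models_ran collection logic on a fabricated tree.
import os
import tempfile

with tempfile.TemporaryDirectory() as out_dir:
    os.makedirs(os.path.join(out_dir, "runs", "openai-community", "gpt2"))
    os.makedirs(os.path.join(out_dir, "runs", "hooks"))  # skipped by the filter

    models_ran = []
    for f in os.scandir(out_dir):
        if f.is_dir():
            for s in os.scandir(f):
                if s.is_dir() and s.name not in ['hooks', 'info', 'objects',
                                                 'refs', 'logs']:
                    for m in os.scandir(s):
                        models_ran.append(s.name + '/' + m.name)

    print(models_ran)  # -> ['openai-community/gpt2']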
entrypoint.sh CHANGED
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-export SPACE="EnergyStarAI/launch-computation-example"
+export SPACE="AIEnergyScore/launch-computation-example"
 
 echo "Not checking h100 -- already know it's not there."
 #python /check_h100.py
@@ -31,4 +31,4 @@ python /upload_run_folder.py --run_dir "/runs"
 # Pausing space
 echo "Pausing space."
 python /pause_space.py
-echo "Done."
+echo "Done."
failed_run.py CHANGED
@@ -26,7 +26,7 @@ parser.add_argument(
 args = parser.parse_args()
 
 # Updating request
-dataset = load_dataset("EnergyStarAI/requests_debug", split="test",
+dataset = load_dataset("AIEnergyScore/requests_debug", split="test",
                        token=TOKEN).to_pandas()
 
 # Set benchmark to failed
@@ -48,5 +48,5 @@ except FileNotFoundError as e:
     print(f"Could not find {args.run_dir}/error.log")
 
 updated_dataset = Dataset.from_pandas(dataset)
-updated_dataset.push_to_hub("EnergyStarAI/requests_debug", split="test",
-                            token=TOKEN)
+updated_dataset.push_to_hub("AIEnergyScore/requests_debug", split="test",
+                            token=TOKEN)
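
failed_run.py mirrors the COMPLETED path in create_results.py, but flips the status to FAILED and tries to attach the run's error.log before pushing the requests dataset back. A minimal local sketch of that update on an in-memory DataFrame rather than the Hub dataset; the run_dir path and the error_message column name below are assumptions for illustration, not necessarily what the real requests dataset uses:

# Sketch of the failure-path update on a fabricated requests table.
import pandas as pd

run_dir = "/runs/example"  # hypothetical path
dataset = pd.DataFrame({"model": ["org/model-a"],
                        "status": ["RUNNING"],
                        "error_message": [""]})  # column name is an assumption

dataset.loc[dataset["model"] == "org/model-a", ["status"]] = "FAILED"
try:
    with open(f"{run_dir}/error.log") as log:
        dataset.loc[dataset["model"] == "org/model-a",
                    ["error_message"]] = log.read()
except FileNotFoundError:
    print(f"Could not find {run_dir}/error.log")

print(dataset)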
parse_requests.py CHANGED
@@ -1,10 +1,10 @@
 import os
 from datasets import load_dataset, Dataset
 
-skip_statuses = ['COMPLETED', 'FAILED', 'RUNNING']
 TOKEN = os.environ.get("DEBUG")
 requests_dataset = load_dataset("AIEnergyScore/requests_debug", split="test")
 
+
 def normalize_task(task):
     # Makes assumption about how the task names are being written, and called.
     return '_'.join(task.split()).lower()
@@ -12,9 +12,7 @@ def normalize_task(task):
 
 requests_dset = requests_dataset.to_pandas()
 
-for model, task in requests_dset[['model','task']].loc[requests_dset['status'] == 'PENDING'].to_dict(orient= 'split', index=False)['data']:
+for model, task in requests_dset[['model', 'task']].loc[
+        requests_dset['status'] == 'PENDING'].to_dict(orient='split', index=False)[
+        'data']:
     print("%s,%s" % (model, normalize_task(task)))
-
-# Custom errors we will rerun.
-for model, task in requests_dset[['model','task']].loc[~requests_dset['status'].isin(skip_statuses)].to_dict(orient= 'split', index=False)['data']:
-    print("%s,%s" % (model, normalize_task(task)))
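The rewritten loop keeps only the PENDING branch (the skip_statuses rerun pass is gone), and the to_dict(orient='split', index=False)['data'] idiom turns the filtered two-column frame into plain [model, task] rows printed one per line. A self-contained illustration on a made-up requests table (model and task names are invented):

# Sketch: what the PENDING filter prints, on a fabricated requests table.
# Note: to_dict(..., index=False) requires pandas >= 2.0.
import pandas as pd

def normalize_task(task):
    return '_'.join(task.split()).lower()

requests_dset = pd.DataFrame({
    "model": ["org/model-a", "org/model-b"],
    "task": ["Text Generation", "Image Classification"],
    "status": ["PENDING", "COMPLETED"],
})

for model, task in requests_dset[['model', 'task']].loc[
        requests_dset['status'] == 'PENDING'].to_dict(orient='split', index=False)[
        'data']:
    print("%s,%s" % (model, normalize_task(task)))
# prints: org/model-a,text_generation
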
pause_space.py CHANGED
@@ -3,4 +3,4 @@ import os
 
 TOKEN = os.environ.get("DEBUG")
 API = HfApi(token=TOKEN)
-API.pause_space("EnergyStarAI/launch-computation-example")
+API.pause_space("AIEnergyScore/launch-computation-example")
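
pause_space.py hardcodes the same space id that entrypoint.sh now exports as SPACE. Purely as a hypothetical variant (not what this commit does), the script could read that environment variable and fall back to the hardcoded name, so a future rename only touches one place:

# Hypothetical variant of pause_space.py: take the space id from the SPACE
# env var exported by entrypoint.sh, falling back to the current default.
import os
from huggingface_hub import HfApi

TOKEN = os.environ.get("DEBUG")
SPACE = os.environ.get("SPACE", "AIEnergyScore/launch-computation-example")

API = HfApi(token=TOKEN)
API.pause_space(SPACE)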