File size: 1,339 Bytes
2071122 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
import datasets
import pyarrow
import pyarrow.parquet
def test_local_hf_match(dataset_tag):
    """Verify that local and remote copies of a MIP dataset have matching ids.

    Loads the 'id' column of the given dataset configuration from the
    RosettaCommons/MIP dataset on the Hugging Face hub and compares it,
    value-for-value and in order, against the 'id' column of the local
    intermediate parquet file ``intermediate/<dataset_tag>.parquet``.

    Parameters
    ----------
    dataset_tag : str
        Dataset configuration name, e.g. "rosetta_high_quality_models".

    Raises
    ------
    AssertionError
        If the local and remote 'id' columns differ in values or order.
    """
    print(f"For dataset : '{dataset_tag}' testing if local and remote ids match ...")
    # Remote copy: pull only the 'id' column of the 'train' split from the hub.
    # cache_dir points at scratch space so repeated runs avoid re-downloading.
    ids_hf = datasets.load_dataset(
        path = "RosettaCommons/MIP",
        name = dataset_tag,
        data_dir = dataset_tag,
        cache_dir = "/scratch/maom_root/maom0/maom",
        keep_in_memory = True).data['train'].select(['id']).to_pandas()
    # Local copy: read just the 'id' column from the intermediate parquet file.
    ids_local = pyarrow.parquet.read_table(
        source = f"intermediate/{dataset_tag}.parquet",
        columns = ["id"]).to_pandas()
    # DataFrame.equals compares both values and row order; include the dataset
    # tag in the failure message so a failing run identifies which set mismatched.
    assert ids_local.equals(ids_hf), (
        f"Local and remote 'id' columns differ for dataset '{dataset_tag}'")
# Check every dataset configuration: both quality tiers of the structural
# models and of the function predictions, for each prediction source.
for tag in (
        "rosetta_high_quality_models",
        "rosetta_low_quality_models",
        "dmpfold_high_quality_models",
        "dmpfold_low_quality_models",
        "rosetta_high_quality_function_predictions",
        "rosetta_low_quality_function_predictions",
        "dmpfold_high_quality_function_predictions",
        "dmpfold_low_quality_function_predictions"):
    test_local_hf_match(tag)
import pandas

# Reshape the long-format prediction table (one row per (id, term_id) pair)
# into a wide matrix: one row per id, one column per term_id, cells = Y_hat.
dataset_long = pyarrow.parquet.read_table(
    "intermediate/dmpfold_low_quality_function_predictions.parquet").to_pandas()
dataset_wide = dataset_long[["id", "term_id", "Y_hat"]].pivot(
    index = "id",
    columns = "term_id",
    values = "Y_hat")
|