Create project1.py
project1.py +157 -0
project1.py
ADDED
@@ -0,0 +1,157 @@
import datasets
import pandas as pd
import numpy as np

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""

_DESCRIPTION = """\
This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
"""

_HOMEPAGE = ""

_LICENSE = ""


class HealthStatisticsDataset(datasets.GeneratorBasedBuilder):
    """US cardiovascular disease mortality statistics joined with location-level life expectancy estimates."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Year": datasets.Value("int32"),
                    "LocationAbbr": datasets.Value("string"),
                    "LocationDesc": datasets.Value("string"),
                    "Latitude": datasets.Value("float32"),
                    "Longitude": datasets.Value("float32"),
                    "Disease_Type": datasets.Value("int32"),
                    "Data_Value_Type": datasets.Value("int32"),
                    "Data_Value": datasets.Value("float32"),
                    "Break_Out_Category": datasets.Value("string"),
                    "Break_Out_Details": datasets.Value("string"),
                    "Break_Out_Type": datasets.Value("int32"),
                    "Life_Expectancy": datasets.Value("float32"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the raw health statistics CSV, preprocess it, and expose a single train split.
        data = pd.read_csv(dl_manager.download_and_extract("https://docs.google.com/uc?export=download&id=1eChYmZ3RMq1v-ek1u6DD2m_dGIrz3sbi&confirm=t"))
        processed_data = self.preprocess_data(data)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data": processed_data},
            ),
        ]

    def _generate_examples(self, data):
        for key, row in data.iterrows():
            year = int(row['Year']) if 'Year' in row else None
            # Parse "POINT (lon lat)" strings from the Geolocation column into numeric coordinates.
            latitude, longitude = None, None
            if isinstance(row['Geolocation'], str):
                geo_str = row['Geolocation'].replace('POINT (', '').replace(')', '')
                longitude, latitude = map(float, geo_str.split())
            yield key, {
                "Year": year,
                "LocationAbbr": row.get('LocationAbbr', None),
                "LocationDesc": row.get('LocationDesc', None),
                "Latitude": latitude,
                "Longitude": longitude,
                "Disease_Type": int(row["Disease_Type"]) if "Disease_Type" in row else None,
                "Data_Value_Type": int(row["Data_Value_Type"]) if "Data_Value_Type" in row else None,
                "Data_Value": float(row["Data_Value"]) if "Data_Value" in row else None,
                "Break_Out_Category": row.get("Break_Out_Category", None),
                "Break_Out_Details": row.get("Break_Out_Details", None),
                "Break_Out_Type": int(row["Break_Out_Type"]) if 'Break_Out_Type' in row else None,
                "Life_Expectancy": float(row["Life_Expectancy"]) if row.get("Life_Expectancy") else None,
            }

    @staticmethod
    def preprocess_data(data):
        # Keep only the columns needed downstream.
        data = data[['YearStart', 'LocationAbbr', 'LocationDesc', 'Geolocation', 'Topic', 'Question', 'Data_Value_Type', 'Data_Value', 'Data_Value_Alt',
                     'Low_Confidence_Limit', 'High_Confidence_Limit', 'Break_Out_Category', 'Break_Out']]

        # Silence pandas chained-assignment warnings while mutating the filtered frame.
        pd.options.mode.chained_assignment = None

        # Encode each mortality-rate question as an integer disease type (-1 for anything unlisted).
        disease_columns = [
            'Major cardiovascular disease mortality rate among US adults (18+); NVSS',
            'Diseases of the heart (heart disease) mortality rate among US adults (18+); NVSS',
            'Acute myocardial infarction (heart attack) mortality rate among US adults (18+); NVSS',
            'Coronary heart disease mortality rate among US adults (18+); NVSS',
            'Heart failure mortality rate among US adults (18+); NVSS',
            'Cerebrovascular disease (stroke) mortality rate among US adults (18+); NVSS',
            'Ischemic stroke mortality rate among US adults (18+); NVSS',
            'Hemorrhagic stroke mortality rate among US adults (18+); NVSS'
        ]

        disease_column_mapping = {column_name: index for index, column_name in enumerate(disease_columns)}
        data['Question'] = data['Question'].apply(lambda x: disease_column_mapping.get(x, -1))

        # Integer-encode the demographic break-out values (sex, age group, race); 'Overall' becomes 0 below.
        sex_columns = ['Male', 'Female']
        sex_column_mapping = {column_name: index + 1 for index, column_name in enumerate(sex_columns)}

        age_columns = ['18-24', '25-44', '45-64', '65+']
        age_column_mapping = {column_name: index + 1 for index, column_name in enumerate(age_columns)}

        race_columns = ['Non-Hispanic White', 'Non-Hispanic Black', 'Hispanic', 'Other']
        race_column_mapping = {column_name: index + 1 for index, column_name in enumerate(race_columns)}

        def map_break_out_category(value):
            if value in sex_column_mapping:
                return sex_column_mapping[value]
            elif value in age_column_mapping:
                return age_column_mapping[value]
            elif value in race_column_mapping:
                return race_column_mapping[value]
            else:
                return value

        data['Break_Out_Type'] = data['Break_Out'].apply(map_break_out_category)
        data.drop(columns=['Topic', 'Low_Confidence_Limit', 'High_Confidence_Limit', 'Data_Value_Alt'], axis=1, inplace=True)
        data['Data_Value_Type'] = data['Data_Value_Type'].apply(lambda x: 1 if x == 'Age-Standardized' else 0)
        data.rename(columns={'Question': 'Disease_Type', 'YearStart': 'Year', 'Break_Out': 'Break_Out_Details'}, inplace=True)
        data['Break_Out_Type'] = data['Break_Out_Type'].replace('Overall', 0)

        # Restore the default chained-assignment warning behaviour.
        pd.options.mode.chained_assignment = 'warn'

        # Life-expectancy tables for 2000, 2005, 2010 and 2015; keep the total-population, '<1 year' (at-birth) rows.
        lt2000 = pd.read_csv("https://docs.google.com/uc?export=download&id=1ktRNl7jg0Z83rkymD9gcsGLdVqVaFtd-&confirm=t")
        lt2000 = lt2000[(lt2000['race_name'] == 'Total') & (lt2000['age_name'] == '<1 year')]
        lt2000 = lt2000[['location_name', 'val']]
        lt2000.rename(columns={'val': 'Life_Expectancy'}, inplace=True)

        lt2005 = pd.read_csv("https://docs.google.com/uc?export=download&id=1xZqeOgj32-BkOhDTZVc4k_tp1ddnOEh7&confirm=t")
        lt2005 = lt2005[(lt2005['race_name'] == 'Total') & (lt2005['age_name'] == '<1 year')]
        lt2005 = lt2005[['location_name', 'val']]
        lt2005.rename(columns={'val': 'Life_Expectancy'}, inplace=True)

        lt2010 = pd.read_csv("https://docs.google.com/uc?export=download&id=1ItqHBuuUa38PVytfahaAV8NWwbhHMMg8&confirm=t")
        lt2010 = lt2010[(lt2010['race_name'] == 'Total') & (lt2010['age_name'] == '<1 year')]
        lt2010 = lt2010[['location_name', 'val']]
        lt2010.rename(columns={'val': 'Life_Expectancy'}, inplace=True)

        lt2015 = pd.read_csv("https://docs.google.com/uc?export=download&id=1rOgQY1RQiry2ionTKM_UWgT8cYD2E0vX&confirm=t")
        lt2015 = lt2015[(lt2015['race_name'] == 'Total') & (lt2015['age_name'] == '<1 year')]
        lt2015 = lt2015[['location_name', 'val']]
        lt2015.rename(columns={'val': 'Life_Expectancy'}, inplace=True)

        lt_data = pd.concat([lt2000, lt2005, lt2010, lt2015])
        lt_data.drop_duplicates(subset=['location_name'], inplace=True)

        # Merge life expectancy onto the health statistics by location name.
        data2 = pd.merge(data, lt_data, how='inner', left_on='LocationDesc', right_on='location_name')
        data2.drop(columns=['location_name'], axis=1, inplace=True)
        data2 = data2[(data2['Break_Out_Details'] != '75+') & (data2['Break_Out_Details'] != '35+')]
        data2.rename(columns={'Question': 'Disease_Type'}, inplace=True)
        # Keep life expectancy only for the overall population (Break_Out_Type == 0); other rows get NaN.
        data2['Life_Expectancy'] = np.where(data2['Break_Out_Type'] == 0, data2['Life_Expectancy'], np.nan)
        data2 = data2.reset_index(drop=True)
        return data2
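A minimal usage sketch for the script above, assuming it is saved locally as project1.py, the Google Drive links are still reachable, and a datasets version that still supports script-based loaders (older releases, or newer 2.x releases with trust_remote_code=True):

    # Hypothetical local usage; the file path and split name mirror the script above.
    from datasets import load_dataset

    ds = load_dataset("project1.py", split="train")  # runs _split_generators, then _generate_examples
    print(ds.features)  # matches the Features declared in _info()
    print(ds[0])        # one row: Year, Disease_Type, Data_Value, Life_Expectancy, ...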