Upload nbfi.py
nbfi.py CHANGED
@@ -145,8 +145,7 @@ _CITATION = """"""
 
 # Dataset info
 urls_per_split = {
-    "train": "https://gist.githubusercontent.com/msetzu/6c83dc3b7092d428ae2f08dc91e1020c/raw/9fc3171b293d0dc29963357450308eb4c7e3a15b/Train_Dataset.csv"
-    "test": "https://gist.githubusercontent.com/msetzu/f0032b855008f579299d7ad78d9dd9c2/raw/ba42badeb10b505cb283bdb16d3de581ffe7a332/Test_Dataset.csv"
+    "train": "https://gist.githubusercontent.com/msetzu/6c83dc3b7092d428ae2f08dc91e1020c/raw/9fc3171b293d0dc29963357450308eb4c7e3a15b/Train_Dataset.csv"
 }
 
 features_per_config = {k: datasets.Features(features_types_per_config[k]) for k in features_types_per_config}
@@ -180,8 +179,7 @@ class NBFI(datasets.GeneratorBasedBuilder):
         downloads = dl_manager.download_and_extract(urls_per_split)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloads["test"]}),
+            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloads["train"]})
         ]
 
     def _generate_examples(self, filepath: str):
@@ -199,6 +197,7 @@ class NBFI(datasets.GeneratorBasedBuilder):
 
 
     def preprocess(self, data: pandas.DataFrame, config: str = DEFAULT_CONFIG) -> pandas.DataFrame:
+        print(data.columns)
         data.drop("ID", axis="columns", inplace=True)
         data.drop("Own_House_Age", axis="columns", inplace=True)
         data.drop("Type_Organization", axis="columns", inplace=True)
@@ -237,16 +236,6 @@ class NBFI(datasets.GeneratorBasedBuilder):
         data = data[data.Registration_Days != "x"]
         data = data[data.ID_Days != "x"]
 
-        print("len(data.columns)")
-        print(len(data.columns))
-        print("len(_BASE_FEATURE_NAMES)")
-        print(len(_BASE_FEATURE_NAMES))
-        print("len(features_types_per_config[config].keys())")
-        print(len(features_types_per_config[config].keys()))
-        print("data.columns, features_types_per_config, _BASE_FEATURE_NAMES")
-        for f, ft, fb in zip(data.columns, features_types_per_config[config].keys(), _BASE_FEATURE_NAMES):
-            print(f, ft, fb)
-
         data.columns = _BASE_FEATURE_NAMES
 
         data["education_level"] = data["education_level"].apply(lambda x: _EDUCATION_ENCODING[x])