Upload 2 files
- app.py +71 -0
- functions.py +207 -0
app.py
ADDED
@@ -0,0 +1,71 @@
import streamlit as st
import hopsworks
import joblib
import pandas as pd
import numpy as np
from datetime import timedelta, datetime

from functions import *


def fancy_header(text, font_size=24):
    res = f'<span style="color:#ff5f27; font-size: {font_size}px;">{text}</span>'
    st.markdown(res, unsafe_allow_html=True)


st.title('Air Quality Prediction Project🌩')

st.sidebar.header('Working Progress')
progress_bar = st.sidebar.progress(0)
st.write(36 * "-")
fancy_header('\n Connecting to Hopsworks Feature Store...')

project = hopsworks.login()
fs = project.get_feature_store()
feature_view = fs.get_feature_view(
    name='air_quality_fv',
    version=1
)

st.write("Successfully connected!✔️")
progress_bar.progress(20)

st.write(36 * "-")
fancy_header('\n Getting data from Feature Store...')

# `datetime` here is the class imported from the `datetime` module,
# so today's date is obtained via .today().date()
today = datetime.today().date()
city = "vienna"
weekly_data = get_weather_data_weekly(city, today)

progress_bar.progress(50)

# latest_date_unix = str(X.date.values[0])[:10]
# latest_date = time.ctime(int(latest_date_unix))

# st.write(f"Data for {latest_date}")

# data_to_display = decode_features(X, feature_view=feature_view)

progress_bar.progress(60)

st.write(36 * "-")

mr = project.get_model_registry()
model = mr.get_best_model("aqi_model", "rmse", "min")
model_dir = model.download()
model = joblib.load(model_dir + "/aqi_model.pkl")

progress_bar.progress(80)
st.sidebar.write("-" * 36)


preds = model.predict(data_encoder(weekly_data)).astype(int)
poll_level = get_aplevel(preds.T.reshape(-1, 1))

# seven day names, matching the seven rows returned by get_weather_data_weekly
next_week = [(today + timedelta(days=d)).strftime('%A') for d in range(7)]

df = pd.DataFrame(data=preds.reshape(1, -1), index=["eg"],
                  columns=[f"AQI Predictions for {next_day}" for next_day in next_week], dtype=int)

st.sidebar.write(df)
progress_bar.progress(100)
st.button("Re-run")
functions.py
ADDED
@@ -0,0 +1,207 @@
import requests
import os
import joblib
import pandas as pd
import datetime
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
from dotenv import load_dotenv

load_dotenv(override=True)


def decode_features(df, feature_view):
    """Decodes features in the input DataFrame using the corresponding
    Hopsworks Feature Store transformation functions."""
    import inspect

    df_res = df.copy()

    td_transformation_functions = feature_view._batch_scoring_server._transformation_functions

    for feature_name in td_transformation_functions:
        if feature_name in df_res.columns:
            td_transformation_function = td_transformation_functions[feature_name]
            sig = inspect.signature(td_transformation_function.transformation_fn)
            param_dict = {param.name: param.default for param in sig.parameters.values()
                          if param.default != inspect._empty}
            if td_transformation_function.name == "min_max_scaler":
                df_res[feature_name] = df_res[feature_name].map(
                    lambda x: x * (param_dict["max_value"] - param_dict["min_value"]) + param_dict["min_value"])
            elif td_transformation_function.name == "standard_scaler":
                df_res[feature_name] = df_res[feature_name].map(
                    lambda x: x * param_dict['std_dev'] + param_dict["mean"])
            elif td_transformation_function.name == "label_encoder":
                # invert the value -> index mapping to recover the original labels
                index_to_value = {v: k for k, v in param_dict['value_to_index'].items()}
                df_res[feature_name] = df_res[feature_name].map(
                    lambda x: index_to_value[x])
    return df_res


def get_model(project, model_name, evaluation_metric, sort_metrics_by):
    """Retrieve the desired model from the Hopsworks Model Registry.
    If no local copy is found, it is downloaded into this directory."""
    TARGET_FILE = "model.pkl"
    list_of_files = [os.path.join(dirpath, filename) for dirpath, _, filenames
                     in os.walk('.') for filename in filenames if filename == TARGET_FILE]

    if list_of_files:
        # reuse the local copy
        model_path = list_of_files[0]
        model = joblib.load(model_path)
    else:
        # get the best model based on the given metric
        mr = project.get_model_registry()
        model = mr.get_best_model(model_name,
                                  evaluation_metric,
                                  sort_metrics_by)
        model_dir = model.download()
        model = joblib.load(model_dir + "/model.pkl")

    return model


def get_air_quality_data(station_name):
    AIR_QUALITY_API_KEY = os.getenv('AIR_QUALITY_API_KEY')
    request_value = f'https://api.waqi.info/feed/{station_name}/?token={AIR_QUALITY_API_KEY}'
    answer = requests.get(request_value).json()["data"]
    forecast = answer['forecast']['daily']
    return [
        answer["time"]["s"][:10],         # date
        int(forecast['pm25'][0]['avg']),  # avg predicted pm25
        int(forecast['pm10'][0]['avg']),  # avg predicted pm10
        max(int(forecast['pm25'][0]['avg']), int(forecast['pm10'][0]['avg']))  # avg predicted AQI
    ]
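
For orientation, the portion of the WAQI /feed response that this function indexes looks roughly as follows (a hand-written sketch; the keys mirror the accesses above, the values are illustrative, not real measurements):

# Illustrative `answer` payload (the "data" object of a WAQI /feed response)
answer = {
    "time": {"s": "2023-01-05 14:00:00"},
    "forecast": {"daily": {
        "pm25": [{"avg": 83, "day": "2023-01-05", "max": 107, "min": 55}],
        "pm10": [{"avg": 30, "day": "2023-01-05", "max": 37, "min": 23}],
    }},
}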


def get_air_quality_df(data):
    col_names = [
        'date',
        'pm25',
        'pm10',
        'aqi'
    ]

    new_data = pd.DataFrame(data).T
    new_data.columns = col_names
    new_data['pm25'] = pd.to_numeric(new_data['pm25'])
    new_data['pm10'] = pd.to_numeric(new_data['pm10'])
    new_data['aqi'] = pd.to_numeric(new_data['aqi'])

    print(new_data)
    return new_data


def get_weather_data_daily(city):
    WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
    answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/today?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
    data = answer['days'][0]
    return [
        answer['address'].lower(),
        data['datetime'],
        data['tempmax'],
        data['tempmin'],
        data['temp'],
        data['feelslikemax'],
        data['feelslikemin'],
        data['feelslike'],
        data['dew'],
        data['humidity'],
        data['precip'],
        data['precipprob'],
        data['precipcover'],
        data['snow'],
        data['snowdepth'],
        data['windgust'],
        data['windspeed'],
        data['winddir'],
        data['pressure'],
        data['cloudcover'],
        data['visibility'],
        data['solarradiation'],
        data['solarenergy'],
        data['uvindex'],
        data['conditions']
    ]


def get_weather_data_weekly(city: str, start_date: datetime.date) -> pd.DataFrame:
    WEATHER_API_KEY = os.getenv('WEATHER_API_KEY')
    end_date = f"{start_date + datetime.timedelta(days=6):%Y-%m-%d}"
    answer = requests.get(f'https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/{city}/{start_date}/{end_date}?unitGroup=metric&include=days&key={WEATHER_API_KEY}&contentType=json').json()
    weather_data = answer['days']
    final_df = pd.DataFrame()

    # one row per day, seven days in total (start_date .. start_date + 6)
    for i in range(7):
        data = weather_data[i]
        list_of_data = [
            answer['address'].lower(), data['datetime'], data['tempmax'], data['tempmin'], data['temp'], data['feelslikemax'],
            data['feelslikemin'], data['feelslike'], data['dew'], data['humidity'], data['precip'], data['precipprob'], data['precipcover'],
            data['snow'], data['snowdepth'], data['windgust'], data['windspeed'], data['winddir'], data['pressure'], data['cloudcover'],
            data['visibility'], data['solarradiation'], data['solarenergy'], data['uvindex'], data['conditions']
        ]
        weather_df = get_weather_df(list_of_data)
        final_df = pd.concat([final_df, weather_df])
    return final_df


def get_weather_df(data):
    col_names = [
        'name',
        'date',
        'tempmax',
        'tempmin',
        'temp',
        'feelslikemax',
        'feelslikemin',
        'feelslike',
        'dew',
        'humidity',
        'precip',
        'precipprob',
        'precipcover',
        'snow',
        'snowdepth',
        'windgust',
        'windspeed',
        'winddir',
        'pressure',
        'cloudcover',
        'visibility',
        'solarradiation',
        'solarenergy',
        'uvindex',
        'conditions'
    ]

    new_data = pd.DataFrame(data).T
    new_data.columns = col_names
    for col in col_names:
        if col not in ['name', 'date', 'conditions']:
            new_data[col] = pd.to_numeric(new_data[col])

    return new_data


def data_encoder(X):
    X.drop(columns=['date', 'name'], inplace=True)
    X['conditions'] = OrdinalEncoder().fit_transform(X[['conditions']])
    return X


def transform(df):
    # fall back to wind speed where no gust was recorded
    df.loc[df["windgust"].isna(), 'windgust'] = df['windspeed']
    df['snow'].fillna(0, inplace=True)
    df['snowdepth'].fillna(0, inplace=True)
    df['pressure'].fillna(df['pressure'].mean(), inplace=True)
    return df


def get_aplevel(temps: np.ndarray) -> list:
    boundary_list = np.array([0, 50, 100, 150, 200, 300])  # assumes temps.shape == (x, 1)
    redf = np.logical_not(temps <= boundary_list)  # (temps.shape[0], boundary_list.shape[0]) ndarray
    hift = np.concatenate((np.roll(redf, -1)[:, :-1], np.full((temps.shape[0], 1), False)), axis=1)
    cat = np.nonzero(np.not_equal(redf, hift))  # column index of the last boundary each value exceeds

    air_pollution_level = ['Good', 'Moderate', 'Unhealthy for Sensitive Groups', 'Unhealthy', 'Very Unhealthy', 'Hazardous']
    level = [air_pollution_level[el] for el in cat[1]]
    return level
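
A quick sanity check of the banding above, with values picked for illustration: 42 exceeds only the 0 boundary ('Good'), while 160 exceeds the boundaries up to 150 ('Unhealthy'):

import numpy as np
from functions import get_aplevel

# 42 -> 'Good', 160 -> 'Unhealthy'
print(get_aplevel(np.array([[42], [160]])))  # ['Good', 'Unhealthy']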