import streamlit as st
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
import tensorflow as tf
import plotly.graph_objs as go
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization, Input
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from datasets import load_dataset

st.title("ETTm2 Dataset Analysis and Prediction Version 2")
st.image('wicked_ETTm2.png', caption='Wicked ETTm2 Dataset')

# --- Variant 1: baseline MLP ---

# Load the ETTm2 dataset from the Hugging Face Hub and convert it to a DataFrame
dataset = load_dataset('TroglodyteDerivations/ETTm2')
data = dataset['train'].to_pandas()

# Create lagged copies of each series (t-1, t-2, t-3) as predictive features
lags = 3
for col in ['HUFL', 'HULL', 'MUFL', 'MULL', 'LUFL', 'LULL', 'OT']:
    for lag in range(1, lags + 1):
        data[f'{col}_lag{lag}'] = data[col].shift(lag)

# Drop the leading rows whose lag features are NaN
data = data.dropna()

# Features: all columns except the timestamp and the target (oil temperature)
X = data.drop(columns=['date', 'OT'])
y = data['OT']

# Standardize features and target separately so predictions can be inverse-transformed later
scaler_X = StandardScaler()
scaler_y = StandardScaler()

X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.values.reshape(-1, 1)).flatten()

# Hold out 20% of the rows for validation
X_train, X_val, y_train, y_val = train_test_split(X_scaled, y_scaled, test_size=0.2, random_state=42)
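# Note (optional sketch): train_test_split shuffles rows by default. If a strictly
# chronological hold-out were preferred for this time series, the split could be
# done by slicing instead -- an assumed alternative, not used here:
#   split = int(len(X_scaled) * 0.8)
#   X_train, X_val = X_scaled[:split], X_scaled[split:]
#   y_train, y_val = y_scaled[:split], y_scaled[split:]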

# Variant 1 model: 64-32-16 fully connected layers with dropout
model = Sequential()
model.add(Dense(64, input_dim=X_train.shape[1], activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='linear'))

model.compile(optimizer='adam', loss='mean_squared_error')

history = model.fit(X_train, y_train, epochs=50, batch_size=32, validation_data=(X_val, y_val), verbose=1)
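# Note (optional sketch): Variants 3 and 4 below pass an EarlyStopping callback to
# model.fit. The same callback could be added here as well, e.g.:
#   model.fit(..., callbacks=[EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)])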

# Predict on the validation set and map predictions back to the original temperature scale
y_pred_scaled = model.predict(X_val)
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_val_original = scaler_y.inverse_transform(y_val.reshape(-1, 1))

# Report validation error in the original units
mse = mean_squared_error(y_val_original, y_pred)
mae = mean_absolute_error(y_val_original, y_pred)

st.write(f"Mean Squared Error: {mse}")
st.write(f"Mean Absolute Error: {mae}")

# Predict oil temperature for the most recent row of scaled features
final_prediction_scaled = model.predict(X_scaled[-1].reshape(1, -1))
final_prediction = scaler_y.inverse_transform(final_prediction_scaled)
st.write(f"Final Predicted Oil Temperature: {final_prediction[0][0]}")

def create_visualizations(y_val_original, y_pred):
    """Build scatter, residual, and time-series Plotly figures for Variant 1."""
    df_val_lr = pd.DataFrame({
        'Actual': y_val_original.flatten(),
        'Predicted': y_pred.flatten()
    })

    # Actual vs. predicted scatter with an ideal y = x reference line
    scatter_plot_lr = go.Figure()
    scatter_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=df_val_lr['Predicted'], mode='markers', name='Actual vs. Predicted', marker=dict(color='orange')))
    scatter_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], mode='lines', name='Ideal', line=dict(color='black')))
    scatter_plot_lr.update_layout(
        title='Actual vs. Predicted Oil Temperature (Variant 1)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Predicted Oil Temperature',
        plot_bgcolor='white'
    )

    # Residuals against actual values, with a zero-residual reference line
    residuals_lr = df_val_lr['Actual'] - df_val_lr['Predicted']
    residual_plot_lr = go.Figure()
    residual_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=residuals_lr, mode='markers', name='Residuals', marker=dict(color='orange')))
    residual_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[0, 0], mode='lines', name='Zero Residual Line', line=dict(color='black')))
    residual_plot_lr.update_layout(
        title='Residual Plot (Variant 1)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Residuals',
        plot_bgcolor='white'
    )

    # Synthetic hourly index, used only to spread the (shuffled) validation rows along the x-axis
    df_val_lr['Timestamp'] = pd.date_range(start='2016-01-01', periods=len(df_val_lr), freq='h')
    time_series_plot_lr = go.Figure()
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Actual'], mode='lines', name='Actual', line=dict(color='orange')))
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Predicted'], mode='lines', name='Predicted', line=dict(color='black')))
    time_series_plot_lr.update_layout(
        title='Time Series Plot of Actual vs. Predicted Oil Temperature (Variant 1)',
        xaxis_title='Timestamp',
        yaxis_title='Oil Temperature',
        plot_bgcolor='white'
    )

    return scatter_plot_lr, residual_plot_lr, time_series_plot_lr

scatter_plot_lr, residual_plot_lr, time_series_plot_lr = create_visualizations(y_val_original, y_pred)

st.plotly_chart(scatter_plot_lr)
st.plotly_chart(residual_plot_lr)
st.plotly_chart(time_series_plot_lr)

# Training and validation loss curves for Variant 1
st.write("Training Epochs for Variant 1")
epochs = list(range(1, len(history.history['loss']) + 1))
epochs_plot = go.Figure()
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['loss'], mode='lines', name='Training Loss', line=dict(color='orange')))
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['val_loss'], mode='lines', name='Validation Loss', line=dict(color='black')))
epochs_plot.update_layout(
    title='Training and Validation Loss Over Epochs (Variant 1)',
    xaxis_title='Epochs',
    yaxis_title='Loss',
    plot_bgcolor='white'
)
st.plotly_chart(epochs_plot)

# --- Variant 2: deeper MLP with L2 regularization and batch normalization ---

# Reload and re-prepare the data exactly as for Variant 1
dataset = load_dataset('TroglodyteDerivations/ETTm2')
data = dataset['train'].to_pandas()

lags = 3
for col in ['HUFL', 'HULL', 'MUFL', 'MULL', 'LUFL', 'LULL', 'OT']:
    for lag in range(1, lags + 1):
        data[f'{col}_lag{lag}'] = data[col].shift(lag)

data = data.dropna()

X = data.drop(columns=['date', 'OT'])
y = data['OT']

scaler_X = StandardScaler()
scaler_y = StandardScaler()

X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.values.reshape(-1, 1)).flatten()

X_train, X_val, y_train, y_val = train_test_split(X_scaled, y_scaled, test_size=0.2, random_state=42)

# Variant 2 model: 128-64-32-16 layers, each with L2 weight decay, batch normalization, and heavier dropout
model = Sequential()
model.add(Dense(128, input_dim=X_train.shape[1], activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(32, activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(16, activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dense(1, activation='linear'))

model.compile(optimizer='adam', loss='mean_squared_error')

# Train for 100 epochs (twice as long as Variant 1)
history = model.fit(X_train, y_train, epochs=100, batch_size=32, validation_data=(X_val, y_val), verbose=1)

y_pred_scaled = model.predict(X_val)
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_val_original = scaler_y.inverse_transform(y_val.reshape(-1, 1))

mse = mean_squared_error(y_val_original, y_pred)
mae = mean_absolute_error(y_val_original, y_pred)

st.write(f"Mean Squared Error: {mse}")
st.write(f"Mean Absolute Error: {mae}")

final_prediction_scaled = model.predict(X_scaled[-1].reshape(1, -1))
final_prediction = scaler_y.inverse_transform(final_prediction_scaled)
st.write(f"Final Predicted Oil Temperature: {final_prediction[0][0]}")

def create_visualizations(y_val_original, y_pred):
    df_val_lr = pd.DataFrame({
        'Actual': y_val_original.flatten(),
        'Predicted': y_pred.flatten()
    })

    scatter_plot_lr = go.Figure()
    scatter_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=df_val_lr['Predicted'], mode='markers', name='Actual vs. Predicted', marker=dict(color='orange')))
    scatter_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], mode='lines', name='Ideal', line=dict(color='black')))
    scatter_plot_lr.update_layout(
        title='Actual vs. Predicted Oil Temperature (Variant 2)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Predicted Oil Temperature',
        plot_bgcolor='white'
    )

    residuals_lr = df_val_lr['Actual'] - df_val_lr['Predicted']
    residual_plot_lr = go.Figure()
    residual_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=residuals_lr, mode='markers', name='Residuals', marker=dict(color='orange')))
    residual_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[0, 0], mode='lines', name='Zero Residual Line', line=dict(color='black')))
    residual_plot_lr.update_layout(
        title='Residual Plot (Variant 2)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Residuals',
        plot_bgcolor='white'
    )

    df_val_lr['Timestamp'] = pd.date_range(start='2016-01-01', periods=len(df_val_lr), freq='h')
    time_series_plot_lr = go.Figure()
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Actual'], mode='lines', name='Actual', line=dict(color='orange')))
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Predicted'], mode='lines', name='Predicted', line=dict(color='black')))
    time_series_plot_lr.update_layout(
        title='Time Series Plot of Actual vs. Predicted Oil Temperature (Variant 2)',
        xaxis_title='Timestamp',
        yaxis_title='Oil Temperature',
        plot_bgcolor='white'
    )

    return scatter_plot_lr, residual_plot_lr, time_series_plot_lr

scatter_plot_lr, residual_plot_lr, time_series_plot_lr = create_visualizations(y_val_original, y_pred)

st.plotly_chart(scatter_plot_lr)
st.plotly_chart(residual_plot_lr)
st.plotly_chart(time_series_plot_lr)

st.write("Training Epochs for Variant 2")
epochs = list(range(1, len(history.history['loss']) + 1))
epochs_plot = go.Figure()
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['loss'], mode='lines', name='Training Loss', line=dict(color='orange')))
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['val_loss'], mode='lines', name='Validation Loss', line=dict(color='black')))
epochs_plot.update_layout(
    title='Training and Validation Loss Over Epochs (Variant 2)',
    xaxis_title='Epochs',
    yaxis_title='Loss',
    plot_bgcolor='white'
)
st.plotly_chart(epochs_plot)

# --- Variant 3: larger MLP with early stopping ---

# Reload and re-prepare the data as before
dataset = load_dataset('TroglodyteDerivations/ETTm2')
data = dataset['train'].to_pandas()

lags = 3
for col in ['HUFL', 'HULL', 'MUFL', 'MULL', 'LUFL', 'LULL', 'OT']:
    for lag in range(1, lags + 1):
        data[f'{col}_lag{lag}'] = data[col].shift(lag)

data = data.dropna()

X = data.drop(columns=['date', 'OT'])
y = data['OT']

scaler_X = StandardScaler()
scaler_y = StandardScaler()

X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.values.reshape(-1, 1)).flatten()

X_train, X_val, y_train, y_val = train_test_split(X_scaled, y_scaled, test_size=0.2, random_state=42)

# Variant 3 model: 256-128-64-32 layers with L2 weight decay, batch normalization, and dropout 0.4
model = Sequential()
model.add(Dense(256, input_dim=X_train.shape[1], activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(32, activation='relu', kernel_regularizer=l2(0.01)))
model.add(BatchNormalization())
model.add(Dense(1, activation='linear'))

model.compile(optimizer='adam', loss='mean_squared_error')

# Stop training once validation loss has not improved for 10 epochs, keeping the best weights
early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

history = model.fit(X_train, y_train, epochs=200, batch_size=32, validation_data=(X_val, y_val), verbose=1, callbacks=[early_stopping])

y_pred_scaled = model.predict(X_val)
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_val_original = scaler_y.inverse_transform(y_val.reshape(-1, 1))

mse = mean_squared_error(y_val_original, y_pred)
mae = mean_absolute_error(y_val_original, y_pred)

st.write(f"Mean Squared Error: {mse}")
st.write(f"Mean Absolute Error: {mae}")

final_prediction_scaled = model.predict(X_scaled[-1].reshape(1, -1))
final_prediction = scaler_y.inverse_transform(final_prediction_scaled)
st.write(f"Final Predicted Oil Temperature: {final_prediction[0][0]}")

def create_visualizations(y_val_original, y_pred):
    df_val_lr = pd.DataFrame({
        'Actual': y_val_original.flatten(),
        'Predicted': y_pred.flatten()
    })

    scatter_plot_lr = go.Figure()
    scatter_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=df_val_lr['Predicted'], mode='markers', name='Actual vs. Predicted', marker=dict(color='orange')))
    scatter_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], mode='lines', name='Ideal', line=dict(color='black')))
    scatter_plot_lr.update_layout(
        title='Actual vs. Predicted Oil Temperature (Variant 3)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Predicted Oil Temperature',
        plot_bgcolor='white'
    )

    residuals_lr = df_val_lr['Actual'] - df_val_lr['Predicted']
    residual_plot_lr = go.Figure()
    residual_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=residuals_lr, mode='markers', name='Residuals', marker=dict(color='orange')))
    residual_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[0, 0], mode='lines', name='Zero Residual Line', line=dict(color='black')))
    residual_plot_lr.update_layout(
        title='Residual Plot (Variant 3)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Residuals',
        plot_bgcolor='white'
    )

    df_val_lr['Timestamp'] = pd.date_range(start='2016-01-01', periods=len(df_val_lr), freq='h')
    time_series_plot_lr = go.Figure()
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Actual'], mode='lines', name='Actual', line=dict(color='orange')))
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Predicted'], mode='lines', name='Predicted', line=dict(color='black')))
    time_series_plot_lr.update_layout(
        title='Time Series Plot of Actual vs. Predicted Oil Temperature (Variant 3)',
        xaxis_title='Timestamp',
        yaxis_title='Oil Temperature',
        plot_bgcolor='white'
    )

    return scatter_plot_lr, residual_plot_lr, time_series_plot_lr

scatter_plot_lr, residual_plot_lr, time_series_plot_lr = create_visualizations(y_val_original, y_pred)

st.plotly_chart(scatter_plot_lr)
st.plotly_chart(residual_plot_lr)
st.plotly_chart(time_series_plot_lr)

st.write("Training Epochs for Variant 3")
epochs = list(range(1, len(history.history['loss']) + 1))
epochs_plot = go.Figure()
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['loss'], mode='lines', name='Training Loss', line=dict(color='orange')))
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['val_loss'], mode='lines', name='Validation Loss', line=dict(color='black')))
epochs_plot.update_layout(
    title='Training and Validation Loss Over Epochs (Variant 3)',
    xaxis_title='Epochs',
    yaxis_title='Loss',
    plot_bgcolor='white'
)
st.plotly_chart(epochs_plot)

# --- Variant 4: explicit Input layer, lighter regularization, larger batch size ---

# Reload and re-prepare the data as before
dataset = load_dataset('TroglodyteDerivations/ETTm2')
data = dataset['train'].to_pandas()

lags = 3
for col in ['HUFL', 'HULL', 'MUFL', 'MULL', 'LUFL', 'LULL', 'OT']:
    for lag in range(1, lags + 1):
        data[f'{col}_lag{lag}'] = data[col].shift(lag)

data = data.dropna()

X = data.drop(columns=['date', 'OT'])
y = data['OT']

scaler_X = StandardScaler()
scaler_y = StandardScaler()

X_scaled = scaler_X.fit_transform(X)
y_scaled = scaler_y.fit_transform(y.values.reshape(-1, 1)).flatten()

X_train, X_val, y_train, y_val = train_test_split(X_scaled, y_scaled, test_size=0.2, random_state=42)

# Variant 4 model: 128-64-32-16 layers behind an explicit Input layer,
# with much lighter L2 penalties and near-zero dropout rates
model = Sequential()
model.add(Input(shape=(X_train.shape[1],)))
model.add(Dense(128, activation='relu', kernel_regularizer=l2(0.0002)))
model.add(BatchNormalization())
model.add(Dropout(0.0002))
model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.00001)))
model.add(BatchNormalization())
model.add(Dropout(0.0002))
model.add(Dense(32, activation='relu', kernel_regularizer=l2(0.00001)))
model.add(BatchNormalization())
model.add(Dropout(0.0002))
model.add(Dense(16, activation='relu', kernel_regularizer=l2(0.00001)))
model.add(BatchNormalization())
model.add(Dense(1, activation='linear'))

model.compile(optimizer='adam', loss='mean_squared_error')

early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

# Train with a larger batch size (64) and early stopping
history = model.fit(X_train, y_train, epochs=200, batch_size=64, validation_data=(X_val, y_val), verbose=1, callbacks=[early_stopping])

y_pred_scaled = model.predict(X_val)
y_pred = scaler_y.inverse_transform(y_pred_scaled)
y_val_original = scaler_y.inverse_transform(y_val.reshape(-1, 1))

mse = mean_squared_error(y_val_original, y_pred)
mae = mean_absolute_error(y_val_original, y_pred)

st.write(f"Mean Squared Error: {mse}")
st.write(f"Mean Absolute Error: {mae}")

final_prediction_scaled = model.predict(X_scaled[-1].reshape(1, -1))
final_prediction = scaler_y.inverse_transform(final_prediction_scaled)
st.write(f"Final Predicted Oil Temperature: {final_prediction[0][0]}")

def create_visualizations(y_val_original, y_pred):
    df_val_lr = pd.DataFrame({
        'Actual': y_val_original.flatten(),
        'Predicted': y_pred.flatten()
    })

    scatter_plot_lr = go.Figure()
    scatter_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=df_val_lr['Predicted'], mode='markers', name='Actual vs. Predicted', marker=dict(color='orange')))
    scatter_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], mode='lines', name='Ideal', line=dict(color='black')))
    scatter_plot_lr.update_layout(
        title='Actual vs. Predicted Oil Temperature (Variant 4)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Predicted Oil Temperature',
        plot_bgcolor='white'
    )

    residuals_lr = df_val_lr['Actual'] - df_val_lr['Predicted']
    residual_plot_lr = go.Figure()
    residual_plot_lr.add_trace(go.Scatter(x=df_val_lr['Actual'], y=residuals_lr, mode='markers', name='Residuals', marker=dict(color='orange')))
    residual_plot_lr.add_trace(go.Scatter(x=[df_val_lr['Actual'].min(), df_val_lr['Actual'].max()], y=[0, 0], mode='lines', name='Zero Residual Line', line=dict(color='black')))
    residual_plot_lr.update_layout(
        title='Residual Plot (Variant 4)',
        xaxis_title='Actual Oil Temperature',
        yaxis_title='Residuals',
        plot_bgcolor='white'
    )

    df_val_lr['Timestamp'] = pd.date_range(start='2016-01-01', periods=len(df_val_lr), freq='h')
    time_series_plot_lr = go.Figure()
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Actual'], mode='lines', name='Actual', line=dict(color='orange')))
    time_series_plot_lr.add_trace(go.Scatter(x=df_val_lr['Timestamp'], y=df_val_lr['Predicted'], mode='lines', name='Predicted', line=dict(color='black')))
    time_series_plot_lr.update_layout(
        title='Time Series Plot of Actual vs. Predicted Oil Temperature (Variant 4)',
        xaxis_title='Timestamp',
        yaxis_title='Oil Temperature',
        plot_bgcolor='white'
    )

    return scatter_plot_lr, residual_plot_lr, time_series_plot_lr

scatter_plot_lr, residual_plot_lr, time_series_plot_lr = create_visualizations(y_val_original, y_pred)

st.plotly_chart(scatter_plot_lr)
st.plotly_chart(residual_plot_lr)
st.plotly_chart(time_series_plot_lr)

st.write("Training Epochs for Variant 4")
epochs = list(range(1, len(history.history['loss']) + 1))
epochs_plot = go.Figure()
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['loss'], mode='lines', name='Training Loss', line=dict(color='orange')))
epochs_plot.add_trace(go.Scatter(x=epochs, y=history.history['val_loss'], mode='lines', name='Validation Loss', line=dict(color='black')))
epochs_plot.update_layout(
    title='Training and Validation Loss Over Epochs (Variant 4)',
    xaxis_title='Epochs',
    yaxis_title='Loss',
    plot_bgcolor='white'
)
st.plotly_chart(epochs_plot)