Files changed (1)
  1. app.py +277 -0
app.py ADDED
@@ -0,0 +1,277 @@
+ import numpy as np
+ from sklearn.preprocessing import LabelEncoder
+ from sklearn.model_selection import train_test_split
+ from tensorflow.keras.models import Sequential, Model
+ from tensorflow.keras.layers import Dense, Dropout, Input, LayerNormalization, MultiHeadAttention, GlobalAveragePooling1D, Embedding, Layer, LSTM, Bidirectional, Conv1D
+ from tensorflow.keras.optimizers import Adam
+ from tensorflow.keras.utils import to_categorical
+ from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
+ import tensorflow as tf
+ import optuna
+ import gradio as gr
+
+ # Combined dataset of observed outcomes
+ data = [
+     "Double big 12", "Single big 11", "Single big 13", "Double big 12", "Double small 10",
+     "Double big 12", "Double big 12", "Single small 7", "Single small 5", "Single small 9",
+     "Single big 13", "Double small 8", "Single small 5", "Double big 14", "Single big 11",
+     "Double big 14", "Single big 17", "Triple 9", "Double small 6", "Single big 13",
+     "Double big 14", "Double small 8", "Double small 8", "Single big 13", "Single small 9",
+     "Double small 8", "Double small 8", "Single big 12", "Double small 8", "Double big 14",
+     "Double small 10", "Single big 13", "Single big 11", "Double big 14", "Double big 14",
+     "Double small", "Single big", "Double big", "Single small", "Single small",
+     "Double small", "Single small", "Single small", "Double small", "Double small",
+     "Double big", "Single big", "Triple", "Double big", "Single big", "Single big",
+     "Double small", "Single small", "Double big", "Double small", "Double big",
+     "Single small", "Single big", "Double small", "Double big", "Double big",
+     "Double small", "Single big", "Double big", "Triple", "Single big", "Double small",
+     "Single big", "Single small", "Double small", "Single big", "Single big",
+     "Single big", "Double small", "Double small", "Single big", "Single small",
+     "Single big", "Single small", "Single small", "Double small", "Single small",
+     "Single big"
+ ]
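+ # Note: entries with a trailing dice total (e.g. "Double big 12") and entries
+ # without one ("Double big") are encoded as distinct classes by the
+ # LabelEncoder below, so the two formats are treated as unrelated outcomes.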
+
+ # Counting the data points
+ num_data_points = len(data)
+ print(f'Total number of data points: {num_data_points}')
+
+ # Encoding the labels
+ encoder = LabelEncoder()
+ encoded_data = encoder.fit_transform(data)
+
+ # Create sequences: each window of sequence_length outcomes is paired with
+ # the outcome that follows it as the prediction target
+ sequence_length = 10
+ X, y = [], []
+ for i in range(len(encoded_data) - sequence_length):
+     X.append(encoded_data[i:i + sequence_length])
+     y.append(encoded_data[i + sequence_length])
+
+ X = np.array(X)
+ y = np.array(y)
+ y = to_categorical(y, num_classes=len(encoder.classes_))
+
+ # X already has shape (num_samples, sequence_length); the Embedding layer
+ # consumes the integer-encoded sequences directly, so no reshape is needed
+
+ print(f'Input shape: {X.shape}')
+ print(f'Output shape: {y.shape}')
+
+ class TransformerBlock(Layer):
+     def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1, **kwargs):
+         super(TransformerBlock, self).__init__(**kwargs)
+         # Store constructor args so get_config can serialize them
+         self.embed_dim = embed_dim
+         self.num_heads = num_heads
+         self.ff_dim = ff_dim
+         self.rate = rate
+         self.att = MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
+         self.ffn = Sequential([
+             Dense(ff_dim, activation="relu"),
+             Dense(embed_dim),
+         ])
+         self.layernorm1 = LayerNormalization(epsilon=1e-6)
+         self.layernorm2 = LayerNormalization(epsilon=1e-6)
+         self.dropout1 = Dropout(rate)
+         self.dropout2 = Dropout(rate)
+
+     def get_config(self):
+         # Needed so the saved model can be reloaded via custom_objects
+         config = super(TransformerBlock, self).get_config()
+         config.update({
+             'embed_dim': self.embed_dim,
+             'num_heads': self.num_heads,
+             'ff_dim': self.ff_dim,
+             'rate': self.rate,
+         })
+         return config
+
+     def call(self, inputs, training=False):
+         attn_output = self.att(inputs, inputs)
+         attn_output = self.dropout1(attn_output, training=training)
+         out1 = self.layernorm1(inputs + attn_output)
+         ffn_output = self.ffn(out1)
+         ffn_output = self.dropout2(ffn_output, training=training)
+         return self.layernorm2(out1 + ffn_output)
+
+ def build_model(trial):
+     embed_dim = trial.suggest_int('embed_dim', 64, 256, step=32)
+     num_heads = trial.suggest_int('num_heads', 2, 8, step=2)
+     ff_dim = trial.suggest_int('ff_dim', 128, 512, step=64)
+     rate = trial.suggest_float('dropout', 0.1, 0.5, step=0.1)
+     num_transformer_blocks = trial.suggest_int('num_transformer_blocks', 1, 3)
+
+     inputs = Input(shape=(sequence_length,))
+     embedding_layer = Embedding(input_dim=len(encoder.classes_), output_dim=embed_dim)
+     x = embedding_layer(inputs)
+
+     for _ in range(num_transformer_blocks):
+         transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim, rate)
+         x = transformer_block(x)
+
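+     # Hybrid head: a Conv1D + bidirectional LSTM stack on top of the
+     # transformer blocks, pooled down to one vector per sequence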
+     x = Conv1D(128, 3, activation='relu')(x)
+     x = Bidirectional(LSTM(128, return_sequences=True))(x)
+     x = GlobalAveragePooling1D()(x)
+     x = Dropout(rate)(x)
+     x = Dense(ff_dim, activation="relu")(x)
+     x = Dropout(rate)(x)
+     outputs = Dense(len(encoder.classes_), activation="softmax")(x)
+
+     model = Model(inputs=inputs, outputs=outputs)
+
+     optimizer = Adam(learning_rate=trial.suggest_float('lr', 1e-5, 1e-2, log=True))
+     model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
+
+     return model
+
+ # Split data into train, validation, and test sets
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+ X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
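+ # Resulting proportions: 64% train, 16% validation, 20% test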
+
+ def objective(trial):
+     model = build_model(trial)
+
+     early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
+     reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=1e-6)
+
+     history = model.fit(
+         X_train, y_train,
+         epochs=100,
+         batch_size=64,
+         validation_data=(X_val, y_val),
+         callbacks=[early_stopping, reduce_lr],
+         verbose=0
+     )
+
+     val_accuracy = max(history.history['val_accuracy'])
+     return val_accuracy
+
+ study = optuna.create_study(direction='maximize')
+ study.optimize(objective, n_trials=50)
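+ # Each trial trains for up to 100 epochs, so 50 trials can take a while
+ # even on this small dataset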
+
+ best_trial = study.best_trial
+ print(f'Best hyperparameters: {best_trial.params}')
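+ # Rebuilding with best_trial works because Optuna's FrozenTrial replays the
+ # stored values for each suggest_* call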
+
+ best_model = build_model(best_trial)
+ early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
+ reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=10, min_lr=1e-6)
+
+ history = best_model.fit(
+     X_train, y_train,
+     epochs=500,
+     batch_size=64,
+     validation_data=(X_val, y_val),
+     callbacks=[early_stopping, reduce_lr],
+     verbose=2
+ )
+
+ # Evaluate on test set
+ test_loss, test_accuracy = best_model.evaluate(X_test, y_test, verbose=0)
+ print(f'Test accuracy: {test_accuracy:.4f}')
+
+ def predict_next(model, data, sequence_length, encoder):
+     last_sequence = data[-sequence_length:]
+     last_sequence = np.array(encoder.transform(last_sequence)).reshape((1, sequence_length))
+     prediction = model.predict(last_sequence)
+     predicted_label = encoder.inverse_transform([np.argmax(prediction)])
+     return predicted_label[0]
+
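+ # update_data appends the new outcome and, once the list is longer than
+ # sequence_length, drops the oldest entry, keeping the history length constant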
+ def update_data(data, new_outcome):
+     data.append(new_outcome)
+     if len(data) > sequence_length:
+         data.pop(0)
+     return data
+
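+ # Fine-tune the current model on the augmented dataset instead of training
+ # a fresh one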
+ def retrain_model(model, X, y, epochs=10):
+     early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
+     reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=1e-6)
+
+     X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
+
+     model.fit(
+         X_train, y_train,
+         epochs=epochs,
+         batch_size=64,
+         validation_data=(X_val, y_val),
+         callbacks=[early_stopping, reduce_lr],
+         verbose=0
+     )
+     return model
+
+ # Interactive component
+ def gradio_predict(outcome):
+     global data, best_model
+
+     if outcome not in encoder.classes_:
+         return "Invalid outcome. Please try again."
+
+     data = update_data(data, outcome)
+
+     if len(data) < sequence_length:
+         return "Not enough data to make a prediction."
+
+     predicted_next = predict_next(best_model, data, sequence_length, encoder)
+     return f'Predicted next outcome: {predicted_next}'
+
+ def gradio_update(actual_next):
+     global data, X, y, best_model
+
+     if actual_next not in encoder.classes_:
+         return "Invalid outcome. Please try again."
+
+     data = update_data(data, actual_next)
+
+     if len(data) < sequence_length + 1:
+         return "Not enough data to update the model."
+
+     # Build one new training example: the sequence_length outcomes preceding
+     # actual_next as input, with actual_next as the target
+     encoded_actual_next = encoder.transform([actual_next])[0]
+     new_sequence = np.array(encoder.transform(data[-(sequence_length + 1):-1])).reshape((1, sequence_length))
+     X = np.append(X, new_sequence, axis=0)
+     y = np.append(y, to_categorical([encoded_actual_next], num_classes=len(encoder.classes_)), axis=0)
+
+     best_model = retrain_model(best_model, X, y, epochs=10)
+
+     return "Model updated with new data."
+
+ # The Gradio interface is defined once, further below, after the model has
+ # been saved and reloaded, so the loaded-model test can be wired into the
+ # same UI (launching a demo here would block the rest of the script).
+
+ # Save the model for future use
+ best_model.save("enhanced_transformer_model.h5")
+ print("Model saved as enhanced_transformer_model.h5")
+
+ # Loading the model for later use
+ loaded_model = tf.keras.models.load_model("enhanced_transformer_model.h5", custom_objects={'TransformerBlock': TransformerBlock})
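+ # Reloading works because TransformerBlock implements get_config and is
+ # registered via custom_objects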
+
+ # Function to test the loaded model
+ def test_loaded_model(test_outcome):
+     global data
+
+     if test_outcome not in encoder.classes_:
+         return "Invalid outcome. Test prediction aborted."
+
+     data = update_data(data, test_outcome)
+     if len(data) >= sequence_length:
+         predicted_next = predict_next(loaded_model, data, sequence_length, encoder)
+         return f'Predicted next outcome with loaded model: {predicted_next}'
+     else:
+         return "Not enough data to make a prediction."
+
+ # Gradio interface: predictions, online model updates, and loaded-model tests
+ with gr.Blocks() as demo:
+     gr.Markdown("## Outcome Prediction with Enhanced Transformer")
+     with gr.Row():
+         outcome_input = gr.Textbox(label="Current Outcome")
+         predict_button = gr.Button("Predict Next")
+         predicted_output = gr.Textbox(label="Predicted Next Outcome")
+     with gr.Row():
+         actual_input = gr.Textbox(label="Actual Next Outcome")
+         update_button = gr.Button("Update Model")
+         update_output = gr.Textbox(label="Update Status")
+     with gr.Row():
+         test_input = gr.Textbox(label="Test Outcome for Loaded Model")
+         test_button = gr.Button("Test Loaded Model")
+         test_output = gr.Textbox(label="Loaded Model Prediction")
+
+     predict_button.click(gradio_predict, inputs=outcome_input, outputs=predicted_output)
+     update_button.click(gradio_update, inputs=actual_input, outputs=update_output)
+     test_button.click(test_loaded_model, inputs=test_input, outputs=test_output)
+
+ demo.launch()