-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathCNN-2.py
106 lines (84 loc) · 2.75 KB
/
CNN-2.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import Input
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
# --- Data loading and preprocessing ---
# Target: final exam score; predictors: the mixed numeric/categorical columns below.
path = 'StudentPerformanceFactors.csv'
student_performance = pd.read_csv(path)

y = student_performance.Exam_Score
features = [
    'Hours_Studied', 'Previous_Scores', 'Attendance', 'Sleep_Hours', 'Tutoring_Sessions',
    'Physical_Activity', 'Parental_Involvement', 'Gender', 'Access_to_Resources', 'Extracurricular_Activities',
    'Motivation_Level', 'Internet_Access', 'Family_Income', 'Teacher_Quality', 'School_Type',
    'Peer_Influence', 'Learning_Disabilities', 'Parental_Education_Level', 'Distance_from_Home'
]
X = student_performance[features]
# One-hot encode the categorical columns; numeric columns pass through unchanged.
X = pd.get_dummies(X)

train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)

# Standardize using statistics fitted on the training split only,
# so no information leaks from the validation set.
scaler = StandardScaler()
train_X = scaler.fit_transform(train_X)
val_X = scaler.transform(val_X)

# Reshape flat feature vectors to (samples, n_features, 1, 1) so Conv2D layers
# can consume them. Derive the width from the data rather than hard-coding 40,
# which would silently break if get_dummies produced a different column count.
n_features = train_X.shape[1]
train_X = train_X.reshape(train_X.shape[0], n_features, 1, 1)
val_X = val_X.reshape(val_X.shape[0], n_features, 1, 1)
# Build a small 1-D-style CNN (Conv2D with width-1 kernels over a
# (40, 1, 1) "image" of features) that regresses the exam score.
model = models.Sequential()
model.add(Input(shape=(40, 1, 1)))  # one "pixel" per preprocessed feature
model.add(layers.Conv2D(32, (2, 1), activation='relu'))
model.add(layers.MaxPooling2D((2, 1)))
model.add(layers.Conv2D(64, (2, 1), activation='relu'))
model.add(layers.MaxPooling2D((2, 1)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(1))  # linear output: single regression value

# MSE loss for regression; MAE tracked as a human-readable metric.
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
# Stop when validation MAE has not improved for 10 epochs, and roll the
# model back to the best weights seen during training.
early_stopping = EarlyStopping(monitor='val_mae', mode='min', patience=10,
                               restore_best_weights=True, verbose=1)

# Halve the learning rate after 5 stagnant epochs, flooring it at 1e-6.
reduce_lr = ReduceLROnPlateau(monitor='val_mae', factor=0.5, patience=5,
                              min_lr=1e-6, verbose=1)

# Up to 100 epochs; the callbacks above decide the actual stopping point.
history = model.fit(train_X, train_y,
                    epochs=100,
                    validation_data=(val_X, val_y),
                    callbacks=[early_stopping, reduce_lr])
import matplotlib.pyplot as plt

# Per-epoch training curves recorded by model.fit.
hist = history.history
loss = hist['loss']
val_loss = hist['val_loss']
mae = hist['mae']
val_mae = hist['val_mae']
epochs = range(1, len(loss) + 1)

# Side-by-side panels: loss on the left, MAE on the right.
panels = [
    (loss, val_loss, 'Training Loss', 'Validation Loss',
     'Training and Validation Loss', 'Loss'),
    (mae, val_mae, 'Training MAE', 'Validation MAE',
     'Training and Validation MAE', 'Mean Absolute Error'),
]
plt.figure(figsize=(12, 6))
for idx, (train_curve, val_curve, train_lbl, val_lbl, title, ylab) in enumerate(panels, start=1):
    plt.subplot(1, 2, idx)
    plt.plot(epochs, train_curve, label=train_lbl)
    plt.plot(epochs, val_curve, label=val_lbl)
    plt.title(title)
    plt.xlabel('Epochs')
    plt.ylabel(ylab)
    plt.legend()
    plt.grid()
plt.tight_layout()
plt.show()

# Final held-out metrics (EarlyStopping restored the best weights).
val_loss, val_mae = model.evaluate(val_X, val_y)
print(f"Validation Loss: {val_loss}, Validation MAE: {val_mae}")