neuralNetworkTest.py
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
import matplotlib.pyplot as plt
import seaborn as sns
# Load the dataset
desired_path = "D:/OneDrive - Polytechnic University of the Philippines/2024_Feb/Designation/ITech Designee/2024 Events/12-02_06_Neural_Networks/ECDS-NeuralNets/Day 1/Practice_Dataset"
data = pd.read_csv(f'{desired_path}/predictive_maintenance.csv')
# Drop 'UDI' and ensure 'Failure Type' is removed from features
data.drop('UDI', axis=1, inplace=True)
X = data.drop('Failure Type', axis=1)
y = data['Failure Type']
# Check for non-numeric columns and encode them
print(X.dtypes) # Inspect data types
X = pd.get_dummies(X) # Convert categorical to numeric if needed
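# Note: one-hot encoding expands each categorical column into several indicator columns,
# so the final feature count (and input_size below) depends on the categories present.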
# Handle missing values if any
print(X.isnull().sum()) # Check for NaNs
X = X.fillna(0) # Replace NaNs with 0
# Standardize the features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
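# StandardScaler rescales each feature to zero mean and unit variance, which helps gradient-based training converge.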
# Encode the target variable
label_encoder = LabelEncoder()
y_encoded = label_encoder.fit_transform(y)
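# LabelEncoder maps each failure-type string to an integer index (0 .. n_classes - 1), as required by CrossEntropyLoss.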
# Split the dataset into training and validation sets
X_train, X_val, y_train, y_val = train_test_split(X_scaled, y_encoded, test_size=0.2, random_state=42)
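# Note: if the failure types are imbalanced, passing stratify=y_encoded would keep class proportions similar in both splits.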
# Convert to PyTorch tensors
X_train_tensor = torch.Tensor(X_train)
y_train_tensor = torch.LongTensor(y_train)
X_val_tensor = torch.Tensor(X_val)
y_val_tensor = torch.LongTensor(y_val)
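# CrossEntropyLoss expects float features and int64 (Long) class-index targets, hence Tensor vs. LongTensor.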
# Define the neural network: two fully connected hidden layers (36 and 24 units) with ReLU activations
class PredictiveMaintenanceNN(nn.Module):
    def __init__(self, input_size):
        super(PredictiveMaintenanceNN, self).__init__()
        self.fc1 = nn.Linear(input_size, 36)
        self.fc2 = nn.Linear(36, 24)
        self.fc3 = nn.Linear(24, len(label_encoder.classes_))
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so applying an explicit Softmax here would distort the loss and gradients.
        return self.fc3(x)
# Initialize model, loss function, and optimizer
input_size = X.shape[1]
model = PredictiveMaintenanceNN(input_size)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
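# SGD with momentum 0.9; an adaptive optimizer such as optim.Adam(model.parameters(), lr=1e-3) could be swapped in if convergence is slow.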
# Training the model
losses = []
accuracies = []
for epoch in range(100):
    model.train()
    # Forward pass
    outputs = model(X_train_tensor)
    loss = criterion(outputs, y_train_tensor)
    losses.append(loss.item())
    # Backward pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # Calculate accuracy
    with torch.no_grad():
        _, predicted = torch.max(outputs, 1)
        accuracy = accuracy_score(y_train_tensor.numpy(), predicted.numpy())
        accuracies.append(accuracy)
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}, Accuracy: {accuracy * 100:.2f}%')
# Plot Loss vs Epoch
plt.figure(figsize=(10, 5))
plt.plot(range(1, 101), losses, label='Training Loss', color='red')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Learning Curve (Loss vs Epoch)')
plt.legend()
plt.grid()
plt.show()
# Plot Accuracy vs Epoch
plt.figure(figsize=(10, 5))
plt.plot(range(1, 101), accuracies, label='Training Accuracy', color='blue')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training Accuracy vs Epoch')
plt.legend()
plt.grid()
plt.show()
# Evaluate the model
with torch.no_grad():
    model.eval()
    y_pred_tensor = model(X_val_tensor)
    _, y_pred = torch.max(y_pred_tensor, 1)
# Convert predictions and true labels to numpy arrays
y_pred = y_pred.numpy()
y_val = y_val_tensor.numpy()
# Confusion Matrix
cm = confusion_matrix(y_val, y_pred)
print("Confusion Matrix:")
print(cm)
# Classification Report
report = classification_report(y_val, y_pred, target_names=label_encoder.classes_)
print("\nClassification Report:")
print(report)
# Plot Confusion Matrix
plt.figure(figsize=(6, 5))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", xticklabels=label_encoder.classes_, yticklabels=label_encoder.classes_)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.show()