-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathutils.py
More file actions
86 lines (78 loc) · 3.02 KB
/
utils.py
File metadata and controls
86 lines (78 loc) · 3.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold, cross_val_score, cross_val_predict
from sklearn.metrics import roc_curve, auc, accuracy_score, classification_report, confusion_matrix, r2_score
from sklearn.preprocessing import StandardScaler
def loadTrainingData():
    """Read the training CSV and split it into features and labels.

    Returns:
        (X, y): X is the training DataFrame with the patient id and the
        label column removed; y is the 'No Show/LateCancel Flag' Series.
    """
    frame = pd.read_csv('data/data_train.csv')
    target = frame['No Show/LateCancel Flag']
    features = frame.drop(columns=['Patient Id', 'No Show/LateCancel Flag'])
    return (features, target)
def loadTestData():
    """Read the test CSV, separating features from patient identifiers.

    Returns:
        (X, patientIds): X is the test DataFrame with the id and label
        columns dropped; patientIds is the 'Patient Id' Series, kept so
        predictions can be joined back to patients later.
    """
    frame = pd.read_csv('data/data_test.csv')
    ids = frame['Patient Id']
    features = frame.drop(columns=['Patient Id', 'No Show/LateCancel Flag'])
    return (features, ids)
def processData(X_train, X_test, y_train, y_test):
    """Balance the training split by undersampling the majority (0) class.

    Keeps every positive (label == 1) training row and randomly samples an
    equal number of negative (label == 0) rows, so the returned training
    set is 50/50. The test split is passed through untouched.

    Args:
        X_train (pd.DataFrame): training features.
        X_test: test features (returned unchanged).
        y_train (pd.Series): binary training labels (0/1).
        y_test: test labels (returned unchanged).

    Returns:
        (X_train, X_test, y_train, y_test) with the training pair balanced
        and row-aligned (identical index).
    """
    # Keep all positive-class rows.
    X_pos = X_train[y_train == 1]
    y_pos = y_train[y_train == 1]
    # Undersample negatives down to the positive-class count.
    X_neg = X_train[y_train == 0].sample(len(X_pos))
    # BUG FIX: labels must come from the SAME sampled rows as the features.
    # The original drew a second independent `.sample(...)` from
    # y_train[y_train == 0], leaving X_train and y_train with mismatched
    # indices -- a silent misalignment in any index-aware operation.
    y_neg = y_train.loc[X_neg.index]
    X_train = pd.concat([X_pos, X_neg])
    y_train = pd.concat([y_pos, y_neg])
    return (X_train, X_test, y_train, y_test)
def kFoldValidation(X, y, func, k):
    """Run k-fold cross-validation, plot per-fold and mean ROC curves,
    and return the single fold model that achieved the highest AUC.

    Each training fold is class-balanced via ``processData`` before
    ``func`` trains on it; the held-out fold is scored untouched.

    Args:
        X (pd.DataFrame): full feature matrix.
        y (pd.Series): full binary label vector.
        func: callable(X_train, X_test, y_train, y_test) -> fitted model
            exposing ``.predict()``.
        k (int): number of folds.

    Returns:
        (model, max_auc): the best-scoring fitted model and its AUC
        (model is None and max_auc 0.0 if no fold exceeds 0.0).
    """
    model = None
    max_auc = 0.0
    tprs = []  # per-fold TPRs interpolated onto a common 100-point FPR grid
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)
    kfold = KFold(n_splits=k)
    for train_index, test_index in kfold.split(X):
        X_train = X.iloc[train_index]
        X_test = X.iloc[test_index]
        y_train = y.iloc[train_index]
        y_test = y.iloc[test_index]
        # Balance the training fold only; the test fold stays as-is.
        X_train, X_test, y_train, y_test = processData(X_train, X_test, y_train, y_test)
        mod = func(X_train, X_test, y_train, y_test)
        # NOTE(review): hard class predictions yield a degenerate 3-point
        # ROC; mod.predict_proba(X_test)[:, 1] would give a smoother curve.
        # Kept as-is to preserve behavior for callers whose models may not
        # expose predict_proba.
        y_pred = mod.predict(X_test)
        # AUC ROC for this fold.
        fpr, tpr, _ = roc_curve(y_test, y_pred)
        AUC = auc(fpr, tpr)
        aucs.append(AUC)
        # Track the best fold's model. (The original recomputed y_pred and
        # roc_curve here with identical, deterministic inputs -- that
        # redundant work, the unused loop counter `i`, and the discarded
        # kfold.get_n_splits(X) call are removed; results are unchanged.)
        if AUC > max_auc:
            model = mod
            max_auc = AUC
        tprs.append(np.interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0  # force the curve to start at the origin
        plt.rcParams.update({'font.size': 28})
        plt.plot(fpr, tpr, lw=1, alpha=0.3)
    # Mean ROC across folds.
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0  # force the curve to end at (1, 1)
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    plt.plot(mean_fpr, mean_tpr, color='b',
             label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
             lw=2, alpha=.8)
    # +/- 1 standard deviation band around the mean ROC.
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                     label=r'$\pm$ 1 std. dev.')
    # Chance (random classifier) diagonal.
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
             label='Chance', alpha=.8)
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('AUC ROC')
    plt.legend()
    plt.show()
    return (model, max_auc)