# by Sven Mayer
# The material is licensed under the Creative Commons Attribution-Share Alike 4.0 (CC BY-SA) license: https://creativecommons.org/licenses/by-sa/4.0
import os

# Restrict CUDA to the second physical GPU before TensorFlow is imported.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import sys
print("Python version: ", sys.version)

import numpy as np
print("numpy version", np.__version__)

import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
print("matplotlib version", matplotlib.__version__)

import tensorflow as tf
print("TF:", tf.__version__)

# Make only the first visible GPU available to TF, and let its memory grow
# on demand instead of reserving the whole card up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.set_visible_devices(physical_devices[0], 'GPU')
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
else:
    print("TF: No GPU found")

import pandas as pd
from tqdm import tqdm
Python version: 3.9.1 (default, Jan 8 2021, 17:17:17) [Clang 12.0.0 (clang-1200.0.32.28)] numpy version 1.19.5 matplotlib version 3.3.3 INFO:tensorflow:Enabling eager execution INFO:tensorflow:Enabling v2 tensorshape INFO:tensorflow:Enabling resource variables INFO:tensorflow:Enabling tensor equality INFO:tensorflow:Enabling control flow v2 TF: 2.5.0-rc0 TF: No GPU found
# Load the pre-extracted capacitive touch blobs for three participants.
df = pd.read_pickle("./dataKnckleTouch/knuckletouch_only3ps.pkl")

# Leave-one-user-out split: users 1 and 2 train, user 3 validates.
dfTrain = df[df.userID.isin([1, 2])]
dfValidation = df[df.userID.isin([3])]

###
# Stack the flattened 1-D blobs and restore 20x20 single-channel images.
x_train = np.stack(dfTrain.Blob1D.to_list()).reshape((-1, 20, 20, 1))
y_train = np.stack(dfTrain.Label.to_list())
print(x_train.shape, y_train.shape)

x_val = np.stack(dfValidation.Blob1D.to_list()).reshape((-1, 20, 20, 1))
y_val = np.stack(dfValidation.Label.to_list())
print(x_val.shape, y_val.shape)

# Per-sample input shape for the network, e.g. (20, 20, 1).
input_size = x_val.shape[1:]
print(input_size)
def getModel():
    """Build the small CNN used for binary finger-vs-knuckle classification.

    Reads the module-level ``input_size`` (the per-sample image shape) and
    returns an uncompiled ``tf.keras.Sequential`` model with a softmax
    2-class output.
    """
    return tf.keras.Sequential([
        tf.keras.layers.InputLayer(input_size, name="InputLayer"),
        tf.keras.layers.Conv2D(32, kernel_size=(3, 3), name="Conv1",
                               activation='relu'),
        tf.keras.layers.MaxPool2D(),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Flatten(),
        # L2 activity regularization discourages large hidden activations.
        tf.keras.layers.Dense(64, name="HiddenLayer1", activation='relu',
                              activity_regularizer=tf.keras.regularizers.L2(0.001)),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(2, name="OutputLayer", activation='softmax'),
    ])
model = getModel()
model.summary()

learning_rate = 0.001
optimizer = tf.keras.optimizers.Adamax(learning_rate=learning_rate)
lossFunction = tf.keras.losses.CategoricalCrossentropy()
model.compile(optimizer=optimizer, loss=lossFunction, metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    validation_data=(x_val, y_val),
                    epochs=50,
                    verbose=1)

# Learning curves: accuracy per training epoch for train and validation.
plt.plot(history.history['accuracy'], label="Train")
plt.plot(history.history['val_accuracy'], label="Validation")
plt.ylim(0.5, 1)
#plt.text (0.1,.9, f"$lr={learning_rate}$", size=12)
# FIX: the axes were mislabelled 'Predicted'/'Loss' although the plot shows
# accuracy over epochs.
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
# FIX: label= was passed above but a legend was never drawn.
plt.legend()
plt.show()
(14662, 20, 20, 1) (14662, 2) (6342, 20, 20, 1) (6342, 2) (20, 20, 1) Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= Conv1 (Conv2D) (None, 18, 18, 32) 320 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 9, 9, 32) 0 _________________________________________________________________ dropout (Dropout) (None, 9, 9, 32) 0 _________________________________________________________________ flatten (Flatten) (None, 2592) 0 _________________________________________________________________ HiddenLayer1 (Dense) (None, 64) 165952 _________________________________________________________________ dropout_1 (Dropout) (None, 64) 0 _________________________________________________________________ OutputLayer (Dense) (None, 2) 130 ================================================================= Total params: 166,402 Trainable params: 166,402 Non-trainable params: 0 _________________________________________________________________ Epoch 1/50 459/459 [==============================] - 2s 4ms/step - loss: 1.5268 - accuracy: 0.6364 - val_loss: 0.6541 - val_accuracy: 0.7031 Epoch 2/50 459/459 [==============================] - 2s 4ms/step - loss: 0.6050 - accuracy: 0.6758 - val_loss: 0.5985 - val_accuracy: 0.7026 Epoch 3/50 459/459 [==============================] - 2s 4ms/step - loss: 0.5494 - accuracy: 0.6917 - val_loss: 0.5589 - val_accuracy: 0.7162 Epoch 4/50 459/459 [==============================] - 2s 4ms/step - loss: 0.4933 - accuracy: 0.7363 - val_loss: 0.4737 - val_accuracy: 0.8199 Epoch 5/50 459/459 [==============================] - 2s 4ms/step - loss: 0.4228 - accuracy: 0.8202 - val_loss: 0.4640 - val_accuracy: 0.8015 Epoch 6/50 459/459 [==============================] - 2s 4ms/step - loss: 0.3663 - accuracy: 0.8562 - val_loss: 0.4133 - val_accuracy: 0.8311 Epoch 7/50 459/459 
[==============================] - 2s 4ms/step - loss: 0.3235 - accuracy: 0.8805 - val_loss: 0.4430 - val_accuracy: 0.8109 Epoch 8/50 459/459 [==============================] - 2s 4ms/step - loss: 0.3019 - accuracy: 0.8906 - val_loss: 0.3653 - val_accuracy: 0.8622 Epoch 9/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2907 - accuracy: 0.8938 - val_loss: 0.4449 - val_accuracy: 0.8111 Epoch 10/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2727 - accuracy: 0.9004 - val_loss: 0.4250 - val_accuracy: 0.8269 Epoch 11/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2649 - accuracy: 0.9061 - val_loss: 0.4305 - val_accuracy: 0.8190 Epoch 12/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2394 - accuracy: 0.9154 - val_loss: 0.3853 - val_accuracy: 0.8425 Epoch 13/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2343 - accuracy: 0.9200 - val_loss: 0.4220 - val_accuracy: 0.8294 Epoch 14/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2162 - accuracy: 0.9307 - val_loss: 0.4732 - val_accuracy: 0.8195 Epoch 15/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2158 - accuracy: 0.9289 - val_loss: 0.3747 - val_accuracy: 0.8507 Epoch 16/50 459/459 [==============================] - 2s 4ms/step - loss: 0.2008 - accuracy: 0.9351 - val_loss: 0.4414 - val_accuracy: 0.8277 Epoch 17/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1996 - accuracy: 0.9349 - val_loss: 0.3644 - val_accuracy: 0.8587 Epoch 18/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1977 - accuracy: 0.9368 - val_loss: 0.4060 - val_accuracy: 0.8425 Epoch 19/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1829 - accuracy: 0.9395 - val_loss: 0.3913 - val_accuracy: 0.8521 Epoch 20/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1839 - accuracy: 0.9357 - val_loss: 0.3515 - val_accuracy: 0.8658 Epoch 21/50 459/459 
[==============================] - 2s 4ms/step - loss: 0.1848 - accuracy: 0.9399 - val_loss: 0.3551 - val_accuracy: 0.8666 Epoch 22/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1801 - accuracy: 0.9401 - val_loss: 0.3574 - val_accuracy: 0.8661 Epoch 23/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1782 - accuracy: 0.9414 - val_loss: 0.3967 - val_accuracy: 0.8554 Epoch 24/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1705 - accuracy: 0.9424 - val_loss: 0.4159 - val_accuracy: 0.8483 Epoch 25/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1743 - accuracy: 0.9429 - val_loss: 0.4222 - val_accuracy: 0.8472 Epoch 26/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1713 - accuracy: 0.9421 - val_loss: 0.4025 - val_accuracy: 0.8513 Epoch 27/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1614 - accuracy: 0.9444 - val_loss: 0.3980 - val_accuracy: 0.8605 Epoch 28/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1594 - accuracy: 0.9465 - val_loss: 0.4153 - val_accuracy: 0.8529 Epoch 29/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1654 - accuracy: 0.9462 - val_loss: 0.3722 - val_accuracy: 0.8620 Epoch 30/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1645 - accuracy: 0.9417 - val_loss: 0.4114 - val_accuracy: 0.8554 Epoch 31/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1607 - accuracy: 0.9462 - val_loss: 0.3681 - val_accuracy: 0.8702 Epoch 32/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1572 - accuracy: 0.9440 - val_loss: 0.3719 - val_accuracy: 0.8657 Epoch 33/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1440 - accuracy: 0.9513 - val_loss: 0.5276 - val_accuracy: 0.8209 Epoch 34/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1592 - accuracy: 0.9453 - val_loss: 0.3372 - val_accuracy: 0.8833 Epoch 35/50 
459/459 [==============================] - 2s 4ms/step - loss: 0.1549 - accuracy: 0.9486 - val_loss: 0.3424 - val_accuracy: 0.8745 Epoch 36/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1480 - accuracy: 0.9496 - val_loss: 0.3684 - val_accuracy: 0.8696 Epoch 37/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1561 - accuracy: 0.9463 - val_loss: 0.4780 - val_accuracy: 0.8403 Epoch 38/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1514 - accuracy: 0.9465 - val_loss: 0.4367 - val_accuracy: 0.8579 Epoch 39/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1537 - accuracy: 0.9487 - val_loss: 0.4239 - val_accuracy: 0.8575 Epoch 40/50 459/459 [==============================] - 2s 4ms/step - loss: 0.1497 - accuracy: 0.9476 - val_loss: 0.4576 - val_accuracy: 0.8515 Epoch 41/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1472 - accuracy: 0.9490 - val_loss: 0.3747 - val_accuracy: 0.8694 Epoch 42/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1423 - accuracy: 0.9485 - val_loss: 0.3809 - val_accuracy: 0.8699 Epoch 43/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1391 - accuracy: 0.9543 - val_loss: 0.4099 - val_accuracy: 0.8584 Epoch 44/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1421 - accuracy: 0.9523 - val_loss: 0.3750 - val_accuracy: 0.8794 Epoch 45/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1340 - accuracy: 0.9550 - val_loss: 0.4776 - val_accuracy: 0.8456 Epoch 46/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1386 - accuracy: 0.9524 - val_loss: 0.3536 - val_accuracy: 0.8797 Epoch 47/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1389 - accuracy: 0.9533 - val_loss: 0.3292 - val_accuracy: 0.8868 Epoch 48/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1356 - accuracy: 0.9527 - val_loss: 0.4020 - val_accuracy: 0.8664 Epoch 
49/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1373 - accuracy: 0.9534 - val_loss: 0.3168 - val_accuracy: 0.8901 Epoch 50/50 459/459 [==============================] - 2s 5ms/step - loss: 0.1328 - accuracy: 0.9548 - val_loss: 0.4138 - val_accuracy: 0.8680
# %%time  -- FIX: IPython cell magic is invalid in a plain .py file; kept as a
# comment so the script parses. (Wrap with time.perf_counter() if timing is
# still wanted outside a notebook.)
#y_train_pred = []
#bs = 128
#for i in range(0, len(dfTrain), bs):
#    y_train_pred.extend(model.predict(x_train[i:i+bs]))

# Predict the validation set in mini-batches to bound peak memory use.
y_val_pred = []
bs = 128
for i in range(0, len(dfValidation), bs):
    y_val_pred.extend(model.predict(x_val[i:i+bs]))
CPU times: user 1.87 s, sys: 169 ms, total: 2.04 s Wall time: 1.47 s
# Map one-hot vectors back to class names; index 1 encodes "Knuckle".
y_val_c = ["Knuckle" if idx == 1 else "Finger" for idx in np.argmax(y_val, axis=1)]
y_val_pred_c = ["Knuckle" if idx == 1 else "Finger" for idx in np.argmax(y_val_pred, axis=1)]

from sklearn.metrics import confusion_matrix

# NOTE(review): with labels=["Knuckle", "Finger"], row/column 0 is "Knuckle",
# so the tn/fp/fn/tp names below treat "Finger" as the positive class.
c = confusion_matrix(y_val_c, y_val_pred_c, labels=["Knuckle", "Finger"])
tn, fp, fn, tp = c.ravel()

# Render the 2x2 matrix with the raw counts annotated in each cell.
plt.imshow(c, cmap="YlGn")
plt.ylabel("True")
plt.xlabel("Predict")
plt.text(0, 0, f"True Negative \n {tn}", ha="center")
plt.text(1, 0, f"False Positive \n {fp}", ha="center")
plt.text(0, 1, f"False Negative \n {fn}", ha="center")
plt.text(1, 1, f"True Positive \n {tp}", ha="center", c="w")
plt.yticks([0, 1], ["Knuckle", "Finger"])
plt.xticks([0, 1], ["Knuckle", "Finger"])
plt.colorbar()
plt.savefig("./figures/13_cm_bin.png", dpi=500, bbox_inches='tight', pad_inches=0)
plt.show()
# Same confusion matrix as above, but each row normalized to sum to 1
# (normalize="true" divides by the number of true samples per class).
y_val_c = ["Knuckle" if idx == 1 else "Finger" for idx in np.argmax(y_val, axis=1)]
y_val_pred_c = ["Knuckle" if idx == 1 else "Finger" for idx in np.argmax(y_val_pred, axis=1)]

from sklearn.metrics import confusion_matrix

c = confusion_matrix(y_val_c, y_val_pred_c, labels=["Knuckle", "Finger"],
                     normalize="true")
tn, fp, fn, tp = c.ravel()

plt.imshow(c, cmap="YlGn")
plt.ylabel("True")
plt.xlabel("Predict")
plt.text(0, 0, f"True Negative \n {np.round(tn, 2)}", ha="center", c="w")
plt.text(1, 0, f"False Positive \n {np.round(fp, 2)}", ha="center")
plt.text(0, 1, f"False Negative \n {np.round(fn, 2)}", ha="center")
plt.text(1, 1, f"True Positive \n {np.round(tp, 2)}", ha="center", c="w")
plt.yticks([0, 1], ["Knuckle", "Finger"])
plt.xticks([0, 1], ["Knuckle", "Finger"])
plt.colorbar()
plt.savefig("./figures/13_cm_bin_normed.png", dpi=500, bbox_inches='tight', pad_inches=0)
plt.show()
y_val_c = list(np.argmax(y_val, axis=1))
y_val_pred_c = list(np.argmax(y_val_pred, axis=1))

# FIX: tn/fp/fn/tp previously still held values from the *row-normalized*
# confusion matrix computed for the last plot. Recall (tp/(tp+fn)) survives
# row normalization, but precision and accuracy mix entries from different
# rows and were therefore wrong. Recompute the raw counts here.
# labels=[1, 0] keeps the same orientation as labels=["Knuckle", "Finger"]
# used above (row/col 0 = Knuckle = class 1, i.e. "Finger" is positive).
tn, fp, fn, tp = confusion_matrix(y_val_c, y_val_pred_c, labels=[1, 0]).ravel()

p = tp / (tp + fp)
r = tp / (tp + fn)
f1 = 2 * ((p * r) / (p + r))
accuracy = (tp + tn) / (tn + fp + fn + tp)

print("Precision", p)  # FIX: was misspelled "Presiciton"
print("Recall", r)
print("F1 Score", f1)
print("Accuracy", accuracy)
Presiciton 0.9001962056839278 Recall 0.8521758636159713 F1 Score 0.8755280826965026 Accuracy 0.8788480167336756
# Switch datasets: MNIST digits, flattened to 784-d vectors, one-hot labels.
(x_train, y_train), (x_val, y_val) = tf.keras.datasets.mnist.load_data()

# Keep a subset (30k train / 5k validation) to speed things up.
x_train = x_train.reshape(-1, 28 * 28)[:30000, :]
x_val = x_val.reshape(-1, 28 * 28)[:5000, :]
y_train = tf.keras.utils.to_categorical(y_train)[:30000, :]
y_val = tf.keras.utils.to_categorical(y_val)[:5000, :]

input_size = x_val.shape[-1]    # 784 features per sample
output_size = y_val.shape[-1]   # 10 digit classes
print(input_size, output_size)
784 10
def getModel():
    """Build a two-hidden-layer MLP for 10-class MNIST classification.

    Reads the module-level ``input_size`` (flattened feature count) and
    ``output_size`` (number of classes); returns an uncompiled
    ``tf.keras.Sequential`` model with a softmax output.
    """
    layers = [
        tf.keras.layers.InputLayer((input_size,), name="InputLayer"),
        tf.keras.layers.Dense(256, name="HiddenLayer1", activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(128, name="HiddenLayer2", activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(output_size, name="OutputLayer", activation='softmax'),
    ]
    return tf.keras.Sequential(layers)
# FIX: the model was built twice -- once for summary() and again right before
# compile() -- so the summarized model was silently discarded. Build it once.
model = getModel()
model.summary()

lossFunction = tf.keras.losses.CategoricalCrossentropy()
model.compile(optimizer="adam", loss=lossFunction, metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    validation_data=(x_val, y_val),
                    epochs=25,
                    verbose=1)
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= HiddenLayer1 (Dense) (None, 256) 200960 _________________________________________________________________ dropout_2 (Dropout) (None, 256) 0 _________________________________________________________________ HiddenLayer2 (Dense) (None, 128) 32896 _________________________________________________________________ dropout_3 (Dropout) (None, 128) 0 _________________________________________________________________ OutputLayer (Dense) (None, 10) 1290 ================================================================= Total params: 235,146 Trainable params: 235,146 Non-trainable params: 0 _________________________________________________________________ Epoch 1/25 938/938 [==============================] - 2s 1ms/step - loss: 23.2819 - accuracy: 0.3983 - val_loss: 1.3672 - val_accuracy: 0.6120 Epoch 2/25 938/938 [==============================] - 1s 1ms/step - loss: 1.6867 - accuracy: 0.4708 - val_loss: 1.1501 - val_accuracy: 0.6544 Epoch 3/25 938/938 [==============================] - 1s 1ms/step - loss: 1.4840 - accuracy: 0.5319 - val_loss: 1.0080 - val_accuracy: 0.7296 Epoch 4/25 938/938 [==============================] - 1s 1ms/step - loss: 1.3193 - accuracy: 0.5720 - val_loss: 0.8866 - val_accuracy: 0.7478 Epoch 5/25 938/938 [==============================] - 1s 1ms/step - loss: 1.2131 - accuracy: 0.6160 - val_loss: 0.7641 - val_accuracy: 0.7624 Epoch 6/25 938/938 [==============================] - 1s 1ms/step - loss: 1.0729 - accuracy: 0.6571 - val_loss: 0.6985 - val_accuracy: 0.8398 Epoch 7/25 938/938 [==============================] - 1s 1ms/step - loss: 0.9422 - accuracy: 0.7267 - val_loss: 0.5970 - val_accuracy: 0.8590 Epoch 8/25 938/938 [==============================] - 1s 1ms/step - loss: 0.8555 - accuracy: 0.7493 - val_loss: 0.5385 - val_accuracy: 0.8582 Epoch 9/25 938/938 
[==============================] - 1s 1ms/step - loss: 0.7864 - accuracy: 0.7683 - val_loss: 0.4948 - val_accuracy: 0.8748 Epoch 10/25 938/938 [==============================] - 1s 1ms/step - loss: 0.7423 - accuracy: 0.7888 - val_loss: 0.4697 - val_accuracy: 0.8888 Epoch 11/25 938/938 [==============================] - 1s 1ms/step - loss: 0.7296 - accuracy: 0.7883 - val_loss: 0.4742 - val_accuracy: 0.8880 Epoch 12/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6809 - accuracy: 0.8006 - val_loss: 0.4775 - val_accuracy: 0.8810 Epoch 13/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6690 - accuracy: 0.8058 - val_loss: 0.4573 - val_accuracy: 0.8918 Epoch 14/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6716 - accuracy: 0.8109 - val_loss: 0.4672 - val_accuracy: 0.8886 Epoch 15/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6683 - accuracy: 0.8107 - val_loss: 0.4707 - val_accuracy: 0.8872 Epoch 16/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6370 - accuracy: 0.8200 - val_loss: 0.4453 - val_accuracy: 0.8968 Epoch 17/25 938/938 [==============================] - 1s 2ms/step - loss: 0.6122 - accuracy: 0.8296 - val_loss: 0.4753 - val_accuracy: 0.8982 Epoch 18/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6174 - accuracy: 0.8356 - val_loss: 0.4778 - val_accuracy: 0.9052 Epoch 19/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6092 - accuracy: 0.8342 - val_loss: 0.4548 - val_accuracy: 0.8966 Epoch 20/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6100 - accuracy: 0.8347 - val_loss: 0.4481 - val_accuracy: 0.8952 Epoch 21/25 938/938 [==============================] - 1s 1ms/step - loss: 0.6000 - accuracy: 0.8345 - val_loss: 0.4692 - val_accuracy: 0.9008 Epoch 22/25 938/938 [==============================] - 1s 1ms/step - loss: 0.5875 - accuracy: 0.8396 - val_loss: 0.4583 - val_accuracy: 0.8978 Epoch 23/25 
938/938 [==============================] - 1s 1ms/step - loss: 0.5909 - accuracy: 0.8366 - val_loss: 0.4294 - val_accuracy: 0.9018 Epoch 24/25 938/938 [==============================] - 1s 1ms/step - loss: 0.5945 - accuracy: 0.8376 - val_loss: 0.4439 - val_accuracy: 0.9080 Epoch 25/25 938/938 [==============================] - 1s 1ms/step - loss: 0.5653 - accuracy: 0.8428 - val_loss: 0.4238 - val_accuracy: 0.9030
# %%time  -- FIX: IPython cell magic is invalid in a plain .py file; kept as a
# comment so the script parses.
# Predict the MNIST validation subset in mini-batches.
y_val_pred = []
bs = 128
for i in range(0, len(x_val), bs):
    y_val_pred.extend(model.predict(x_val[i:i+bs]))
CPU times: user 1.24 s, sys: 48.2 ms, total: 1.29 s Wall time: 1.18 s
# 10x10 confusion matrix over the digit classes (no cell annotations here).
y_val_c = list(np.argmax(y_val, axis=1))
y_val_pred_c = list(np.argmax(y_val_pred, axis=1))

c = confusion_matrix(y_val_c, y_val_pred_c)

plt.imshow(c, cmap="YlGn")
plt.xlabel("Predict")
plt.ylabel("True")
plt.colorbar()
plt.savefig("./figures/13_cm_MNIST.png", dpi=500, bbox_inches='tight', pad_inches=0)
plt.show()