
Wednesday, October 21, 2020

Deep Learning (Binary-Class Classification, Supervised Learning): Implementation and Showing Biases and Weights




0_MacOS_Python_setup.txt
# Install on Terminal of MacOS

# 1. pandas
#pip3 install -U pandas

# 2. NumPy
#pip3 install -U numpy

# 3. matplotlib
#pip3 install -U matplotlib

# 4. scikit-learn (sklearn)
#pip3 install -U scikit-learn

# 5. tensorflow
#pip3 install -U tensorflow



1_MacOS_Terminal.txt
########## Run Terminal on MacOS and execute
### TO UPDATE
cd "YOUR_WORKING_DIRECTORY"


python3 dlclsweights.py 300 l1l2 0.0200



Input data files

train_data_raw.csv
1.0, 2.0
2.0, 4.0
4.0, 8.0
5.0, 10.0
7.0, 14.0
8.0, 16.0
10.0, 20.0
11.0, 22.0
13.0, 26.0
14.0, 28.0
16.0, 32.0
17.0, 34.0
19.0, 38.0
20.0, 40.0
22.0, 44.0
23.0, 46.0
25.0, 50.0
26.0, 52.0
28.0, 56.0
29.0, 58.0


train_targets_raw.csv
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
1
1
1

test_data_raw.csv
0.0, 0.0
3.0, 6.0
6.0, 12.0
9.0, 18.0
12.0, 24.0
15.0, 30.0
18.0, 36.0
21.0, 42.0
24.0, 48.0
27.0, 54.0
30.0, 60.0

test_targets_raw.csv
0
0
0
0
0
0
0
1
1
1
1
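
Note that in both data files the second column is exactly twice the first, and the targets switch from 0 to 1 around x1 = 20, so the model effectively has to learn a single threshold along this line.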


Python files


dlclsweights.py

#################### Deep Learning (Binary-Class Classification, Supervised Learning): Implementation and Showing Biases and Weights ####################

########## How to run this code
#
# You can run this code on your MacOS Terminal (or other terminals) as follows:
#
# python3 dlclsweights.py 300 l1l2 0.0200
# python3 dlclsweights.py (num_epochs: number of epochs) (regl1l2: regularization type: None, l1, l2, or l1l2) (regl1l2f: regularization factor)

########## import sys

import sys



########## Argument(s)

#num_epochs = 10000
num_epochs = int(sys.argv[1])

#regl1l2  = 'None'
#regl1l2  = 'l1l2'
regl1l2 = str(sys.argv[2])

#regl1l2f = 0.001
regl1l2f = float(sys.argv[3])

#dropout_rate = 0
#dropout_rate = float(sys.argv[4])



########## import others

import numpy as np

import matplotlib.pyplot as plt

from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, cohen_kappa_score
from sklearn.metrics import r2_score

import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras import Model
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import Dropout

print(tf.__version__)
#2.3.0



########## Loading raw data (before standardization)

train_data_raw    = np.loadtxt('train_data_raw.csv', dtype='float64', delimiter=',')
'''
1.0, 2.0
2.0, 4.0
4.0, 8.0
5.0, 10.0
7.0, 14.0
8.0, 16.0
10.0, 20.0
11.0, 22.0
13.0, 26.0
14.0, 28.0
16.0, 32.0
17.0, 34.0
19.0, 38.0
20.0, 40.0
22.0, 44.0
23.0, 46.0
25.0, 50.0
26.0, 52.0
28.0, 56.0
29.0, 58.0
'''

#train_targets_raw = np.loadtxt('train_targets_raw.csv', dtype='float64', delimiter=',')
train_targets_raw = np.loadtxt('train_targets_raw.csv', dtype='int', delimiter=',')
'''
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
1
1
1
1
1
'''

test_data_raw     = np.loadtxt('test_data_raw.csv', dtype='float64', delimiter=',')
'''
0.0, 0.0
3.0, 6.0
6.0, 12.0
9.0, 18.0
12.0, 24.0
15.0, 30.0
18.0, 36.0
21.0, 42.0
24.0, 48.0
27.0, 54.0
30.0, 60.0
'''

#test_targets_raw  = np.loadtxt('test_targets_raw.csv', dtype='float64', delimiter=',')
test_targets_raw  = np.loadtxt('test_targets_raw.csv', dtype='int', delimiter=',')
'''
0
0
0
0
0
0
0
1
1
1
1
'''


########## Standardization (scaling data/features to mean = 0, standard deviation = 1)

sc = StandardScaler()

#train_data = sc.fit_transform(train_data_raw)
train_data    = train_data_raw    # no standardization in this case
np.savetxt('train_data.csv', train_data, fmt ='%.8f', delimiter=',')
#
print(train_data.shape)
#(20, 2)
#
print(train_data.shape[0])
#20
#
print(train_data.shape[1])
#2

train_targets = train_targets_raw
#np.savetxt('train_targets.csv', train_targets, fmt ='%.8f', delimiter=',')
np.savetxt('train_targets.csv', train_targets, fmt ='%i', delimiter=',')

#test_data = sc.fit_transform(test_data_raw)
test_data     = test_data_raw    # no standardization in this case
np.savetxt('test_data.csv', test_data, fmt ='%.8f', delimiter=',')

test_targets  = test_targets_raw
#np.savetxt('test_targets.csv', test_targets, fmt ='%.8f', delimiter=',')
np.savetxt('test_targets.csv', test_targets, fmt ='%i', delimiter=',')
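

##### (Optional) Standardization done properly
#
# If you enable standardization, fit the scaler on the training data only and
# reuse it for the test data; calling fit_transform() on the test data as well
# would leak test statistics into the preprocessing. A minimal sketch
# (commented out, not used in this case):
#
#sc = StandardScaler()
#train_data = sc.fit_transform(train_data_raw)    # fit mean/std on training data
#test_data  = sc.transform(test_data_raw)         # reuse the training mean/std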



##### Regularization

#print(regl1l2)
#print(regl1l2f)

if regl1l2 == 'None':
    rg = None
    #
elif regl1l2 == 'l1':
    rg = regularizers.l1(l1=regl1l2f)    # L1 regularization
    #
elif regl1l2 == 'l2':
    rg = regularizers.l2(l2=regl1l2f)    # L2 regularization
    #
elif regl1l2 == 'l1l2':
    rg = regularizers.l1_l2(l1=regl1l2f, l2=regl1l2f)    # L1 & L2 regularization
    #
else:
    print('Error: The second argument should be None, l1, l2, or l1l2.')
    exit()
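

##### (Reference) What the regularizer adds to the loss
#
# l1_l2 adds a penalty of regl1l2f * sum(|w|) + regl1l2f * sum(w**2) per
# regularized kernel to the training loss. An illustrative check with NumPy
# (the weight values here are made up for the example):
#
#w = np.array([0.5, -0.2])
#penalty = regl1l2f * np.sum(np.abs(w)) + regl1l2f * np.sum(w**2)
#print(penalty)    # with regl1l2f = 0.0200: 0.0200*0.7 + 0.0200*0.29 = 0.0198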



########## Model

#fully connected (dense) network

'''
model = Sequential([
    #Conv2D(1, (3, 3), padding='same', name='L0_conv2d', input_shape=(10, 10, 1)),
    #Flatten(name='L1_flatten'),
    #Dense(10, name='L2_dense', use_bias=False),
    #Dense(1, name='L3_dense'),
    #BatchNormalization(name='L4_bn')
    #
    #Dense(1, kernel_regularizer=rg, activation='relu', name='L0_dense', input_shape=(train_data.shape[1],)),
    #Dense(2, kernel_regularizer=rg, activation='relu', name='L0_dense', use_bias=True, input_shape=(1,)),
    Dense(1, kernel_regularizer=rg, activation='relu', name='L0_dense', use_bias=True, input_shape=(1,)),
    Dropout(dropout_rate, name='L1_dropout'),
    Dense(1, kernel_regularizer=rg, activation='relu', name='L2_dense', use_bias=True),
    Dropout(dropout_rate, name='L3_dropout'),
    Dense(1, name='L4_dense')
    #Dense(1, activation='softmax', name='L4_dense')
])
'''
model = Sequential([
    #Dense(1, kernel_regularizer=rg, activation='relu', name='L0_dense', use_bias=True, input_shape=(1,)),
    #Use the following Dense layer when train_data and test_data have more than one column.
    #Dense(1, kernel_regularizer=rg, activation='relu', name='L0_dense', use_bias=True, input_shape=(train_data.shape[1],)),
    Dense(train_data.shape[1], kernel_regularizer=rg, activation='relu', name='L0_dense', use_bias=True, input_shape=(train_data.shape[1],)),
    Dense(1, activation='sigmoid', name='L1_dense')
])

model.summary()
'''
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
L0_dense (Dense)             (None, 2)                 6         
_________________________________________________________________
L1_dense (Dense)             (None, 1)                 3         
=================================================================
Total params: 9
Trainable params: 9
Non-trainable params: 0
_________________________________________________________________
'''
#exit()



########## Model Compiling: Regression
#model.compile(optimizer='rmsprop', loss='mse', metrics=['mean_absolute_error'])
#
########## Model Compiling: Classification (Binary Class: 0 or 1)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
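#
# For reference, binary_crossentropy for a single sample is
# -(t*log(p) + (1-t)*log(1-p)), where t is the target (0 or 1) and p is the
# sigmoid output. An illustrative check (values made up for the example):
#
#t, p = 1, 0.8
#print(-(t * np.log(p) + (1 - t) * np.log(1 - p)))    # ~0.2231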



########## Model Fitting and History Recording

history = model.fit( train_data,
                     train_targets,
                     validation_data=(test_data, test_targets),
                     epochs=num_epochs,
                     batch_size=1,
                     verbose=1)    # Train the model (set verbose=0 for silent mode)

'''
...
Epoch 300/300
20/20 [==============================] - 0s 1ms/step - loss: 0.2799 - accuracy: 0.9000 - val_loss: 0.2165 - val_accuracy: 1.0000
'''

#print(history)
#<tensorflow.python.keras.callbacks.History object at 0x145d80490>

#print(history.history)

loss     = history.history['loss']
val_loss = history.history['val_loss']
#
#mae      = history.history['mean_absolute_error']
#val_mae  = history.history['val_mean_absolute_error']

acc      = history.history['accuracy']
val_acc  = history.history['val_accuracy']

epochs = range(1, len(loss)+1)



########## Drawing figures

##### Loss
plt.plot(epochs, loss, 'r', label='Training')
plt.plot(epochs, val_loss, 'b', label='Validation')

plt.xlabel('epochs')
plt.ylabel('loss')

plt.title('Training and validation loss')
plt.legend()

plt.savefig('Fig_1_Loss.png')
plt.show()

##### Accuracy
plt.plot(epochs, acc, 'r', label='Training')
plt.plot(epochs, val_acc, 'b', label='Validation')

plt.xlabel('epochs')
plt.ylabel('accuracy')

plt.title('Training and validation accuracy')
plt.legend()

plt.savefig('Fig_2_Accuracy.png')
plt.show()



########## Model Evaluation by Test Data and Test Targets

#model.evaluate(test_data, test_targets)
#1/1 [==============================] - 0s 341us/step - loss: 0.2165 - accuracy: 1.0000

score = model.evaluate(test_data, test_targets)
print(score)
# [loss, accuracy]



########## Model Predictions by using Test Data

test_targets_pred = model.predict(test_data)
#test_targets_pred = model.predict_classes(test_data)
#
#predict returns the predicted scores (here, sigmoid probabilities), while
#predict_classes returns the predicted class labels. Although they look
#similar, there is a difference:
#
#Imagine you are trying to predict whether a picture shows a dog or a cat
#(a binary classifier):
#
#predict returns, for example: 0.6 cat and 0.4 dog.
#predict_classes returns: cat.
#
#(Note: predict_classes was removed in later TensorFlow versions, so
#thresholding the output of predict, as done below, is the portable approach.)

#binary classification: 0 or 1 (if a predicted score is greater than 0.5, it is regarded as 1)
test_targets_pred = test_targets_pred > 0.5

np.savetxt('test_targets_pred.csv', test_targets_pred, fmt ='%.8f', delimiter=',')
'''
0.00000000
0.00000000
0.00000000
0.00000000
0.00000000
0.00000000
0.00000000
1.00000000
1.00000000
1.00000000
1.00000000
'''

#Confusion matrix
print(confusion_matrix(test_targets, test_targets_pred))
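
#The other metrics imported above can be applied in the same way; a minimal
#sketch using the sklearn functions already imported at the top:
#
#print(precision_score(test_targets, test_targets_pred))
#print(recall_score(test_targets, test_targets_pred))
#print(f1_score(test_targets, test_targets_pred))
#print(cohen_kappa_score(test_targets, test_targets_pred))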



########## Showing weights

print(len(model.layers))
#2

#exit()

l0 = model.layers[0]
l1 = model.layers[1]



##### Model Weights

print('##### Model Weights #####')

#model.weights has all the weights of all the layers.
print(model.weights)
'''
[<tf.Variable 'L0_dense/kernel:0' shape=(2, 2) dtype=float32, numpy=
array([[0.46310312, 0.02329355],
       [0.00431281, 0.03213428]], dtype=float32)>, <tf.Variable 'L0_dense/bias:0' shape=(2,) dtype=float32, numpy=array([-1.4569175,  1.4839554], dtype=float32)>, <tf.Variable 'L1_dense/kernel:0' shape=(2, 1) dtype=float32, numpy=
array([[ 0.72996795],
       [-1.2167052 ]], dtype=float32)>, <tf.Variable 'L1_dense/bias:0' shape=(1,) dtype=float32, numpy=array([-1.6834593], dtype=float32)>]
'''
#
#
print(type(model.weights))
# <class 'list'>
#
print(len(model.weights))
# 4

print(type(model.weights[0]))
#<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
#
print(model.weights[0])
'''
<tf.Variable 'L0_dense/kernel:0' shape=(2, 2) dtype=float32, numpy=
array([[0.46310312, 0.02329355],
       [0.00431281, 0.03213428]], dtype=float32)>
'''
#
print(model.weights[0].numpy())
'''
[[0.46310312 0.02329355]
 [0.00431281 0.03213428]]
'''

print(model.weights[1])
#<tf.Variable 'L0_dense/bias:0' shape=(2,) dtype=float32, numpy=array([-1.4569175,  1.4839554], dtype=float32)>
#
print(model.weights[1].numpy())
#[-1.4569175  1.4839554]

print(model.weights[2])
'''
<tf.Variable 'L1_dense/kernel:0' shape=(2, 1) dtype=float32, numpy=
array([[ 0.72996795],
       [-1.2167052 ]], dtype=float32)>
'''
#
print(model.weights[2].numpy())
'''
[[ 0.72996795]
 [-1.2167052 ]]
'''

print(model.weights[3])
#<tf.Variable 'L1_dense/bias:0' shape=(1,) dtype=float32, numpy=array([-1.6834593], dtype=float32)>
#
print(model.weights[3].numpy())
#[-1.6834593]

for w in model.weights:
    print('{:<25}{}'.format(w.name, w.shape))
'''
L0_dense/kernel:0        (2, 2)
L0_dense/bias:0          (2,)
L1_dense/kernel:0        (2, 1)
L1_dense/bias:0          (1,)
'''
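
#An equivalent way to get the same arrays is Layer.get_weights(), which
#returns plain NumPy arrays ([kernel, bias] for a Dense layer):
#
#for layer in model.layers:
#    print(layer.name, [w.shape for w in layer.get_weights()])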



##### Layer 0

print('##### Layer 0: Dense #####')

print(l0.weights)
'''
[<tf.Variable 'L0_dense/kernel:0' shape=(2, 2) dtype=float32, numpy=
array([[0.46310312, 0.02329355],
       [0.00431281, 0.03213428]], dtype=float32)>, <tf.Variable 'L0_dense/bias:0' shape=(2,) dtype=float32, numpy=array([-1.4569175,  1.4839554], dtype=float32)>]
'''

for w in l0.weights:
    print('{:<25}{}'.format(w.name, w.shape))
'''
L0_dense/kernel:0        (2, 2)
L0_dense/bias:0          (2,)
'''

### kernel

print(l0.weights[0])
'''
<tf.Variable 'L0_dense/kernel:0' shape=(2, 2) dtype=float32, numpy=
array([[0.46310312, 0.02329355],
       [0.00431281, 0.03213428]], dtype=float32)>
'''

print(l0.weights[0].name)
#L0_dense/kernel:0

print(l0.weights[0].numpy())
'''
[[0.46310312 0.02329355]
 [0.00431281 0.03213428]]
'''


### bias

print(l0.weights[1])
#<tf.Variable 'L0_dense/bias:0' shape=(2,) dtype=float32, numpy=array([-1.4569175,  1.4839554], dtype=float32)>

print(l0.weights[1].name)
#L0_dense/bias:0

print(l0.weights[1].numpy())
#[-1.4569175  1.4839554]



##### Layer 1

#print('##### Layer 1: Dropout #####')
print('##### Layer 1: Dense #####')

print(l1.weights)
'''
[<tf.Variable 'L1_dense/kernel:0' shape=(2, 1) dtype=float32, numpy=
array([[ 0.72996795],
       [-1.2167052 ]], dtype=float32)>, <tf.Variable 'L1_dense/bias:0' shape=(1,) dtype=float32, numpy=array([-1.6834593], dtype=float32)>]
'''

for w in l1.weights:
    print('{:<25}{}'.format(w.name, w.shape))
'''
L1_dense/kernel:0        (2, 1)
L1_dense/bias:0          (1,)
'''    

### kernel

print(l1.weights[0])
'''
<tf.Variable 'L1_dense/kernel:0' shape=(2, 1) dtype=float32, numpy=
array([[ 0.72996795],
       [-1.2167052 ]], dtype=float32)>
'''

print(l1.weights[0].name)
#L1_dense/kernel:0

print(l1.weights[0].numpy())
'''
[[ 0.72996795]
 [-1.2167052 ]]
'''


### bias

print(l1.weights[1])
#<tf.Variable 'L1_dense/bias:0' shape=(1,) dtype=float32, numpy=array([-1.6834593], dtype=float32)>

print(l1.weights[1].name)
#L1_dense/bias:0

print(l1.weights[1].numpy())
#[-1.6834593]



########## Notes

##### Layer 0

### kernel

#print(l0.weights[0].name)
#L0_dense/kernel:0
#
#print(l0.weights[0].numpy())
'''
[[0.46310312 0.02329355]
 [0.00431281 0.03213428]]
'''


### bias

#print(l0.weights[1].name)
#L0_dense/bias:0
#
#print(l0.weights[1].numpy())
#[-1.4569175  1.4839554]



##### Layer 1

### kernel

#print(l1.weights[0].name)
#L1_dense/kernel:0
#
#print(l1.weights[0].numpy())
'''
[[ 0.72996795]
 [-1.2167052 ]]
'''


### bias

#print(l1.weights[1].name)
#L1_dense/bias:0
#
#print(l1.weights[1].numpy())
#[-1.6834593]


# By using these weights, we can derive the equations below. Note that a Keras
# Dense kernel has shape (input_dim, units), so column j of the kernel holds
# the weights feeding unit j, and Layer 0 applies a relu activation:
#
# Layer 0:
# y0_0 = relu(-1.4569175 + (0.46310312 * x1 + 0.00431281 * x2))
# y0_1 = relu( 1.4839554 + (0.02329355 * x1 + 0.03213428 * x2))
#
# Layer 1 (pre-activation; the sigmoid is applied afterwards):
# y = y1 = -1.6834593 + (0.72996795) * y0_0 + (-1.2167052) * y0_1
#
#
# Compare (1) the results calculated with these equations and (2) test_targets_pred: the thresholded results of model.predict(test_data)
#
#test_data_raw
'''
0.0, 0.0
3.0, 6.0
6.0, 12.0
9.0, 18.0
12.0, 24.0
15.0, 30.0
18.0, 36.0
21.0, 42.0
24.0, 48.0
27.0, 54.0
30.0, 60.0
'''
#
# (1) results calculated with these equations
'''
For x1 = 9.0, x2 = 18.0

Layer 0:
y0_0 = relu(-1.4569175 + (0.46310312 * x1 + 0.00431281 * x2))
y0_1 = relu( 1.4839554 + (0.02329355 * x1 + 0.03213428 * x2))

y0_0 = relu(-1.4569175 + (0.46310312 * 9.0 + 0.00431281 * 18.0))
y0_1 = relu( 1.4839554 + (0.02329355 * 9.0 + 0.03213428 * 18.0))

y0_0 = 2.78864116    (positive, so relu leaves it unchanged)
y0_1 = 2.27201439    (positive, so relu leaves it unchanged)


Layer 1:
y = y1
  = -1.6834593 + (0.72996795) * y0_0 + (-1.2167052) * y0_1
  = -1.6834593 + (0.72996795) * 2.78864116 + (-1.2167052) * 2.27201439
  = -2.41221237

sigmoid:
1/(1+exp(-y)) = 1/(1+exp(2.41221237)) = 0.08 <= 0.50
Thus, the predicted result is zero (0).

'''

'''
For x1 = 21.0, x2 = 42.0

Layer 0:
y0_0 = relu(-1.4569175 + (0.46310312 * x1 + 0.00431281 * x2))
y0_1 = relu( 1.4839554 + (0.02329355 * x1 + 0.03213428 * x2))

y0_0 = relu(-1.4569175 + (0.46310312 * 21.0 + 0.00431281 * 42.0))
y0_1 = relu( 1.4839554 + (0.02329355 * 21.0 + 0.03213428 * 42.0))

y0_0 = 8.44938604    (positive, so relu leaves it unchanged)
y0_1 = 3.32275971    (positive, so relu leaves it unchanged)


Layer 1:
y = y1
  = -1.6834593 + (0.72996795) * y0_0 + (-1.2167052) * y0_1
  = -1.6834593 + (0.72996795) * 8.44938604 + (-1.2167052) * 3.32275971
  = 0.44150269

sigmoid:
1/(1+exp(-y)) = 1/(1+exp(-0.44150269)) = 0.6086 > 0.50
Thus, the predicted result is one (1).

'''
# (2) test_targets_pred
'''
0.00000000
0.00000000
0.00000000
0.00000000
0.00000000
0.00000000
0.00000000
1.00000000
1.00000000
1.00000000
1.00000000
'''
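

##### (Reference) Verifying the hand calculation with NumPy
#
# The derivation above can also be checked programmatically. A minimal sketch
# that reproduces the forward pass from the extracted weights (the relu and
# sigmoid helpers are defined here just for this check):
#
#k0, b0 = l0.weights[0].numpy(), l0.weights[1].numpy()
#k1, b1 = l1.weights[0].numpy(), l1.weights[1].numpy()
#
#def relu(z):
#    return np.maximum(z, 0.0)
#
#def sigmoid(z):
#    return 1.0 / (1.0 + np.exp(-z))
#
#x = np.array([[9.0, 18.0]])
#y0 = relu(x @ k0 + b0)        # Layer 0: columns of k0 map inputs to units
#y  = sigmoid(y0 @ k1 + b1)    # Layer 1: sigmoid output
#print(y)                      # should match model.predict(x)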





Figures

Fig_1_Loss.png (training and validation loss)

Fig_2_Accuracy.png (training and validation accuracy)


