3.9. Face Recognition

In [1]:
# set up pose recognizer network

from conx import *  # conx exposes Network, Layer, FlattenLayer, SGD, array2image, ... at top level

# despite the section title, the targets are 4-way head-pose categories
# (see the 4-element target vectors below), hence the network name
net = Network("Pose recognizer")
Using TensorFlow backend.
/usr/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
  return f(*args, **kwds)
conx, version 3.5.15
In [2]:
# Pick the image resolution by leaving exactly one of these uncommented:
dataset_name = 'cmu_faces_full_size'
#dataset_name = 'cmu_faces_half_size'
#dataset_name = 'cmu_faces_quarter_size'

# Fetch the CMU face dataset, hold out 40% of it for validation,
# randomize the example order, then report what was loaded.
net.dataset.get(dataset_name)
net.dataset.split(0.40)
net.dataset.shuffle()
net.dataset.summary()

Dataset name: CMU Faces, full-size

Original source: http://archive.ics.uci.edu/ml/datasets/cmu+face+images

Dataset Split: * training : 375 * testing : 249 * total : 624

Input Summary: * shape : [(120, 128)] * range : [(0.0, 1.0)]

Target Summary: * shape : [(4,)] * range : [(0, 1)]

In [3]:
# each input is a 120x128 pixel image (one shape tuple per input bank)
net.dataset.inputs.shape
Out[3]:
[(120, 128)]
In [4]:
# render the first input pattern as an image
array2image(net.dataset.inputs[0])
Out[4]:
_images/Face_recognition_4_0.png
In [5]:
# targets are 4-element vectors; this one marks the second category
net.dataset.targets[0]
Out[5]:
[0, 1, 0, 0]
In [6]:
# Assemble the network: image input -> flatten -> 3 hidden units -> 4 outputs,
# then wire the layers in order and compile with SGD on mean squared error.
for layer in [
        Layer('input', net.dataset.inputs.shape[0], colormap="Greys_r"),
        FlattenLayer("flatten"),
        Layer('hidden', 3, activation='sigmoid'),
        Layer('output', 4, activation='sigmoid', colormap="Greys_r"),
]:
    net.add(layer)
net.connect()
net.compile(loss='mean_squared_error', optimizer=SGD(lr=0.3, momentum=0.1))
In [7]:
# Optional display tweaks, kept for reference: the colormaps were already
# passed to the Layer constructors above, so these are redundant here.
# net["output"].colormap = "Greys_r"
# net["output"].minmax = (0, 1)
# net["input"].colormap = "Greys_r"
# net["input"].minmax = (0, 1)
In [8]:
# open the interactive dashboard widget for inspecting the network
net.dashboard()
In [9]:
# reinitialize the weights, then measure loss/accuracy before any training
net.reset()
net.evaluate()
Out[9]:
{'acc': 0.0,
 'loss': 0.2527311338583628,
 'val_acc': 0.0,
 'val_loss': 0.2576007554569398}
In [10]:
# per-pattern correctness report (default tolerance 0.1); all wrong before training
net.test()
========================================================
Testing validation dataset with tolerance 0.1...
Total count: 375
      correct: 0
      incorrect: 375
Total percentage correct: 0.0
In [11]:
# Train from scratch only when no saved weights exist on disk;
# otherwise reload the saved network and just show its training history.
if not net.saved():
    net.train(200, accuracy=1.0, tolerance=0.25, report_rate=5, plot=True)
    net.save()
else:
    net.load()
    net.plot_results()
_images/Face_recognition_11_0.png
In [12]:
# continue training for 10 more epochs (picks up at epoch 200), reporting every 3
net.train(10, accuracy=1.0, tolerance=0.25, report_rate=3)
Evaluating initial validation metrics...
Training...
       |  Training |  Training |  Validate |  Validate
Epochs |     Error |  Accuracy |     Error |  Accuracy
------ | --------- | --------- | --------- | ---------
#  200 |   0.01761 |   0.94385 |   0.02366 |   0.89157
#  203 |   0.03107 |   0.85294 |   0.02398 |   0.89200
#  206 |   0.02814 |   0.87166 |   0.02629 |   0.80400
#  209 |   0.02668 |   0.88235 |   0.02308 |   0.86800
========================================================================
#  210 |   0.02586 |   0.89037 |   0.02251 |   0.88800
In [13]:
# another 10 epochs of training, reporting every 5 epochs
net.train(10, accuracy=1.0, tolerance=0.25, report_rate=5)
Evaluating initial validation metrics...
Training...
       |  Training |  Training |  Validate |  Validate
Epochs |     Error |  Accuracy |     Error |  Accuracy
------ | --------- | --------- | --------- | ---------
#  210 |   0.02586 |   0.89037 |   0.02242 |   0.89157
#  215 |   0.02433 |   0.90374 |   0.02648 |   0.80400
#  220 |   0.02316 |   0.90374 |   0.02054 |   0.88000
========================================================================
#  220 |   0.02316 |   0.90374 |   0.02054 |   0.88000
In [14]:
# standard summary plot of the accumulated training history
net.plot_results()
_images/Face_recognition_14_0.png
In [15]:
# plot training vs. validation accuracy curves
net.plot(['acc', 'val_acc'])
_images/Face_recognition_15_0.png
In [16]:
# plot every recorded metric on one chart
net.plot("all", title="All available metrics")
_images/Face_recognition_16_0.png
In [17]:
# zoom in on the loss curves: epochs 50-150 only, y-axis capped at 0.1
net.plot(['loss', 'val_loss'], ymax=0.1, start=50, end=150)
_images/Face_recognition_17_0.png
In [18]:
# metric names may be regular expressions; '.*loss' matches both loss and val_loss
net.plot(['.*loss'], ymax=0.1, start=50, end=150)
_images/Face_recognition_18_0.png
In [19]:
# visualize each hidden unit's incoming weights reshaped to the input image size,
# and the output layer's weights with a different colormap
net.plot_layer_weights('hidden', vshape=net.dataset.inputs.shape[0])
net.plot_layer_weights('output', colormap='viridis')
_images/Face_recognition_19_0.png
_images/Face_recognition_19_1.png
In [20]:
# colorbar colors or values can be easily changed:
# select specific units, pick a colormap, and fix the weight range / tick count
net.plot_layer_weights('hidden', vshape=net.dataset.inputs.shape[0],
                       units=(2,1,0), colormap="RdBu", wrange=(-0.25, 0.25))
net.plot_layer_weights('output', units=0, wmin=0, colormap="copper", ticks=10)
_images/Face_recognition_20_0.png
_images/Face_recognition_20_1.png
In [21]:
# print the incoming weights of output unit 0 (one value per hidden unit)
net.show_unit_weights('output', 0)
 3.17 -0.84 -3.78
In [22]:
# raw weights into 'output': a 3x4 weight matrix (hidden x output) plus 4 biases
net.get_weights("output")
Out[22]:
[[[3.171057939529419,
   -3.336315155029297,
   -3.017305374145508,
   2.3693573474884033],
  [-0.8448236584663391,
   -0.8334797620773315,
   1.11385977268219,
   1.6187794208526611],
  [-3.7793588638305664,
   3.0504801273345947,
   -2.793135643005371,
   2.2680766582489014]],
 [-1.2294634580612183,
  -1.2983039617538452,
  0.7965996861457825,
  -4.31489896774292]]