3.5. Autoencoding

3.5.1. One-hot Encoder

In [1]:
import time

from conx import *
conx, version 3.4.3
Using Theano backend.
In [2]:
# Width of the one-hot vectors: each of the 5 training patterns is a
# 5-element vector, and the network maps each pattern onto itself.
size = 5
In [3]:
# Build a 5-5-5 autoencoder. The hidden layer is the same width as the
# input here, so the identity mapping is easy to learn.
net = Network("Autoencoder")
net.add(Layer("input", size, minmax=(0,1)))  # input values constrained to [0, 1]
net.add(Layer("hidden", 5, activation="relu", visible=True))  # visible=True: show activations in displays
net.add(Layer("output", size, activation="sigmoid"))  # sigmoid keeps outputs in (0, 1)
net.config["font_family"] = "monospace"  # display/rendering option only
In [4]:
# Connect the layers in the order they were added: input -> hidden -> output
# (confirmed by the weight banks shown in Out[6] below).
net.connect()
In [5]:
# Binary cross-entropy pairs naturally with the sigmoid output layer.
net.compile(error='binary_crossentropy', optimizer="adam")
In [6]:
# Display the network: picture plus per-layer summary (Out[6] below).
net
Out[6]:
Autoencoder
Layer: output (output)
   shape = (5,)
   Keras class = Dense
   activation = sigmoid
Weights from hidden to output
   output/kernel has shape (5, 5)
   output/bias has shape (5,)
Layer: hidden (hidden)
   shape = (5,)
   Keras class = Dense
   activation = relu
Weights from input to hidden
   hidden/kernel has shape (5, 5)
   hidden/bias has shape (5,)
Layer: input (input)
   shape = (5,)
   Keras class = Input
In [7]:
# One training pattern per class: a one-hot vector of length `size` for
# each position 0..size-1 (e.g. position 0 -> [1, 0, 0, 0, 0]).
patterns = []
for position in range(size):
    patterns.append(onehot(position, size))
In [8]:
# Inspect the first pattern: a 1 in position 0 (see Out[8]).
patterns[0]
Out[8]:
[1, 0, 0, 0, 0]
In [9]:
# Autoencoding task: each pattern serves as both input and target.
net.dataset.load([(p,p) for p in patterns])
In [10]:
# Display the network again, now that the dataset is loaded.
net
Out[10]:
Autoencoder
Layer: output (output)
   shape = (5,)
   Keras class = Dense
   activation = sigmoid
Weights from hidden to output
   output/kernel has shape (5, 5)
   output/bias has shape (5,)
Layer: hidden (hidden)
   shape = (5,)
   Keras class = Dense
   activation = relu
Weights from input to hidden
   hidden/kernel has shape (5, 5)
   hidden/bias has shape (5,)
Layer: input (input)
   shape = (5,)
   Keras class = Input
In [11]:
# Propagate each pattern through the still-untrained network, pausing a
# second between patterns — presumably to give the visible-layer display
# time to update between propagations (TODO confirm).
# (`import time` moved to the top-of-notebook import cell.)
for i in range(size):
    net.propagate(net.dataset.inputs[i])
    time.sleep(1)
In [12]:
# Sanity-check the dataset: 5 input/target pairs, shape (5,), range [0, 1].
net.dataset.summary()
Input Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : [(5,)]
   range  : (0.0, 1.0)
Target Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : [(5,)]
   range  : (0.0, 1.0)
In [13]:
# Re-initialize the weights, then train until every pattern is within
# tolerance 0.4 of its target (reached at epoch 975 in the run below).
net.reset()
net.train(accuracy=1, epochs=10000, report_rate=200, tolerance=0.4, plot=True)
_images/Autoencoder_14_0.svg
========================================================================
       |  Training |  Training
Epochs |     Error |  Accuracy
------ | --------- | ---------
#  975 |   0.09615 |   1.00000
In [14]:
# After training, pattern 0 reproduces itself: output unit 0 is high
# (~0.81) and the rest are low (Out[14] below).
net.propagate(net.dataset.inputs[0])
Out[14]:
[0.807578444480896,
 0.07467009127140045,
 0.02190828137099743,
 0.039483316242694855,
 0.014103797264397144]
In [15]:
# Score the training patterns with the same tolerance used in training.
net.test(tolerance=0.4)
========================================================
Testing train dataset with tolerance 0.4...
Total count: 1
      correct: 1
      incorrect: 0
Total percentage correct: 1.0
In [16]:
# Same animation loop as cell In[11]: propagate each pattern through the
# now-trained network, pausing between patterns.
for i in range(size):
    net.propagate(net.dataset.inputs[i])
    time.sleep(1)
In [17]:
# Open the interactive conx dashboard widget for this network
# (display only; no effect on weights or dataset).
net.dashboard()

3.5.2. MNIST Autoencoding

In [18]:
from conx import *
In [19]:
# Rebind `net` to a fresh network for the MNIST autoencoding section.
net = Network("MNIST-Autoencoder")
In [20]:
# Convolutional encoder down to a 25-unit bottleneck, then a single dense
# decoder layer straight back to the 28x28x1 image.
net.add(ImageLayer("input", (28,28), 1))  # 28x28 grayscale input
net.add(Conv2DLayer("conv", 3, (5,5), activation="relu"))  # 3 feature maps, 5x5 kernels
net.add(MaxPool2DLayer("pool", pool_size=(2,2)))  # downsample by 2 in each dimension
net.add(FlattenLayer("flatten"))  # -> 432-element vector (see hidden3/kernel shape in Out[21])
net.add(Layer("hidden3", 25, activation="relu"))  # 25-unit bottleneck
net.add(Layer("output", (28,28,1), activation="sigmoid"))  # reconstructed image in (0, 1)
In [21]:
# Wire the layers in order, compile with mean-squared error (appropriate
# for real-valued pixel reconstruction), and display the summary.
net.connect()
net.compile(error="mse", optimizer="adam")
net
Out[21]:
MNIST-Autoencoder
Layer: output (output)
   shape = (28, 28, 1)
   Keras class = Dense
   activation = sigmoid
Weights from hidden3 to output
   output/kernel has shape (25, 784)
   output/bias has shape (784,)
Layer: hidden3 (hidden)
   shape = (25,)
   Keras class = Dense
   activation = relu
Weights from flatten to hidden3
   hidden3/kernel has shape (432, 25)
   hidden3/bias has shape (25,)
Layer: flatten (hidden)
   Keras class = Flatten
Weights from pool to flatten
Layer: pool (hidden)
   Keras class = MaxPooling2D
   pool_size = (2, 2)
Weights from conv to pool
Layer: conv (hidden)
   Keras class = Conv2D
   activation = relu
Weights from input to conv
   conv/kernel has shape (5, 5, 1, 3)
   conv/bias has shape (3,)
Layer: input (input)
   shape = (28, 28, 1)
   Keras class = Input
In [22]:
# Load MNIST (70,000 images), then turn it into an autoencoding task:
# copy the inputs into the targets and flatten the targets to 784-vectors
# (the summary below confirms the target shape becomes [(784,)]).
net.dataset.get('mnist')
net.dataset.set_targets_from_inputs()
net.dataset.targets.reshape(0, (28 * 28))  # 0 presumably selects target bank 0 — TODO confirm
net.dataset.summary()
Input Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : [(28, 28, 1)]
   range  : (0.0, 1.0)
Target Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : [(784,)]
   range  : (0.0, 1.0)
In [23]:
# Interactive dashboard for the MNIST network (display only).
net.dashboard()
In [24]:
# Visualize the three pooled feature maps (one per conv filter) for the
# first input, one per row, magnified 10x.
net.propagate_to_features("pool", net.dataset.inputs[0], cols=1, scale=10)
Out[24]:

Feature 0

Feature 1

Feature 2
In [25]:
# Run the first image through the (untrained) network and render the
# output layer as an image; .size is (width, height) = (28, 28).
image = net.dataset.inputs[0]
output = net.propagate_to_image("output", image)
output.size
Out[25]:
(28, 28)
In [26]:
# Activations at the 25-unit bottleneck for this image; the relu leaves
# many units at exactly 0.0 (Out[26] below).
net.propagate_to("hidden3", image)
Out[26]:
[0.2753388583660126,
 0.0,
 0.0,
 0.0,
 0.0,
 0.12743401527404785,
 0.0,
 0.013124743476510048,
 0.07096123695373535,
 0.0,
 0.04465332627296448,
 0.0,
 0.0,
 0.0,
 0.12247335910797119,
 0.0,
 0.0,
 0.0,
 0.303422212600708,
 0.0,
 0.1968642622232437,
 0.0,
 0.11999066174030304,
 0.22198209166526794,
 0.0]
In [27]:
# Shrink the dataset to 10 samples so the demo training below is fast
# (presumably keeps the first 10 — the loop in In[29] uses indices 0-9).
net.dataset.slice(10)
In [28]:
# Train until half the samples reconstruct within tolerance 0.4
# (accuracy=0.5; reached at epoch 738 in the run below).
net.train(accuracy=0.5, epochs=1000, report_rate=100, tolerance=.4, plot=True)
_images/Autoencoder_30_0.svg
========================================================================
       |  Training |  Training
Epochs |     Error |  Accuracy
------ | --------- | ---------
#  738 |   0.00220 |   0.50000
In [29]:
# Propagate each of the 10 remaining samples through the trained network.
for i in range(10):
    net.propagate(net.dataset.inputs[i])
In [30]:
# Report test results; suppress the raw input/output arrays and show the
# per-pattern results instead (show=True).
net.test(show_inputs=False, show_outputs=False, show=True)
========================================================
Testing train dataset with tolerance 0.4000...
# | result
---------------------------------------
0 | X
Total count: 1
      correct: 0
      incorrect: 1
Total percentage correct: 0.0