Autoencoding

One-hot Autoencoder

In [1]:
# Import only the names this section uses; the wildcard form
# ("from conx import *") hides where names come from.
from conx import Network, Layer, onehot
conx, version 3.4.0
Using Theano backend.
In [2]:
# Number of units in the input and output layers (one per one-hot pattern).
size = 5
In [3]:
# Build a 5-5-5 autoencoder: the network must reproduce its input on its output.
net = Network("Autoencoder")
hidden_size = 5  # width of the hidden layer; a named constant instead of a magic number
net.add(Layer("input", size, minmax=(0,1)))
net.add(Layer("hidden", hidden_size, activation="relu", visible=True))
net.add(Layer("output", size, activation="sigmoid"))
net.config["font_family"] = "monospace"  # use monospace labels in the network display
In [4]:
# Create the connections between the layers (no arguments: conx wires
# the layers itself — presumably in the order they were added).
net.connect()
In [5]:
# Binary cross-entropy suits the 0/1 targets; Adam is the weight optimizer.
net.compile(error='binary_crossentropy', optimizer="adam")
In [6]:
# Display the network (rich repr shows layers and weight shapes).
net
Out[6]:
AutoencoderLayer: output (output) shape = (5,) Keras class = Dense activation = sigmoidoutputWeights from hidden to output output/kernel has shape (5, 5) output/bias has shape (5,)Layer: hidden (hidden) shape = (5,) Keras class = Dense activation = reluhiddenWeights from input to hidden hidden/kernel has shape (5, 5) hidden/bias has shape (5,)Layer: input (input) shape = (5,) Keras class = Inputinput
In [7]:
# One one-hot vector per position: [1,0,0,0,0], [0,1,0,0,0], ...
patterns = [onehot(position, size) for position in range(size)]
In [8]:
# Show the first pattern to confirm the one-hot encoding.
patterns[0]
Out[8]:
[1, 0, 0, 0, 0]
In [9]:
net.dataset.load([(p,p) for p in patterns])
In [10]:
# Re-display the network now that the dataset is attached.
net
Out[10]:
AutoencoderLayer: output (output) shape = (5,) Keras class = Dense activation = sigmoidoutputWeights from hidden to output output/kernel has shape (5, 5) output/bias has shape (5,)Layer: hidden (hidden) shape = (5,) Keras class = Dense activation = reluhiddenWeights from input to hidden hidden/kernel has shape (5, 5) hidden/bias has shape (5,)Layer: input (input) shape = (5,) Keras class = Inputinput
In [11]:
# Propagate every training pattern, pausing a second between each so the
# activations can be watched in the network display.
import time

for pattern_number in range(size):
    net.propagate(net.dataset.inputs[pattern_number])
    time.sleep(1)
In [12]:
# Summarize the dataset: counts, shapes, and value ranges.
net.dataset.summary()
Input Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : [(5,)]
   range  : (0.0, 1.0)
Target Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : [(5,)]
   range  : (0.0, 1.0)
In [14]:
# Start from fresh random weights, then train until every pattern is within
# tolerance 0.4 of its target (accuracy=1), or 10000 epochs pass.
net.reset()
net.train(accuracy=1, epochs=10000, report_rate=200, tolerance=0.4, plot=True)
_images/Autoencoder_14_0.svg
========================================================================
       |  Training |  Training
Epochs |     Error |  Accuracy
------ | --------- | ---------
# 2389 |   0.09550 |   1.00000
In [15]:
# Check the trained output for the first pattern (should be near [1,0,0,0,0]).
net.propagate(net.dataset.inputs[0])
Out[15]:
[0.8744319677352905,
 0.002665942534804344,
 0.043912623077631,
 4.047724360134453e-06,
 0.005730808712542057]
In [16]:
# Score the training set with the same tolerance used during training.
net.test(tolerance=0.4)
========================================================
Testing train dataset with tolerance 0.4...
Total count: 1
      correct: 1
      incorrect: 0
Total percentage correct: 1.0
In [17]:
# Replay each pattern through the trained network, pausing between them
# (uses the `time` module imported earlier in the notebook).
for pattern_number in range(size):
    net.propagate(net.dataset.inputs[pattern_number])
    time.sleep(1)
In [18]:
# Open the interactive dashboard for exploring the trained network.
net.dashboard()

MNIST Autoencoding

In [19]:
# Explicit imports for the MNIST section, instead of a wildcard import.
from conx import (Network, Layer, ImageLayer, Conv2DLayer,
                  MaxPool2DLayer, FlattenLayer)
In [20]:
# A convolutional autoencoder for 28x28 MNIST digit images.
net = Network("MNIST-Autoencoder")
In [21]:
# Layer stack: 28x28x1 image -> 3 conv feature maps (5x5 kernels) ->
# 2x2 max pooling -> flatten -> 25-unit bottleneck -> 28x28x1 reconstruction.
mnist_layers = [
    ImageLayer("input", (28,28), 1),
    Conv2DLayer("conv", 3, (5,5), activation="relu"),
    MaxPool2DLayer("pool", pool_size=(2,2)),
    FlattenLayer("flatten"),
    Layer("hidden3", 25, activation="relu"),
    Layer("output", (28,28,1), activation="sigmoid"),
]
for layer in mnist_layers:
    net.add(layer)
In [22]:
# Connect the layers, compile with mean-squared error, and display the model.
net.connect()
net.compile(error="mse", optimizer="adam")
net
Out[22]:
MNIST-AutoencoderLayer: output (output) shape = (28, 28, 1) Keras class = Dense activation = sigmoidoutputWeights from hidden3 to output output/kernel has shape (25, 784) output/bias has shape (784,)Layer: hidden3 (hidden) shape = (25,) Keras class = Dense activation = reluhidden3Weights from flatten to hidden3 hidden3/kernel has shape (432, 25) hidden3/bias has shape (25,)Layer: flatten (hidden) Keras class = FlattenflattenWeights from pool to flattenLayer: pool (hidden) Keras class = MaxPooling2D pool_size = (2, 2)pool30Weights from conv to poolLayer: conv (hidden) Keras class = Conv2D activation = reluconv30Weights from input to conv conv/kernel has shape (5, 5, 1, 3) conv/bias has shape (3,)Layer: input (input) shape = (28, 28, 1) Keras class = Inputinput
In [23]:
# Load MNIST, use each image as its own target (autoencoding), and reshape
# the targets (bank 0) to 784-element vectors; then summarize the dataset.
net.dataset.get('mnist')
net.dataset.set_targets_from_inputs()
net.dataset.targets.reshape(0, (28 * 28))
net.dataset.summary()
Input Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : [(28, 28, 1)]
   range  : (0.0, 1.0)
Target Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : [(784,)]
   range  : (0.0, 1.0)
In [24]:
# Open the interactive dashboard for the untrained MNIST network.
net.dashboard()
In [25]:
# Visualize the three feature maps at the "pool" layer for the first image.
net.propagate_to_features("pool", net.dataset.inputs[0], cols=1, scale=10)
Out[25]:

Feature 0

Feature 1

Feature 2
In [28]:
# Propagate the first image through the network and get the reconstruction
# back as an image; .size reports its dimensions (28, 28).
image = net.dataset.inputs[0]
output = net.propagate_to_image("output", image)
output.size
Out[28]:
(28, 28)
In [29]:
# Inspect the 25 bottleneck ("hidden3") activations for the same image.
net.propagate_to("hidden3", image)
Out[29]:
[0.36987775564193726,
 0.5273115634918213,
 0.043583206832408905,
 0.0,
 0.0,
 0.0,
 0.0,
 0.0,
 0.0,
 0.1749943196773529,
 0.0,
 0.0,
 0.36818239092826843,
 0.5065754652023315,
 0.5271006226539612,
 0.0,
 0.0,
 0.0,
 0.2511788308620453,
 0.0,
 0.5432827472686768,
 0.0,
 0.270825058221817,
 0.0,
 0.45302289724349976]
In [30]:
# Restrict the dataset to 10 examples so the demonstration trains quickly.
net.dataset.slice(10)
In [31]:
# Quick demo training: stop once half the examples are within tolerance 0.4.
net.train(accuracy=0.5, epochs=1000, report_rate=100, tolerance=.4, plot=True)
_images/Autoencoder_30_0.svg
========================================================================
       |  Training |  Training
Epochs |     Error |  Accuracy
------ | --------- | ---------
#  343 |   0.00222 |   0.50000
In [32]:
# Propagate each of the 10 remaining examples through the trained network.
for example_number in range(10):
    net.propagate(net.dataset.inputs[example_number])
In [34]:
# Test without listing the raw inputs/outputs; show the per-example results.
net.test(show_inputs=False, show_outputs=False, show=True)
========================================================
Testing train dataset with tolerance 0.4000...
# | result
---------------------------------------
0 | X
Total count: 1
      correct: 0
      incorrect: 1
Total percentage correct: 0.0