Autoencoding

One-hot Encoder

In [1]:
# Import only the names this notebook actually uses, instead of a wildcard
# import, so the origin of each name is explicit (matches the explicit-import
# style used later in the MNIST section).
from conx import Network, Layer, one_hot
Using Theano backend.
In [2]:
size = 5  # width of the input and output layers; also the number of one-hot patterns
In [3]:
# Build a tiny autoencoder: size -> hidden -> size.
# NOTE(review): the hidden layer is hard-coded to 5, which happens to equal
# `size`, so there is no compression bottleneck — confirm that is intended.
net = Network("Autoencoder")
net.add(Layer("input", size, minmax=(0,1)))
net.add(Layer("hidden", 5, activation="relu", visible=True))
net.add(Layer("output", size, activation="sigmoid"))
net.config["font_family"] = "monospace"  # display setting for the rendered network picture
In [4]:
net.connect()  # connect the layers (no args: presumably in the order added — input -> hidden -> output)
In [5]:
# Binary cross-entropy pairs naturally with the sigmoid output and 0/1 targets.
# NOTE(review): this cell passes loss=, while the MNIST section's compile (In [28])
# passes error= — confirm which keyword conx expects and use one consistently.
net.compile(loss='binary_crossentropy', optimizer="adam")
In [6]:
net
Out[6]:
_images/Autoencoder_7_1.svg
In [7]:
patterns = [one_hot(i, size) for i in range(size)]  # the `size` one-hot vectors of length `size`
In [8]:
patterns[0]
Out[8]:
[1.0, 0.0, 0.0, 0.0, 0.0]
In [9]:
dataset = [(p,p) for p in patterns]  # autoencoding: each pattern is its own target
In [10]:
net.set_dataset(dataset)  # attach the (input, target) pairs to the network
In [11]:
net
Out[11]:
_images/Autoencoder_12_1.svg
In [12]:
# Propagate each pattern with a one-second pause, so the (untrained)
# activations can be watched in the rendered network picture above.
import time
for i in range(size):
    net.propagate(dataset[i][0])
    time.sleep(1)
In [13]:
dataset[0]
Out[13]:
([1.0, 0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0, 0.0])
In [14]:
# NOTE(review): redundant — the same dataset was already attached in In [10];
# re-running this is harmless but unnecessary.
net.set_dataset(dataset)
In [15]:
net.dataset.summary()  # sanity-check shapes and value ranges before training
Input Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : (5,)
   range  : (0.0, 1.0)
Target Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : (5,)
   range  : (0.0, 1.0)
In [16]:
# Train until 100% accuracy (every output within tolerance 0.4 of its target),
# capped at 10000 epochs; progress is reported every 200 epochs.
net.train(accuracy=1, epochs=10000, report_rate=200, tolerance=0.4)
Training...
Epoch #  200 | train error 0.51861 | train accuracy 0.80000 | validate% 0.00000
Epoch #  400 | train error 0.38372 | train accuracy 0.88000 | validate% 0.20000
Epoch #  600 | train error 0.27889 | train accuracy 0.92000 | validate% 0.40000
Epoch #  800 | train error 0.19699 | train accuracy 0.96000 | validate% 0.60000
Epoch # 1000 | train error 0.13138 | train accuracy 0.96000 | validate% 0.80000
========================================================================
Epoch # 1143 | train error 0.09470 | train accuracy 1.00000 | validate% 1.00000
In [17]:
net.propagate(dataset[0][0])  # reconstruction of the first one-hot pattern after training
Out[17]:
[0.90688604, 0.012417966, 0.067174591, 0.069545172, 0.068990991]
In [18]:
net
Out[18]:
_images/Autoencoder_19_1.svg
In [19]:
# Replay each pattern (with a pause) to watch the trained activations.
for i in range(size):
    net.propagate(dataset[i][0])
    time.sleep(1)
In [20]:
net.test()  # per-pattern comparison of outputs vs. targets over the training set
Testing on training dataset...
# | inputs | targets | outputs | result
---------------------------------------
0 | [1.0, 0.0, 0.0, 0.0, 0.0] | [1.0, 0.0, 0.0, 0.0, 0.0] | [0.9, 0.0, 0.1, 0.1, 0.1] | correct
1 | [0.0, 1.0, 0.0, 0.0, 0.0] | [0.0, 1.0, 0.0, 0.0, 0.0] | [0.0, 0.8, 0.0, 0.3, 0.1] | X
2 | [0.0, 0.0, 1.0, 0.0, 0.0] | [0.0, 0.0, 1.0, 0.0, 0.0] | [0.0, 0.0, 0.9, 0.0, 0.1] | X
3 | [0.0, 0.0, 0.0, 1.0, 0.0] | [0.0, 0.0, 0.0, 1.0, 0.0] | [0.0, 0.2, 0.1, 0.6, 0.0] | X
4 | [0.0, 0.0, 0.0, 0.0, 1.0] | [0.0, 0.0, 0.0, 0.0, 1.0] | [0.0, 0.0, 0.0, 0.0, 0.9] | X
Total count: 5
Total percentage correct: 0.2

MNIST Autoencoding

In [21]:
from conx import Network, Layer, Dataset  # explicit imports for the MNIST section
In [22]:
dataset = Dataset.get('mnist')  # 70,000 digit images: inputs (28, 28, 1), targets (10,)
dataset.summary()
Input Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : (28, 28, 1)
   range  : (0.0, 1.0)
Target Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : (10,)
   range  : (0.0, 1.0)
In [23]:
28 * 28  # flattened length of one MNIST image
Out[23]:
784
In [24]:
dataset.reshape_inputs(784)        # flatten each 28x28x1 image into a 784-vector
dataset.set_targets_from_inputs()  # autoencoding: targets become copies of the inputs
In [25]:
dataset.summary()  # confirm inputs and targets now both have shape (784,)
Input Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : (784,)
   range  : (0.0, 1.0)
Target Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : (784,)
   range  : (0.0, 1.0)
In [26]:
net = Network("MNIST-Autoencoder")  # note: rebinds `net` from the first section
In [27]:
# 784 -> 25 -> 25 -> 25 -> 784: the 25-unit hidden layers form a strong bottleneck.
# vshape=(28, 28) renders the flat 784-vectors as 28x28 images in the network display.
net.add(Layer("input", 784, vshape=(28, 28)))
net.add(Layer("hidden1", 25, activation="relu"))
net.add(Layer("hidden2", 25, activation="relu"))
net.add(Layer("hidden3", 25, activation="relu"))
net.add(Layer("output", 784, vshape=(28, 28), activation="sigmoid"))
In [28]:
net.connect()
# NOTE(review): this cell passes error= where In [5] passed loss= — both forms
# appear in this notebook, so conx presumably accepts both; confirm and
# standardize on one keyword.
net.compile(error="binary_crossentropy", optimizer="adam")
In [29]:
net.set_dataset(dataset)  # attach the reshaped MNIST autoencoding dataset
In [30]:
net.dashboard()  # interactive widget view of the network (output not captured here)
In [31]:
import numpy as np  # NOTE(review): mid-notebook import — conventionally belongs at the top
In [32]:
# A random noise "image" to probe the untrained network.
# NOTE(review): no random seed is set, so this cell is not reproducible across runs.
image = np.random.rand(784)
In [33]:
output = net.propagate(image)  # reconstruction of the noise image by the untrained net
In [34]:
# Shrink the dataset so training is fast — presumably keeps the first 100
# examples (the later train log shows validate% over 100 samples); confirm
# slice() semantics against the conx docs.
dataset.slice(100)
In [35]:
# Train toward 50% accuracy at tolerance 0.4, capped at 1000 epochs;
# per the log below, the cap is reached before the accuracy target.
net.train(accuracy=0.5, epochs=1000, report_rate=100, tolerance=.4)
Training...
Epoch #  100 | train error 0.22877 | train accuracy 0.80088 | validate% 0.00000
Epoch #  200 | train error 0.16557 | train accuracy 0.80741 | validate% 0.00000
Epoch #  300 | train error 0.14010 | train accuracy 0.81056 | validate% 0.00000
Epoch #  400 | train error 0.12542 | train accuracy 0.81268 | validate% 0.00000
Epoch #  500 | train error 0.11231 | train accuracy 0.81467 | validate% 0.00000
Epoch #  600 | train error 0.10174 | train accuracy 0.81610 | validate% 0.01000
Epoch #  700 | train error 0.09486 | train accuracy 0.81695 | validate% 0.01000
Epoch #  800 | train error 0.09016 | train accuracy 0.81754 | validate% 0.03000
Epoch #  900 | train error 0.08684 | train accuracy 0.81772 | validate% 0.06000
Epoch # 1000 | train error 0.08438 | train accuracy 0.81795 | validate% 0.11000
========================================================================
Epoch # 1000 | train error 0.08438 | train accuracy 0.81795 | validate% 0.11000
In [36]:
# Propagate the first 20 training images to view their trained reconstructions.
for i in range(20):
    net.propagate(dataset.inputs[i])