Autoencoding

One-hot Encoder

In [1]:
from conx import *
conx, version 3.2.3
Using Theano backend.
In [2]:
# Width of the one-hot patterns and of the input/output layers below.
size = 5
In [3]:
# Build a 5-5-5 autoencoder: the network must reproduce its input on its output.
net = Network("Autoencoder")
net.add(Layer("input", size, minmax=(0,1)))
# Hidden layer has the same width as the input (5); visible=True keeps it
# rendered in the network picture/dashboard.
net.add(Layer("hidden", 5, activation="relu", visible=True))
# Sigmoid keeps outputs in (0, 1), matching the 0/1 one-hot targets.
net.add(Layer("output", size, activation="sigmoid"))
net.config["font_family"] = "monospace"
In [4]:
# With no arguments, connect layers in the order they were added:
# input -> hidden -> output.
net.connect()
In [5]:
# Binary cross-entropy suits the 0/1 targets; adam for the optimizer.
net.compile(error='binary_crossentropy', optimizer="adam")
In [6]:
net
Out[6]:
AutoencoderLayer: output (output) shape = (5,) Keras class = Dense activation = sigmoidoutputWeights from hidden to output output/kernel has shape (5, 5) output/bias has shape (5,)Layer: hidden (hidden) shape = (5,) Keras class = Dense activation = reluhiddenWeights from input to hidden hidden/kernel has shape (5, 5) hidden/bias has shape (5,)Layer: input (input) shape = (5,) Keras class = Inputinput
In [7]:
# One training pattern per class: the i-th one-hot vector of length `size`.
patterns = []
for index in range(size):
    patterns.append(onehot(index, size))
In [8]:
patterns[0]
Out[8]:
[1, 0, 0, 0, 0]
In [9]:
# Autoencoding: each pattern is both the input and its own target.
net.dataset.load([(p,p) for p in patterns])
In [10]:
net
Out[10]:
AutoencoderLayer: output (output) shape = (5,) Keras class = Dense activation = sigmoidoutputWeights from hidden to output output/kernel has shape (5, 5) output/bias has shape (5,)Layer: hidden (hidden) shape = (5,) Keras class = Dense activation = reluhiddenWeights from input to hidden hidden/kernel has shape (5, 5) hidden/bias has shape (5,)Layer: input (input) shape = (5,) Keras class = Inputinput
In [11]:
import time
# Propagate each pattern through the (untrained) network; the sleep gives
# the notebook's network picture time to animate between patterns.
for i in range(size):
    net.propagate(net.dataset.inputs[i])
    time.sleep(1)
In [12]:
net.dataset.summary()
Input Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : (5,)
   range  : (0.0, 1.0)
Target Summary:
   count  : 5 (5 for training, 0 for testing)
   shape  : (5,)
   range  : (0.0, 1.0)
In [13]:
# Train until 100% accuracy (or 10000 epochs); an output counts as correct
# when it is within `tolerance` of its target. Progress printed every 200 epochs.
net.train(accuracy=1, epochs=10000, report_rate=200, tolerance=0.4)
Training...
Epoch #  200 | train error 0.57706 | train accuracy 0.80000 | validate% 0.00000
Epoch #  400 | train error 0.47392 | train accuracy 0.88000 | validate% 0.20000
Epoch #  600 | train error 0.37294 | train accuracy 0.92000 | validate% 0.20000
Epoch #  800 | train error 0.29373 | train accuracy 0.88000 | validate% 0.60000
Epoch # 1000 | train error 0.22443 | train accuracy 0.88000 | validate% 0.60000
Epoch # 1200 | train error 0.16399 | train accuracy 0.96000 | validate% 0.60000
Epoch # 1400 | train error 0.11628 | train accuracy 1.00000 | validate% 0.80000
Epoch # 1600 | train error 0.08301 | train accuracy 1.00000 | validate% 0.80000
========================================================================
Epoch # 1605 | train error 0.08229 | train accuracy 1.00000 | validate% 1.00000
In [14]:
# Check the trained network on the first pattern; the output should be
# close to the one-hot target [1, 0, 0, 0, 0].
net.propagate(net.dataset.inputs[0])
Out[14]:
[0.8052372932434082,
 0.04953927919268608,
 0.00020874451729469,
 0.02787386253476143,
 0.0008814419852569699]
In [15]:
# Evaluate all patterns with the same tolerance used during training.
net.test(tolerance=0.4)
Testing on training dataset...
# | inputs | targets | outputs | result
---------------------------------------
0 | [1.00,0.00,0.00,0.00,0.00] | [1.00,0.00,0.00,0.00,0.00] | [0.81,0.05,0.00,0.03,0.00] | correct
1 | [0.00,1.00,0.00,0.00,0.00] | [0.00,1.00,0.00,0.00,0.00] | [0.40,0.70,0.11,0.03,0.03] | correct
2 | [0.00,0.00,1.00,0.00,0.00] | [0.00,0.00,1.00,0.00,0.00] | [0.01,0.03,0.91,0.04,0.05] | correct
3 | [0.00,0.00,0.00,1.00,0.00] | [0.00,0.00,0.00,1.00,0.00] | [0.04,0.00,0.01,0.92,0.01] | correct
4 | [0.00,0.00,0.00,0.00,1.00] | [0.00,0.00,0.00,0.00,1.00] | [0.05,0.00,0.08,0.07,0.86] | correct
Total count: 5
      correct: 5
      incorrect: 0
Total percentage correct: 1.0
In [16]:
# Re-run the propagation animation now that the network is trained
# (uses `time` imported in the earlier cell).
for i in range(size):
    net.propagate(net.dataset.inputs[i])
    time.sleep(1)
In [17]:
net.dashboard()

MNIST Autoencoding

In [1]:
from conx import *
conx, version 3.2.3
Using Theano backend.
In [2]:
# Second notebook: a convolutional autoencoder for MNIST digits.
net = Network("MNIST-Autoencoder")
In [3]:
# 28x28 grayscale input (1 channel).
net.add(ImageLayer("input", (28,28), 1))
# 3 feature maps with 5x5 kernels.
net.add(Conv2DLayer("conv", 3, (5,5), activation="relu"))
net.add(MaxPool2DLayer("pool", pool_size=(2,2)))
net.add(FlattenLayer("flatten"))
# 25-unit bottleneck: the compressed representation of the image.
net.add(Layer("hidden3", 25, activation="relu"))
# NOTE(review): output layer is declared (28,28,1) while the targets are
# reshaped to a flat (784,) vector in the dataset cell below — presumably
# conx reconciles the two shapes; confirm against the conx Layer docs.
net.add(Layer("output", (28,28,1), activation="sigmoid"))
In [4]:
# Connect layers in add-order, compile with mean-squared error (targets are
# real-valued pixel intensities), then display the network summary.
net.connect()
net.compile(error="mse", optimizer="adam")
net
Out[4]:
MNIST-AutoencoderLayer: output (output) shape = (28, 28, 1) Keras class = Dense activation = sigmoidoutputWeights from hidden3 to output output/kernel has shape (25, 784) output/bias has shape (784,)Layer: hidden3 (hidden) shape = (25,) Keras class = Dense activation = reluhidden3Weights from flatten to hidden3 hidden3/kernel has shape (432, 25) hidden3/bias has shape (25,)Layer: flatten (hidden) Keras class = FlattenflattenWeights from pool to flattenLayer: pool (hidden) Keras class = MaxPooling2D pool_size = (2, 2)pool30Weights from conv to poolLayer: conv (hidden) Keras class = Conv2D activation = reluconv30Weights from input to conv conv/kernel has shape (5, 5, 1, 3) conv/bias has shape (3,)Layer: input (input) shape = (28, 28, 1) Keras class = Inputinput
In [5]:
# Fetch MNIST, then make it an autoencoding task: targets = inputs,
# with the 28x28x1 target images flattened to 784-element vectors
# (bank 0) to match the dense output layer.
net.dataset.get('mnist')
net.dataset.set_targets_from_inputs()
net.dataset.targets.reshape(0, (28 * 28))
net.dataset.summary()
Input Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : (28, 28, 1)
   range  : (0.0, 1.0)
Target Summary:
   count  : 70000 (70000 for training, 0 for testing)
   shape  : (784,)
   range  : (0.0, 1.0)
In [6]:
net.dashboard()
In [7]:
# Visualize the three pooled feature maps for the first digit,
# one per row, magnified 10x.
net.propagate_to_features("pool", net.dataset.inputs[0], cols=1, scale=10)
Out[7]:

Feature 0

Feature 1

Feature 2
In [8]:
import numpy as np
from conx import get_form
# A random-noise "image" shaped like the network's input (28, 28, 1).
# NOTE(review): no random seed is set, so this cell is not reproducible
# across runs.
image = np.random.rand(784).reshape((28, 28, 1))
In [9]:
# Inspect the nested-list structure of the network's output
# (a 28x28x1 array of numbers, per the Out below).
get_form(net.propagate(image))
Out[9]:
[[[numbers.Number, 1], 28], 28]
In [10]:
# Render the output layer's activations as a PIL-style image and
# show its (width, height).
output = net.propagate_to_image("output", image)
output.size
Out[10]:
(28, 28)
In [11]:
# The 25-value bottleneck encoding of the random image
# (zeros where the relu units are inactive).
net.propagate_to("hidden3", image)
Out[11]:
[0.6238702535629272,
 0.0,
 0.0,
 0.0,
 0.653206467628479,
 0.0,
 0.7518496513366699,
 0.3881341814994812,
 1.2514455318450928,
 0.0,
 0.0,
 1.102600336074829,
 0.0,
 0.0,
 0.0,
 0.13875724375247955,
 0.4268752932548523,
 0.6694626808166504,
 0.23066118359565735,
 0.0,
 0.0,
 0.20673391222953796,
 0.2686443328857422,
 0.12682291865348816,
 0.0]
In [13]:
# Keep only the first 10 examples so training below is fast enough to demo.
net.dataset.slice(10)
In [14]:
# Train toward 50% accuracy; with 784 real-valued outputs per example,
# exact-within-tolerance accuracy stays low even as the error drops.
net.train(accuracy=0.5, epochs=1000, report_rate=100, tolerance=.4)
Training...
Epoch #  100 | train error 0.03947 | train accuracy 0.00000 | validate% 0.00000
Epoch #  200 | train error 0.00952 | train accuracy 0.00000 | validate% 0.00000
Epoch #  300 | train error 0.00393 | train accuracy 0.00000 | validate% 0.10000
Epoch #  400 | train error 0.00280 | train accuracy 0.00000 | validate% 0.20000
Epoch #  500 | train error 0.00213 | train accuracy 0.00000 | validate% 0.20000
Epoch #  600 | train error 0.00184 | train accuracy 0.00000 | validate% 0.20000
Epoch #  700 | train error 0.00162 | train accuracy 0.00000 | validate% 0.30000
Epoch #  800 | train error 0.00151 | train accuracy 0.00000 | validate% 0.30000
Epoch #  900 | train error 0.00129 | train accuracy 0.00000 | validate% 0.30000
Epoch # 1000 | train error 0.00126 | train accuracy 0.00000 | validate% 0.30000
========================================================================
Epoch # 1000 | train error 0.00126 | train accuracy 0.00000 | validate% 0.30000
In [15]:
# Propagate each of the 10 remaining examples (updates the dashboard view).
for i in range(10):
    net.propagate(net.dataset.inputs[i])
In [17]:
# Summarize correctness per example without printing the 784-value
# input/output vectors.
net.test(show_inputs=False, show_outputs=False)
Testing on training dataset...
# | result
---------------------------------------
0 | X
1 | X
2 | X
3 | correct
4 | X
5 | X
6 | X
7 | X
8 | X
9 | X
Total count: 10
      correct: 1
      incorrect: 9
Total percentage correct: 0.1