3.18. Deep Dreams

Work in progress. Attempting to turn a keras model into a conx model.

monalisa.jpg monalisa-generated.png

In [1]:
from keras import backend as K
from keras.applications import inception_v3
from keras.preprocessing.image import load_img, img_to_array
import keras

import numpy as np
import scipy
from IPython.display import Image
Using Theano backend.
In [2]:
# Source photograph and the filename prefix for the generated dream images.
base_image_path = "monalisa.jpg"
# Fixed typo: was "monolisa-generated", inconsistent with monalisa.jpg and
# the monalisa-generated.png file referenced in the intro text.
result_prefix = "monalisa-generated"

These are the names of the layers for which we try to maximize activation, as well as their weight in the final loss we try to maximize.

You can tweak these settings to obtain new visual effects.

In [3]:
# Layer-name -> weight map: the activation of each named Inception "mixed"
# layer is maximized during gradient ascent, and its coefficient scales the
# layer's contribution to the total loss. Larger weights (e.g. mixed4)
# dominate the visual effect; tweak these to change the dream's texture.
settings = {
    'features': {
        'mixed2': 0.2,
        'mixed3': 0.5,
        'mixed4': 2.,
        'mixed5': 1.5,
    },
}
In [4]:
def preprocess_image(image_path):
    """Load an image file and convert it to an Inception-ready 4D tensor.

    Reads the picture from disk, adds a leading batch dimension, and applies
    InceptionV3's input normalization.
    """
    pixels = img_to_array(load_img(image_path))
    # Prepend the batch axis expected by the network.
    batched = np.expand_dims(pixels, axis=0)
    return inception_v3.preprocess_input(batched)

def deprocess_image(x):
    """Convert a preprocessed (batched) tensor back into a uint8 RGB image."""
    # Drop the batch axis and put channels last, whichever layout the
    # backend uses.
    if K.image_data_format() == 'channels_first':
        x = x.reshape((3, x.shape[2], x.shape[3]))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((x.shape[1], x.shape[2], 3))
    # Invert Inception's scaling (roughly [-1, 1] -> [0, 1]) in place,
    # then expand to 8-bit pixel values.
    x /= 2.
    x += 0.5
    x *= 255.
    return np.clip(x, 0, 255).astype('uint8')

def eval_loss_and_grads(x):
    """Run the compiled backend function on x and return (loss, gradients).

    Relies on the module-level `fetch_loss_and_grads` K.function being
    defined before this is called.
    """
    loss_value, grad_values = fetch_loss_and_grads([x])[:2]
    return loss_value, grad_values

def resize_img(img, size):
    """Return a copy of the 4D image tensor resized to `size` (rows, cols).

    Uses bilinear interpolation (zoom order=1) on the two spatial axes only;
    batch and channel axes keep a zoom factor of 1.
    """
    img = np.copy(img)
    if K.image_data_format() == 'channels_first':
        height_axis, width_axis = 2, 3
    else:
        height_axis, width_axis = 1, 2
    factors = [1.0, 1.0, 1.0, 1.0]
    factors[height_axis] = float(size[0]) / img.shape[height_axis]
    factors[width_axis] = float(size[1]) / img.shape[width_axis]
    return scipy.ndimage.zoom(img, factors, order=1)

def gradient_ascent(x, iterations, step, max_loss=None):
    """Push x along its loss gradient for up to `iterations` steps.

    Stops early if the loss exceeds `max_loss` (when given), which keeps the
    dream from degenerating. Returns the updated tensor.
    """
    for step_index in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        overshoot = max_loss is not None and loss_value > max_loss
        if overshoot:
            break
        print('..Loss value at', step_index, ':', loss_value)
        x += step * grad_values
    return x

def save_img(img, fname):
    """Deprocess the dream tensor and write it to `fname` as an image file."""
    pil_img = deprocess_image(np.copy(img))
    try:
        scipy.misc.imsave(fname, pil_img)
    except AttributeError:
        # scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2;
        # fall back to PIL via keras' array_to_img (the keras.preprocessing
        # .image submodule is already loaded by the imports above).
        keras.preprocessing.image.array_to_img(pil_img).save(fname)
In [5]:
# Phase 0 = test/inference mode, so layers that behave differently during
# training (e.g. dropout) stay disabled while we dream.
K.set_learning_phase(0)

# Build the InceptionV3 network with our placeholder.
# The model will be loaded with pre-trained ImageNet weights.
# (include_top=False drops the classifier head; only the conv features
# are needed for the dream loss.)
model = inception_v3.InceptionV3(weights='imagenet',
                                 include_top=False)
# The model's input tensor is the "dream" we will optimize via gradient ascent.
dream = model.input
print('Model loaded.')
Model loaded.
In [6]:
from conx import import_keras_model
conx, version 3.4.3
In [7]:
network = import_keras_model(model, "Inception V3")
In [8]:
# Hide every hidden layer so the dashboard only renders input/output layers.
for layer in network.layers:
    is_hidden = layer.kind() == "hidden"
    if is_hidden:
        layer.visible = False
In [9]:
# Load and Inception-preprocess the base photo into a batched tensor.
img = preprocess_image(base_image_path)
In [10]:
# NOTE(review): pokes a private attribute of the conx dataset — fragile;
# presumably this conx version has no public way to set raw inputs. Verify.
network.dataset._inputs = [img]
In [11]:
# Make the layer at index 1 visible again (the loop above hid all
# hidden-kind layers).
network.layers[1].visible = True
In [12]:
# Display conx's interactive dashboard widget for the network.
network.dashboard()
In [13]:
# Run the (unbatched) image through the net and visualize the individual
# feature maps of layer "conv2d_3" (rendered as "Feature 0..58" below).
network.propagate_to_features("conv2d_3", img[0])
Out[13]:

Feature 0

Feature 1

Feature 2

Feature 3

Feature 4

Feature 5

Feature 6

Feature 7

Feature 8

Feature 9

Feature 10

Feature 11

Feature 12

Feature 13

Feature 14

Feature 15

Feature 16

Feature 17

Feature 18

Feature 19

Feature 20

Feature 21

Feature 22

Feature 23

Feature 24

Feature 25

Feature 26

Feature 27

Feature 28

Feature 29

Feature 30

Feature 31

Feature 32

Feature 33

Feature 34

Feature 35

Feature 36

Feature 37

Feature 38

Feature 39

Feature 40

Feature 41

Feature 42

Feature 43

Feature 44

Feature 45

Feature 46

Feature 47

Feature 48

Feature 49

Feature 50

Feature 51

Feature 52

Feature 53

Feature 54

Feature 55

Feature 56

Feature 57

Feature 58