3.19. Deep Dreams
Work in progress: attempting to turn a Keras model into a conx model.
In [1]:
from keras import backend as K
from keras.applications import inception_v3
from keras.preprocessing.image import load_img, img_to_array
import keras
import numpy as np
import scipy
from IPython.display import Image
Using Theano backend.
In [2]:
# Source image to dream on, and the filename prefix for generated frames.
base_image_path = "monalisa.jpg"
# Fixed typo: was "monolisa-generated", inconsistent with the base image name.
result_prefix = "monalisa-generated"
These are the names of the layers for which we try to maximize activation, as well as their weight in the final loss we try to maximize.
You can tweak these settings to obtain new visual effects.
In [3]:
# InceptionV3 "mixed" layers whose activations we try to maximize, mapped
# to their weight in the final loss. Tweak these for different visuals.
settings = {
    'features': {'mixed2': 0.2, 'mixed3': 0.5, 'mixed4': 2.0, 'mixed5': 1.5},
}
In [4]:
def preprocess_image(image_path):
    """Open an image file and format it as a batched tensor for InceptionV3.

    Loads the file, converts to a float array, prepends a batch axis, and
    applies inception_v3.preprocess_input (whose scaling deprocess_image
    inverts).
    """
    tensor = img_to_array(load_img(image_path))
    batched = np.expand_dims(tensor, axis=0)
    return inception_v3.preprocess_input(batched)
def deprocess_image(x):
    """Convert a batched, Inception-preprocessed tensor into a uint8 image.

    Drops the batch axis, moves channels last when the backend is
    channels-first, then inverts preprocess_input's scaling back to
    [0, 255] and clips.
    """
    if K.image_data_format() == 'channels_first':
        # (1, 3, H, W) -> (H, W, 3)
        x = x.reshape((3, x.shape[2], x.shape[3])).transpose((1, 2, 0))
    else:
        # (1, H, W, 3) -> (H, W, 3)
        x = x.reshape((x.shape[1], x.shape[2], 3))
    # Undo the [-1, 1] scaling applied by inception_v3.preprocess_input.
    x = (x / 2. + 0.5) * 255.
    return np.clip(x, 0, 255).astype('uint8')
def eval_loss_and_grads(x):
    """Evaluate the dream loss and its gradient at image tensor `x`.

    NOTE(review): relies on a global `fetch_loss_and_grads` that is not
    defined anywhere in this notebook — presumably the K.function cell
    from the original deep-dream example is still missing (the notebook
    is a work in progress).
    """
    loss_value, grad_values = fetch_loss_and_grads([x])[:2]
    return loss_value, grad_values
def resize_img(img, size):
    """Return a copy of batched image tensor `img` resized to `size`.

    `size` is (height, width); batch and channel axes are left untouched.
    Uses order-1 (linear) spline interpolation.
    """
    img = np.copy(img)
    # Pick the spatial axes according to the backend's data layout.
    if K.image_data_format() == 'channels_first':
        h_axis, w_axis = 2, 3
    else:
        h_axis, w_axis = 1, 2
    factors = [1.0, 1.0, 1.0, 1.0]
    factors[h_axis] = float(size[0]) / img.shape[h_axis]
    factors[w_axis] = float(size[1]) / img.shape[w_axis]
    return scipy.ndimage.zoom(img, factors, order=1)
def gradient_ascent(x, iterations, step, max_loss=None):
    """Run gradient ascent on image tensor `x` to maximize the dream loss.

    Performs up to `iterations` steps of size `step`, stopping early once
    the loss exceeds `max_loss` (if given) to keep artifacts bounded.
    Returns the updated tensor.
    """
    for step_idx in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break
        print('..Loss value at', step_idx, ':', loss_value)
        x += step * grad_values
    return x
def save_img(img, fname):
    """Deprocess batched image tensor `img` and write it to file `fname`."""
    pil_img = deprocess_image(np.copy(img))
    # scipy.misc.imsave was deprecated in SciPy 1.0 and removed in 1.2.
    # Prefer it when present (matches the original behavior), otherwise
    # fall back to keras' own saver (available since keras 2.1.6).
    if hasattr(scipy, 'misc') and hasattr(scipy.misc, 'imsave'):
        scipy.misc.imsave(fname, pil_img)
    else:
        keras.preprocessing.image.save_img(fname, pil_img)
In [5]:
# Inference mode: freezes learning-phase-dependent behavior for this session.
K.set_learning_phase(0)

# Build the InceptionV3 network with our placeholder.
# The model will be loaded with pre-trained ImageNet weights.
model = inception_v3.InceptionV3(weights='imagenet',
                                 include_top=False)
# The model's input tensor — the image we will optimize (the "dream").
dream = model.input
print('Model loaded.')
Model loaded.
In [6]:
from conx import import_keras_model
conx, version 3.4.3
In [7]:
# Wrap the trained Keras model in a conx Network named "Inception V3"
# so it can be driven through conx's dashboard/propagation API.
network = import_keras_model(model, "Inception V3")
In [8]:
# Collapse every hidden layer so the (very deep) network renders compactly.
for layer in network.layers:
    if layer.kind() != "hidden":
        continue
    layer.visible = False
In [9]:
# Load the source image as a batched, Inception-preprocessed tensor.
img = preprocess_image(base_image_path)
In [10]:
# NOTE(review): writes conx's private `_inputs` attribute directly instead
# of a public dataset API — confirm this still works across conx versions.
network.dataset._inputs = [img]
In [11]:
# Re-show layer index 1 (presumably the first layer after the input —
# TODO confirm against network.layers ordering) for inspection.
network.layers[1].visible = True
In [12]:
# Open the interactive conx dashboard widget for this network.
network.dashboard()
In [13]:
# Propagate the (unbatched) image through the network and display the
# feature maps produced at layer "conv2d_3".
network.propagate_to_features("conv2d_3", img[0])
Out[13]: