3.20. Vision Quest

In [1]:
import jyro.simulator as jy
import conx as cx
from IPython.display import display
import random
import numpy as np
Using Theano backend.
Conx, version 3.6.0
In [2]:
def make_world(physics):
    physics.addBox(0, 0, 5, 5, fill="gray", wallcolor="gray")
    physics.addBox(0, 0, 0.5, 0.5, fill="blue", wallcolor="blue")
    physics.addBox(0, 5, 0.5, 4.5, fill="red", wallcolor="red")
    physics.addBox(4.5, 4.5, 5, 5, fill="green", wallcolor="green")
    physics.addBox(4.5, 0, 5, 0.5, fill="purple", wallcolor="purple")
    physics.addBox(2, 1.75, 2.5, 3.25, fill="orange", wallcolor="orange")
    physics.addLight(3, 2.5, 1)

def make_robot():
    robot = jy.Pioneer("Pioneer", 3, 1, 0)
    robot.addDevice(jy.Camera())
    robot.addDevice(jy.Pioneer16Sonars())
    robot.addDevice(jy.PioneerFrontLightSensors(3))
    return robot

robot = make_robot()
robot.mystep = 0
robot.priority = random.choice(["left", "right"])
sim = jy.Simulator(robot, make_world)

def get_quadrant(x, y, max_x=5, max_y=5):
    if x <= max_x/2 and y <= max_y/2:
        return 1
    elif x <= max_x/2 and y >= max_y/2:
        return 2
    elif x >= max_x/2 and y >= max_y/2:
        return 3
    else:
        return 4

SAMPLES = 500

def controller(robot):
    # Periodically flip the turn direction so the robot doesn't trace
    # the same loop around the world forever.
    if robot.mystep % 200 == 0:
        robot.priority = "left" if robot.priority == "right" else "right"
    image = robot["camera"].getData()

    x, y, h = robot.getPose()
    quad = get_quadrant(x, y)

    # Keep the dataset balanced: only record this image while its
    # quadrant still has fewer than SAMPLES examples.
    counts = [list(robot.targets).count(n) for n in [1, 2, 3, 4]]
    if counts[quad - 1] < SAMPLES:
        robot.images.append(image)
        robot.targets.append(quad)

    # Reactive obstacle avoidance from the front sonars, with a little
    # motor noise so the robot's paths (and viewpoints) vary.
    sonar = robot["sonar"].getData()
    left = min(sonar[0:4])
    right = min(sonar[4:8])
    clearance = 0.5
    noise = random.gauss(0, 0.2)
    if robot.priority == "left":
        if left < clearance or right < clearance:
            robot.move(0, -0.5+noise)
        else:
            robot.move(0.5+noise, 0)
    else:
        if left < clearance or right < clearance:
            robot.move(0, 0.5+noise)
        else:
            robot.move(0.5+noise, 0)
    robot.mystep += 1

robot.brain = controller
robot.images = []
robot.targets = []
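
The controller above doubles as a data collector: each step it pairs the current camera image with the robot's quadrant, and the noisy wall-following motion sweeps the camera through many viewpoints. As a quick sanity check of the labeling, here is one point per quadrant of the 5 x 5 world (a sketch; these poses are illustrative, not taken from the run):

In [ ]:
# Spot-check get_quadrant on one pose per quadrant.
assert get_quadrant(1, 1) == 1   # lower-left
assert get_quadrant(1, 4) == 2   # upper-left
assert get_quadrant(4, 4) == 3   # upper-right
assert get_quadrant(4, 1) == 4   # lower-right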

In [3]:
i = 0
while True:
    if i % 100 == 0:
        print(i, end=" ")
    #display(robot["camera"].getImage())
    sim.step(run_brain=True)
    ls = list(robot.targets)
    x = [ls.count(n) for n in [1,2,3,4]]
    if min(x) == SAMPLES:
        break
    i += 1

## Save the collected images and targets as numpy arrays

with open("vision_images.npy", "wb") as fp:
    np.save(fp, robot.images)
with open("vision_targets.npy", "wb") as fp:
    np.save(fp, robot.targets)
print("done collecting data")
0 100 200 300 400 500 600 700 800 900 1000 1100 1200 1300 1400 1500 1600 1700 1800 1900 2000 2100 2200 2300 2400 2500 2600 2700 2800 2900 3000 done collecting data
In [4]:
!ls -l *.npy
-rw-r--r-- 1 dblank dblank 57600128 Feb 11 23:53 vision_images.npy
-rw-r--r-- 1 dblank dblank    16128 Feb 11 23:53 vision_targets.npy
In [36]:
vision_images = np.load("vision_images.npy")
print(vision_images.shape)
vision_targets = np.load("vision_targets.npy")
print(vision_targets.shape)
(2000, 40, 60, 3)
(2000,)
In [6]:
ls = list(vision_targets)
x = [ls.count(n) for n in [1,2,3,4]]
print(x)
print(sum(x))
[500, 500, 500, 500]
2000
In [7]:
def vision_network(actf):
    net = cx.Network("Vision Controller")
    net.add(cx.ImageLayer("img_input", (40,60), 3),
            cx.Conv2DLayer("conv1", 10, (5, 5),
                        activation=actf),
            cx.Conv2DLayer("conv2", 10, (5, 5),
                        activation=actf),
            cx.MaxPool2DLayer("pool1",
                           pool_size=(2,2)),
            cx.FlattenLayer("flatten"),
            cx.Layer("hidden", 20,
                  activation=actf),
            cx.Layer("output", 4,
                  activation="softmax"))
    net.connect()
    net.compile(loss="categorical_crossentropy",
                optimizer="adam")
    return net

net = vision_network("relu")
net["conv1"].feature = 7
display(net)
net.propagate(vision_images[0])
[SVG: Vision Controller network diagram]
Out[7]:
[0.2061084508895874,
 0.2597593665122986,
 0.3951513469219208,
 0.13898083567619324]
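
The hidden layer's 4,160 inputs follow from the usual shape bookkeeping. A minimal sketch, assuming conx's Conv2DLayer uses the Keras defaults (valid padding, stride 1):

In [ ]:
# Trace the feature-map size through the convolutional stack.
h, w = 40, 60           # img_input
h, w = h - 4, w - 4     # conv1, 5x5 kernel: 36 x 56
h, w = h - 4, w - 4     # conv2, 5x5 kernel: 32 x 52
h, w = h // 2, w // 2   # pool1, 2x2 pooling: 16 x 26
print(h * w * 10)       # 10 feature maps flatten to 4160 units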
In [9]:
net.picture(vision_images[19], rotate=True)
Out[9]:
[network picture (rotated) for vision_images[19]: img_input (40, 60, 3) -> conv1 -> conv2 -> pool1 -> flatten -> hidden (20) -> output (4, softmax)]
In [14]:
net.propagate_to_features("conv1", vision_images[10], scale=3)
Out[14]:

[conv1 feature maps for vision_images[10]: Feature 0 through Feature 9]
In [15]:
net.propagate_to_features("conv1", vision_images[20], scale=3)
Out[15]:

[conv1 feature maps for vision_images[20]: Feature 0 through Feature 9]
In [18]:
img = cx.array_to_image(vision_images[0], scale=3.0)
img
Out[18]:
[PNG: vision_images[0] rendered as an image at 3x scale]
In [21]:
net.picture(vision_images[10], rotate=True)
Out[21]:
[network picture (rotated) for vision_images[10]: img_input (40, 60, 3) -> conv1 -> conv2 -> pool1 -> flatten -> hidden (20) -> output (4, softmax)]
In [23]:
net.picture(vision_images[100], rotate=True)
Out[23]:
[network picture (rotated) for vision_images[100]: img_input (40, 60, 3) -> conv1 -> conv2 -> pool1 -> flatten -> hidden (20) -> output (4, softmax)]
In [24]:
net.propagate_to_features("conv2", vision_images[100], scale=3.0)
Out[24]:

[conv2 feature maps for vision_images[100]: Feature 0 through Feature 9]
In [25]:
ds = net.dataset
In [26]:
ds.clear()
In [37]:
%%time
dataset = []
for i in range(len(vision_images)):
    dataset.append([vision_images[i], cx.onehot(vision_targets[i] - 1, 4)])
ds.load(dataset)
CPU times: user 213 ms, sys: 84.2 ms, total: 297 ms
Wall time: 309 ms
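
The quadrant labels are 1-based, so vision_targets[i] - 1 shifts them to 0-based indices before one-hot encoding. A quick illustration, assuming cx.onehot returns a plain list:

In [ ]:
print(cx.onehot(0, 4))   # quadrant 1 -> [1, 0, 0, 0]
print(cx.onehot(3, 4))   # quadrant 4 -> [0, 0, 0, 1]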
In [38]:
ds.split(.1)
In [39]:
ds.summary()
_________________________________________________________________
Vision Controller Dataset:
Patterns    Shape                 Range
=================================================================
inputs      (40, 60, 3)           (0.0, 1.0)
targets     (4,)                  (0.0, 1.0)
=================================================================
Total patterns: 2000
   Training patterns: 1800
   Testing patterns: 200
_________________________________________________________________
In [22]:
#net.delete()
#net.train(5, report_rate=1, plot=True)
#net.save()
In [34]:
if net.saved():
    net.load()
    net.plot_results()
else:
    net.train(5, report_rate=1, save=True)
[PNG: training results plot]
In [35]:
net.dashboard()
In [40]:
robot["camera"].getImage().resize((240, 160))
Out[40]:
[PNG: robot camera image, resized to 240 x 160]
In [41]:
image = net.propagate_to_image("conv2", vision_images[0], scale=2.0)
image
Out[41]:
[PNG: conv2 activations for vision_images[0]]
In [42]:
net.propagate_to_features("conv2", vision_images[0], scale=3.0)
Out[42]:

[conv2 feature maps for vision_images[0]: Feature 0 through Feature 9]
In [43]:
net.propagate(vision_images[10])
Out[43]:
[3.2931123106205717e-12,
 0.0011761356145143509,
 0.011836284771561623,
 0.9869875907897949]
In [45]:
net.propagate(cx.array_to_image(robot["camera"].getData()))
Out[45]:
[0.2795098125934601,
 0.5486608743667603,
 0.03144071251153946,
 0.1403886079788208]
In [32]:
from conx.widgets import CameraWidget
In [33]:
cam = CameraWidget()
cam
In [34]:
image = cam.get_image().resize((60, 40))
In [35]:
net.propagate(image)
Out[35]:
[0.2507583200931549, 0.2764703929424286, 0.1926824152469635, 0.280088871717453]
In [36]:
net.propagate(robot["camera"].getData())
Out[36]:
[0.14169691503047943,
 0.39364564418792725,
 0.3407711088657379,
 0.123886339366436]
In [37]:
net.test()
========================================================
Testing validation dataset with tolerance 0.1...
Total count: 1800
      correct: 1191
      incorrect: 609
Total percentage correct: 0.6616666666666666
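
With tolerance 0.1, a pattern only counts as correct when every output unit lands within 0.1 of its target, which is stricter than top-class accuracy. A sketch of a classification-style check instead, assuming ds.train_inputs and ds.train_targets can be iterated like arrays:

In [ ]:
# Count how often the most active output matches the target quadrant.
correct = sum(int(np.argmax(net.propagate(inp)) == np.argmax(tgt))
              for inp, tgt in zip(ds.train_inputs, ds.train_targets))
print(correct / len(ds.train_inputs))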
In [46]:
def network_brain(robot):
    # Same reactive wall-following as the data-collection controller, but
    # print the trained network's beliefs about the camera image each step.
    if robot.mystep % 200 == 0:
        robot.priority = "left" if robot.priority == "right" else "right"
    inputs = robot["camera"].getData()
    outputs = net.propagate(inputs)
    print(net.pf(outputs))
    sonar = robot["sonar"].getData()
    left = min(sonar[0:4])
    right = min(sonar[4:8])
    clearance = 0.5
    noise = random.gauss(0, 0.2)
    if robot.priority == "left":
        if left < clearance or right < clearance:
            robot.move(0, -0.5+noise)
        else:
            robot.move(0.5+noise, 0)
    else:
        if left < clearance or right < clearance:
            robot.move(0, 0.5+noise)
        else:
            robot.move(0.5+noise, 0)
    robot.mystep += 1
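
Here the network only reports its quadrant belief while the sonar rules keep driving. To turn the four softmax outputs into a 1-based quadrant estimate, something like this sketch would do:

In [ ]:
# Most likely quadrant (1-4) according to the trained network.
outputs = net.propagate(robot["camera"].getData())
print("predicted quadrant:", int(np.argmax(outputs)) + 1)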
In [48]:
net.visualize = False
robot = make_robot()
robot.brain = network_brain
robot.mystep = 0
robot.priority = random.choice(["left", "right"])
vsim = jy.VSimulator(robot, make_world)