3.18. Vision Quest

In [1]:
from jyro.simulator import *
from conx import *
from IPython.display import display
import random
import numpy as np
Using Theano backend.
conx, version 3.5.9
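In this section we drive a simulated Pioneer robot around a 5×5 world whose four quadrants are marked by differently colored corner boxes, collect camera images labeled with the quadrant they were taken in, and then train a convolutional network to recognize the robot's quadrant from vision alone.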
In [2]:
def make_world(physics):
    # 5x5 world with gray outer walls; each corner gets a distinctly
    # colored box so the camera can tell the quadrants apart
    physics.addBox(0, 0, 5, 5, fill="gray", wallcolor="gray")
    physics.addBox(0, 0, 0.5, 0.5, fill="blue", wallcolor="blue")
    physics.addBox(0, 5, 0.5, 4.5, fill="red", wallcolor="red")
    physics.addBox(4.5, 4.5, 5, 5, fill="green", wallcolor="green")
    physics.addBox(4.5, 0, 5, 0.5, fill="purple", wallcolor="purple")
    physics.addBox(2, 1.75, 2.5, 3.25, fill="orange", wallcolor="orange")
    physics.addLight(3, 2.5, 1)

def make_robot():
    robot = Pioneer("Pioneer", 3, 1, 0)
    robot.addDevice(Camera())
    robot.addDevice(Pioneer16Sonars())
    robot.addDevice(PioneerFrontLightSensors(3))
    return robot

robot = make_robot()
robot.mystep = 0
robot.priority = random.choice(["left", "right"])
sim = Simulator(robot, make_world)

def get_quadrant(x, y, max_x=5, max_y=5):
    # 1 = low-x/low-y, 2 = low-x/high-y, 3 = high-x/high-y, 4 = high-x/low-y
    if x <= max_x/2 and y <= max_y/2:
        return 1
    elif x <= max_x/2 and y >= max_y/2:
        return 2
    elif x >= max_x/2 and y >= max_y/2:
        return 3
    else:
        return 4

SAMPLES = 500

def controller(robot):
    # Alternate the preferred turn direction every 200 steps so the
    # robot wanders the whole world instead of circling one spot
    if robot.mystep % 200 == 0:
        robot.priority = "left" if robot.priority == "right" else "right"
    image = robot["camera"].getData()

    x, y, h = robot.getPose()
    quad = get_quadrant(x, y)

    # Record the image only while its quadrant still needs samples,
    # keeping the dataset balanced at SAMPLES per class
    counts = [robot.targets.count(n) for n in [1, 2, 3, 4]]
    if counts[quad - 1] < SAMPLES:
        robot.images.append(image)
        robot.targets.append(quad)

    # Simple sonar-based obstacle avoidance with a little motor noise
    sonar = robot["sonar"].getData()
    left = min(sonar[0:4])
    right = min(sonar[4:8])
    clearance = 0.5
    noise = random.gauss(0, 0.2)
    if robot.priority == "left":
        if left < clearance or right < clearance:
            robot.move(0, -0.5 + noise)
        else:
            robot.move(0.5 + noise, 0)
    else:
        if left < clearance or right < clearance:
            robot.move(0, 0.5 + noise)
        else:
            robot.move(0.5 + noise, 0)
    robot.mystep += 1

robot.brain = controller
robot.images = []
robot.targets = []

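The quadrant labels run 1 = low-x/low-y (blue corner), 2 = low-x/high-y (red), 3 = high-x/high-y (green), and 4 = high-x/low-y (purple). A quick sanity check of get_quadrant with made-up coordinates:

print([get_quadrant(*p) for p in [(1, 1), (1, 4), (4, 4), (4, 1)]])
# expected: [1, 2, 3, 4]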
In [3]:
# Run the simulator until every quadrant has SAMPLES labeled images
i = 0
while True:
    if i % 100 == 0:
        print(i, end=" ")
    #display(robot["camera"].getImage())
    sim.step(run_brain=True)
    counts = [robot.targets.count(n) for n in [1, 2, 3, 4]]
    if min(counts) == SAMPLES:
        break
    i += 1

## Save the collected images and quadrant labels to disk
## (the controller capped each class at SAMPLES, so they are balanced)

with open("vision_images.npy", "wb") as fp:
    np.save(fp, robot.images)
with open("vision_targets.npy", "wb") as fp:
    np.save(fp, robot.targets)
print("done collecting data")
0 100 200 300 400 500 600 700 800 900 1000 1100 1200 1300 1400 1500 1600 1700 1800 1900 2000 2100 2200 2300 2400 2500 2600 2700 2800 2900 3000 3100 3200 3300 3400 3500 3600 3700 3800 3900 4000 4100 done collecting data
In [4]:
!ls -l *.npy
-rw-r--r-- 1 dblank dblank 57600128 Jan 27 12:31 vision_images.npy
-rw-r--r-- 1 dblank dblank    16128 Jan 27 12:31 vision_targets.npy
In [5]:
vision_images = np.load("vision_images.npy")
print(vision_images.shape)
vision_targets = np.load("vision_targets.npy")
print(vision_targets.shape)
(2000, 40, 60, 3)
(2000,)
In [6]:
ls = list(vision_targets)
x = [ls.count(n) for n in [1,2,3,4]]
print(x)
print(sum(x))
[500, 500, 500, 500]
2000
In [7]:
from conx import *
def vision_network(actf):
    net = Network("Vision Controller")
    net.add(ImageLayer("img_input", (40,60), 3))
    net.add(Conv2DLayer("conv1", 10, (5, 5),
                        activation=actf))
    net.add(Conv2DLayer("conv2", 10, (5, 5),
                        activation=actf))
    net.add(MaxPool2DLayer("pool1",
                           pool_size=(2,2)))
    net.add(FlattenLayer("flatten"))
    net.add(Layer("hidden", 20,
                  activation=actf))
    net.add(Layer("output", 4,
                  activation="softmax"))
    net.connect()
    net.compile(loss="categorical_crossentropy",
                optimizer="adam")
    return net

net = vision_network("relu")
net["conv1"].feature = 7
display(net)
net.propagate(vision_images[0])
[network diagram "Vision Controller": img_input (40, 60, 3) → conv1 (Conv2D, relu) → conv2 (Conv2D, relu) → pool1 (MaxPooling2D, pool size (2, 2)) → flatten → hidden (Dense 20, relu) → output (Dense 4, softmax)]
Out[7]:
[0.2408667504787445,
 0.19407963752746582,
 0.17542071640491486,
 0.3896328806877136]
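propagate returns the four softmax activations, and this untrained network produces near-uniform values around 0.25. To turn the activations into a quadrant label, take the argmax and shift back to 1-based labels (a small sketch using the numpy import from the first cell):

outputs = net.propagate(vision_images[0])
print(np.argmax(outputs) + 1)  # indices 0-3 map to quadrants 1-4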
In [8]:
net.propagate(vision_images[19], visualize=True)
Out[8]:
[0.24244247376918793,
 0.20360258221626282,
 0.18065685033798218,
 0.37329810857772827]
In [9]:
net.propagate_to_features("conv1", vision_images[10])
Out[9]:
[ten conv1 feature-map images: Feature 0 through Feature 9]
In [14]:
net.propagate_to_features("conv1", vision_images[20], html=False, visualize=True)
In [15]:
img = array2image(vision_images[0], scale=3.0)
img
Out[15]:
[image: _images/VisionQuest_11_0.png]
In [16]:
net.propagate(vision_images[10])
Out[16]:
[0.2359803318977356,
 0.20740050077438354,
 0.1849192976951599,
 0.37169986963272095]
In [17]:
net.snapshot(vision_images[0])
Out[17]:
[network snapshot: same architecture diagram as above, showing layer activations for vision_images[0]]
In [18]:
net.propagate_to_features("conv2", vision_images[0], scale=3.0)
Out[18]:
[ten conv2 feature-map images at scale 3.0: Feature 0 through Feature 9]
In [19]:
ds = net.dataset
In [20]:
ds.clear()
In [21]:
%%time
dataset = []
for i in range(len(vision_images)):
    dataset.append([vision_images[i], onehot(vision_targets[i] - 1, 4)])
ds.load(dataset)
CPU times: user 121 ms, sys: 24 ms, total: 145 ms
Wall time: 145 ms
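The stored targets are quadrant numbers 1 through 4, so vision_targets[i] - 1 shifts them to 0-based indices before one-hot encoding. For example (assuming conx's onehot(index, width) returns a plain list):

print(onehot(2, 4))  # quadrant 3 → index 2 → [0, 0, 1, 0]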
In [22]:
ds.shuffle()
In [23]:
ds.split(.1)
In [24]:
ds.summary()

Dataset Split:
 * training : 1800
 * testing  : 200
 * total    : 2000

Input Summary:
 * shape : [(40, 60, 3)]
 * range : [(0.0, 1.0)]

Target Summary:
 * shape : [(4,)]
 * range : [(0.0, 1.0)]

In [22]:
#net.delete()
#net.train(5, report_rate=1, plot=True)
#net.save()
In [25]:
if net.saved():
    net.load()
    net.plot_loss_acc()
else:
    net.train(5, report_rate=1, plot=True)
    net.save()

[plot: training and validation error/accuracy curves (_images/VisionQuest_22_0.svg)]
========================================================================
       |  Training |  Training |  Validate |  Validate
Epochs |     Error |  Accuracy |     Error |  Accuracy
------ | --------- | --------- | --------- | ---------
#    5 |   0.36111 |   0.50944 |   0.31159 |   0.60000
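With four balanced classes, chance accuracy is 25%, so roughly 51% training and 60% validation accuracy after only five epochs is already well above chance.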
In [26]:
net.dashboard()
In [27]:
robot["camera"].getImage().resize((240, 160))
Out[27]:
[image: _images/VisionQuest_24_0.png]
In [28]:
image = net.propagate_to_image("conv2", vision_images[0], scale=2.0)
image
Out[28]:
[image: _images/VisionQuest_25_0.png]
In [29]:
net.propagate_to_features("conv2", vision_images[0], scale=3.0)
Out[29]:
[ten conv2 feature-map images at scale 3.0: Feature 0 through Feature 9]
In [30]:
net.propagate(vision_images[10])
Out[30]:
[2.0572382686268043e-10,
 2.5946392270270735e-05,
 0.02174180932343006,
 0.9782322645187378]
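Compare this with Out[16] above: before training, the outputs for vision_images[10] were nearly uniform, but the trained network now classifies the same image as quadrant 4 with about 98% confidence.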
In [31]:
net.propagate(array2image(robot["camera"].getData()))
Out[31]:
[0.14169691503047943,
 0.39364564418792725,
 0.3407711088657379,
 0.123886339366436]
In [32]:
from conx.widgets import CameraWidget
In [33]:
cam = CameraWidget()
cam
In [34]:
image = cam.get_image().resize((60, 40))  # PIL's resize takes (width, height), matching the (40, 60, 3) input shape
In [35]:
net.propagate(image)
Out[35]:
[0.2507583200931549, 0.2764703929424286, 0.1926824152469635, 0.280088871717453]
In [36]:
net.propagate(robot["camera"].getData())
Out[36]:
[0.14169691503047943,
 0.39364564418792725,
 0.3407711088657379,
 0.123886339366436]
In [37]:
net.test()
========================================================
Testing validation dataset with tolerance 0.1...
Total count: 1800
      correct: 1191
      incorrect: 609
Total percentage correct: 0.6616666666666666
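net.test counts a pattern correct only when every output is within the 0.1 tolerance of its target, a strict criterion for softmax outputs. For a classifier, argmax accuracy is usually more informative; a minimal sketch over the loaded arrays (not a built-in conx call):

preds = [np.argmax(net.propagate(img)) + 1 for img in vision_images[:200]]
print("argmax accuracy:", np.mean(np.array(preds) == vision_targets[:200]))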
In [42]:
def network_brain(robot):
    # Same wander-and-avoid behavior as the data-collection controller,
    # but now the trained network classifies each camera frame
    if robot.mystep % 200 == 0:
        robot.priority = "left" if robot.priority == "right" else "right"
    inputs = robot["camera"].getData()
    outputs = net.propagate(inputs)
    print(net.pf(outputs))  # pretty-print the four quadrant activations
    sonar = robot["sonar"].getData()
    left = min(sonar[0:4])
    right = min(sonar[4:8])
    clearance = 0.5
    noise = random.gauss(0, 0.2)
    if robot.priority == "left":
        if left < clearance or right < clearance:
            robot.move(0, -0.5 + noise)
        else:
            robot.move(0.5 + noise, 0)
    else:
        if left < clearance or right < clearance:
            robot.move(0, 0.5 + noise)
        else:
            robot.move(0.5 + noise, 0)
    robot.mystep += 1
In [43]:
net.visualize = False
robot = make_robot()
robot.brain = network_brain
robot.mystep = 0
robot.priority = random.choice(["left", "right"])
vsim = VSimulator(robot, make_world)
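VSimulator opens the interactive simulator view; as the robot wanders and avoids obstacles, network_brain prints the trained network's four quadrant activations for every camera frame.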