Robot SimulationΒΆ

In [1]:
from jyro.simulator import (Robot, Pioneer, Pioneer16Sonars,
                            PioneerFrontLightSensors, Camera,
                            VSimulator)
import random
import numpy as np
In [2]:
# Create the simulated robot; args are presumably (name, x, y, heading) — confirm against jyro docs.
robot = Pioneer("Pioneer", 3.5, 2, 0)
In [3]:
# Attach a ring of 16 sonar range sensors and a camera to the robot.
robot.addDevice(Pioneer16Sonars())
robot.addDevice(Camera())
# Front light sensors; 3.0 is presumably a range/scale parameter — confirm in jyro docs.
light_sensors = PioneerFrontLightSensors(3.0)
light_sensors.lightMode = 'ambient'  # switch the sensors to their 'ambient' reporting mode
robot.addDevice(light_sensors)
Out[3]:
_images/Robot_Simulation_3_0.svg
In [4]:
def worldf(physics):
    """Construct the simulated world: a 4x4 arena containing one light source.

    Called by the simulator with its physics engine so the world can be built.
    """
    # Arena spans (0, 0) to (4, 4) with gray walls.
    physics.addBox(0, 0, 4, 4, fill="backgroundgreen", wallcolor="gray")
    # Brightness 1.0 — increased for the newer linear light model (per original note).
    physics.addLight(2, 0.75, 1.0)
In [5]:
sim = VSimulator(robot, worldf)
In [6]:
camera = robot.device["camera"]
In [7]:
image = camera.getImage()
image
Out[7]:
_images/Robot_Simulation_7_0.png
In [8]:
image.size
Out[8]:
(60, 40)
In [9]:
data = camera.getData()
data.shape
Out[9]:
(40, 60, 3)
In [10]:
robot.move(0.50, 0.35)
In [11]:
sim.step()
In [12]:
def random_action():
    """Generate a random action from a limited set of possible settings.

    Returns a two-element list [translate, rotate], each drawn independently
    and uniformly from {-1.0, -0.5, 0.0, 0.5, 1.0}.
    """
    settings = [-1.0, -0.5, 0.0, 0.5, 1.0]
    return [random.choice(settings) for _ in range(2)]

def get_senses(robot):
    """Collect the robot's current sensor readings.

    Returns [light, sonar, camera], where sonar distances are scaled by 1/3
    (presumably normalizing by the 3m max range — confirm against jyro docs);
    light and camera data are passed through unchanged.
    """
    readings = []
    readings.append(robot["light"].getData())
    readings.append([distance / 3.0 for distance in robot["sonar"].getData()])
    readings.append(robot["camera"].getData())
    return readings
In [13]:
senses = get_senses(robot)
list(map(len, senses))
Out[13]:
[2, 16, 40]
In [14]:
def brain(robot):
    """Per-step controller: propagate current senses through the net, then drive.

    NOTE(review): the random action is drawn but never used — the robot always
    moves with fixed (0.50, 0.35). The original's commented-out move call
    suggests random motion was deliberately disabled.
    """
    net.propagate(get_senses(robot))
    translate, rotate = random_action()  # still drawn so RNG state advances as before
    robot.move(0.50, 0.35)
In [15]:
# Install the controller function — presumably invoked by the simulator each step; confirm in jyro docs.
robot.brain = brain
In [16]:
from conx import Network, Layer, FlattenLayer, SGD, ImageLayer, Conv2DLayer
import numpy as np
Using Theano backend.
In [17]:
# Build the prediction network. Layer sizes mirror get_senses():
# light=2 values, sonar=16 readings, camera=40x60 RGB frames.
net = Network("Robot Prediction Network")
net.add(Layer("light", 2))
net.add(Layer("sonar", 16))
net.add(ImageLayer("camera", (40,60), 3))  # (height, width), 3 color channels
net.add(FlattenLayer("flatten"))
net.add(Conv2DLayer("conv", 16, (3,3)))  # 16 feature maps, 3x3 kernels
net.add(Layer("hidden", 50, activation="relu"))
net.add(Layer("output1", 2, activation="sigmoid"))
net.add(Layer("hidden2", 5, activation="sigmoid"))
net.add(Layer("hidden3", 10, activation="sigmoid", dropout=0.25))
net.add(Layer("hidden4", 10, activation="sigmoid"))
net.add(Layer("output2", 5, activation="sigmoid"))
In [18]:
# Wire the graph: camera -> conv -> flatten merges with sonar and the
# light -> hidden path at hidden2; hidden3 fans out toward both outputs.
net.connect("sonar", "hidden2")
net.connect("light", "hidden")
net.connect("camera", "conv")
net.connect("conv", "flatten")
net.connect("flatten", "hidden2")
net.connect("hidden", "hidden2")
net.connect("hidden2", "hidden3")
# Disabled direct hidden2 -> output2 edge; output2 is reached via hidden3 instead.
##net.connect("hidden2", "output2")
net.connect("hidden3", "output2")
net.connect("hidden3", "hidden4")
net.connect("hidden4", "output1")
In [19]:
# Compile with the Adam optimizer and mean-squared-error loss.
net.compile(optimizer="adam", error="mse")
net.config["hspace"] = 200  # presumably horizontal spacing for the network diagram — confirm in conx docs
In [24]:
net
Out[24]:
_images/Robot_Simulation_20_1.svg
In [25]:
matrix = net.propagate_to("conv", get_senses(robot))
In [23]:
net["conv"].feature = 6
In [55]:
# Visualize every conv feature map for a single sensor snapshot.
s = get_senses(robot)
for feature_index in range(16):
    net["conv"].feature = feature_index  # select which of the 16 maps to render
    display(net.propagate_to_image("conv", s))
_images/Robot_Simulation_23_0.png
_images/Robot_Simulation_23_1.png
_images/Robot_Simulation_23_2.png
_images/Robot_Simulation_23_3.png
_images/Robot_Simulation_23_4.png
_images/Robot_Simulation_23_5.png
_images/Robot_Simulation_23_6.png
_images/Robot_Simulation_23_7.png
_images/Robot_Simulation_23_8.png
_images/Robot_Simulation_23_9.png
_images/Robot_Simulation_23_10.png
_images/Robot_Simulation_23_11.png
_images/Robot_Simulation_23_12.png
_images/Robot_Simulation_23_13.png
_images/Robot_Simulation_23_14.png
_images/Robot_Simulation_23_15.png
In [61]:
len(matrix[0]) # 38 x 58 x 16
Out[61]:
58
In [63]:
import keras.backend as K
In [64]:
K.image_data_format()
Out[64]:
'channels_last'
In [81]:
s = slice(None, None)
np.array(matrix)[s,s,3]
Out[81]:
array([[ 0.14154796,  0.14154796,  0.14154796, ...,  0.14154796,
         0.14154796,  0.14154796],
       [ 0.14154796,  0.14154796,  0.14154796, ...,  0.14154796,
         0.14154796,  0.14154796],
       [ 0.14154796,  0.14154796,  0.14154796, ...,  0.14154796,
         0.14154796,  0.14154796],
       ...,
       [ 0.18892306,  0.18892306,  0.18892306, ...,  0.18892306,
         0.18892306,  0.18892306],
       [ 0.18892306,  0.18892306,  0.18892306, ...,  0.18892306,
         0.18892306,  0.18892306],
       [ 0.18892306,  0.18892306,  0.18892306, ...,  0.18892306,
         0.18892306,  0.18892306]])
In [65]:
[x for x in net.model.layers if x.name == "conv"][0]
Out[65]:
<keras.layers.convolutional.Conv2D at 0x7fe8f111fdd8>
In [22]:
net["camera"].__class__.__name__
Out[22]:
'ImageLayer'