Instrumented version - to look at timings

Including:
- replacing gaussian with simpler triangle distribution
- Timing calculations - resulting in using only 50 points.
- Changes (which may need to be reconsidered) for optimization. These are likely to be simplified back.
This commit is contained in:
Danny Staple 2022-12-11 20:44:50 +00:00
parent 46daf66024
commit f57add5106
12 changed files with 534 additions and 104 deletions

View File

@ -14,7 +14,7 @@ class RobotDisplay:
self.arena = {} self.arena = {}
self.display_closed = False self.display_closed = False
self.fig, self.ax = plt.subplots() self.fig, self.ax = plt.subplots()
self.poses = np.zeros([200, 3], dtype=np.float32) self.poses = np.zeros([200, 3], dtype=np.int16)
def handle_close(self, _): def handle_close(self, _):
self.display_closed = True self.display_closed = True
@ -35,7 +35,10 @@ class RobotDisplay:
self.arena = message self.arena = message
if "poses" in message: if "poses" in message:
print(message) print(message)
self.poses[message["offset"]: message["offset"] + len(message["poses"])] = message["poses"] incoming_poses = np.array(message["poses"], dtype=np.int16)
print("Incoming poses shape", incoming_poses.shape)
print("Existing poses shape", self.poses.shape)
self.poses[message["offset"]: message["offset"] + incoming_poses.shape[0]] = incoming_poses
def draw(self): def draw(self):
self.ax.clear() self.ax.clear()

View File

@ -158,6 +158,14 @@ pyparsing = ">=2.2.1"
python-dateutil = ">=2.7" python-dateutil = ">=2.7"
setuptools_scm = ">=7" setuptools_scm = ">=7"
[[package]]
name = "msgpack"
version = "1.0.4"
description = "MessagePack serializer"
category = "main"
optional = false
python-versions = "*"
[[package]] [[package]]
name = "mypy-extensions" name = "mypy-extensions"
version = "0.4.3" version = "0.4.3"
@ -339,7 +347,7 @@ python-versions = ">=3.7"
[metadata] [metadata]
lock-version = "1.1" lock-version = "1.1"
python-versions = "^3.9" python-versions = "^3.9"
content-hash = "4ec6eb3464cbb49afad347e67bdf7c353d7ea4030fd8ed4257098a5df334eb99" content-hash = "a2ae390fc260b23b2aea772611acac3c8626867abcb0e477875407c9f24a5fa6"
[metadata.files] [metadata.files]
async-timeout = [ async-timeout = [
@ -616,6 +624,60 @@ matplotlib = [
{file = "matplotlib-3.6.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5f97141e05baf160c3ec125f06ceb2a44c9bb62f42fcb8ee1c05313c73e99432"}, {file = "matplotlib-3.6.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:5f97141e05baf160c3ec125f06ceb2a44c9bb62f42fcb8ee1c05313c73e99432"},
{file = "matplotlib-3.6.1.tar.gz", hash = "sha256:e2d1b7225666f7e1bcc94c0bc9c587a82e3e8691da4757e357e5c2515222ee37"}, {file = "matplotlib-3.6.1.tar.gz", hash = "sha256:e2d1b7225666f7e1bcc94c0bc9c587a82e3e8691da4757e357e5c2515222ee37"},
] ]
msgpack = [
{file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"},
{file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"},
{file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"},
{file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"},
{file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"},
{file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"},
{file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"},
{file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"},
{file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"},
{file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"},
{file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"},
{file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"},
{file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"},
{file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"},
{file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"},
{file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"},
{file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"},
{file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"},
{file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"},
{file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"},
{file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"},
{file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"},
{file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"},
{file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"},
{file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"},
{file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"},
{file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"},
{file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"},
{file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"},
{file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"},
{file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"},
{file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"},
{file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"},
{file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"},
{file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"},
{file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"},
{file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"},
{file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"},
{file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"},
{file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"},
{file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"},
{file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"},
{file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"},
{file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"},
{file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"},
{file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"},
{file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"},
{file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"},
{file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"},
{file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"},
{file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"},
{file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"},
]
mypy-extensions = [ mypy-extensions = [
{file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
{file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},

View File

@ -11,6 +11,7 @@ python = "^3.9"
matplotlib = "3.6.1" matplotlib = "3.6.1"
numpy = "1.23.4" numpy = "1.23.4"
bleak = "0.19.0" bleak = "0.19.0"
msgpack = "^1.0.4"
[tool.poetry.group.dev.dependencies] [tool.poetry.group.dev.dependencies]

View File

@ -30,14 +30,14 @@ def point_is_inside_arena(x, y):
return True return True
## intention - we can use a distance squared function to avoid the square root, and just square the distance sensor readings too. ## intention - we can use a distance squared function to avoid the square root, and just square the distance sensor readings too.
def get_ray_distance_to_segment_squared(ray, segment): def get_ray_distance_to_segment_squared(ray_x, ray_y, ray_tan, ray_heading, segment):
"""Return the distance squared from the ray origin to the intersection point along the given ray heading. """Return the distance squared from the ray origin to the intersection point along the given ray heading.
The segments are boundary lines, which will be horizontal or vertical, and have known lengths. The segments are boundary lines, which will be horizontal or vertical, and have known lengths.
The ray can have any heading, and will be infinite in length. The ray can have any heading, and will be infinite in length.
Ray -> (x, y, heading) Ray -> (x, y, heading)
ray_tan -> tangent of the heading (optimization)
Segment -> ((x1, y1), (x2, y2)) Segment -> ((x1, y1), (x2, y2))
""" """
ray_x, ray_y, ray_heading = ray
segment_x1, segment_y1 = segment[0] segment_x1, segment_y1 = segment[0]
segment_x2, segment_y2 = segment[1] segment_x2, segment_y2 = segment[1]
# if the segment is horizontal, the ray will intersect it at a known y value # if the segment is horizontal, the ray will intersect it at a known y value
@ -46,7 +46,7 @@ def get_ray_distance_to_segment_squared(ray, segment):
if ray_heading == 0: if ray_heading == 0:
return None return None
# calculate the x value of the intersection point # calculate the x value of the intersection point
intersection_x = ray_x + (segment_y1 - ray_y) / math.tan(ray_heading) intersection_x = ray_x + (segment_y1 - ray_y) / ray_tan
# is the intersection point on the segment? # is the intersection point on the segment?
if intersection_x > max(segment_x1, segment_x2) or intersection_x < min(segment_x1, segment_x2): if intersection_x > max(segment_x1, segment_x2) or intersection_x < min(segment_x1, segment_x2):
return None return None
@ -58,7 +58,7 @@ def get_ray_distance_to_segment_squared(ray, segment):
if ray_heading == math.pi / 2: if ray_heading == math.pi / 2:
return None return None
# calculate the y value of the intersection point # calculate the y value of the intersection point
intersection_y = ray_y + (segment_x1 - ray_x) * math.tan(ray_heading) intersection_y = ray_y + (segment_x1 - ray_x) * ray_tan
# is the intersection point on the segment? # is the intersection point on the segment?
if intersection_y > max(segment_y1, segment_y2) or intersection_y < min(segment_y1, segment_y2): if intersection_y > max(segment_y1, segment_y2) or intersection_y < min(segment_y1, segment_y2):
return None return None
@ -75,8 +75,10 @@ def get_ray_distance_squared_to_nearest_boundary_segment(ray):
""" """
# find the distance to each segment # find the distance to each segment
distances = [] distances = []
ray_x, ray_y, ray_heading = ray
ray_tan = math.tan(ray_heading)
for segment in boundary_lines: for segment in boundary_lines:
distance_squared = get_ray_distance_to_segment_squared(ray, segment) distance_squared = get_ray_distance_to_segment_squared(ray_x, ray_y, ray_tan, ray_heading, segment)
if distance_squared is not None: if distance_squared is not None:
distances.append(distance_squared) distances.append(distance_squared)
# return the minimum distance # return the minimum distance

View File

@ -2,9 +2,10 @@ import asyncio
import json import json
import random import random
from ulab import numpy as np from ulab import numpy as np
from guassian import get_gaussian_sample
import arena import arena
import robot import robot
import math
import time
# initial sample set - uniform # initial sample set - uniform
# then apply sensor model # then apply sensor model
@ -12,19 +13,55 @@ import robot
# then apply motion model # then apply motion model
# and repeat # and repeat
class VaryingWallAvoid:
def __init__(self):
self.speed = 0.6
self.last_call = time.monotonic()
def speed_from_distance(self, distance):
limited_error = min(distance, 300) * self.speed
motor_speed = limited_error / 300
if motor_speed < 0.2:
motor_speed = -0.3
return motor_speed
def update(self, left_distance, right_distance):
# Currently being called every 1.6 seconds - that is far too long.
print("Since last call:", time.monotonic() - self.last_call)
left = self.speed_from_distance(left_distance)
right = self.speed_from_distance(right_distance)
# print("left speed:", left, "right speed:", right)
robot.set_left(left)
robot.set_right(right)
self.last_call = time.monotonic()
triangular_proportion = math.sqrt(6) / 2
def get_triangular_sample(mean, standard_deviation):
base = triangular_proportion * (random.uniform(-standard_deviation, standard_deviation) + random.uniform(-standard_deviation, standard_deviation))
return mean + base
class Simulation: class Simulation:
def __init__(self): def __init__(self):
self.population_size = 200 self.population_size = 50
self.left_distance = 100 self.left_distance = 100
self.right_distance = 100 self.right_distance = 100
self.imu_mix = 0.3 * 0.5
self.encoder_mix = 0.7
self.rotation_standard_dev = 2 # degrees
self.speed_standard_dev = 5 # mm
# Poses - each an array of [x, y, heading] # Poses - each an array of [x, y, heading]
self.poses = np.array( self.poses = np.array(
[(random.uniform(0, arena.width), random.uniform(0, arena.height), random.uniform(0, 360)) for _ in range(self.population_size)], [(
dtype=np.float, int(random.uniform(0, arena.width)),
int(random.uniform(0, arena.height)),
int(random.uniform(0, 360))) for _ in range(self.population_size)],
dtype=np.int16,
) )
self.distance_aim = 100 self.collision_avoider = VaryingWallAvoid()
def apply_sensor_model(self): async def apply_sensor_model(self):
# Timing is about 0.65s
# Based on vl53l1x sensor readings, create weight for each pose. # Based on vl53l1x sensor readings, create weight for each pose.
# vl53l1x standard dev is +/- 5 mm. Each distance is a mean reading # vl53l1x standard dev is +/- 5 mm. Each distance is a mean reading
# we will first determine sensor positions based on poses # we will first determine sensor positions based on poses
@ -32,63 +69,69 @@ class Simulation:
# then check this projected position against occupancy grid # then check this projected position against occupancy grid
# and weight accordingly # and weight accordingly
# distance sensor positions projected forward. left_x, left_y, right_x, right_y # distance sensor positions projected forward. x, y, heading, reading
distance_sensor_positions = np.zeros( fn_start = time.monotonic()
(self.poses.shape[0], 4), dtype=np.float) print("Starting apply sensor model")
distance_sensor_left_rays = np.zeros(
(self.poses.shape[0], 3), dtype=np.float)
distance_sensor_right_rays = np.zeros(
(self.poses.shape[0], 3), dtype=np.float)
# sensors - they are facing forward, either side of the robot. Project them out to the sides # sensors - they are facing forward, either side of the robot. Project them out to the sides
# based on each poses heading # based on each poses heading and turn sensors into rays,
# left sensor # left sensor
poses_left_90 = np.radians(self.poses[:, 2] + 90) poses_left_90 = np.radians(self.poses[:, 2] + 90)
# print("poses_left_90_shape:",poses_left_90.shape, "distance_sensor_positions_shape:",distance_sensor_positions.shape, "poses_shape:",self.poses.shape) # print("poses_left_90_shape:",poses_left_90.shape, "distance_sensor_positions_shape:",distance_sensor_positions.shape, "poses_shape:",self.poses.shape)
distance_sensor_positions[:, 0] = self.poses[:, 0] + np.cos(poses_left_90) * robot.distance_sensor_from_middle distance_sensor_left_rays[:, 0] = self.poses[:, 0] + np.cos(poses_left_90) * robot.distance_sensor_from_middle
distance_sensor_positions[:, 1] = self.poses[:, 1] + np.sin(poses_left_90) * robot.distance_sensor_from_middle distance_sensor_left_rays[:, 1] = self.poses[:, 1] + np.sin(poses_left_90) * robot.distance_sensor_from_middle
distance_sensor_left_rays[:, 2] = np.radians(self.poses[:, 2])
# right sensor # right sensor
poses_right_90 = np.radians(self.poses[:, 2] - 90) poses_right_90 = np.radians(self.poses[:, 2] - 90)
distance_sensor_positions[:, 2] = self.poses[:, 0] + np.cos(poses_right_90) * robot.distance_sensor_from_middle distance_sensor_right_rays[:, 0] = self.poses[:, 0] + np.cos(poses_right_90) * robot.distance_sensor_from_middle
distance_sensor_positions[:, 3] = self.poses[:, 1] + np.sin(poses_right_90) * robot.distance_sensor_from_middle distance_sensor_right_rays[:, 1] = self.poses[:, 1] + np.sin(poses_right_90) * robot.distance_sensor_from_middle
distance_sensor_right_rays[:, 2] = np.radians(self.poses[:, 2])
# for each sensor position, find the distance to the nearest obstacle # for each sensor position, find the distance to the nearest obstacle
distance_sensor_standard_dev = 5 distance_sensor_standard_dev = 5
dl_squared = self.left_distance ** 2 dl_squared = self.left_distance ** 2
dr_squared = self.right_distance ** 2 dr_squared = self.right_distance ** 2
await asyncio.sleep(0)
print("Time to calculate sensor positions:", time.monotonic() - fn_start)
fn_start = time.monotonic()
# weighted poses a numpy array of weights for each pose # weighted poses a numpy array of weights for each pose
weights = np.empty(self.poses.shape[0], dtype=np.float) weights = np.empty(self.poses.shape[0], dtype=np.float)
# 0.6 seconds in this loop!
for index, sensor_position in enumerate(distance_sensor_positions): for index in range(self.poses.shape[0]):
# difference between this distance and the distance sensed is the error # remove any that are outside the arena
# add noise to this error if not arena.point_is_inside_arena(self.poses[index,0], self.poses[index,1]) or \
if not arena.point_is_inside_arena(self.poses[index,0], self.poses[index,1]): not arena.point_is_inside_arena(distance_sensor_left_rays[index,0], distance_sensor_left_rays[index,1]) or \
not arena.point_is_inside_arena(distance_sensor_right_rays[index,0], distance_sensor_right_rays[index,1]):
weights[index] = 0 weights[index] = 0
continue continue
# difference between this distance and the distance sensed is the error
# add noise to this error
# left sensor # left sensor
left_ray = sensor_position[0], sensor_position[1], np.radians(self.poses[index, 2]) noise = get_triangular_sample(0, distance_sensor_standard_dev)
noise = get_gaussian_sample(0, distance_sensor_standard_dev) left_actual = arena.get_ray_distance_squared_to_nearest_boundary_segment(distance_sensor_left_rays[index])
left_actual = arena.get_ray_distance_squared_to_nearest_boundary_segment(left_ray) left_error = abs(left_actual - dl_squared + noise)
if left_actual is None:
print("left_actual is None. Ray was ", left_ray)
left_actual = 100
left_error = abs(left_actual - dl_squared + noise) # error
# right sensor # right sensor
right_ray = sensor_position[2], sensor_position[3], np.radians(self.poses[index, 2]) noise = get_triangular_sample(0, distance_sensor_standard_dev)
noise = get_gaussian_sample(0, distance_sensor_standard_dev) right_actual = arena.get_ray_distance_squared_to_nearest_boundary_segment(distance_sensor_right_rays[index])
right_actual = arena.get_ray_distance_squared_to_nearest_boundary_segment(right_ray) right_error = abs(right_actual - dr_squared + noise)
if right_actual is None:
print("right_actual is None. Ray was ", right_ray)
right_actual = 100
right_error = abs(right_actual - dr_squared + noise) #error
# weight is the inverse of the error # weight is the inverse of the error
weights[index] = 1 / (left_error + right_error) weights[index] = 1 / (left_error + right_error)
print("Time to calculate pose weights", time.monotonic() - fn_start)
await asyncio.sleep(0)
#normalise the weights #normalise the weights
print("Weights sum before normalising:", np.sum(weights)) # print("Weights sum before normalising:", np.sum(weights))
weights = weights / np.sum(weights) weights = weights / np.sum(weights)
print("Weights sum:", np.sum(weights)) # print("Weights sum:", np.sum(weights))
return weights return weights
def resample(self, weights): def resample(self, weights):
# Fast - 0.01 to 0.035 seconds
# based on the weights, resample the poses # based on the weights, resample the poses
# weights is a numpy array of weights # weights is a numpy array of weights
# resample is a numpy array of indices into the poses array # resample is a numpy array of indices into the poses array
# fn_start = time.monotonic()
samples = [] samples = []
# use low variance resampling # use low variance resampling
start = random.uniform(0, 1 / self.population_size) start = random.uniform(0, 1 / self.population_size)
@ -102,66 +145,97 @@ class Simulation:
samples.append(source_index) samples.append(source_index)
# set poses to the resampled poses # set poses to the resampled poses
self.poses = np.array([self.poses[n] for n in samples]) self.poses = np.array([self.poses[n] for n in samples])
# print("resample time", time.monotonic() - fn_start)
async def move_robot(self): def convert_odometry_to_motion(self, left_encoder_delta, right_encoder_delta):
# convert odometry to motion
# left_encoder is the change in the left encoder
# right_encoder is the change in the right encoder
# returns rot1, trans, rot2
# rot1 is the rotation of the robot in radians before the translation
# trans is the distance the robot has moved in mm
# rot2 is the rotation of the robot in radians
# convert encoder counts to mm
left_mm = left_encoder_delta * robot.ticks_to_mm
right_mm = right_encoder_delta * robot.ticks_to_mm
if left_mm == right_mm:
# no rotation
return 0, left_mm, 0
# calculate the ICC
radius = (robot.wheelbase_mm / 2) * (left_mm + right_mm) / (right_mm - left_mm)
theta = (right_mm - left_mm) / robot.wheelbase_mm
# For a small enough motion, assume that the chord length = arc length
arc_length = theta * radius
rot1 = np.degrees(theta/2)
rot2 = rot1
return rot1, arc_length, rot2
async def motion_model(self):
"""move forward, apply the motion model""" """move forward, apply the motion model"""
# fn_start = time.monotonic()
# Reading sensors - 0.001 to 0.002 seconds.
starting_heading = robot.imu.euler[0] starting_heading = robot.imu.euler[0]
encoder_left = robot.left_encoder.read() encoder_left = robot.left_encoder.read()
encoder_right = robot.right_encoder.read() encoder_right = robot.right_encoder.read()
# print("Reading sensors time", time.monotonic() - fn_start)
# move forward - with collision avoidance await asyncio.sleep(0.01)
print("left_distance:", self.left_distance, "right_distance:", self.right_distance) # fn_start = time.monotonic()
if min(self.left_distance, self.right_distance) < self.distance_aim: # record sensor changes - 0.001 to 0.002 seconds
# we are too close to the wall rot1, trans, rot2 = self.convert_odometry_to_motion(
# turn away from the wall robot.left_encoder.read() - encoder_left,
# turn right if left distance is smaller robot.right_encoder.read() - encoder_right)
# turn left if right distance is smaller
forward_speed = 0
if self.left_distance < self.right_distance:
# turn right
turn_speed = -0.5
else:
turn_speed = 0.5
else:
forward_speed = 0.8
print("forward_speed:", forward_speed, "turn_speed:", turn_speed)
robot.set_left(forward_speed + turn_speed)
robot.set_right(forward_speed - turn_speed)
await asyncio.sleep(0.05) try:
# record sensor changes new_heading = robot.imu.euler[0]
left_movement = robot.left_encoder.read() - encoder_left except OSError:
right_movement = robot.right_encoder.read() - encoder_right new_heading = None
speed_in_mm = robot.ticks_to_m * ((left_movement + right_movement) / 2) * 1000
new_heading = robot.imu.euler[0]
if new_heading: if new_heading:
heading_change = starting_heading - new_heading heading_change = starting_heading - new_heading
# blend with the encoder heading changes
rot1 = rot1 * self.encoder_mix + heading_change * self.imu_mix
rot2 = rot2 * self.encoder_mix + heading_change * self.imu_mix
else: else:
print("Failed to get heading") print("Failed to get heading")
heading_change = 0 # print("Got headings time", time.monotonic() - fn_start)
# fn_start = time.monotonic()
# move poses (this is a bit cheeky, and should be using icc) # move poses 0.07 - 0.08 seconds
heading_standard_dev = 2 # degrees rot1_model = np.array([get_triangular_sample(rot1, self.rotation_standard_dev) for _ in range(self.poses.shape[0])])
speed_standard_dev = 5 # mm trans_model = np.array([get_triangular_sample(trans, self.speed_standard_dev) for _ in range(self.poses.shape[0])])
rot2_model = np.array([get_triangular_sample(rot2, self.rotation_standard_dev) for _ in range(self.poses.shape[0])])
radians = np.radians(self.poses[:,2]) self.poses[:,2] += rot1_model
heading_model = np.array([get_gaussian_sample(0, heading_standard_dev) for _ in range(self.poses.shape[0])]) rot1_radians = np.radians(self.poses[:,2])
speed_model = np.array([get_gaussian_sample(speed_in_mm, speed_standard_dev) for _ in range(self.poses.shape[0])]) self.poses[:,0] += trans_model * np.sin(rot1_radians)
self.poses[:,0] += speed_model * np.sin(radians) self.poses[:,1] += trans_model * np.cos(rot1_radians)
self.poses[:,1] += speed_model * np.cos(radians) self.poses[:,2] += rot2_model
self.poses[:,2] += heading_change + heading_model
self.poses[:,2] = np.vectorize(lambda n: float(n % 360))(self.poses[:,2]) self.poses[:,2] = np.vectorize(lambda n: float(n % 360))(self.poses[:,2])
self.poses = np.array(self.poses, dtype=np.int16)
# print("Move poses times", time.monotonic() - fn_start)
async def distance_sensor_updater(self): async def distance_sensor_updater(self):
robot.left_distance.distance_mode = 2
robot.right_distance.distance_mode = 2
robot.left_distance.timing_budget = 50
robot.right_distance.timing_budget = 50
robot.left_distance.start_ranging() robot.left_distance.start_ranging()
robot.right_distance.start_ranging() robot.right_distance.start_ranging()
while True: while True:
# About 0.02 seconds
# loop_start = time.monotonic()
if robot.left_distance.data_ready and robot.left_distance.distance: if robot.left_distance.data_ready and robot.left_distance.distance:
self.left_distance = robot.left_distance.distance * 10 # convert to mm self.left_distance = robot.left_distance.distance * 10 # convert to mm
robot.left_distance.clear_interrupt() robot.left_distance.clear_interrupt()
if robot.right_distance.data_ready and robot.right_distance.distance: if robot.right_distance.data_ready and robot.right_distance.distance:
self.right_distance = robot.right_distance.distance * 10 self.right_distance = robot.right_distance.distance * 10
robot.right_distance.clear_interrupt() robot.right_distance.clear_interrupt()
print("left_distance:", self.left_distance, "right_distance:", self.right_distance)
# move forward - with collision avoidance 0.03 to 0.04 seconds
self.collision_avoider.update(self.left_distance, self.right_distance)
# print("distance_sensor_updater_used_time: ", time.monotonic() - loop_start)
await asyncio.sleep(0.01) await asyncio.sleep(0.01)
async def run(self): async def run(self):
@ -169,11 +243,11 @@ class Simulation:
try: try:
while True: while True:
# print("Applying sensor model") # print("Applying sensor model")
weights = self.apply_sensor_model() weights = await self.apply_sensor_model()
# print("Sensor model complete.\nResampling") # print("Sensor model complete.\nResampling")
self.resample(weights) self.resample(weights)
# print("Resampling complete.\nMoving robot") # print("Resampling complete.\nMoving robot")
await self.move_robot() await self.motion_model()
# print("Robot move complete") # print("Robot move complete")
finally: finally:
robot.stop() robot.stop()
@ -203,6 +277,8 @@ def read_command():
async def updater(simulation): async def updater(simulation):
print("starting updater") print("starting updater")
while True: while True:
loop_start = time.monotonic()
# Imu calibration and send - 0.0625 seconds
sys_status, gyro, accel, mag = robot.imu.calibration_status sys_status, gyro, accel, mag = robot.imu.calibration_status
if sys_status < 3: if sys_status < 3:
send_json( send_json(
@ -215,14 +291,18 @@ async def updater(simulation):
} }
} }
) )
print("Sent imu calibration in", time.monotonic() - loop_start)
# The big time delay is in sending the poses. # The big time delay is in sending the poses.
print("Sending poses", simulation.poses.shape[0]) print("Sending poses", simulation.poses.shape[0])
for n in range(0, simulation.poses.shape[0], 10): for n in range(0, simulation.poses.shape[0], 10):
loop_start = time.monotonic()
# each pose group is 0.2 seconds.
# print("Sending poses from ", n, "to", n+10, "of", simulation.poses.shape[0], "poses") # print("Sending poses from ", n, "to", n+10, "of", simulation.poses.shape[0], "poses")
send_json({ send_json({
"poses": simulation.poses[n:n+10].tolist(), "poses": simulation.poses[n:n+10].tolist(),
"offset": n, "offset": n,
}) })
print("Sent poses in", time.monotonic() - loop_start)
await asyncio.sleep(0.01) await asyncio.sleep(0.01)
await asyncio.sleep(0.5) await asyncio.sleep(0.5)
@ -231,6 +311,7 @@ async def command_handler(simulation):
print("Starting handler") print("Starting handler")
update_task = None update_task = None
simulation_task = None simulation_task = None
# simulation_task = asyncio.create_task(simulation.run())
while True: while True:
if robot.uart.in_waiting: if robot.uart.in_waiting:
print("Receiving data...") print("Receiving data...")
@ -247,7 +328,8 @@ async def command_handler(simulation):
if not update_task: if not update_task:
update_task = asyncio.create_task(updater(simulation)) update_task = asyncio.create_task(updater(simulation))
elif request["command"] == "start": elif request["command"] == "start":
simulation_task = asyncio.create_task(simulation.run()) if not simulation_task:
simulation_task = asyncio.create_task(simulation.run())
await asyncio.sleep(0.1) await asyncio.sleep(0.1)

View File

@ -1,16 +0,0 @@
import random
import math
def get_standard_normal_sample():
    """Return one sample from the standard normal distribution N(0, 1).

    Uses the Marsaglia polar method: draw (u, v) uniformly from the
    square [-1, 1] x [-1, 1], accept only points strictly inside the
    unit circle (and not at the origin), then transform.
    """
    # timeit on mac says - 0.915
    while True:
        u = random.uniform(-1, 1)
        v = random.uniform(-1, 1)
        s = u * u + v * v
        # Reject points outside the unit circle.  Also reject s == 0,
        # which the original missed: math.log(0) raises a math domain
        # error and the division by s would be undefined.
        if s >= 1 or s == 0:
            continue
        return u * math.sqrt(-2 * math.log(s) / s)
def get_gaussian_sample(mean, standard_deviation):
    """Sample from N(mean, standard_deviation**2) by scaling and
    shifting a standard-normal draw."""
    sample = get_standard_normal_sample()
    return mean + standard_deviation * sample

View File

@ -14,9 +14,10 @@ wheel_circumference_mm = math.pi * wheel_diameter_mm
gear_ratio = 298 gear_ratio = 298
encoder_poles = 28 encoder_poles = 28
ticks_per_revolution = encoder_poles * gear_ratio ticks_per_revolution = encoder_poles * gear_ratio
ticks_to_m = (wheel_circumference_mm / ticks_per_revolution) / 1000 ticks_to_mm = wheel_circumference_mm / ticks_per_revolution
ticks_to_m = ticks_to_mm / 1000
m_to_ticks = 1 / ticks_to_m m_to_ticks = 1 / ticks_to_m
wheelbase_mm = 170
motor_A2 = pwmio.PWMOut(board.GP17, frequency=100) motor_A2 = pwmio.PWMOut(board.GP17, frequency=100)
motor_A1 = pwmio.PWMOut(board.GP16, frequency=100) motor_A1 = pwmio.PWMOut(board.GP16, frequency=100)

View File

@ -0,0 +1,75 @@
import asyncio
import json
import time
import pid_controller
import robot
class RpmSpeedController:
    """Closed-loop speed controller for one wheel.

    Drives a motor (via ``motor_fn``) so the encoder tick rate tracks
    the requested ``speed`` (in encoder ticks per second), using a PID
    controller whose output is accumulated into a PWM command
    (velocity-form control).
    """

    def __init__(self, encoder, motor_fn):
        self.encoder = encoder
        self.motor_fn = motor_fn
        self.pid = pid_controller.PIDController(3, 0, 1)
        self.speed = 0
        self.reset()

    def reset(self):
        """Re-baseline the encoder count and clear controller state."""
        self.last_ticks = self.encoder.read()
        self.pwm = 0
        self.pid.reset()

    def update(self, dt):
        """Run one control step over the elapsed interval ``dt`` seconds."""
        ticks_now = self.encoder.read()
        measured_rate = (ticks_now - self.last_ticks) / dt
        self.last_ticks = ticks_now
        # Feed the tracking error through the PID and accumulate the
        # correction into the PWM command sent to the motor.
        correction = self.pid.calculate(self.speed - measured_rate, dt)
        self.pwm += correction
        self.motor_fn(self.pwm)
class MotionControl:
    """Coordinates the two wheel speed controllers."""

    def __init__(self):
        self.left_speed_controller = RpmSpeedController(robot.left_encoder, robot.set_left)
        self.right_speed_controller = RpmSpeedController(robot.right_encoder, robot.set_right)

    def plan_motion(self, turn1, distance, turn2):
        """Plan a motion - based on making a turn,
        moving forward a distance,
        and then making another turn."""
        pass

    def update(self, dt):
        """Step both wheel speed controllers by ``dt`` seconds."""
        self.left_speed_controller.update(dt)
        self.right_speed_controller.update(dt)

    # Backwards-compatible alias: existing callers use the misspelled
    # name "uodate"; keep it working while fixing the typo.
    uodate = update
async def main_loop(dt):
    """Periodically step the motion controller.

    ``dt`` is the initial loop period in seconds; after the first
    iteration, the measured elapsed time between iterations is reused
    both as the controller timestep and as the next sleep duration.
    """
    motion_control = MotionControl()
    # Bug fix: last_time was never initialised, so the first
    # "dt = current_time - last_time" raised a NameError.
    last_time = time.monotonic()
    while True:
        await asyncio.sleep(dt)
        current_time = time.monotonic()
        dt = current_time - last_time
        last_time = current_time
        motion_control.uodate(dt)
async def command_handler():
    """Read JSON commands from stdin and dispatch them.

    Supported commands: "start" (launch the motion loop once) and
    "get_speed" (report the current speed as JSON).
    """
    # Bug fix: the original rebound the name main_loop to the created
    # Task, so a second "start" would crash; use a separate task name
    # and guard against duplicate starts.
    main_task = None
    while True:
        command = json.loads(input())
        if command["command"] == "start":
            if not main_task:
                main_task = asyncio.create_task(main_loop(0.2))
        elif command["command"] == "get_speed":
            # Bug fix: "robot = robot.Robot()" raised UnboundLocalError
            # (the assignment makes "robot" local) and the robot module
            # defines no Robot class -- use the module directly.
            # NOTE(review): robot.get_speed() is not visible in the
            # robot module shown here -- confirm it exists.
            print(json.dumps(robot.get_speed()))
        else:
            print("Unknown command")
asyncio.run(command_handler())

View File

@ -0,0 +1,27 @@
class PIDController:
    """Discrete PID controller with a low-pass-filtered derivative term
    and optional clamping of the integral (anti-windup)."""

    def __init__(self, kp, ki, kd, d_filter_gain=0.1, imax=None, imin=None):
        self.kp = kp
        self.ki = ki
        self.kd = kd
        self.d_filter_gain = d_filter_gain
        self.imax = imax
        self.imin = imin
        self.reset()

    def reset(self):
        """Clear all accumulated controller state."""
        self.integral = 0
        self.error_prev = 0
        self.derivative = 0

    def calculate(self, error, dt):
        """Return the control output for ``error`` over timestep ``dt``."""
        # Accumulate the integral, clamping against wind-up when
        # limits were configured.
        self.integral += error * dt
        if self.imax is not None:
            self.integral = min(self.integral, self.imax)
        if self.imin is not None:
            self.integral = max(self.integral, self.imin)
        # Low-pass filter the error change before differentiating, so
        # the derivative term does not amplify measurement noise.
        filtered_step = (error - self.error_prev) * self.d_filter_gain
        self.error_prev += filtered_step
        self.derivative = filtered_step / dt
        p_term = self.kp * error
        i_term = self.ki * self.integral
        d_term = self.kd * self.derivative
        return p_term + i_term + d_term

View File

@ -0,0 +1,84 @@
import rp2pio
import adafruit_pioasm
import array
import asyncio
# PIO program implementing a quadrature decoder: it watches the two
# encoder channel pins, and on every edge decides the count direction
# from the old vs new state of channel 1, keeping a running count in
# the OSR and pushing each new count to the RX FIFO.
program = """
; use the osr for count
; input pins c1 c2
set y, 0 ; clear y
mov osr, y ; and clear osr
read:
; x will be the old value
; y the new values
mov x, y ; store old Y in x
in null, 32 ; Clear ISR - using y
in pins, 2 ; read two pins into y
mov y, isr
jmp x!=y, different ; Jump if its different
jmp read ; otherwise loop back to read
different:
; x has old value, y has new.
; extract the upper bit of X.
in x, 31 ; get bit 31 - old p1 (remember which direction it came in)
in null, 31 ; keep only 1 bit
mov x, isr ; put this back in x
jmp !x, c1_old_zero
c1_old_not_zero:
jmp pin, count_up
jmp count_down
c1_old_zero:
jmp pin, count_down
; fall through
count_up:
; for a clockwise move - we'll add 1 by inverting
mov x, ~ osr ; store inverted OSR on x
jmp x--, fake ; use jump to take off 1
fake:
mov x, ~ x ; invert back
jmp send
count_down:
; for a clockwise move, just take one off
mov x, osr ; store osr in x
jmp x--, send ; dec and send
send:
; send x.
mov isr, x ; send it
push noblock ; put ISR into input FIFO
mov osr, x ; put X back in OSR
jmp read ; loop back
"""
# Assemble once at import time; QuadratureEncoder loads this into a
# PIO state machine per encoder.
assembled = adafruit_pioasm.assemble(program)
class QuadratureEncoder:
    def __init__(self, first_pin, second_pin, reversed=False):
        """Encoder with 2 pins. Must use sequential pins on the board"""
        # Run the quadrature-decoding PIO program on a state machine;
        # frequency=0 means "run at full system clock speed".
        self.sm = rp2pio.StateMachine(
            assembled,
            frequency=0,
            first_in_pin=first_pin,
            jmp_pin=second_pin,
            in_pin_count=2,
        )
        # When True, read() negates the count (for a mirror-mounted motor).
        self.reversed = reversed
        # Single signed 32-bit slot holding the latest count pushed by
        # the state machine.
        self._buffer = array.array("i", [0])
        # NOTE(review): create_task requires a running asyncio event
        # loop at construction time -- this raises otherwise.
        asyncio.create_task(self.poll_loop())

    async def poll_loop(self):
        # Continuously drain the state machine's RX FIFO so _buffer
        # always holds the most recent count.
        while True:
            await asyncio.sleep(0)
            while self.sm.in_waiting:
                self.sm.readinto(self._buffer)

    def read(self):
        # Latest encoder count, sign-corrected for mounting direction.
        if self.reversed:
            return -self._buffer[0]
        else:
            return self._buffer[0]

78
ch-13/layer-1/robot/robot.py Executable file
View File

@ -0,0 +1,78 @@
import board
import pwmio
import pio_encoder
import busio
import adafruit_vl53l1x
import math
import busio
import adafruit_bno055
# Serial link for commands/telemetry (9600 baud; see the note in the
# msgpack experiment file -- higher rates can drop characters).
uart = busio.UART(board.GP12, board.GP13, baudrate=9600)
# Wheel/encoder geometry used to convert encoder ticks to distance.
wheel_diameter_mm = 70
wheel_circumference_mm = math.pi * wheel_diameter_mm
gear_ratio = 298
encoder_poles = 28
ticks_per_revolution = encoder_poles * gear_ratio
# Metres of travel per encoder tick, and its inverse.
ticks_to_m = (wheel_circumference_mm / ticks_per_revolution) / 1000
m_to_ticks = 1 / ticks_to_m
# Each motor is driven by a pair of PWM pins (one per direction).
motor_A2 = pwmio.PWMOut(board.GP17, frequency=100)
motor_A1 = pwmio.PWMOut(board.GP16, frequency=100)
motor_B2 = pwmio.PWMOut(board.GP18, frequency=100)
motor_B1 = pwmio.PWMOut(board.GP19, frequency=100)
right_motor = motor_A1, motor_A2
left_motor = motor_B1, motor_B2
# Quadrature encoders; the left one counts opposite because the motor
# is mirror-mounted.
right_encoder = pio_encoder.QuadratureEncoder(board.GP20, board.GP21)
left_encoder = pio_encoder.QuadratureEncoder(board.GP26, board.GP27, reversed=True)
# Two I2C buses: the distance sensors share an address, so each gets
# its own bus; the IMU shares bus 0 with the left distance sensor.
i2c0 = busio.I2C(sda=board.GP0, scl=board.GP1)
i2c1 = busio.I2C(sda=board.GP2, scl=board.GP3)
left_distance = adafruit_vl53l1x.VL53L1X(i2c0)
right_distance = adafruit_vl53l1x.VL53L1X(i2c1)
distance_sensor_from_middle = 40 # approx mm
imu = adafruit_bno055.BNO055_I2C(i2c0)
imu.mode = adafruit_bno055.NDOF_MODE # should be in chapter 12!
def stop():
    """Cut power to every motor pin, stopping both motors."""
    for pin in (motor_A1, motor_A2, motor_B1, motor_B2):
        pin.duty_cycle = 0
def set_speed(motor, speed):
    """Drive ``motor`` (a pair of PWM direction pins) at ``speed``.

    ``speed`` is -1.0..1.0; negative values swap the pin pair to
    reverse direction, and magnitudes below 0.1 are a dead band that
    stops the motor.
    """
    max_duty = 2 ** 16 - 1
    # Dead band: treat tiny magnitudes as a stop request.
    # NOTE(review): duty_cycle = 1 (out of 65535) on the second pin is
    # effectively off and looks like a typo for 0 -- confirm intent.
    if abs(speed) < 0.1:
        motor[0].duty_cycle = 0
        motor[1].duty_cycle = 1
        return
    if speed < 0:
        # Reversed pin order flips the drive direction.
        pins = (motor[1], motor[0])
        speed = -speed
    else:
        pins = motor
    pins[0].duty_cycle = int(max_duty * min(speed, 1))
    pins[1].duty_cycle = 0
def set_left(speed):
    """Drive the left motor at ``speed`` (-1.0 to 1.0)."""
    set_speed(left_motor, speed)
def set_right(speed):
    """Drive the right motor at ``speed`` (-1.0 to 1.0)."""
    set_speed(right_motor, speed)
def check_imu_status():
    """Report IMU calibration over the UART; True when the system
    status reads fully calibrated (3)."""
    sys_status, gyro, accel, mag = imu.calibration_status
    status_line = f"Sys: {sys_status}, Gyro: {gyro}, Accel: {accel}, Mag: {mag}\n"
    uart.write(status_line.encode())
    return sys_status == 3

View File

@ -0,0 +1,31 @@
from ulab import numpy as np
import random
import msgpack
from io import BytesIO
# Scratch experiment: compare msgpack vs JSON payload sizes for
# sending particle poses over the UART link.
data = np.array([[random.uniform(-200, 200), random.uniform(-200, 200), random.uniform(0, 360)] for i in range(20)], dtype=np.int16)
print(data)
print(data.shape)
# Pack the same {"offset", "poses"} message shape the robot sends.
buffer = BytesIO()
msgpack.pack({"offset": 10, "poses": data.tolist()}, buffer)
buffer.seek(0)
print(len(buffer.getvalue())) # json is 618 bytes, msgpack is 595 bytes. msgpack is 3.7% smaller
print(buffer.getvalue())
import json
# JSON equivalent of the same payload, for the size comparison above.
as_int = np.array(data, dtype=np.int16)
as_int_json = json.dumps({"offset": 10, "poses": as_int.tolist()})
## on pc
import msgpack
import numpy as np
# NOTE(review): placeholder string -- raw_data must be the actual
# bytes received from the robot; unpackb on this literal would fail.
raw_data = "bytes from robot"
data = msgpack.unpackb(raw_data)
poses = np.array(data["poses"])
print(poses)
# Avoiding the text could make it smaller.
# From https://learn.adafruit.com/introducing-the-adafruit-bluefruit-le-uart-friend/hardware
# Note that we do not recommend using higher baudrates than 9600 because the nRF51 UART can drop characters!
# Is the complexity of the msgpack worth the 3.7% size reduction?