MSL Input and Drawing Programming Interface

Input: 
//True when key/button is down, false when up. 
bool input_check(const int key); 
 
//True once when the key/button is first pressed; after that, the key must be released and pressed down again before it returns true again. 
bool input_check_pressed(const int key); 
 
//True when the key/button is released. 
bool input_check_released(const int key); 
 
//Regular Keyboard Bindings 
kb_bang                 '!' 
kb_quote                '\"' 
kb_she                  '#' 
kb_dollar               '$' 
kb_percent              '%' 
kb_ampersand            '&' 
kb_apostrophe           '\'' 
kb_open_parentheses     '(' 
kb_close_parentheses    ')' 
kb_asterisk             '*' 
kb_plus                 '+' 
kb_comma                ',' 
kb_minus                '-' 
kb_period               '.' 
kb_forward_slash        '/' 
kb_0                    '0' 
kb_1                    '1' 
kb_2                    '2' 
kb_3                    '3' 
kb_4                    '4' 
kb_5                    '5' 
kb_6                    '6' 
kb_7                    '7' 
kb_8                    '8' 
kb_9                    '9' 
kb_colon                ':' 
kb_semi_colon           ';' 
kb_less_than            '<' 
kb_equal                '=' 
kb_greater_than         '>' 
kb_question             '?' 
kb_at                   '@' 
kb_A                    'A' 
kb_B                    'B' 
kb_C                    'C' 
kb_D                    'D' 
kb_E                    'E' 
kb_F                    'F' 
kb_G                    'G' 
kb_H                    'H' 
kb_I                    'I' 
kb_J                    'J' 
kb_K                    'K' 
kb_L                    'L' 
kb_M                    'M' 
kb_N                    'N' 
kb_O                    'O' 
kb_P                    'P' 
kb_Q                    'Q' 
kb_R                    'R' 
kb_S                    'S' 
kb_T                    'T' 
kb_U                    'U' 
kb_V                    'V' 
kb_W                    'W' 
kb_X                    'X' 
kb_Y                    'Y' 
kb_Z                    'Z' 
kb_open_square          '[' 
kb_back_slash           '\\' 
kb_close_square         ']' 
kb_carrot               '^' 
kb_under_score          '_' 
kb_accent               '`' 
kb_a                    'a' 
kb_b                    'b' 
kb_c                    'c' 
kb_d                    'd' 
kb_e                    'e' 
kb_f                    'f' 
kb_g                    'g' 
kb_h                    'h' 
kb_i                    'i' 
kb_j                    'j' 
kb_k                    'k' 
kb_l                    'l' 
kb_m                    'm' 
kb_n                    'n' 
kb_o                    'o' 
kb_p                    'p' 
kb_q                    'q' 
kb_r                    'r' 
kb_s                    's' 
kb_t                    't' 
kb_u                    'u' 
kb_v                    'v' 
kb_w                    'w' 
kb_x                    'x' 
kb_y                    'y' 
kb_z                    'z' 
kb_open_bracket         '{' 
kb_pipe                 '|' 
kb_close_bracket        '}' 
kb_tilde                '~' 
 
//Advanced Keyboard Bindings 
kb_backspace 
kb_tab 
kb_enter 
kb_escape 
kb_space 
kb_insert 
kb_numlock 
kb_delete 
 
//Special Keyboard Bindings 
kb_BASE 
kb_f1 
kb_f2 
kb_f3 
kb_f4 
kb_f5 
kb_f6 
kb_f7 
kb_f8 
kb_f9 
kb_f10 
kb_f11 
kb_f12 
kb_left 
kb_up 
kb_right 
kb_down 
kb_page_up 
kb_page_down 
kb_home 
kb_end 
 
//Mouse Bindings 
mb_BASE 
mb_left 
mb_middle 
mb_right 
mb_scroll_up 
mb_scroll_down
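
Example usage (a minimal sketch: it assumes these functions are polled once per frame from whatever update loop your MSL program uses, and the MSL header include is omitted since its name is not part of this reference):

//Poll input once per frame (sketch against the declarations above)
#include <iostream>

void update()
{
    //Held: true every frame the key is down
    if(input_check(kb_right))
        std::cout<<"moving right"<<std::endl;

    //Pressed: true only on the frame the key first goes down
    if(input_check_pressed(kb_space))
        std::cout<<"jump!"<<std::endl;

    //Released: true when the left mouse button comes back up
    if(input_check_released(mb_left))
        std::cout<<"mouse button let go"<<std::endl;
}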
 
Drawing: 
//Color Class Declaration 
class color 
{ 
    public: 
        //Friend Classes 
        friend class sprite; 
 
        //Constructor (Default) 
        color(const float red=1,const float green=1,const float blue=1,const float alpha=1); 
 
        //Member Variables 
        float r; 
        float g; 
        float b; 
        float a; 
}; 
 
//Point Direction Function (Returns direction between two points in degrees) 
double point_direction(const double x1,const double y1,const double x2,const double y2); 
 
//Point Distance Function (Returns distance between two points) 
double point_distance(const double x1,const double y1,const double x2,const double y2); 
 
//Basic Shape Drawing Functions 
void draw_point(const double x,const double y,const msl::color& color=msl::color(1,1,1,1)); 
 
void draw_line(const double x1,const double y1,const double x2,const double y2,const msl::color& color=msl::color(1,1,1,1)); 
 
void draw_triangle(const double x1,const double y1,const double x2,const double y2,const double x3,const double y3, 
    const bool fill,const msl::color& color=msl::color(1,1,1,1)); 
 
void draw_rectangle(const double x,const double y,const double width,const double height,const bool fill,const msl::color& color=msl::color(1,1,1,1)); 
 
void draw_rectangle_center(const double x,const double y,const double width,const double height,const bool fill,const msl::color& color=msl::color(1,1,1,1)); 
 
void draw_rectangle_gradient(const double x,const double y,const double width,const double height,const bool fill, 
    const msl::color& color_top_left=msl::color(1,1,1,1), 
    const msl::color& color_top_right=msl::color(1,1,1,1), 
    const msl::color& color_bottom_right=msl::color(1,1,1,1), 
    const msl::color& color_bottom_left=msl::color(1,1,1,1)); 
 
void draw_rectangle_center_gradient(const double x,const double y,const double width,const double height,const bool fill, 
    const msl::color& color_top_left=msl::color(1,1,1,1), 
    const msl::color& color_top_right=msl::color(1,1,1,1), 
    const msl::color& color_bottom_right=msl::color(1,1,1,1), 
    const msl::color& color_bottom_left=msl::color(1,1,1,1)); 
 
void draw_circle(const double x,const double y,const double radius,const msl::color& color=msl::color(1,1,1,1));
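
Example usage (a minimal sketch: it assumes this runs inside whatever per-frame draw callback MSL provides; the window setup and header include are omitted since they are not part of this reference):

//Draw a simple scene with the functions declared above
#include <cmath>

void draw_scene()
{
    const double pi=3.14159265358979;

    //Dark filled background rectangle, anchored at the top-left corner
    draw_rectangle(0,0,640,480,true,msl::color(0.2,0.2,0.2,1));

    //Robot body: white filled circle
    const double rx=320,ry=240,radius=16;
    draw_circle(rx,ry,radius);

    //Heading marker: point_direction() returns degrees from the robot to a
    //target, which we convert to radians to draw a short red line that way
    const double tx=400,ty=200;
    const double dir_rad=point_direction(rx,ry,tx,ty)*pi/180.0;
    draw_line(rx,ry,rx+std::cos(dir_rad)*2*radius,ry+std::sin(dir_rad)*2*radius,
        msl::color(1,0,0,1));

    //Distance readout: an unfilled bar whose width is the distance in pixels
    const double dist=point_distance(rx,ry,tx,ty);
    draw_rectangle(10,10,dist,8,false);
}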

UAV tracking with Bullseye!

To control the UAV from a computer, we need to know where the UAV is located, so we can send it piloting commands to make it go wherever we want.  Hopefully we can use the bullseye detector we built, which uses a webcam image.  Here’s Mike’s little UAV with a cardboard bullseye duct taped on top:

The bullseye tracks really well!

The next step is building an autopilot system for closed-loop UAV control, rather than the R/C controller Mike is using for manual control above.  We’re playing with the XBee right now, trying to set up a telemetry system for the computer to feed piloting commands to the UAV.

Automatic location & direction tracking, and physical/virtual video!

Our basic goal is to build “cyber-physical systems” that combine physical parts, like robots, with virtual parts, like simulations.  This weekend, I finally got one working!

For robot localization, I’m now using the gradient-based bullseye detection algorithm described in my last post (or try my OpenCV bullseye detection code), but I’m using a slightly blockier bulls-eye to make it easier to see.  For direction, I’m looking at the color balance around the bullseye: I compare the center of mass of red pixels (front of the robot) with the center of mass of green pixels (back of the robot).  This gives me a reliable robot direction, and it only bounces around by a few degrees!  The direction-finding code is here; the magic is cv::moments for finding the center of mass.
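
Here is a rough sketch of that red-versus-green idea in OpenCV; the HSV ranges and the minimum-pixel check are illustrative placeholders, not the values from the actual direction-finding code:

#include <opencv2/opencv.hpp>
#include <cmath>

//Returns true and fills heading_deg (degrees) if both colors are visible.
bool robot_direction(const cv::Mat& bgr,double& heading_deg)
{
    cv::Mat hsv;
    cv::cvtColor(bgr,hsv,cv::COLOR_BGR2HSV);

    //Rough masks for "red-ish" and "green-ish" pixels (placeholder HSV ranges).
    cv::Mat red_mask,green_mask;
    cv::inRange(hsv,cv::Scalar(0,100,100),cv::Scalar(10,255,255),red_mask);
    cv::inRange(hsv,cv::Scalar(45,100,100),cv::Scalar(75,255,255),green_mask);

    //cv::moments gives the area (m00) and first moments (m10,m01) of a mask;
    //the centroid is (m10/m00,m01/m00).
    const cv::Moments mr=cv::moments(red_mask,true);
    const cv::Moments mg=cv::moments(green_mask,true);
    if(mr.m00<1||mg.m00<1)
        return false;

    const double red_x=mr.m10/mr.m00,red_y=mr.m01/mr.m00;
    const double green_x=mg.m10/mg.m00,green_y=mg.m01/mg.m00;

    //Heading is the angle of the green-to-red vector, in degrees.
    heading_deg=std::atan2(red_y-green_y,red_x-green_x)*180.0/3.14159265358979;
    return true;
}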

Small mobile robot with tank treads and a colored pattern on top.
Rovoduino, with a multi-color coarse bulls-eye on top. The red half of the bulls-eye is in front, and the green in back, so drawing a line from green to red gives the robot’s direction.  This is enough to process the output of the two ultrasonic range sensors on the front.

I’m using a coarse pattern so I can see the robot reliably from the ceiling, with my 120 degree wide-angle webcam.  The wide angle gives my robot more room to drive around before it runs off the screen!  There is some motion blur when the robot is moving, which is another reason a coarser bullseye works better.

Top-down view of room, with robot in center, and pilot to the side.
View from top-down webcam–the robot’s colors and pattern are clearly visible. This is actually only half the full frame, even though the camera is only 7 feet off the ground, since this camera has a very wide field of view.

The next step was writing a little MSL 2d graphical display program on my laptop, which combines the robot’s location and direction from the webcam, with ultrasonic sensor readings from the robot reported over XBee radio.  Here’s a screenshot:

Onscreen display of robot's path, with sensor data drawn in shades of gray
The robot is drawn at its true position and orientation. The current ultrasonic distance sensor readings are drawn in red. The robot’s understanding of the room is in shades of gray.

The main thing a mobile robot needs to understand is where it can safely drive.  Places the robot has already driven are drawn in black–definitely driveable.  Places the robot has seen to be clear are in dark gray–probably driveable.  Detected obstacles are shown in white–you can’t drive there.
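
A sketch of how a display like this can be drawn with the MSL calls listed at the top of this page; the occupancy grid, cell size, and robot pose variables here are stand-ins for illustration, not the actual program’s data structures:

#include <cmath>

const int GRID_W=64,GRID_H=48;
const double CELL=10;                 //pixels per grid cell
float occupancy[GRID_W][GRID_H];      //0=driven, 0.25=seen clear, 0.5=unknown, 1=obstacle

void draw_map(const double robot_x,const double robot_y,const double robot_dir_deg,
    const double sonar_left,const double sonar_right)
{
    //Map cells: black=driven, dark gray=seen clear, white=obstacle.
    for(int gx=0;gx<GRID_W;++gx)
        for(int gy=0;gy<GRID_H;++gy)
        {
            const float v=occupancy[gx][gy];
            draw_rectangle(gx*CELL,gy*CELL,CELL,CELL,true,msl::color(v,v,v,1));
        }

    //Robot drawn at its tracked position.
    draw_rectangle_center(robot_x,robot_y,20,20,true,msl::color(0,0,1,1));

    //Current ultrasonic readings drawn as red rays from the robot,
    //one per sensor, offset a little to each side of the robot's heading.
    const double rad=robot_dir_deg*3.14159265358979/180.0;
    draw_line(robot_x,robot_y,robot_x+std::cos(rad-0.3)*sonar_left,
        robot_y+std::sin(rad-0.3)*sonar_left,msl::color(1,0,0,1));
    draw_line(robot_x,robot_y,robot_x+std::cos(rad+0.3)*sonar_right,
        robot_y+std::sin(rad+0.3)*sonar_right,msl::color(1,0,0,1));
}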

The end result?  I can combine physical locations and sensor readings with a cyber model.  The fact that physical and virtual models match up in realtime is really cool to watch!

P.S.  It’s pretty tricky to reconstruct true obstacle positions when all you get are distance readings–when the sensor is reading a distance of 50cm, this tells you there’s nothing closer than 50cm, and you know something is at 50cm somewhere along the sensor’s viewable arc, but not where it is along that arc!

In the simulation above, I’m treating “nothing detected here” as slightly higher priority than “something detected near here”, an idea which works quite well in simulation but isn’t perfect in practice.
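
A sketch of that priority rule: walking out along the sensor ray, every cell short of the reading is marked clear, and the cell at the reading is marked as an obstacle only if it has never been seen clear.  The grid layout and cell states are placeholders, not the simulation’s actual representation:

#include <cmath>

enum cell_state {UNKNOWN,CLEAR,OBSTACLE};
const int GRID_W=64,GRID_H=48;
cell_state grid[GRID_W][GRID_H];      //starts out all UNKNOWN

//Positions and ranges here are measured in grid cells.
void update_from_sonar(const double sx,const double sy,const double dir_rad,
    const double range,const double max_range)
{
    //Everything closer than the reading is empty space: "nothing detected here".
    for(double d=0;d<range;d+=0.5)
    {
        const int gx=(int)(sx+std::cos(dir_rad)*d);
        const int gy=(int)(sy+std::sin(dir_rad)*d);
        if(gx<0||gy<0||gx>=GRID_W||gy>=GRID_H)
            return;
        grid[gx][gy]=CLEAR;
    }

    //A reading below max range means something is at that distance, somewhere
    //along the sensor's arc.  Here we just mark the cell on the sensor axis,
    //and only if a previous pass hasn't already shown it to be clear, so
    //"nothing detected here" outranks "something detected near here".
    if(range<max_range)
    {
        const int gx=(int)(sx+std::cos(dir_rad)*range);
        const int gy=(int)(sy+std::sin(dir_rad)*range);
        if(gx>=0&&gy>=0&&gx<GRID_W&&gy<GRID_H&&grid[gx][gy]!=CLEAR)
            grid[gx][gy]=OBSTACLE;
    }
}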

 

Robot Tracking with a Bulls-eye

One of our big needs has been a computer vision robot tracking system–once we know where the robots are, we can start working on wheel odometry, sensor fusion, multi-robot path planning, and the rest of our search and rescue setup.  The hard part here has been teaching the computer to recognize the robot and distinguish it from the background–this is something humans do really well, but it’s a hard problem.

Charlie had the good idea of tracking a bulls-eye image, which is nice because it has rotational symmetry.  Here’s an image, where we might want to find the blue bulls-eye symbol in the center.  (Sadly, the computer doesn’t understand “Look for the bulls-eye” until we teach it what that means!)

High contrast concentric circles target in blue against an office background.

There’s a cool little trick using “spatial brightness gradients” to detect circles.  A gradient points in the direction of greatest change, like dark to light, and it’s actually pretty easy to compute for an image.

The direction of the black shape’s gradient is shown with the gray bars. The gradient follows the outline of the shape, but is perpendicular to it, like railroad ties.

This afternoon I had the realization that the spatial brightness gradients in a bulls-eye are all lined up with the center of the circle (more formally, the gradient along an arc intersects the center of the arc).

Here’s an image where I’ve extended the brightness gradients out from each edge, by drawing a dim line through each point oriented along the gradient.  (Gradients are computed in OpenCV using cv::Sobel to get the X and Y components, but to draw dim lines I needed to rasterize them myself.  It’s a little faster if you skip low-magnitude gradients, which probably aren’t part of our high-contrast target.)
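
A rough sketch of that gradient-voting step in OpenCV; the real code is in the cyberalaska repository linked below, and the gradient-magnitude threshold and line length here are placeholder values:

#include <opencv2/opencv.hpp>
#include <cmath>

cv::Mat accumulate_gradient_lines(const cv::Mat& gray)
{
    cv::Mat gx,gy;
    cv::Sobel(gray,gx,CV_32F,1,0);    //d(brightness)/dx
    cv::Sobel(gray,gy,CV_32F,0,1);    //d(brightness)/dy

    cv::Mat votes=cv::Mat::zeros(gray.size(),CV_32F);
    const float min_mag=50.0f;        //skip weak gradients (placeholder)
    const int half_len=40;            //how far to extend each line (placeholder)

    for(int y=0;y<gray.rows;++y)
        for(int x=0;x<gray.cols;++x)
        {
            const float dx=gx.at<float>(y,x),dy=gy.at<float>(y,x);
            const float mag=std::sqrt(dx*dx+dy*dy);
            if(mag<min_mag)
                continue;

            //Step along the gradient direction in both directions, adding a
            //small vote at each pixel the line passes through.
            const float ux=dx/mag,uy=dy/mag;
            for(int t=-half_len;t<=half_len;++t)
            {
                const int px=(int)(x+ux*t),py=(int)(y+uy*t);
                if(px>=0&&py>=0&&px<votes.cols&&py<votes.rows)
                    votes.at<float>(py,px)+=1.0f;
            }
        }

    return votes;   //bulls-eye centers show up as bright spots in this image
}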

Black image with thin white gradient lines scattered around.

Now that we’ve drawn dim white lines oriented along the gradients, they all intersect at the center of the bulls-eye.  And this isn’t just for debugging: I can efficiently count intersecting lines by rasterizing them all.  So now I have a much easier computer vision problem: find the bright spot!

Finding the single brightest spot would be easy, but I wanted to support multiple bulls-eyes for multiple robots, and the program should be smart enough to realize it’s not seeing anything, so I needed to be a little smarter.  Finding all points brighter than some threshold would work too, except a big clear bulls-eye might have lots of points near the center that are all above the threshold.  So what I do is find all points brighter than a threshold (currently 100 crossing gradients) that are also brighter than any nearby point.
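
A sketch of that peak-finding rule (above a vote threshold, and brighter than any nearby point); the 100-vote threshold comes from the text above, while the neighborhood window size is a placeholder:

#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Point> find_bullseye_centers(const cv::Mat& votes)
{
    std::vector<cv::Point> centers;
    const float threshold=100.0f;     //minimum crossing-gradient count
    const int win=10;                 //local-maximum window radius in pixels (placeholder)

    //Dilation replaces each pixel with the maximum of its neighborhood, so a
    //pixel equal to its dilated value is a local maximum.
    cv::Mat local_max;
    cv::dilate(votes,local_max,cv::Mat::ones(2*win+1,2*win+1,CV_8U));

    for(int y=0;y<votes.rows;++y)
        for(int x=0;x<votes.cols;++x)
        {
            const float v=votes.at<float>(y,x);
            if(v>=threshold&&v>=local_max.at<float>(y,x))
                centers.push_back(cv::Point(x,y));
        }

    return centers;   //empty if no bulls-eye is visible
}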

Here’s the final result, where I’ve automatically identified the bulls-eye, and figured out how big it is and how it’s oriented.  The computer can actually keep up with video, doing this 30 times per second, and can reliably track a dozen bulls-eyes at once!

To summarize, the code (1) finds strong brightness gradients, (2) extends lines along those gradients, and (3) finds where many of those lines cross.  Each bulls-eye center has tons of crossing gradients, one per pixel in the edges of the bulls-eye.  This works, and it’s very hard to fool because of all the ‘votes’ for a true bulls-eye–and very few other objects have so many gradients that all converge at a single point.  We did manage to get a white Rover5 wheel to count as a fake bullseye against a black monitor background, but it wasn’t easy.

How accurate is it?  Really accurate!  We can track a 3-inch bulls-eye image from 8 feet away with sub-pixel precision–the random location standard deviation is under 0.05 pixels; worst-case location error is only ±0.2 pixels, or under 1 millimeter!  I’m also estimating orientation by taking one quadrant out of the bulls-eye, and comparing the gradient-derived center with the center of mass; this orientation estimate is less reliable but still has a variance of only 2-3 degrees, and ±10 degrees worst case.

There are still problems; in particular if you tilt the bulls-eye more than about 45 degrees, the circles become ellipses and the gradients don’t line up.  If the shape moves too fast, the blurring destroys the gradients.  Also, if the bulls-eye is too close to the camera, my little gradient lines are too short to reach the center and the program fails.

If you’ve got a webcam and a command line, you can get the code and try it like this:

   sudo apt-get install git-core libopencv-dev build-essential
   git clone http://projects.cs.uaf.edu/cyberalaska
   cd cyberalaska/vision/bullseye
   # (print out images/bullseye.pdf)
   make
   ./track

Once this setup is working reliably, we’ll package up an easy-to-use robot tracking server for you!

OpenCV for multi-color tracking and localization

Localization, or “where is my robot?”, is really important, since you can’t tell the robot where to go unless you know where you’re starting.  It’s also a hard problem to solve reliably, especially indoors where you don’t have GPS.  For the 2013 CPS challenge, we used a Kinect to find our flying UAV, but we’d like to support ground vehicle localization too, and that’s not easy in a depth image.

I’ve done webcam color matching for decades, but I’ve always used my own homebrew image access and processing libraries, which makes it hard to use, port, and modify my stuff–even for me!  This month, I’ve finally been learning a standard, portable video and image analysis library: OpenCV.  It’s even got a simple GUI library built in, so you can add sliders and such.

Here’s the program in action:

The red and green blobs to the right of my head are bright pink and green squares of paper, detected and highlighted in the image.  Note the bad matches on my red chair.

The basic process is to find all the pixels that match a given color, estimate their center of mass, then draw a smaller box around that center of mass for a second pass.  This produces a “trimmed mean” position, which is less sensitive to outliers.
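
A sketch of that two-pass trimmed-mean idea in OpenCV; the target color range, minimum pixel count, and box size are placeholder values rather than the tracker’s slider settings:

#include <opencv2/opencv.hpp>

//Finds the centroid of pixels matching a target color, then refines it with a
//second pass restricted to a small box around the first estimate.
bool track_color(const cv::Mat& bgr,cv::Point2d& center,int& matched_pixels)
{
    cv::Mat hsv,mask;
    cv::cvtColor(bgr,hsv,cv::COLOR_BGR2HSV);

    //Pass 1: all pixels within tolerance of the target color.
    const cv::Scalar lo(40,80,80),hi(80,255,255);   //placeholder green-ish range
    cv::inRange(hsv,lo,hi,mask);

    cv::Moments m=cv::moments(mask,true);
    matched_pixels=(int)m.m00;
    if(matched_pixels<50)                           //too few matches: not visible
        return false;
    center=cv::Point2d(m.m10/m.m00,m.m01/m.m00);

    //Pass 2: repeat inside a smaller box around the first estimate, which
    //trims outliers (e.g. a distant chair that happens to match the color).
    const int box=60;
    cv::Rect roi((int)center.x-box/2,(int)center.y-box/2,box,box);
    roi&=cv::Rect(0,0,mask.cols,mask.rows);         //clip to the image
    cv::Moments m2=cv::moments(mask(roi),true);
    if(m2.m00>0)
        center=cv::Point2d(roi.x+m2.m10/m2.m00,roi.y+m2.m01/m2.m00);

    return true;
}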

The output of the program is the average (X,Y) coordinates of the center of mass of each color, and the number of pixels that matched.  If not enough pixels match, the target object probably isn’t visible.  The program has sliders so you can adjust the matching tolerances, and you can click to get a new hue, saturation, value (HSV) colorspace target color.

If you have the target color set exactly right, and your webcam is working well, you can get accuracy better than 0.1 pixel!  But if the colors are dancing around, you might get 2-3 pixel variance in the location.  And if you have the colors wrong, or something else is the same color, you can get entirely the wrong location.

Because the apparent color of a reflective object like a sheet of paper depends on the ambient lighting, I need to allow loose color tolerances or else tweak the target colors to get a good match.  We should try using some diffused LEDs or color lasers to produce inherent emissive colors; back in 2008 David Krnavek used an LED in a white ping-pong ball for color tracking, with good results.

Latency seems good, and I get about 20 frames per second even on my low-end backup laptop and random webcam.  However, the OpenCV graphical output is fairly expensive, so I don’t do it every frame.

Download the color tracker code from my GitHub.  The plan is to build up something like this into a reliable, web-accessible multi-robot tracking system!

Aero Balance Beam: Control Theory Demo

At our workshop this week, we did some hands-on testing of our control theory knowledge with an aerodynamically-driven balance beam–basically just an electrically controlled fan that blows to lift up a weighted board.  The idea was to capture the difficulties of UAV control in a simpler and slightly less dangerous system.  Little did we know what we were signing up for!  (Arduino code and data after the break.)


OrionDev-GK12 Edition

Here’s a small zip file that contains everything you need to create 2D graphics in C++.  This comes pre-packed with OpenGL, GLU, GLUT, GLEW, GLUI, and SOIL.  Included are several examples.

LINK

Instructions for installation:

  1. Download zip.
  2. Unzip zip.
  3. Open the oriondev-gk12 folder and run editor.bat.
  4. Open an example file to get started!

This zip is for Windows, but if anyone wants to use this on a Unix system, feel free to use our libraries: msl (Mike’s Standard Library) and osl (Orion’s Standard Library).  The libraries are located in: oriondev-gk12/compiler/include

RobotC Gamepad Library

Controlling an NXT with a gamepad in RobotC isn’t too difficult, but it does require some more advanced programming techniques.  Most programmers end up creating some helper functions and variables that they copy and paste throughout their projects, and here is my “library”.

LINK

Gamepad Layout

Example:

//RobotC Joystick Helper Example Source
//    Created By:        Mike Moss
//    Modified On:       01/09/2013

//Joystick Helper Code
#include "joystick_helper.c"

//Our Program
task main()
{
    //Do Forever
    while(true)
    {
        //Get Gamepad States
        js_update();

        //Move motor A with the X button on joystick 1
        if(js_button(1,js_x)==true)
        {
            motor[motorA]=100;
        }

        //Move motor C with the up arrow on the dpad (0 is up, 2 is right, 4 is down, etc...)
        if(js_dpad(0))
        {
            motor[motorC]=100;
        }

        //Move motor B based on the Y axis on the left thumb stick
        //    of joystick 2 with a deadzone of 15
        motor[motorB]=js_lthumb_y(2,15);
    }
}

Two Kinects: surprisingly functional!

The Kinect is a really handy sensor for robotics.  We’ve been talking about having one Kinect mounted on the wall to impose a simple global coordinate system, and separate obstacle-detection Kinects mounted on each robot.

Surprisingly, multiple Kinects don’t seem to interfere with each other too badly–the PrimeSense dot detection is smart enough to preferentially recognize its own dots over the dots from the other Kinect.

Image from a Kinect, with a second Kinect active on the left side of the frame. Despite the bright glare from the second Kinect’s IR emitter, there is almost no interference in the depth image.