From b44b00a72d8030d5378a93efc27b6b75e1590d8a Mon Sep 17 00:00:00 2001
From: Farid Alijani <farid.alijani@student.lut.fi>
Date: Thu, 17 Mar 2016 20:06:06 +0100
Subject: [PATCH] ML started: DeepCL Q-learning scenario, random PID gains, docking timer

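Groundwork for machine-learning-driven docking. Adds a DeepCL Q-learning
grid-world example (ScenarioImage + learnScenarioImage), standalone practice
programs (Q-learning, random gain generation, a stopwatch timer), and reworks
VisionControl so the PID gains become per-instance members initialised with
random values on each run, with a docking-duration timer and a RandomPose()
stub for future trials.
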
---
 .../CamMark/camtomar/include/VisionControl.h  |  21 +-
 .../CamMark/camtomar/include/VisionControl.h~ |   4 +-
 .../CamMark/camtomar/src/VisionControl.cpp    | 109 +++++++--
 .../CamMark/camtomar/src/VisionControl.cpp~   |  20 +-
 .../Machine_Learning/DeepCL/ScenarioImage.cpp | 218 ++++++++++++++++++
 .../DeepCL/ScenarioImage.cpp~                 | 216 +++++++++++++++++
 .../Machine_Learning/DeepCL/ScenarioImage.h   |  68 ++++++
 .../DeepCL/learnScenarioImage.cpp             |  55 +++++
 .../DeepCL/learnScenarioImage.cpp~            |  54 +++++
 MobileRobot/Machine_Learning/Practice/a.out   | Bin 0 -> 14235 bytes
 .../Machine_Learning/Practice/rand_func.cpp   |  38 +++
 .../Machine_Learning/Practice/rand_func.cpp~  |  38 +++
 MobileRobot/Machine_Learning/Practice/random  | Bin 0 -> 8536 bytes
 .../Practice/reinforcement_Learning.cpp       | 188 +++++++++++++++
 .../Practice/reinforcement_Learning.cpp~      | 189 +++++++++++++++
 MobileRobot/Machine_Learning/Practice/timer   | Bin 0 -> 9241 bytes
 .../Machine_Learning/Practice/timer.cpp       |  49 ++++
 .../Machine_Learning/Practice/timer.cpp~      |  49 ++++
 18 files changed, 1280 insertions(+), 36 deletions(-)
 create mode 100644 MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp
 create mode 100644 MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~
 create mode 100644 MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h
 create mode 100644 MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp
 create mode 100644 MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~
 create mode 100755 MobileRobot/Machine_Learning/Practice/a.out
 create mode 100644 MobileRobot/Machine_Learning/Practice/rand_func.cpp
 create mode 100644 MobileRobot/Machine_Learning/Practice/rand_func.cpp~
 create mode 100755 MobileRobot/Machine_Learning/Practice/random
 create mode 100644 MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp
 create mode 100644 MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~
 create mode 100755 MobileRobot/Machine_Learning/Practice/timer
 create mode 100644 MobileRobot/Machine_Learning/Practice/timer.cpp
 create mode 100644 MobileRobot/Machine_Learning/Practice/timer.cpp~

diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h
index cc66ff08..61091070 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h
@@ -118,12 +118,14 @@ public:
   void createTrackbars();
   
   void myhandler(int value);
-  
-        static double Pos_Px,Pos_Ix,Pos_Dx;
-	static double Pos_Py,Pos_Iy,Pos_Dy;
+  void RandomPose();
+
+        double Pos_Px,Pos_Ix,Pos_Dx;
+        double Pos_Py,Pos_Iy,Pos_Dy;
+
+        double S_Ang_P,S_Ang_I,S_Ang_D;
+        double L_Ang_P,L_Ang_I,L_Ang_D;
 
-        static double S_Ang_P,S_Ang_I,S_Ang_D;
-        static double L_Ang_P,L_Ang_I,L_Ang_D;
 	static double safety_margin_X;
         
         // ---- CONTROLL PARAMETERS ------ //
@@ -156,6 +158,15 @@ public:
         // ---- CONTROLL PARAMETERS ------ //
         
         
+	// ---- TIMER PARAMETERS ------ //
+
+	static int msec,sec,min,hr;
+
+
+	// ---- TIMER PARAMETERS ------ //
+
+
+
         static double zeroMin,zeroMax;
 	static double Py_eps,Pz_eps,A_eps;  	
 
diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~ b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~
index 42d85258..cc66ff08 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~
@@ -122,11 +122,9 @@ public:
         static double Pos_Px,Pos_Ix,Pos_Dx;
 	static double Pos_Py,Pos_Iy,Pos_Dy;
 
-	static double S_yPos_P,S_yPos_I,S_yPos_D;
-	static double L_yPos_P,L_yPos_I,L_yPos_D;
-
         static double S_Ang_P,S_Ang_I,S_Ang_D;
         static double L_Ang_P,L_Ang_I,L_Ang_D;
+	static double safety_margin_X;
         
         // ---- CONTROLL PARAMETERS ------ //
         static double prev_errorX;
diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp
index bda465fb..4d3213f5 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp
@@ -4,6 +4,10 @@
 #include <array>
 #include <math.h>
 #include <unistd.h>
+#include <cstdlib>
+#include <stdio.h>      /* printf, scanf, puts, NULL */
+#include <stdlib.h>     /* srand, rand */
+#include <time.h>       /* time */
 
 #include <aruco/aruco.h>
 #include <aruco/cvdrawingutils.h>
@@ -41,21 +45,13 @@
 using namespace cv;
 using namespace aruco;
 using namespace std;
-double ImageConverter::Pos_Px = .08;
-double ImageConverter::Pos_Ix = 0.0025;
-double ImageConverter::Pos_Dx = 0;
 
-double ImageConverter::Pos_Py = 5 * ImageConverter::Pos_Px;
-double ImageConverter::Pos_Iy = 5 * ImageConverter::Pos_Ix;
-double ImageConverter::Pos_Dy = 5 * ImageConverter::Pos_Dx;
 
-double ImageConverter::S_Ang_P = .2 * ImageConverter::Pos_Px;
-double ImageConverter::S_Ang_I = .2 * ImageConverter::Pos_Ix;
-double ImageConverter::S_Ang_D = .2 * ImageConverter::Pos_Dx;
 
-double ImageConverter::L_Ang_P = .8 * ImageConverter::Pos_Px;
-double ImageConverter::L_Ang_I = .8 * ImageConverter::Pos_Ix;
-double ImageConverter::L_Ang_D = .8 * ImageConverter::Pos_Dx;
+/*double ImageConverter::Pos_Px = .1;
+double ImageConverter::Pos_Ix = 0.0028;
+double ImageConverter::Pos_Dx = 0;*/
+
 
 float ImageConverter::TheMarkerSize = .1; // Default marker size
 
@@ -105,6 +101,18 @@ double ImageConverter::d_termYAW = 0;
 double ImageConverter::control_signalYAW;
 // ---- CONTROLL PARAMETERS ------ //
 
+
+// ---- TIMER PARAMETERS ------ //
+
+int ImageConverter::msec = 0;
+int ImageConverter::sec = 0;
+int ImageConverter::min = 0;
+int ImageConverter::hr = 0;
+
+
+// ---- TIMER PARAMETERS ------ //
+
+
 double ImageConverter::zeroMax = .0000000000000000001;
 double ImageConverter::zeroMin = -.0000000000000000001;
 
@@ -121,12 +129,32 @@ double ImageConverter::safety_margin_X = .15; // 10 cm
     // subscribe to input video feed and publish output video feed
     //image_sub_ = it_.subscribe("/ar_follow/image", 1, &ImageConverter::imageCb, this);
     
+    /* initialize random seed: */
+    srand (time(NULL));
+
+
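+    // PID gains are now drawn uniformly at random on each run
+    // (Kp in [0, .12], Ki in [0, .01], Kd in [0, .001]); the Y gains and
+    // the small/large angle gains are scaled from the X gains as before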
+    Pos_Px = ((double) rand() / (RAND_MAX)) * .12;
+    Pos_Ix = ((double) rand() / (RAND_MAX)) * .01;
+    Pos_Dx = ((double) rand() / (RAND_MAX)) * .001;
+
+    Pos_Py = 5 * Pos_Px;
+    Pos_Iy = 5 * Pos_Ix;
+    Pos_Dy = 5 * Pos_Dx;
+
+    S_Ang_P = .2 * Pos_Px;
+    S_Ang_I = .2 * Pos_Ix;
+    S_Ang_D = .2 * Pos_Dx;
+
+    L_Ang_P = .8 * Pos_Px;
+    L_Ang_I = .8 * Pos_Ix;
+    L_Ang_D = .8 * Pos_Dx;
+
      //marSub = nh_.subscribe("/Marker_pose",1,&ImageConverter::marCB,this);
      
     // Publish pose message and buffer up to 100 messages
     MarPose_pub = nh_.advertise<geometry_msgs::PoseStamped>("/marker_pose", 100);
     
-    
+    //Move2RandPos = false;
     keepMoving = true;
     commandPub = nh_.advertise<geometry_msgs::Twist>("/base_controller/command",1);
     Sub = nh_.subscribe("/marker_pose",1,&ImageConverter::camCB,this);
@@ -137,11 +165,11 @@ double ImageConverter::safety_margin_X = .15; // 10 cm
    //nh_.shutdown();
    //destroyWindow("Preview");
   }
-
   Mat ImageConverter::getCurrentImage() 
   {
         return img;
   }
+
 void ImageConverter::cvTackBarEvents(int value,void* ptr)
 {
     ImageConverter* ic = (ImageConverter*)(ptr);
@@ -262,6 +290,22 @@ void ImageConverter::ProgStart(int argc,char** argv)
 	int index=0;
 	// Capture until press ESC or until the end of the video
 	while ((key != 'x') && (key!=27) && TheVideoCapturer.grab() && ros::ok() && keepMoving){
+                
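+		// crude stopwatch: each loop pass sleeps 100 ms (usleep below),
+		// so "msec" actually counts tenths of a second before rolling over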
+		if(msec == 10)
+                {
+                        ++sec;
+                        msec = 0;
+                }
+                if(sec == 60)
+                {
+                        ++min;
+                        sec = 0; 
+                }
+                if(min == 60)
+                {
+                        ++hr;
+                        min = 0;
+                }
 
                 ros::spinOnce();
 
@@ -286,7 +330,9 @@ void ImageConverter::ProgStart(int argc,char** argv)
 			}else
 			{
 			        found = false;
-				move2docking(-control_signalX, -control_signalY, control_signalYAW);
+				keepMoving = false;
+				ROS_INFO("SORRY, BUT MARKER IS LOST ... \n");					
+				//move2docking(-control_signalX, -control_signalY, control_signalYAW);
 			}
 			
 			if (ros::ok() && found)
@@ -414,6 +460,9 @@ void ImageConverter::ProgStart(int argc,char** argv)
 		if (key == ' '){
 			update_images = !update_images;
 		}
+	
+        ++msec;
+        usleep(100000);
 	}
 }
 
@@ -435,9 +484,10 @@ void ImageConverter::camCB(const geometry_msgs::PoseStamped::ConstPtr& CamFB)
 {
 // Ref. Values
 RefPose[0] = -.0957;
-RefPose[1] = -0.0193642;
-RefPose[2] = 0.307922;
-RefPose[3] = 0.705028;
+
+RefPose[1] = -0.0194805;
+RefPose[2] = 0.308654;
+RefPose[3] = 0.702366;
 
 camPose[0] = CamFB->pose.position.x;
 camPose[1] = CamFB->pose.position.y;
@@ -455,8 +505,17 @@ camPose[3] = CamFB->pose.orientation.x;
         
         // correspondingly ... 
         // roll in Marker coordinate => yaw in Robot coordinate
+        ROS_INFO_STREAM(" --------------------- TIMER ------------------ \n");
+	ROS_INFO_STREAM("Docking duration : "<< hr << " : " << min << " : " << sec << " . " << msec << "\n");
+
+
+	ROS_INFO_STREAM(" --------------------- GAINS ------------------ \n");
+	ROS_INFO_STREAM(" Kp = " << Pos_Px << " . \n");
+	ROS_INFO_STREAM(" Ki = " << Pos_Ix << " . \n");
+	ROS_INFO_STREAM(" Kd = " << Pos_Dx << " . \n");
         
-        ROS_INFO_STREAM(" Zmar = " << camPose[2] << " m. \n");
+	ROS_INFO_STREAM(" --------------------- Pose estimation ------------------ \n");
+	ROS_INFO_STREAM(" Zmar = " << camPose[2] << " m. \n");
         ROS_INFO_STREAM(" Zref = " << RefPose[2] << " m. \n");
         
         ROS_INFO_STREAM(" Ymar = " << camPose[1] << " m. \n");
@@ -506,7 +565,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         // -----------------X--------------------- //
         
 	if(abs(RefX - MarPoseX) > Pz_eps)
-	{	
+	{			
 		// e(t) = setpoint - actual value;
         	curr_errorX = RefX - MarPoseX;
         
@@ -580,7 +639,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         		d_termY = S_yPos_D * diffY;
 		}*/
 		
-		ROS_INFO_STREAM("prop_gainY = " << p_termY << ", integ_gainY = " << i_termY << " . \n");	      
+		//ROS_INFO_STREAM("prop_gainY = " << p_termY << ", integ_gainY = " << i_termY << " . \n");	      
         	// control signal
         	control_signalY = p_termY + i_termY + d_termY;
         	
@@ -709,3 +768,11 @@ void ImageConverter::move2docking(double VelX_est, double VelY_est, double omega
 	ROS_INFO_STREAM(" Current ESTIMATED speed of robot: \n" << msg << ".\n");
 }
 // ---- Controller part -------- END ------
+
+void ImageConverter::RandomPose()
+{
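+	// stub: intended to drive the robot to a random starting pose
+	// between docking trials (not implemented yet)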
+
+
+
+}
+
diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~ b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~
index 42ce8d40..0ef07a00 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~
@@ -4,6 +4,7 @@
 #include <array>
 #include <math.h>
 #include <unistd.h>
+#include <cstdlib>
 
 #include <aruco/aruco.h>
 #include <aruco/cvdrawingutils.h>
@@ -41,9 +42,14 @@
 using namespace cv;
 using namespace aruco;
 using namespace std;
-double ImageConverter::Pos_Px = .05;
-double ImageConverter::Pos_Ix = 0.002;
-double ImageConverter::Pos_Dx = 0;
+
+double ImageConverter::Pos_Px = ((double) rand() / (RAND_MAX)) * .1;
+double ImageConverter::Pos_Ix = ((double) rand() / (RAND_MAX)) * .01;
+double ImageConverter::Pos_Dx = ((double) rand() / (RAND_MAX)) * .001;
+
+/*double ImageConverter::Pos_Px = .08;
+double ImageConverter::Pos_Ix = 0.0025;
+double ImageConverter::Pos_Dx = 0;*/
 
 double ImageConverter::Pos_Py = 5 * ImageConverter::Pos_Px;
 double ImageConverter::Pos_Iy = 5 * ImageConverter::Pos_Ix;
@@ -474,7 +480,7 @@ camPose[3] = CamFB->pose.orientation.x;
             	)
         	{
                 
-                        ROS_INFO("--------/*******//----- Dock is completed ! -----/*********---- \n ");                        
+                        ROS_INFO("----/*****//----- Dock is completed successfully ! -----/******---- \n ");                        
 			keepMoving = false;
                  // to make sure that y & theta are within the threshold...
         	} else if (abs(RefPose[2] - camPose[2]) <= safety_margin_X)
@@ -492,7 +498,7 @@ camPose[3] = CamFB->pose.orientation.x;
 				(abs(RefPose[3] - abs(camPose[3])) <= A_eps)				
 				)
 			{
-				ROS_INFO("Y & THETA HAVE FIXED SUCCESSFULLY, MOVING STRAIGHT AHEAD ... \n");
+				ROS_INFO("y & theta have been fixed successfully, MOVING STRAIGHT AHEAD ... \n");
 				Controller(RefPose[2], camPose[2], RefPose[1], RefPose[1], RefPose[3], RefPose[3],.01);
 			}
 		}else
@@ -625,7 +631,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         		p_termYAW = S_Ang_P * curr_errorYAW;
         		i_termYAW = S_Ang_I * int_errorYAW;
         		d_termYAW = S_Ang_D * diffYAW;
-			ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
+			//ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
         	} else if (curr_errorYAW > ((CV_PI/180) * yaw_offset) || curr_errorYAW < ((CV_PI/180) * -yaw_offset)) //  err > +5 or err < -5
 		{
 			ROS_INFO_STREAM(" robot is OUTSIDE Reference boundry of " << yaw_offset << " deg. \n");			
@@ -633,7 +639,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         		p_termYAW = L_Ang_P * curr_errorYAW;
         		i_termYAW = L_Ang_I * int_errorYAW;
         		d_termYAW = L_Ang_D * diffYAW;
-			ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
+			//ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
 		}
         	// control signal
         	control_signalYAW = p_termYAW + i_termYAW + d_termYAW;
diff --git a/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp
new file mode 100644
index 00000000..f38c5438
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp
@@ -0,0 +1,218 @@
+#include <vector>
+#include <stdexcept>
+
+#include "ScenarioImage.h"
+
+#include "util/stringhelper.h"
+#include "qlearning/array_helper.h"
+
+#include "net/NeuralNet.h"
+
+using namespace std;
+
+#undef STATIC
+#define STATIC
+#undef VIRTUAL
+#define VIRTUAL
+
+ScenarioImage::ScenarioImage( int size, bool appleMoves ) :
+        net(0), // this is simply used by the showQRepresentation method
+        size(size),
+        appleMoves(appleMoves) {
+//    size = 7;
+    posX = 1;
+    posY = 1;
+    appleX = 2;
+    appleY = 2;
+    game = 0;
+    numMoves = 0;
+//    appleMoves = true; // does apple move when reset?
+
+    reset();
+    print();
+}
+void ScenarioImage::setNet( NeuralNet *net ) 
+{
+    this->net = net;
+}
+void ScenarioImage::printQRepresentation() 
+{
+    ScenarioImage *scenario = this;
+    cout << "q directions:" << endl;
+    int size = scenario->getPerceptionSize();
+    float *input = new float[ size * size * 2 ];
+    arrayZero( input, size * size * 2 );
+    input[ scenario->appleY * size + scenario->appleX ] = 1;
+    
+    for( int y = 0; y < size; y++ )
+    {
+        string thisLine = "";
+        for( int x = 0; x < size; x++ )
+        {
+            float highestQ = 0;
+            int bestAction = 0;
+            input[ size * size + y * size + x ] = 1;
+            net->forward( input );
+            input[ size * size + y * size + x ] = 0;
+            float const*output = net->getOutput();
+            
+            for( int action = 0; action < 4; action++ )
+            {
+                float thisQ = output[action];
+                if( action == 0 || thisQ > highestQ ) {
+                    highestQ = thisQ;
+                    bestAction = action;
+                }
+            }
+            
+            if( bestAction == 0 )
+            {
+                thisLine += ">";
+            } else if( bestAction == 1 )
+            {  
+                thisLine += "<";
+            } else if( bestAction == 2 )
+            {
+                thisLine += "V";
+            } else
+            {
+                thisLine += "^";
+            }
+        }
+        cout << thisLine << endl;
+    }
+    delete[]input;
+}
+
+VIRTUAL void ScenarioImage::print() 
+{
+    for( int y = 0; y < size; y++ )
+    {
+        string line = "";
+        for( int x = 0; x < size; x++ )
+        {
+            if( x == posX && y == posY )
+            {
+                line += "X";
+            } else if( x == appleX && y == appleY )
+            {
+                line += "O";
+            } else
+            {
+                line += ".";
+            }
+        }
+        cout << line << endl;
+    }
+}
+
+VIRTUAL ScenarioImage::~ScenarioImage() 
+{
+
+}
+VIRTUAL int ScenarioImage::getNumActions()
+{
+    return 4;
+}
+
+VIRTUAL float ScenarioImage::act( int index )
+{ // returns reward
+    numMoves++;
+    int dx = 0;
+    int dy = 0;
+    
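+    // action encoding: 0 = +x (right), 1 = -x (left), 2 = +y (down), 3 = -y (up)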
+    switch( index ) {
+        case 0:
+            dx = 1;
+            break;
+        case 1:
+            dx = -1;
+            break;
+        case 2:
+            dy = 1;
+            break;
+        case 3:
+            dy = -1;
+            break;
+    }
+    int newX = posX + dx;
+    int newY = posY + dy;
+    
+    if( newX < 0 || newX >= size || newY < 0 || newY >= size )
+    {
+        return -0.5f;
+    }
+    
+    if( newX == appleX && newY == appleY )
+    {
+        finished = true;
+        posX = newX;
+        posY = newY;
+        return 1;
+    } else 
+    {
+        posX = newX;
+        posY = newY;
+        return -0.1f;
+    }
+}
+
+VIRTUAL bool ScenarioImage::hasFinished()
+{
+    return finished;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionSize()
+{
+    return size;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionPlanes()
+{
+    return 2;
+}
+
+VIRTUAL void ScenarioImage::getPerception( float *perception )
+{
+    for( int i = 0; i < size * size * 2; i++ )
+    {
+        perception[i] = 0;
+    }
+    perception[appleY * size + appleX] = 1;
+    perception[size * size + posY * size + posX] = 1; 
+}
+
+VIRTUAL int ScenarioImage::getWorldSize()
+{
+    return size;
+}
+
+VIRTUAL void ScenarioImage::reset()
+{
+    if( net != 0 )
+    {
+        this->print();
+        this->printQRepresentation();
+        cout << "game: " << game << " moves: " << numMoves << endl;
+    }
+    if( appleMoves )
+    {
+        appleX = myrand() % size;
+        appleY = myrand() % size;
+    } else
+    {
+        appleX = appleY = size / 2;
+    }
+    finished = false;
+    bool sampledOnce = false;
+    
+    while( !sampledOnce || ( posX == appleX && posY == appleY ) )
+    {
+        posX = myrand() % size;
+        posY = myrand() % size;
+        sampledOnce = true;
+    }
+    
+    game++;
+    numMoves = 0;
+}
diff --git a/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~
new file mode 100644
index 00000000..aaae2c46
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~
@@ -0,0 +1,216 @@
+#include <vector>
+#include <stdexcept>
+
+#include "ScenarioImage.h"
+
+#include "util/stringhelper.h"
+#include "qlearning/array_helper.h"
+
+#include "net/NeuralNet.h"
+
+using namespace std;
+
+#undef STATIC
+#define STATIC
+#undef VIRTUAL
+#define VIRTUAL
+
+ScenarioImage::ScenarioImage( int size, bool appleMoves ) :
+        net(0), // this is simply used by the showQRepresentation method
+        size(size),
+        appleMoves(appleMoves) {
+//    size = 7;
+    posX = 1;
+    posY = 1;
+    appleX = 2;
+    appleY = 2;
+    game = 0;
+    numMoves = 0;
+//    appleMoves = true; // does apple move when reset?
+
+    reset();
+    print();
+}
+void ScenarioImage::setNet( NeuralNet *net ) 
+{
+    this->net = net;
+}
+void ScenarioImage::printQRepresentation() 
+{
+    ScenarioImage *scenario = this;
+    cout << "q directions:" << endl;
+    int size = scenario->getPerceptionSize();
+    float *input = new float[ size * size * 2 ];
+    arrayZero( input, size * size * 2 );
+    input[ scenario->appleY * size + scenario->appleX ] = 1;
+    
+    for( int y = 0; y < size; y++ )
+    {
+        string thisLine = "";
+        for( int x = 0; x < size; x++ )
+        {
+            float highestQ = 0;
+            int bestAction = 0;
+            input[ size * size + y * size + x ] = 1;
+            net->forward( input );
+            input[ size * size + y * size + x ] = 0;
+            float const*output = net->getOutput();
+            
+            for( int action = 0; action < 4; action++ )
+            {
+                float thisQ = output[action];
+                if( action == 0 || thisQ > highestQ ) {
+                    highestQ = thisQ;
+                    bestAction = action;
+                }
+            }
+            
+            if( bestAction == 0 )
+            {
+                thisLine += ">";
+            } else if( bestAction == 1 )
+            {  
+                thisLine += "<";
+            } else if( bestAction == 2 )
+            {
+                thisLine += "V";
+            } else
+            {
+                thisLine += "^";
+            }
+        }
+        cout << thisLine << endl;
+    }
+    delete[]input;
+}
+
+VIRTUAL void ScenarioImage::print() 
+{
+    for( int y = 0; y < size; y++ )
+    {
+        string line = "";
+        for( int x = 0; x < size; x++ )
+        {
+            if( x == posX && y == posY )
+            {
+                line += "X";
+            } else if( x == appleX && y == appleY )
+            {
+                line += "O";
+            } else
+            {
+                line += ".";
+            }
+        }
+        cout << line << endl;
+    }
+}
+
+VIRTUAL ScenarioImage::~ScenarioImage() 
+{
+
+}
+VIRTUAL int ScenarioImage::getNumActions()
+{
+    return 4;
+}
+
+VIRTUAL float ScenarioImage::act( int index )
+{ // returns reward
+    numMoves++;
+    int dx = 0;
+    int dy = 0;
+    
+    switch( index ) {
+        case 0:
+            dx = 1;
+            break;
+        case 1:
+            dx = -1;
+            break;
+        case 2:
+            dy = 1;
+            break;
+        case 3:
+            dy = -1;
+            break;
+    }
+    int newX = posX + dx;
+    int newY = posY + dy;
+    
+    if( newX < 0 || newX >= size || newY < 0 || newY >= size )
+    {
+        return -0.5f;
+    }
+    
+    if( newX == appleX && newY == appleY )
+    {
+        finished = true;
+        posX = newX;
+        posY = newY;
+        return 1;
+    } else 
+    {
+        posX = newX;
+        posY = newY;
+        return -0.1f;
+    }
+}
+
+VIRTUAL bool ScenarioImage::hasFinished()
+{
+    return finished;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionSize()
+{
+    return size;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionPlanes()
+{
+    return 2;
+}
+
+VIRTUAL void ScenarioImage::getPerception( float *perception )
+{
+    for( int i = 0; i < size * size * 2; i++ )
+    {
+        perception[i] = 0;
+    }
+    perception[appleY * size + appleX] = 1;
+    perception[size * size + posY * size + posX] = 1; 
+}
+
+VIRTUAL int ScenarioImage::getWorldSize()
+{
+    return size;
+}
+
+VIRTUAL void ScenarioImage::reset()
+{
+    if( net != 0 )
+    {
+        this->print();
+        this->printQRepresentation();
+        cout << "game: " << game << " moves: " << numMoves << endl;
+    }
+    if( appleMoves )
+    {
+        appleX = myrand() % size;
+        appleY = myrand() % size;
+    } else
+    {
+        appleX = appleY = size / 2;
+    }
+    finished = false;
+    bool sampledOnce = false;
+    while( !sampledOnce || ( posX == appleX && posY == appleY ) )
+    {
+        posX = myrand() % size;
+        posY = myrand() % size;
+        sampledOnce = true;
+    }
+    game++;
+    numMoves = 0;
+}
diff --git a/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h
new file mode 100644
index 00000000..a9136e6e
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h
@@ -0,0 +1,68 @@
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <random>
+
+#include "qlearning/Scenario.h"
+
+class NeuralNet;
+
+#define STATIC static
+#define VIRTUAL virtual
+
+// represents a small square world, with one apple,
+// that might be always in the centre (if appleMoves is false)
+// or in a random location (if appleMoves is true)
+// - our hero is at location (posX,posY)
+// - apple is at (appleX,appleY)
+// - size of world is 'size' (world is square)
+// two output planes are provided in the perception:
+// - plane 0 shows where the apple is (0 everywhere, except 1 at the apple)
+// - plane 1 shows where our hero is (0 everywhere, except 1 at the hero)
+// rewards given:
+// - hit wall: -0.5
+// - get apple: +1.0
+// - make a move: -0.1
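+// e.g. print() for size = 3, hero at (0,1), apple at (2,2) shows:
+//     ...
+//     X..
+//     ..O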
+class ScenarioImage : public Scenario {
+public:
+    NeuralNet *net;
+    const int size;
+    bool appleMoves; // does apple move when reset?
+
+    int posX;
+    int posY;
+    int appleX;
+    int appleY;
+    int game;
+    int numMoves;
+
+    int width;
+    int height;
+    bool finished;
+    std::mt19937 myrand;
+
+//    char**world;
+    
+    // [[[cog
+    // import cog_addheaders
+    // cog_addheaders.add()
+    // ]]]
+    // generated, using cog:
+    ScenarioImage( int size, bool appleMoves );
+    void setNet( NeuralNet *net );
+    void printQRepresentation();
+    VIRTUAL void print();
+    VIRTUAL ~ScenarioImage();
+    VIRTUAL int getNumActions();
+    VIRTUAL float act( int index );  // returns reward
+    VIRTUAL bool hasFinished();
+    VIRTUAL int getPerceptionSize();
+    VIRTUAL int getPerceptionPlanes();
+    VIRTUAL void getPerception( float *perception );
+    VIRTUAL int getWorldSize();
+    VIRTUAL void reset();
+
+    // [[[end]]]
+};
+
+
diff --git a/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp
new file mode 100644
index 00000000..5b98260a
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp
@@ -0,0 +1,55 @@
+#include <iostream>
+#include <random>
+//#include <thread>
+//#include <chrono>
+#include <vector>
+
+#include "ScenarioImage.h"
+#include "DeepCL.h"
+#include "qlearning/array_helper.h"
+#include "qlearning/QLearner.h"
+
+using namespace std;
+
+int main( int argc, char *argv[] )
+{
+//    ScenarioImage scenario;
+
+    ScenarioImage *scenario = new ScenarioImage( 5, true);
+
+    EasyCL *cl = new EasyCL();
+    NeuralNet *net = new NeuralNet( cl );
+    SGD *sgd = SGD::instance( cl, 0.1f, 0.0f );
+
+    const int size = scenario->getPerceptionSize();
+    const int planes = scenario->getPerceptionPlanes();
+    const int numActions = scenario->getNumActions();
+    
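+    // network: two padded 5x5 conv layers (8 filters each, relu), a
+    // 100-unit tanh fully-connected layer, then one linear output per
+    // action; the square loss makes this a regression on Q-values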
+    net->addLayer( InputLayerMaker::instance()->numPlanes(planes)->imageSize(size) );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(100)->biased() );
+    net->addLayer( ActivationMaker::instance()->tanh() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(numActions)->biased() );
+    net->addLayer( SquareLossMaker::instance() );
+    net->print();
+
+    scenario->setNet( net ); // used by the printQRepresentation method
+
+    QLearner qLearner( sgd, scenario, net );
+    qLearner.run();
+    
+//    delete[] expectedOutputs;
+//    delete[] lastPerception;
+//    delete[] perception;
+    delete sgd;
+    delete net;
+    delete scenario;
+    delete cl;
+    
+    return 0;
+}
+
+
diff --git a/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~ b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~
new file mode 100644
index 00000000..137526fe
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~
@@ -0,0 +1,54 @@
+#include <iostream>
+#include <random>
+//#include <thread>
+//#include <chrono>
+#include <vector>
+
+#include "ScenarioImage.h"
+#include "DeepCL.h"
+#include "qlearning/array_helper.h"
+#include "qlearning/QLearner.h"
+
+using namespace std;
+
+int main( int argc, char *argv[] )
+{
+//    ScenarioImage scenario;
+
+    ScenarioImage *scenario = new ScenarioImage( 5, true);
+
+    EasyCL *cl = new EasyCL();
+    NeuralNet *net = new NeuralNet( cl );
+    SGD *sgd = SGD::instance( cl, 0.1f, 0.0f );
+
+    const int size = scenario->getPerceptionSize();
+    const int planes = scenario->getPerceptionPlanes();
+    const int numActions = scenario->getNumActions();
+    net->addLayer( InputLayerMaker::instance()->numPlanes(planes)->imageSize(size) );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(100)->biased() );
+        net->addLayer( ActivationMaker::instance()->tanh() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(numActions)->biased() );
+    net->addLayer( SquareLossMaker::instance() );
+    net->print();
+
+    scenario->setNet( net ); // used by the printQRepresentation method
+
+    QLearner qLearner( sgd, scenario, net );
+    qLearner.run();
+    
+//    delete[] expectedOutputs;
+//    delete[] lastPerception;
+//    delete[] perception;
+    delete sgd;
+    delete net;
+    delete scenario;
+    delete cl;
+    
+    return 0;
+}
+
+
diff --git a/MobileRobot/Machine_Learning/Practice/a.out b/MobileRobot/Machine_Learning/Practice/a.out
new file mode 100755
index 0000000000000000000000000000000000000000..eed4cf56c11fae15fd8de4cb91d53e0026281bd3
GIT binary patch
literal 14235
zcmb<-^>JfjWMqH=CI&kO5U+s40W1U|85lB@z+5olz+l0^$>6{s&mhCV#=yY9%D}(?
zQ|AC>!RQ|#!x$JCU^EBV1O^6X1_lNe1_lNTCWwFq6T}1<Edvo|fYDH6z-|NC2bD&#
z86<W`5JWOCz-R^r1+V}}Kgg{gWWij953&hh28?z9If#LQfdg(hL?6g`AbkpQVC4)7
zau9tm`T)pa1_lNg4f7w!Zy+23GJt`BAq46_80`WvjsZr4)PjTpo|dG5*c13cJO+jd
z{E%>g(IpUJ1{e)e3la)^T9N{CCx}f9hPg8cY9Fp}5rDcMMnk>Jpr4bOWM-nDlcJlG
znO9n&TVY|QYi6QXoUdmDj#mK&25?-t`-Os?12P8WJ_%^Ni!eaa07$-8KUk1ST<wa#
z?3`V5q~4ge$O`0vqJtM|21p+Rg8%~~Bgj++29P)(0|SHN-!GgRu2+3TXYe?KEx|%e
zWnf?s!Xgeb2@=DolJX3&6oV?R%!u8bSRCR3IK=1UaE~hv^+Gtr-EfE-<1pVFhx+SK
z^#;)V4)b*}RQv-wB>lm}yBQc51R10l926n>Kme8?ax#;OOHz`xwLxjXj3GWgJvTou
zzPKc@s3bm~A>OMjJ}5Okv$!O+$T=smxHz?#AwDX&B&XOjIk?2oFgYW!D88g9F|(xD
zGuhQOD7eJXI4Q9>GdVuLxTGjGG1oIBKExp2HP|dZxWqI*IJKmlAwJ44xWpnezc@ZA
zu{hPlGcU8m#n813Y_3UierX9<MSf1PYbIDMr`VG?tCB<FU4u>I1H6;L`b<*uQgTRk
zCfxD>x1iws;CNTC0GfN9LGI1TOiG3XT5(ZgUJ64=W^O7&e0*|6VtisrYDH!VLwr0)
z4Ky%w6EpJ|+<iQqoa2r3jPy()?1+eXV?7g4EP-+w0|OHSGXpc21hF8PiGhiMk%5uH
znh}(nAnCkRDw7kG6Z4_*2$BaeLHPq@T8ZW)4v=ZpAVCHOh6nPHcn9T4h)p0>4Dj>-
zDn6k_7eqn<qMjX^mmz`-3>-+}urdrJCx9dlG7BUI!V*a0AiF_gAgq8S4k{BsVj!%6
zBo4A0BnH9;NaC<E5F~DaB+dyEfMN$EaZtGf6=v{266b;mGB7X%Ac=D$iANxb^B{?P
zG{50^A<W3Y@L~~&sAXqh@Mu25aTuoLzp0mm0>gh*9|;8pet8Fm|EeH<21xqlga7~k
z|5tUAP+-Ua#m>tMV7?WI4~m+X2f%zI5Fb>WzT5!jYk~NnAb+_4%vS>OK|%d;0+=ra
z;)8<tWdoQm1mc5&@MQs*&jsRxg6?Gkn9l^_gM#d30GR(vT!A426eKSl!2C}jJ}4+&
z8vOtN-|($x=fyaW&PN{2Pd)^Mcpg8%!0*7|ar|HbABZ|0!s)>9!u<dL|6e9d0ELZ5
zH>;(%0)t2EffA<w7r;R?0U3mPbUqCRDQvK9kW^q`DD~bgAeX@4(al;VsledT_?iR6
zJNTQ~qxm(Xhvki8l{9{N7f>J{VD#u@4V6@2_%Cu4tf-sS9i+wbM$w~Vtj3ZG42&S_
zqGJ!IrKRcd%eR2SoWY~n_JN230|Ut35}_R+F_7ChV)skPB{2Md5PNvS1dtraO&TxW
zGBGe5cfA2p^;-Y9>m4vdyxaAT$Bb^4)&_+HhR)+J${86LnqBWOHrL)^<)3$;<v{7Z
zX4e~x&9yf;A>3ofT_1pSzHDLu8TSOlejVNI`otsoM7Qe$kK_v;ofkY94|p&_$P?W%
ztqDpA4EsT``l6JDfuZx*eo%q^BJSV+|DE?dI*-5D^7sG$=GrGL_5Ax1_#GG^((gfn
zu1^@7Yo9<}GHYKFT=pVZ_5ova?E{D`e{25#|NoIxtpEG}|8dtBAosm&V_;zDc71{H
zC~KpH0>g3FHz3(=))EN?hJBz^+j;y2s14ZZ`lg#TUP1xxRN=q>|L;oyrH#(xFT(!*
z|KIKUrjylHLV=;1)m%b>q4@_3By=|KONTpA52OiHm4UqI2zKua#^%}=F!yHv|NkEn
zXU#`6qGJz(V#=fWjY4Pb8?f>B#1$A`=!0C&dRZJ~2P^*+P=vfb?)nGh?$?|zK<&$B
z*FTKSwSOR{mcBU&HU|{r68k`^_JeZIi(Cc<2C%O>#T6L3L*IBLpTZJTEUgO^5*S(!
zR9JP(CV&*WzUe&vf|H4Xq4Qw3>l=^c4<5;vQKR=mCl5G!!7kPTWsc?}5~!Z(EPZjz
z^*<>5yF=e}Uer9;d7}9MW2ftv7e0Uf|L0%t`sEY9)`3s_0=zZM3=E(6;|_k}*EzuK
z`l8eIM>p#YF_43Q5O6TdG1vc${M%grH-BJkIZ(Q^+w}(|AUvAi2<!)i!;AmF|Nrki
z{$j;%P_p;|5_;_f3rVeR*FWI&`vWsnj<bM02-1KW)Tf(We=s)J{(y%3W{8EIwQo8<
zbhv`OeBAX5NZT>je+<W6Ay%e!yZ*3t{ZhjLq3eRWU4MXWeuHUqH!NtscqD%Shd)~0
z`p^w=o8`eeu5Q;KovvSwx&C8Bbyh$BHrIdPP$>Zi5-2J_0o8JVzcu;)|NjlOFBnQ#
zyIsG$R_b>Bg3~Dwi{Ob=ptJT*H>-^(C~p4XiyKJP3c7yicKriRkDbR~T>ba|KSW%B
zcOoMw$|Dbc;*W6s0E%}=D1xG`v-VGC>5t~xKa8bXjkQ1i|7Tz*RowqYDS_d|<e&fl
z?*O$1cV#FfFzf?~fil^gpa1`#1gHOfAl3UpS^CA=|Ns9(+zoQG>mN{|2raZdI&)NR
z_;jYIJn-n%E$3HY@aa{}08yPeDldFGOH@Aibk?Z+@aT+Dx!}?K#==MQsbl9~&(3ci
z2mdmAc3yvR`p5tO9<6UnWDIYE>O-H-Up}4RTsq%7{ujC9(R`TE!}4|ME069N6%LQ?
z92Ehd?i3XXpY9wL1)uH`6%C*68WjVN<|7t|`Q;lx3FY%)sKcNXs6+teWDbvR)*E~Z
z3?9ut{+CGZ{vn&d0IfG(bNs*H(aj8!DE$bnAs}_ti~OJe|ARyhgZe(8Dwu&mhk=Vh
zM}ff<WEZGdm^BjwUfP2yNRSu=KX8PJGfFdCgZg%$wuZou|Nkd2FfbhX`Tswt4$1rd
z|39cxwe9!+{|bx@4Ay`C|My^IVA%iX|9?;mz~Jxy{~e4B48s5Z|6jq#z)<$@|Nj$=
z3=G@;{r~@hk%8g#KS=5K;NSoM983%hp!(bZ;!Flme+=Xn#;PC&#tH#OX&!ct35*O3
zDhv$ZK6u6V|Nk$56z~bS@kw~`bC+{8FxX33YZ<G6M+QLsbWmTG```cn;P#CppFkUv
zGcTJv4?C!_05T_qfq~)4kN^Kyff5N!-nE&Tc>yCt5!9|81_p+-U;qDmfb4^*V{!(o
z0hzOgfq|j#*Z=>m$nyTp%uIz~B_L6-o3Ai1FueZt|3A3qhMQ9lRs?b{sHE`!{r`U<
zNC#X#2QDwd$iT4R_y7NoQRMHz<xLnF7?S_||DTK^9|xBYVPs%f_UHe9a1RP*el=Jc
z?7k9428LsQ{{I(6)*k?o2l;afBLl;QKmY%Cg5(2xSlig*`5c<r`<eQfds&zw_(0(b
zinl$C3=9^3|Nn<ZB}nxs9u0xf5Eu=C(GVC7fzc2c4S~@R7!85Z5Eu;s1~)!%Z-fCf
zGz-G8AOW#K7{-UupspH-jZTB+5J2MKz8(VuLj!c42-I%{iF-ijEhRx51_t<it2C4k
z>broLU;q8j2hlU2^P`|4OAz0K1>#OnpBcnI0hNdKo7<rd0u3yK<U#X_AR6440x_T%
zH1G{#zJShK!1_p_{xe7%)PDlepaE_WEe;|W7#I@RAojz=4WN9OyA7b~Km*?(Cd~eS
z{~`YQzzWg-AIgW>`vJ<o0F?*nr3Z%1SEJjt4H|F9p!78;{R~QfgVJo!`9U!#tp=sd
zptKv54ujHZP#WDnbUr8=L2(3%cTgCCCMJ;8fYKx~2FW4wL2Q^9XnX<0hhdmFG9M;}
zPJ`6I#9-#Y_%Jz;xVy8nm4ZfSQfXdEslHEUUSd(cf{C7)p0TcBDOk`{6T)X;&?~OY
zElEsb&?_z}g3uW-R%TvFYEc1$US57ls-B~hr*27NI+T~5SE`p(nwgWLo0-C(2jXQU
z7H2T%rBvn>SLQ<Kk|KyqS!z*nW_}(DC%%Y5uP8Mq5u^diD#$5e(96urEMd?qNv$Yh
z&`ZnA%Vf|i%1=owNo3GV&4^DcO3Y1-&qygk@E|(k6N`!xD;e~XOY)0~8T3*hR912k
z*dd9znaK=#>G>sKLJw>Y#LT4PVg|kB{M_8sJkV$sC?qMvpmGD0RzPV4WD;zh2x#sA
z#0QP%fM^&7sfDpYv@!z&11K-U^uyL+z}9D=uLVF)hlXhSOQ7XZ3DiK)cn!!N7)I9*
z8XNoe|9?JIIRmV`n*kMol_#Kl05Sts&Vbk;?8m^s04nRC${Aqg;R&cf1JuKy@)E>^
z>4%j|VbJv!P~{A;^3?<CP*`~ivI~SkW`JlIj$>c|)fsU2!^*D<PzS@p56WeL*#o1q
z85kHqWh0CaD<3~VH9$&B2Jm_!kQ$i#VfI%+*NuSWKw_}+*Z|~V1_lOP;Ro|SXbceK
zRhWKQ`JMpP533JA&H>eFF#BQp`=ImJQ04IYt^t&&7#J8}>OgJ-VUS%Q8iqk*c%U$a
z>0baXR~JC_gU423iWwmD^B^v49)1d%{jl{}1yG0HfEf(ZgrpzJWtfGgA67qmKqu>w
z=gvWT(A~cPsvj195zzWQ0;=BvqzMT_odp+Jfu<j}o(Z<j3B8m+w|^U&ehvXhis66;
z&=pYN!fj(<K)3$@0|NudkD$5+qztz13%2eFT|GMg4Ag#@e$a#f$ebV0^<+?IfP}yp
zUH@e?{h%3VkU|dVx-vrgZ=vZ2%{GJdOF;Fb+lS7-&j86=F!#gS1F&^%u=Q)OWCYU>
za~Eh*7Zk)W{jhZ)4?qj1K<nl}jzPjO{V@71n)_kxu@_MN6-atPOqf0x{S8fj0(3n^
z0<_>k&p%LMkWpX^n!W{@1#`axbp4WnB3PPX2gq{}CYVIm&&UXx(?OIoU`3#E2dZBS
zCIF>j#=&UNv@y(Xs1h(01=Y_5QwSD?*ac;RDUdWQ?|}uNga*{&9U#v!Fff3U18A}d
z%7vFppmr)Y`yUuV4BCyB;Xw95WC)YY42%r0`WRU?X#Ekao<$XBW`Nb7sNyW}dJ$Ee
z6<*(=inGD%F;sDO23Y-sD$c<Gt9MYvIT>K}392|3ydQun&J8dBQN?)}VC6chI4=XN
zyhau0V}O;zsN(z#u<{jETmZfv6jfZ10ahNOiVMN}U8v&146yPGRa}GtRxY86i!#8<
z8&q*I_<C4WadG%MF;sC023WpF6_;dy<!4lJ#5zM%aZqazSp<?_m>75&=AiXEK>3b^
z;p2Z))sQ(&Mh47q0A(je2F!2()o+XpnBm~c09qZ#%WweA-e_=nCB+b+1X=$E%THNQ
z@d&6mET4kPKal$kI3e{K%)PkW4{FbV+|K}Aj|@{k5p2F7!wo@Hb3p4(vAKUW)cpd`
z`W$A?L2!EuGu%K+m>3zP7(ny?Ahj@my@r~@1|463*)9z(H~ATIpytDh8C9rw0aP3o
z4*F2>3aB_ty*(o&oMGb^uzr3FSUqOCDgrx92qT?0L&ahBKgbUtJPm9PW_q5_2wEqJ
znLa^t*n$j_44^e!ApNlRAE+L|w%+(Wc>OIe18B7yNIk49zYi5hFCU(P?FFes#b0rV
zgH{X)F<_P_{7l%_Et}vF4+e|#GA!nW#2|QXn}LBL4lIu977(ujDh^s10^-B)T(CGV
zgFg?%9Pk`F0|UcausBQ=g8sw=T33uH_h9kP$P98nFT+1Bh&kXj0SpWbyv(3BGG@F;
zLDiq;hNuVS6Ob(i%pmin7(nadKzz`;3J`4s6^FG4VC$JZLE=o344}m>AayVt1{N1#
z04*|viGk>3sJ#Wy^bcz9gT#wL>KUXMK0xz3%wG*)_ka9v0BwhW)PisiSiK|zXjK@9
z4|8W9*nE%(DqaXR=Lgh$nCvnf>W_fMc^TZG;R9Yn!N9<9gPDOriGi0P8tQ&f9|&aD
zW2pKA(0Bx|g#eA`fyEgZK#T1_>R{M{1r%Q}9)k8_0mT=le*<x-pNB(y16Z7w0klpO
zWI8Axg6OSaaV+(6a$-&n(pq$e<f4+|lG3y^y<~>?c;^uRpm-n8;E?!uhWL0F6rPte
zl2l55e0olPQesX#cnBmuv9y9AIX|}`C$%IsMbFIC*pLAv5uce-0pb}mKt@zjN^^57
zVfMKCxqz*43v%>zjfe3-*1*^ZTfpNjU|Wh(GxO5&i;`18Ln-k-sfk5-nR)4Y$pr=A
z#pym4@$s4Y;K7~vC=<hY&?51Y%;b2GNM>SAW>sQIW`16LVqQvoN@{UQQE4)mZ;%OE
z6YpbM7@S#^$`BvrW160ro0|w?85(4kq!xj-6*I)U`}jLK`ozZ<mnOw~!b}Y=Nh|>^
z;D<{3ySW9shQx<BI{CPQ!VzRQWZ)=1H7|uB9;^*20NxP55bqM{=jiL{%m5lK0y(*;
zgaM>0GcgBdc9dT*c<H`_19+`FbbUK?C46#mX*_rk3#ylx-3Ac#n0p35Yw)pJ0(NRV
zXt_O98oJ&-KFX{pwLGyXB@<<pKDr{Tn-idlV7`emG=O?Lvnmy`bAgOq3TTFU`o|ZS
zq-5sBmlmg{FvLfhrxs)u=clBCoQo9R@ll4R>8T|_iFqmcxsKpuk(tR52u@Yt?GtFm
z#-|kL$7dwwrR0EiCxG(`EZu^4CxGGv<QazK(xRf&yb^FkGQ`Ix6&FKeDTo2Q!2xP6
zEHuGM1RC40y%JC*h+r~F&dAR%PIb(KI2008CdH{G<)CzIo|{;anOmBhnFMh?SsOM`
n?F8+_fQrB!3JN^11r+RBftm^R8EC5qR0L`ZD7i6!GYkU&Nq#De

literal 0
HcmV?d00001

diff --git a/MobileRobot/Machine_Learning/Practice/rand_func.cpp b/MobileRobot/Machine_Learning/Practice/rand_func.cpp
new file mode 100644
index 00000000..ef870dc1
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/rand_func.cpp
@@ -0,0 +1,38 @@
+/* rand example: guess the number */
+#include <stdio.h>      /* printf, scanf, puts, NULL */
+#include <stdlib.h>     /* srand, rand */
+#include <time.h>       /* time */
+#include <cstdlib>
+
+
+
+int main ()
+{
+  double  Kp, Ki,Kd;
+
+  /* initialize random seed: */
+  srand (time(NULL));
+
+  /* generate secret number between 0 and 1: */
+  Kp = ((double) rand() / (RAND_MAX)) * .12;
+  Ki = ((double) rand() / (RAND_MAX)) * .01;
+  Kd = ((double) rand() / (RAND_MAX)) * .001;
+  
+  printf("Kp : %1.12f \n",Kp);
+  printf("Ki : %1.12f \n",Ki);
+  printf("Kd : %1.12f \n",Kd);
+  /*do {
+    printf ("Guess the number (0 to 1): ");
+    scanf ("%lf",&iGuess);
+    if (iSecret<iGuess) puts ("The secret number is lower");
+    else if (iSecret>iGuess) puts ("The secret number is higher");
+  } while (iSecret!=iGuess);
+
+  puts ("Congratulations!");*/
+  
+  
+  
+  
+  
+  return 0;
+}
diff --git a/MobileRobot/Machine_Learning/Practice/rand_func.cpp~ b/MobileRobot/Machine_Learning/Practice/rand_func.cpp~
new file mode 100644
index 00000000..d66d2a1c
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/rand_func.cpp~
@@ -0,0 +1,38 @@
+/* rand example: guess the number */
+#include <stdio.h>      /* printf, scanf, puts, NULL */
+#include <stdlib.h>     /* srand, rand */
+#include <time.h>       /* time */
+#include <cstdlib>
+
+
+
+int main ()
+{
+  double  Kp, Ki,Kd;
+
+  /* initialize random seed: */
+  srand (time(NULL));
+
+  /* generate secret number between 0 and 1: */
+  Kp = ((double) rand() / (RAND_MAX)) * .2;
+  Ki = ((double) rand() / (RAND_MAX)) * .01;
+  Kd = ((double) rand() / (RAND_MAX)) * .001;
+  
+  printf("Kp : %1.12f \n",Kp);
+  printf("Ki : %1.12f \n",Ki);
+  printf("Kd : %1.12f \n",Kd);
+  /*do {
+    printf ("Guess the number (0 to 1): ");
+    scanf ("%lf",&iGuess);
+    if (iSecret<iGuess) puts ("The secret number is lower");
+    else if (iSecret>iGuess) puts ("The secret number is higher");
+  } while (iSecret!=iGuess);
+
+  puts ("Congratulations!");*/
+  
+  
+  
+  
+  
+  return 0;
+}
diff --git a/MobileRobot/Machine_Learning/Practice/random b/MobileRobot/Machine_Learning/Practice/random
new file mode 100755
index 0000000000000000000000000000000000000000..e82aa5005931fe2ebc04c8922a3cad522a214cf0
GIT binary patch
literal 8536
zcmb<-^>JfjWMqH=CI&kO5bptt16T+`GB6|vg1KPAfx&`-lfi*OjzOA%je&uIm4Sf)
zrp^J%g3&)fhA}WOz-SJz2@DL(3=9k`3=9kwOb`JJCWr|zS_UG_0HdMCfZYbN4=Rmf
zGe~R;)B+gIz@PvU2I~X4Rf7-AWzgVD05f2;0Ms1{P#UHW#0BXKfa(i?>Vwe}pbmr4
zQ0u{d1NkomWB>yLLkQG=FxmxdI|BoZ2B`%J1w1WD0kI#jgLn)K57;5$0;6>x!VEAP
zq!uI;__QPi<W3Nq7z}e~5Y#?g;c^4&KNt<=GU(@ICYhP&=cMT7WagDt=vG*m>6)47
z73b?2f&DJPzyJ<&cfU}ub3nlWa-Re=-bEN7X#gZ&?D(cSpn_9C>YK7`VEgT=El(cV
zg6x4|umMC8stgPaLRi#-!V8<Y6b|(uyCEqD)i974HglwLn8OespPrka7hhbGSX2@p
z&ybUulne?bGlt@##Jm)SlFZyxhJvEZyplAA_;`>kRB3KvW*&pPkEfG!ypf)fo+$$Z
z0}~iRXhsG`22dD-Fvx|aQkk3}89NY%fq~%xKO}vF!W@=Hz)}JbaZvIC`4KF}z`($P
zBo2xzn79CvI4IA;#3hi#k@KShk~qk2n0k-qHykf!GcYi`&|+j@cyW}8fx)Bs2*=_7
zrdiwy4F6SgxD^=q<sBIQtAh9$AZ;%n{Qv*|ziJY<0z(GKsV^^p`B5M~$cZlxfcZfn
zKFDb=H-PzGAU?>;FBkm(|KISfXXnK@kIqLP%}+iAgm@l5V8HIc;Boxm0yYqJ{0OT9
z!;9(v|Ns9oVFJjF9^I@bxfB>YS`U;k{l5Tq;sj(6>e2Z$7^JYl_6L^&14F6zZU?>u
z29IvmPatiLuPs2lgTI+QnqM<|Sl%dBN#mDy0eSWSqemy}SuO>J{~|}hin>|%gS1%Q
zD0*~^bv2g)10%?~=-9()X=!@=@-3h+VDM<RUBIEhzyPwhL}&*{4CFSB*!>=S2@L-q
z#2%h70VL<q{6=9vNcP36fB*k?9)EH9-~azF-u?Uk{}aE~flvGbyq6dm7(VgG9R#t@
zf!UD<Kk*B?zIbs0N!1##3`EriFdL%k!;3{os@lLZ5LJ`FY>28KpZEn_U+ka2?!d4E
z6lE`T!I}hIKkT0e7n6jF{n)<-F2?%r|9`OOP98=M?Fkb+I&)NR_;jYIJn-n%EoN6>
z@aa{}0a2YfDldFGOH@Aibk?Z+@aT+Dx!}?K#==MQsbl9~&(3ci2mdmAc3ywc{P+KV
zkJh&(GKRN7>C30{mrv(6m(KT&|3&V2G#_U4uzX$m%A-3*g~OveM@7J=J4Hppr#nYQ
z!Kb@KMZ>4NM#aFR`H00~e)$Ga@P0;fA1Lg>>A|C$^#+>)gGcj^|0R;ULDdR0-d}V4
zzu?i$43a4Q2#sM#T6?h`<Tg+)0F{-XB+bC!U7%p4plYaRXq2YF#o(QZ!b?Hsfm{Q^
zpj>d@afe*%>%blR?5joAxj(FGvH!F3<0apu0DDGhW@}K{2g(o&{`~*%!N9=q<nRCg
z1q=)f2mby4KY@XPA>cnW@EI5wK;<&XRK}_x2F3~jMrj^)4p7~u!oUEoiv#}r{|_!f
z_ypYeB)s^!%Q+et?4_)=j8&9C`atzHs7}lI`~UwrkbonfKpT@YFB{WI9(E1}29P-^
z3=9k!pi~Ny5A0!WV~gi=XlCzc>SOL@VT#}b)lDFG^e`|mO!)WzKO|&8ibwHi2#kin
zXb6mk0KG#1)U*O&n7tr22*da=8dU3p*syjf4~WCSz>op$Px3<f70`ao*MI->K|(X2
z{UcagGJzS=?}4=;cR=O2K#Ca{7`&h~H<SkTD?m(95P^h24MPz31++a3t4k_Cf=Klp
zs67l45(g0s3=AKj_QS+)K>47$38VyOA18>zz`y|G|N9T|&km^i|4=?GTs}bgtDy2w
z?KG!g{Z({}ra|Ln8I;}zrH?`BYfu_ppS!cOm4ZfSQfXdEslHEUUSd(cf{C7)p0TcB
zDOk`{6T)X;&?~OYElEsb&?_z}g3uW-R%TvFYEc1$US57ls-B~hr*27NI+T~5SE`p(
znwgWLo0-C(2jXQU7H2T%rBvn>SLQ<Kk|KyqS!z*nW_}(DC%%Y5uP8Mq5u^diD#$5e
z(96urEMd?qNv$Yh&`ZnA%Vf|i%1=owNo3GV&4^DcO3Y1-&qygk@EG)xOY)0~8T3*h
zR912k*z&~O%wz_=^!ySqp$9e_Vop+VF@s)mer|4RUI}`zk<176D?w=omTqC=Bd~E3
zP<sp6P>@<=zA^&?11Jr{^uxv<VB->?{w%V3P&*NsZ-}P91ey;^paz24bI9t^^@G}i
z-~Rv4hbm`)<?|U(0hs$?`5u<PU}pL;Fff4fI8-?UtQ^<@6<7gP2r4H)euwFY<;O7S
zI0IBU1FT$0fEEm}as+A&c+3LE2GMa03=E(=4AT$Gx2vG~p~FJpF))yNm^v7p&A`9_
zDidIQSh=tRsvj2q=<WxZ1;SO(@e-JRSUGe9svqW0^zeg;gL;vmIDzShm1`fM`YS+|
zK*#VvWi@*E^+CsRK<)!6g_YwOAVZPLJ&+s-Lxn+1Fz!Ut-vBL78lVcneF?Bi2mz}%
zVDuC;{jhP#2T=X6bOh1O03H*8u|f1KH2tu8rvj8I7#J8}@dr{1!cb#DOfX&m)ej55
z4rsmA0o5M?R)rv7;Rh1|rB6`0gz1Nk<H5%FKz%cqS`dwH|28!H1vnt}q5#yQB_K^m
z7%B`GIRMoUjc%lIPlys&I74_04Cwlgqv;2YT!7SCKm(4Deo!9}<bRm`piv8ueh;XA
zbo<cxml+tqW6Uu9uzCzO9t-N5g3N=ZPndp~yY8U5A2u%bfFGm_)4wqNF!~{yepox=
z1ynyIHXvq!NSHnt{R&OL26TK(1L{C11=0$}FnwSSs2u_tn}e2NATbN5eha8ZC<T%N
zV|4vrq4pbqWf25SzZOCW%mT#~hykNP!~U?a2Z_UQ64c^<FaanH(+A~(%Trj|hlzpc
z2hak_0~&F%3=9l<P``rYq0$VX_7yh!HRM667#IxD3`e&gq6$07%)rP1t51<tFf%a0
z$LmqWnHga9AF4PDyk0{UXNA{SsN!t!dI(jVodH(Apo()a!0HWDaZUzUeSj*?1#iEi
zigUx;;i%#~46t$;Rh*XrR^FnD^D)56QB-k$23YxsDlPz@$3PVqWPp`tsNzEKatc*k
zm;qM)po)tyz{(X=aZv_Xd4Vb}1|J_s6&GiK<!@AR2?kicMHQE1faOP2anP&+vIuB=
z4>UT@%fQ2mT4I3m7YoD3|EQ`VV{MEKnBf4*uZ#?s;a~yI=a}K(3Z6IMWiUXqHyW%S
z;eJ@Y$%2YIK+73ec>-!DFfs5lEI=~{m-|8O4v_l|pz~!g|4js&FUYU~T93iR=Yz#D
z-M<>@eg|m13sZj(+%CclxAR~HLJTTs_C5fckC`4`;!ytwtR6Ex2r+`@mv|XKvj`xq
zu;NFF5j10nnND;WL2Z(c|IyQl3D_LWd}IX{2bqkD{TLY-1et^xK$8u~GSN8HSK$!v
z1-l2+owLC1M>ZM4UJ15Wg24cq9w4F&4BJ5B42b*=YquQ5VefUYI4=We+680=s9gx6
zpM%}=@xKR}`EQ`=(aWFDQ1$5H`5&qtG@k=97u4<m(JV}$cmc6cu>=z+9x=;9Ij}gY
zS}-p;F((IUDvu$#sHC{0G%ZapnIS&jImACG-p4aIBtD)YKHdd|=jDtfm69Kyo|B)H
zm=h1~PR1veRxl*z=N9ComZYZWnVA|JGJquFGgB%+JYxn(k2Iw;H@6aIkE@>x*c!JW
zM_<=?7!PC(jE%4b+;;`r0-9}%Pb<w!)=MrZV2F<g^@HORi;5B}<5Tle7~)+b{TzKg
zol#|qL9><&@$NqUPL4kD{%&r;t|9Ruj!r(VAb)~qE0c>$<H7xCQ0EsicgYZ+3gR$8
z#37TL4i3n3nb0tTxfg5@*sgfcv?r=s@H{E1P<%Yde3)fOu>ua4l;ZsOjKsW@oK%K*
zPyhJhl9bH6_|oE3$Yd+JNnpD{o&W_110*cKPGN|TPbw~kS^{zwL%dg6d{An7W^qYs
Vk#kOBadB!f12`NRK!J;DFaSnL&kz6r

literal 0
HcmV?d00001

diff --git a/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp
new file mode 100644
index 00000000..2c86a9f6
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp
@@ -0,0 +1,188 @@
+// Author:		John McCullock
+// Date:		11-05-05
+// Description:	Q-Learning Example 1.
+
+#include <iostream>
+#include <iomanip>
+#include <ctime>
+#include <cstdlib>
+
+using namespace std;
+
+const int qSize = 6;
+const double gamma = 0.8;
+const int iterations = 10;
+int initialStates[qSize] = {1, 3, 5, 2, 4, 0};
+
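+// reward matrix R[state][action]: -1 marks an impossible transition,
+// 0 an allowed move, and 100 the transition into goal state 5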
+int R[qSize][qSize] =  {{-1, -1, -1, -1, 0, -1},
+			{-1, -1, -1, 0, -1, 100},
+			{-1, -1, -1, 0, -1, -1},
+			{-1, 0, 0, -1, 0, -1},
+			{0, -1, -1, 0, -1, 100},
+			{-1, 0, -1, -1, 0, 100}};
+
+int Q[qSize][qSize];
+int currentState;
+
+void episode(int initialState);
+void chooseAnAction();
+int getRandomAction(int upperBound, int lowerBound);
+void initialize();
+int maximum(int state, bool returnIndexOnly);
+int reward(int action);
+
+int main()
+{
+
+	int newState;
+
+	initialize();
+
+    //Perform learning trials starting at all initial states.
+    for(int j = 0; j <= (iterations - 1); j++)
+    {
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+            episode(initialStates[i]);
+	} // i
+    } // j
+
+    //Print out Q matrix.
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+                for(int j = 0; j <= (qSize - 1); j++)
+                {
+                        cout << setw(5) << Q[i][j];
+			if(j < qSize - 1)
+			{
+				cout << ",";
+			}
+		} // j
+        cout << "\n";
+        } // i
+    cout << "\n";
+
+	//Perform tests, starting at all initial states.
+	for(int i = 0; i <= (qSize - 1); i++)
+	{
+                currentState = initialStates[i];
+                newState = 0;
+		do {
+                newState = maximum(currentState, true);
+                cout << currentState << ", ";
+                currentState = newState;
+                } while(currentState < 5);
+                cout << "5" << endl;
+	} // i
+
+    return 0;
+}
+
+void episode(int initialState)
+{
+
+    currentState = initialState;
+
+    //Travel from state to state until goal state is reached.
+	do {
+                chooseAnAction();
+	   } while(currentState != 5);
+
+    //When currentState = 5, run through the set once more
+    //for convergence.
+    for(int i = 0; i <= (qSize - 1); i++)
+    {
+        chooseAnAction();
+    } // i
+}
+
+void chooseAnAction()
+{
+
+	int possibleAction;
+
+    //Randomly choose a possible action connected to the current state.
+    possibleAction = getRandomAction(qSize, 0);
+
+	if(R[currentState][possibleAction] >= 0)
+	{
+                Q[currentState][possibleAction] = reward(possibleAction);
+                currentState = possibleAction;
+	}
+}
+
+int getRandomAction(int upperBound, int lowerBound){
+
+	int action;
+	bool choiceIsValid = false;
+	int range = upperBound - lowerBound; // yields actions in [lowerBound, upperBound - 1]
+
+    //Randomly choose a possible action connected to the current state.
+    do {
+        //Get a random value between 0 and qSize - 1.
+        action = lowerBound + int(range * rand() / (RAND_MAX + 1.0));
+		if(R[currentState][action] > -1)
+		{
+                        choiceIsValid = true;
+		}
+    } while(choiceIsValid == false);
+
+    return action;
+}
+
+void initialize(){
+
+	srand((unsigned)time(0));
+
+    for(int i = 0; i <= (qSize - 1); i++){
+        for(int j = 0; j <= (qSize - 1); j++){
+            Q[i][j] = 0;
+		} // j
+	} // i
+}
+
+int maximum(int state, bool returnIndexOnly)
+{
+// if returnIndexOnly = true, a Q matrix index is returned.
+// if returnIndexOnly = false, a Q matrix element is returned.
+
+	int winner;
+	bool foundNewWinner;
+	bool done = false;
+        winner = 0;
+    
+        do {
+                foundNewWinner = false;
+                for(int i = 0; i <= (qSize - 1); i++)
+                {
+                        if((i < winner) || (i > winner))
+                        {     //Avoid self-comparison.
+		                if(Q[state][i] > Q[state][winner])
+		                {
+                                        winner = i;
+                                        foundNewWinner = true;
+			        }
+		        }
+	        } // i
+
+	        if(foundNewWinner == false)
+	        {
+                        done = true;
+	        }
+
+        } while(done == false);
+
+	if(returnIndexOnly == true)
+	{
+		return winner;
+	}else
+	{
+		return Q[state][winner];
+	}
+}
+
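+// Bellman backup for this deterministic world:
+// Q(state, action) = R(state, action) + gamma * max_a' Q(next, a'),
+// where the action index doubles as the next state in this model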
+int reward(int action){
+				
+    return static_cast<int>(R[currentState][action] + (gamma * maximum(action, false)));
+}
+
diff --git a/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~ b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~
new file mode 100644
index 00000000..004927f3
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~
@@ -0,0 +1,189 @@
+// Author:		John McCullock
+// Date:		11-05-05
+// Description:	Q-Learning Example 1.
+
+#include <iostream>
+#include <iomanip>
+#include <ctime>
+#include <cstdlib>
+
+using namespace std;
+
+const int qSize = 6;
+const double gamma = 0.8;
+const int iterations = 10;
+int initialStates[qSize] = {1, 3, 5, 2, 4, 0};
+
+int R[qSize][qSize] =  {{-1, -1, -1, -1, 0, -1},
+			{-1, -1, -1, 0, -1, 100},
+			{-1, -1, -1, 0, -1, -1},
+			{-1, 0, 0, -1, 0, -1},
+			{0, -1, -1, 0, -1, 100},
+			{-1, 0, -1, -1, 0, 100}};
+
+int Q[qSize][qSize];
+int currentState;
+
+void episode(int initialState);
+void chooseAnAction();
+int getRandomAction(int upperBound, int lowerBound);
+void initialize();
+int maximum(int state, bool returnIndexOnly);
+int reward(int action);
+
+int main()
+{
+
+	int newState;
+
+	initialize();
+
+    //Perform learning trials starting at all initial states.
+    for(int j = 0; j <= (iterations - 1); j++)
+    {
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+            episode(initialStates[i]);
+	} // i
+    } // j
+
+    //Print out Q matrix.
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+                for(int j = 0; j <= (qSize - 1); j++)
+                {
+                        cout << setw(5) << Q[i][j];
+			if(j < qSize - 1)
+			{
+				cout << ",";
+			}
+		} // j
+        cout << "\n";
+        } // i
+    cout << "\n";
+
+	//Perform tests, starting at all initial states.
+	for(int i = 0; i <= (qSize - 1); i++)
+	{
+                currentState = initialStates[i];
+                newState = 0;
+		do {
+                newState = maximum(currentState, true);
+                cout << currentState << ", ";
+                currentState = newState;
+                } while(currentState < 5);
+                cout << "5" << endl;
+	} // i
+
+    return 0;
+}
+
+void episode(int initialState)
+{
+
+    currentState = initialState;
+
+    //Travel from state to state until goal state is reached.
+	do {
+                chooseAnAction();
+	   } while(currentState != 5);
+
+    //When currentState = 5, run through the set once more
+    //for convergence.
+    for(int i = 0; i <= (qSize - 1); i++)
+    {
+        chooseAnAction();
+    } // i
+}
+
+void chooseAnAction()
+{
+
+	int possibleAction;
+
+    //Randomly choose a possible action connected to the current state.
+    possibleAction = getRandomAction(qSize, 0);
+
+	if(R[currentState][possibleAction] >= 0)
+	{
+                Q[currentState][possibleAction] = reward(possibleAction);
+                currentState = possibleAction;
+	}
+}
+
+int getRandomAction(int upperBound, int lowerBound){
+
+	int action;
+	bool choiceIsValid = false;
+	int range = upperBound - lowerBound; // yields actions in [lowerBound, upperBound - 1]
+
+    //Randomly choose a possible action connected to the current state.
+    do {
+        //Get a random value between 0 and qSize - 1.
+        action = lowerBound + int(range * rand() / (RAND_MAX + 1.0));
+		if(R[currentState][action] > -1)
+		{
+                        choiceIsValid = true;
+		}
+    } while(choiceIsValid == false);
+
+    return action;
+}
+
+void initialize(){
+
+	srand((unsigned)time(0));
+
+    for(int i = 0; i <= (qSize - 1); i++){
+        for(int j = 0; j <= (qSize - 1); j++){
+            Q[i][j] = 0;
+		} // j
+	} // i
+}
+
+int maximum(int state, bool returnIndexOnly)
+{
+// if returnIndexOnly = true, a Q matrix index is returned.
+// if returnIndexOnly = false, a Q matrix element is returned.
+
+	int winner;
+	bool foundNewWinner;
+	bool done = false;
+
+        winner = 0;
+    
+        do {
+                foundNewWinner = false;
+                for(int i = 0; i <= (qSize - 1); i++)
+                {
+                        if((i < winner) || (i > winner))
+                        {     //Avoid self-comparison.
+		                if(Q[state][i] > Q[state][winner])
+		                {
+                                        winner = i;
+                                        foundNewWinner = true;
+			        }
+		        }
+	        } // i
+
+	        if(foundNewWinner == false)
+	        {
+                        done = true;
+	        }
+
+        } while(done == false);
+
+	if(returnIndexOnly == true)
+	{
+		return winner;
+	}else
+	{
+		return Q[state][winner];
+	}
+}
+
+int reward(int action){
+				
+    return static_cast<int>(R[currentState][action] + (gamma * maximum(action, false)));
+}
+
diff --git a/MobileRobot/Machine_Learning/Practice/timer b/MobileRobot/Machine_Learning/Practice/timer
new file mode 100755
index 0000000000000000000000000000000000000000..8d748460054d39cbb7a5c5d97dd16e0e895ba64f
GIT binary patch
literal 9241
zcmb<-^>JfjWMqH=CI&kO5HEq<0W1U|85kxAg1KPAfx&`-lfi*Oo<W9zje&uIm4Sf)
zrp^J%g3&)fhA}WOz-SJz2@DL(3=9k`3=9kwOb`JJCWr|zS_UG_0HdMCfZYbN4=Rmf
zGf3<UH;80lfYA&L3Sa?{evn%~@PfGvA9xeM3>cjOb;k^l!@x2SeIQ|wJ_SCoAcF!Q
zL?4Vk05X_?fdNLt{0H(I2#0_SU|?Vff%*?dyFiR%fYBheAfbS#B`F~G3N8?jfnfy~
zBwS!LOg)SSsRgMFd|HwMawmvQ42HQg2x=d$a1nsIA4WsH%b=f=nPg_7pOd1SlbKgq
zp<7{LrfX)RSDdeB1ddk$1_p3kx%-8JodYrk<UR>#yo)eE(f~;Q^5T1E^(TAJVZ7AY
zmtpCAOkLw$9w-_>K>)H3q>q6?fPs+_WGVv#NE{T+ihsXwYPeqY5uL%~47LOd0kT~P
zi#W(6NDQM&$}_-H4668J1_lOf=14MP7iYyG9*@HuAsp&2;ZQ#vDjopMr!dzVL&XKq
z#Jw397z7!l7!m{^=~n<0upmu2nMuVZDaqQ}ptv(*h>uUt&CiQ3E=epZiH~Q9_bQ7I
zN=?r!E=etN&PgmTPAz7LkMavHvB=CXj!#M~PBrn&%Pes*bS-0uj|wg^NzN}V0jtQ*
zDR#{Si{%u12A3EbCTAoT#g`N%W|kCtCcC-@1(z5aCnXkVCdcO&mlUNY=6WWF#JdKY
z#s_#OgY}uD=B4C#CKIPOBtFC--Zj`P9&UMnTTpO*aJ(y60L{J5Aou2ECMCl>om`O^
zpIDMwky*k}TAY)bTEGw=50ZxlL~dec9)r7&r;~HMk)DyBDTEyn5pS$#0t!S>>SkbI
zVqj)q29rz-5D3Zxj10dSK&cm!u1cjcIYB938yfzM3?K-R2bol&If(;gk~viV0Y4<Z
zKzRUS3rH0MJRU)1ArC|dLPF!09h(0jf(#5CNaC<E10*MaBn~nQBnH9~Na7&7L1G}R
zfFusegCH>w)<6;m*$ol{VFM&_SeXM7w?Go-1PMT~1Clr>pF@QiJeuEdytvK4!0;lA
zk%8ewDKi6uNAnSm!~adSI29QFtLktnF!0MeF#J~q@iRc$UOxE$|NnngB~Ar~3{Ysk
zya47)f%u@He0c!O7XtA?#m&nNU_KX!4+^4}3&4CP5FZpYFDHQczc>^aGC)D{vH{Hh
z1mc6d{IUSde+A-$y!tZX|NsAnZ#_FN#(8u;@@RhYAt1!__yGkj2L_Mh2PbfXsN*gi
z4h%2S|NsC0Wx@oIA3eHRJvkH@JX#NwF#W#(_TmI&5bDwSG#I3?!FB_u0s}*-_ih2c
z1O|_8)>WJe3?7ZIIY7LFznMLnUo(1G-Y8Z{<Ck{<1=ayZk51NBP+*B11uN=iEeC0_
zyixS%7;7@80s|w+y6D)$X=!PC{PHcJFl6v(w*A1Wz`y{qw?t?MNDSmQj@bPYd<hKy
zAH*J>Faadz(fmf>xa$Xy{BhSW5b6hn`U9d~Y+ztuXs-RhRm$D$`h^jyw7K?+4M^Yz
zBUDp!?GLcPAI9UZVC~(me>#ut2bEec6#xDIzpnuldY#8#r2PB;zdQ6t=QWSc<1g5e
zWxT*LUm!AX|Nj5KuLY{d04(zXB6Are11dd12AKW(|G(Mw1LOV$ZHx>rB>w&X|I+*a
z|NkJ5?d#xjV1PK_!{7h^V-JG_LB8#*{nJ_cqq+7EW2shS?T`Qe85l|x_csV8FuZ90
z`~UwAPzk&%Kp=r(A4m*j%Y?uG|DOcw-v?5?ACxFxto;u%6duJMojEEud^%H99(eTX
zhO;Oz`1GoJfT+$Kl@~spB`P0$I%`yZcyz|7T<~aqW8tIu)UorgXXiJMgMXPlJFmaE
z{OA9FkJh&(GKRN7`N602mrv(6m(KT&|3&V2G#_U4uzX$m%A-3*g~OveM@7J=J4Hpp
zr#nYQ!Kb@KMZ>4NM#aFR`H00~e)$GaynQ|lcN&-h@;x~Hd33YxU{+x8X#VlPL~{2J
z-UJ3{I(yCW|AI$1Gf1NJBQ!-p()J7czyJS(L=J<>MNk%GU{J79U{KIgVBo?J7#O9Q
ztwC)bP)3vZ^Z!4nRGjno|NjmK28Pmq|NpOGU|{I__y7M11_p*x|Nj4f0V*o~{r}Iw
z$iPtX|NnmjP+&7KFo4<}Ab&Ad1u-yI2rx?XuyagcWMEKXU;wu@H~jwpe*s7VpMV>m
zgcm<|IY$G7y_B_<v5FE%7}OpGxr68L|Nl2Z0*-tFZA{L*paKEJ1<9u{Ffbhb`~SZv
zNB|}uz{3tI6hQKzoVDxk|Nr2Y9ZbF&A-{xyf#DVo{UCQ9VPIgm@%R7#PLTP5J*;hP
z@q7-=?EOrA%)Km35quzfK>mCKY90Uk{~r?4AjPA2q=!HeCpgtHfT}7GhJ^=+4Z<)!
zjD}_#a9bWE2IGU;G$1~#?XiLlVlK3M#gGB*w@88%F)+aUThdTID2;-cU;q8j2hlU2
z{UcCC3*s}dK+J>Xs}oRpSew8Z>JV5P1k|qp=>z9E5Ce)q6+4Le0@{v(<%Js{L8S5k
zRIY%8#6biD1H%PYi2X3}1yDZB-3Cx~u(k`#{(t`={z-tU{}1KE?EL`cyFlfk#?YLC
z^;gj?x(1E6XHfbZlxBnWAH<-v8k9DJ(r!>X3`(P$>+bAqrJxa-RGL>(s_&DTmspgq
zV4`QHXRK>j3Klfggzy;{^olEUOA?b9^omQ0Aan+dm6=zPT2#QGmzQ6Xs^{qBsaukm
z4&|limFgvxX6B^mW~MOcfp{5-#Tg8GDV2G}mAMeQqzEEYmReMtnV*Nki7#T%D@x5t
z1ZjY>3UW#q^fL1@OBnP@QY%Ur^wKi(G8y!W@>3E^5*hSTGvd>V5_41IGg68WJcy3?
z#G<0aN(Q~;lKi4#2E7yrm6cosc1U7wW-^0bdVUF*&;#28F*B*Sm_aW&KQ}iuuLL~^
z$>M|Z7bt7M(i?0%1vbtCs(+9T2B}5nD>E=KfYLlnKWw}LHjaVbX9hKtk@Xv*=`Vri
z+Y+dOp!y40J-U8S-TLkS|9q%&23WZ;11bQ^cd&8-mLFke`Y|vtfXW1@at2traRMq}
z0rfDbTmdm*`eFGu3_30WRn7n_hrojx3=9mgatCG~$P5@8M8`2OfXY_5`(gRq1?pfa
zkRs@q8Hfq92S#TzFff415Evg;jwC?!LrNtE@VFaD4b1%@vp~2CI=%vu1My+y)&i)0
zT;T`vKd5a2iW8WASUGqBsvlNvg1DfvA7(#He;>5p2?}43{jhpr1ylg04kQP{AiF>`
z41?M<pfH6hXE*>Yj}AZuz-=>_Vrah_#08D#faob`_QS?86F`ZYfq{VoRv~~CBk6~7
z8D^pBht*>l(255-3<y&WOBQfG$bBI9!@{ouS`Su0^~1s+rU4dyFg~aqi>x0u?gtwW
z1mzEyS`dvMe%qk#huQxDYX1kQ{VX8EK^mbL-Tng%3=AMY!h#7lJ_;KjL|2c_KLfQN
zrXSQX09o||+Ax5{6G#k%(e+<O(+}#Yg4KZ>g(bew_1{9%59+0Y^h-eXqpL^f-)Dg2
zEtvaZ^(Jim7dFldO^ysO{V;!l`W>JkhUtfm-#p+4xq<<%9>PQLVdlI=b3d%z@&c+K
z)bD}m1<^2lF!~#seh=t)iU-tzPzt0KjA8n~90mpkP-h(^26I2C=M9pJfNInLc@881
z#pv|`BO|D9$H0J|&SCnsp!#4G%s3bgnr4964O0W6o1pe{!4!f7p?-xj!4yaumiEB{
zP$B@@5Ve4o19A)u3`U^x04fhFmq6_~Z1#6RJvtFh5@a9L2!a$d10w^hK1Nmy8Z(5|
zv#8?C46ynWRh$K0FQSUG!s|O!aW+Ifh9tqvz|H`xpOAzZm>D=2VD%2FI41+FK0y`d
zg147Z#km<^<v*%854>DQ73XDumDi}^eDMARsyIIbtb9ck7l6;3po$AJz{*2ZaUppB
z1XWy^0akvYii<G7$|Y2BQ3hCfgDNftpEp7k7iWN#52)f246uBUDlW+Y%g?Cdh;e;X
zaZqazSp<?dnHYE(deGYKpnS)|@bN#YYRH%fBLikQfbuaT17<i_fb%3~IJh!^Mzwhv
zHlW!X4OTD3P#_2?*<kr83o2d#6$h0`Ah%UO%~9ZhwS+*Tpt%jud?Kd%LG2lk`x&6~
zb`aGJ3=_fT3o?A*Mm1+XSRB*+tD*K9K<iVOIS0Y*Elhh)gUyj*0FC{D)WZA)s-HmN
zZv^d!!)ynYYanr0{~Xqjl>?WjnCZa;Y@iTE`gDbg!|H92-5?wRHU~2uCNhHNB{0)f
z4vzV#X0SLf185W*WG<|%oeUL6&%d+4=7UtC;*~hW_kzvA%<qT6;;3eTcrS2>vom3z
zms5m_gGOgT`eE1&Dh}hr=x!!Z+Cr2wu=dt0CeWM>X1FcKVa{%_If#4;n}53m5@(WR
z01Xtw>;%#G!Qvtepuv3*ABNw6&F5tRO&@^7K<#A^{RixxkN+c}^*u-oghB0H0fvwN
zJD~XpBnHB~%-F+68!8SO83(C@;bO2jFM}*J-NMpmJu@gAFzs!Fst3()fXuiFA{ZDL
zy20XD>V@RQoE)UNM~390lH!umv^2eBhWL2r5dWZfAJ5>B_;`l+co!6&mot)7N`8EL
zPJU8iPCU2^9-mlR!H}GvTac4llA5AtW@>E60FsE$OsN3zj2R$3`jpb#+)9`|u6{0H
zYuthyeO=>WJdiaoHo_Kge;;g1NoH<pkzR5^0eJ4x$09yHGao#b5Fce?7!R69EXhoc
z2Z>}R=44hSmSpDV#V6*a#HXYdmlTyIgZT!TnGEsnKK@RQKJoFzrAhIg@!+XPn3%tt
zTd-?Le2Alyk1Hr7K*m9a9^zB;QW)aF=0gR*Q@9NAE|GqYzMjqupz#KfD~gb&XB`~C
zbDGe3Oz4bfa&c)qc;Ex38Yxyl;eu^O7@{6?UKljbiq#UZ<G_A{N<-&W<Kq!Cv1l@w
zrh{gkp^9K$hzc$tW4am55KsU3;*yliy!g`MR8XKI1s!B^7^)uTX4t$pR2b%#_>|)O
z_>9E7l$=ypqJ<?O@YFIWpg|GH5FejZTnr0sa6Cr&1%u|#VN=*>27~M)Yi=Cf9P_-?
V@|?`P)G~(nRFEebAc+m^C;&kMEUf?l

literal 0
HcmV?d00001

diff --git a/MobileRobot/Machine_Learning/Practice/timer.cpp b/MobileRobot/Machine_Learning/Practice/timer.cpp
new file mode 100644
index 00000000..63042889
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/timer.cpp
@@ -0,0 +1,49 @@
+#include <iostream>
+#include <unistd.h>
+
+using namespace std;
+void newline(); 
+
+int main() {
+    
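+// each iteration below sleeps 100 ms, so "msec" counts tenths of a second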
+int msec = 0;
+int sec = 0;
+int min = 0;
+int hr = 0;
+
+
+//cout << "Press any key to start:";
+//char start = _getch();
+
+for (;;)
+{
+        newline();
+                if(msec == 10)
+                {
+                        ++sec;
+                        msec = 0;
+                }
+                if(sec == 60)
+                {
+                        ++min;
+                        sec = 0; 
+                }
+                if(min == 60)
+                {
+                        ++hr;
+                        min = 0;
+                }
+        cout << hr << " : " << min << " : " << sec << " . " << msec << endl;
+        ++msec;
+        usleep(100000); 
+
+}
+
+    return 0;
+}
+
+void newline()
+{
+        cout << "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n";
+}
+
diff --git a/MobileRobot/Machine_Learning/Practice/timer.cpp~ b/MobileRobot/Machine_Learning/Practice/timer.cpp~
new file mode 100644
index 00000000..63042889
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/timer.cpp~
@@ -0,0 +1,49 @@
+#include <iostream>
+#include <unistd.h>
+
+using namespace std;
+void newline(); 
+
+int main() {
+    
+int msec = 0;
+int sec = 0;
+int min = 0;
+int hr = 0;
+
+
+//cout << "Press any key to start:";
+//char start = _getch();
+
+for (;;)
+{
+        newline();
+                if(msec == 10)
+                {
+                        ++sec;
+                        msec = 0;
+                }
+                if(sec == 60)
+                {
+                        ++min;
+                        sec = 0; 
+                }
+                if(min == 60)
+                {
+                        ++hr;
+                        min = 0;
+                }
+        cout << hr << " : " << min << " : " << sec << " . " << msec << endl;
+        ++msec;
+        usleep(100000); 
+
+}
+
+    return 0;
+}
+
+void newline()
+{
+        cout << "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n";
+}
+
-- 
GitLab