diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h
index cc66ff08d4a993d988a747ae93ea07d3a06d07c5..61091070319204d56697eabfcefea319d405babe 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h
@@ -118,12 +118,14 @@ public:
   void createTrackbars();
   
   void myhandler(int value);
-  
-        static double Pos_Px,Pos_Ix,Pos_Dx;
-	static double Pos_Py,Pos_Iy,Pos_Dy;
+  void RandomPose();
+
+        double Pos_Px,Pos_Ix,Pos_Dx;
+        double Pos_Py,Pos_Iy,Pos_Dy;
+
+        double S_Ang_P,S_Ang_I,S_Ang_D;
+        double L_Ang_P,L_Ang_I,L_Ang_D;
 
-        static double S_Ang_P,S_Ang_I,S_Ang_D;
-        static double L_Ang_P,L_Ang_I,L_Ang_D;
 	static double safety_margin_X;
         
         // ---- CONTROLL PARAMETERS ------ //
@@ -156,6 +158,15 @@ public:
         // ---- CONTROLL PARAMETERS ------ //
         
         
+	// ---- TIMER PARAMETERS ------ //
+
+	static int msec,sec,min,hr;
+
+	// ---- TIMER PARAMETERS ------ //
+
         static double zeroMin,zeroMax;
 	static double Py_eps,Pz_eps,A_eps;  	
 
diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~ b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~
index 42d85258a9e57dc334a2749e0fb21d0babb4e1c4..cc66ff08d4a993d988a747ae93ea07d3a06d07c5 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/include/VisionControl.h~
@@ -122,11 +122,9 @@ public:
         static double Pos_Px,Pos_Ix,Pos_Dx;
 	static double Pos_Py,Pos_Iy,Pos_Dy;
 
-	static double S_yPos_P,S_yPos_I,S_yPos_D;
-	static double L_yPos_P,L_yPos_I,L_yPos_D;
-
         static double S_Ang_P,S_Ang_I,S_Ang_D;
         static double L_Ang_P,L_Ang_I,L_Ang_D;
+	static double safety_margin_X;
         
         // ---- CONTROLL PARAMETERS ------ //
         static double prev_errorX;
diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp
index bda465fbd9e44cb96f88861590cb3edaa7dd6b31..4d3213f59baf735993e86baff0420bd40f4f465c 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp
@@ -4,6 +4,10 @@
 #include <array>
 #include <math.h>
 #include <unistd.h>
+#include <cstdio>       /* printf, scanf, puts, NULL */
+#include <cstdlib>      /* srand, rand */
+#include <ctime>        /* time */
 
 #include <aruco/aruco.h>
 #include <aruco/cvdrawingutils.h>
@@ -41,21 +45,13 @@
 using namespace cv;
 using namespace aruco;
 using namespace std;
-double ImageConverter::Pos_Px = .08;
-double ImageConverter::Pos_Ix = 0.0025;
-double ImageConverter::Pos_Dx = 0;
 
-double ImageConverter::Pos_Py = 5 * ImageConverter::Pos_Px;
-double ImageConverter::Pos_Iy = 5 * ImageConverter::Pos_Ix;
-double ImageConverter::Pos_Dy = 5 * ImageConverter::Pos_Dx;
 
-double ImageConverter::S_Ang_P = .2 * ImageConverter::Pos_Px;
-double ImageConverter::S_Ang_I = .2 * ImageConverter::Pos_Ix;
-double ImageConverter::S_Ang_D = .2 * ImageConverter::Pos_Dx;
 
-double ImageConverter::L_Ang_P = .8 * ImageConverter::Pos_Px;
-double ImageConverter::L_Ang_I = .8 * ImageConverter::Pos_Ix;
-double ImageConverter::L_Ang_D = .8 * ImageConverter::Pos_Dx;
+/*double ImageConverter::Pos_Px = .1;
+double ImageConverter::Pos_Ix = 0.0028;
+double ImageConverter::Pos_Dx = 0;*/
+
 
 float ImageConverter::TheMarkerSize = .1; // Default marker size
 
@@ -105,6 +101,18 @@ double ImageConverter::d_termYAW = 0;
 double ImageConverter::control_signalYAW;
 // ---- CONTROLL PARAMETERS ------ //
 
+
+// ---- TIMER PARAMETERS ------ //
+
+int ImageConverter::msec = 0;
+int ImageConverter::sec = 0;
+int ImageConverter::min = 0;
+int ImageConverter::hr = 0;
+
+// ---- TIMER PARAMETERS ------ //
+
 double ImageConverter::zeroMax = .0000000000000000001;
 double ImageConverter::zeroMin = -.0000000000000000001;
 
@@ -121,12 +129,32 @@ double ImageConverter::safety_margin_X = .15; // 10 cm
     // subscribe to input video feed and publish output video feed
     //image_sub_ = it_.subscribe("/ar_follow/image", 1, &ImageConverter::imageCb, this);
     
+    /* initialize random seed: */
+    srand (time(NULL));
+
+    Pos_Px = ((double) rand() / (RAND_MAX)) * .12;
+    Pos_Ix = ((double) rand() / (RAND_MAX)) * .01;
+    Pos_Dx = ((double) rand() / (RAND_MAX)) * .001;
+
+    Pos_Py = 5 * Pos_Px;
+    Pos_Iy = 5 * Pos_Ix;
+    Pos_Dy = 5 * Pos_Dx;
+
+    S_Ang_P = .2 * Pos_Px;
+    S_Ang_I = .2 * Pos_Ix;
+    S_Ang_D = .2 * Pos_Dx;
+
+    L_Ang_P = .8 * Pos_Px;
+    L_Ang_I = .8 * Pos_Ix;
+    L_Ang_D = .8 * Pos_Dx;
+
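+    // Gain relationships: the X-axis gains are drawn uniformly from
+    // [0,.12], [0,.01] and [0,.001]; the Y-axis gains are 5x the X gains,
+    // and the small/large-angle yaw gains reuse the X gains scaled by .2 and .8.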
      //marSub = nh_.subscribe("/Marker_pose",1,&ImageConverter::marCB,this);
      
     // Publish pose message and buffer up to 100 messages
     MarPose_pub = nh_.advertise<geometry_msgs::PoseStamped>("/marker_pose", 100);
     
-    
+    //Move2RandPos = false;
     keepMoving = true;
     commandPub = nh_.advertise<geometry_msgs::Twist>("/base_controller/command",1);
     Sub = nh_.subscribe("/marker_pose",1,&ImageConverter::camCB,this);
@@ -137,11 +165,11 @@ double ImageConverter::safety_margin_X = .15; // 10 cm
    //nh_.shutdown();
    //destroyWindow("Preview");
   }
-
   Mat ImageConverter::getCurrentImage() 
   {
         return img;
   }
+
 void ImageConverter::cvTackBarEvents(int value,void* ptr)
 {
     ImageConverter* ic = (ImageConverter*)(ptr);
@@ -262,6 +290,22 @@ void ImageConverter::ProgStart(int argc,char** argv)
 	int index=0;
 	// Capture until press ESC or until the end of the video
 	while ((key != 'x') && (key!=27) && TheVideoCapturer.grab() && ros::ok() && keepMoving){
+                
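+		// timer bookkeeping: each pass through this loop takes ~100 ms
+		// (see the usleep at the bottom of the loop), so "msec" counts
+		// tenths of a second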
+		if(msec == 10)
+		{
+			++sec;
+			msec = 0;
+		}
+		if(sec == 60)
+		{
+			++min;
+			sec = 0;
+		}
+		if(min == 60)
+		{
+			++hr;
+			min = 0;
+		}
 
                 ros::spinOnce();
 
@@ -286,7 +330,9 @@ void ImageConverter::ProgStart(int argc,char** argv)
 			}else
 			{
 			        found = false;
-				move2docking(-control_signalX, -control_signalY, control_signalYAW);
+				keepMoving = false;
+				ROS_INFO("SORRY, BUT MARKER IS LOST ... \n");					
+				//move2docking(-control_signalX, -control_signalY, control_signalYAW);
 			}
 			
 			if (ros::ok() && found)
@@ -414,6 +460,9 @@ void ImageConverter::ProgStart(int argc,char** argv)
 		if (key == ' '){
 			update_images = !update_images;
 		}
+
+		++msec;
+		usleep(100000);
 	}
 }
 
@@ -435,9 +484,10 @@ void ImageConverter::camCB(const geometry_msgs::PoseStamped::ConstPtr& CamFB)
 {
 // Ref. Values
 RefPose[0] = -.0957;
-RefPose[1] = -0.0193642;
-RefPose[2] = 0.307922;
-RefPose[3] = 0.705028;
+
+RefPose[1] = -0.0194805;
+RefPose[2] = 0.308654;
+RefPose[3] = 0.702366;
 
 camPose[0] = CamFB->pose.position.x;
 camPose[1] = CamFB->pose.position.y;
@@ -455,8 +505,17 @@ camPose[3] = CamFB->pose.orientation.x;
         
         // correspondingly ... 
         // roll in Marker coordinate => yaw in Robot coordinate
+	ROS_INFO_STREAM(" --------------------- TIMER ------------------ \n");
+	ROS_INFO_STREAM("Docking duration : " << hr << " : " << min << " : " << sec << " . " << msec << "\n");
+
+	ROS_INFO_STREAM(" --------------------- GAINS ------------------ \n");
+	ROS_INFO_STREAM(" Kp = " << Pos_Px << " . \n");
+	ROS_INFO_STREAM(" Ki = " << Pos_Ix << " . \n");
+	ROS_INFO_STREAM(" Kd = " << Pos_Dx << " . \n");
         
-        ROS_INFO_STREAM(" Zmar = " << camPose[2] << " m. \n");
+	ROS_INFO_STREAM(" --------------------- Pose estimation ------------------ \n");
+	ROS_INFO_STREAM(" Zmar = " << camPose[2] << " m. \n");
         ROS_INFO_STREAM(" Zref = " << RefPose[2] << " m. \n");
         
         ROS_INFO_STREAM(" Ymar = " << camPose[1] << " m. \n");
@@ -506,7 +565,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         // -----------------X--------------------- //
         
 	if(abs(RefX - MarPoseX) > Pz_eps)
-	{	
+	{			
 		// e(t) = setpoint - actual value;
         	curr_errorX = RefX - MarPoseX;
         
@@ -580,7 +639,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         		d_termY = S_yPos_D * diffY;
 		}*/
 		
-		ROS_INFO_STREAM("prop_gainY = " << p_termY << ", integ_gainY = " << i_termY << " . \n");	      
+		//ROS_INFO_STREAM("prop_gainY = " << p_termY << ", integ_gainY = " << i_termY << " . \n");	      
         	// control signal
         	control_signalY = p_termY + i_termY + d_termY;
         	
@@ -709,3 +768,11 @@ void ImageConverter::move2docking(double VelX_est, double VelY_est, double omega
 	ROS_INFO_STREAM(" Current ESTIMATED speed of robot: \n" << msg << ".\n");
 }
 // ---- Controller part -------- END ------
+
+void ImageConverter::RandomPose()
+{
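+	// Not implemented yet. A minimal sketch (an assumption, mirroring the
+	// constructor's randomization) of what this hook is presumably for:
+	//
+	//	Pos_Px = ((double) rand() / (RAND_MAX)) * .12;
+	//	Pos_Ix = ((double) rand() / (RAND_MAX)) * .01;
+	//	Pos_Dx = ((double) rand() / (RAND_MAX)) * .001;
+	//	Pos_Py = 5 * Pos_Px;   Pos_Iy = 5 * Pos_Ix;   Pos_Dy = 5 * Pos_Dx;
+	//	S_Ang_P = .2 * Pos_Px; S_Ang_I = .2 * Pos_Ix; S_Ang_D = .2 * Pos_Dx;
+	//	L_Ang_P = .8 * Pos_Px; L_Ang_I = .8 * Pos_Ix; L_Ang_D = .8 * Pos_Dx;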
+}
+
diff --git a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~ b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~
index 42ce8d4097e43359ee289a14ea731ef3c3b53add..0ef07a00be5836f2bb5a443c6b7359ee705c228f 100644
--- a/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~
+++ b/MobileRobot/AugReaMarker/CamMark/camtomar/src/VisionControl.cpp~
@@ -4,6 +4,7 @@
 #include <array>
 #include <math.h>
 #include <unistd.h>
+#include <cstdlib>
 
 #include <aruco/aruco.h>
 #include <aruco/cvdrawingutils.h>
@@ -41,9 +42,14 @@
 using namespace cv;
 using namespace aruco;
 using namespace std;
-double ImageConverter::Pos_Px = .05;
-double ImageConverter::Pos_Ix = 0.002;
-double ImageConverter::Pos_Dx = 0;
+
+double ImageConverter::Pos_Px = ((double) rand() / (RAND_MAX)) * .1;
+double ImageConverter::Pos_Ix = ((double) rand() / (RAND_MAX)) * .01;
+double ImageConverter::Pos_Dx = ((double) rand() / (RAND_MAX)) * .001;
+
+/*double ImageConverter::Pos_Px = .08;
+double ImageConverter::Pos_Ix = 0.0025;
+double ImageConverter::Pos_Dx = 0;*/
 
 double ImageConverter::Pos_Py = 5 * ImageConverter::Pos_Px;
 double ImageConverter::Pos_Iy = 5 * ImageConverter::Pos_Ix;
@@ -474,7 +480,7 @@ camPose[3] = CamFB->pose.orientation.x;
             	)
         	{
                 
-                        ROS_INFO("--------/*******//----- Dock is completed ! -----/*********---- \n ");                        
+                        ROS_INFO("----/*****//----- Dock is completed successfully ! -----/******---- \n ");                        
 			keepMoving = false;
                  // to make sure that y & theta are within the threshold...
         	} else if (abs(RefPose[2] - camPose[2]) <= safety_margin_X)
@@ -492,7 +498,7 @@ camPose[3] = CamFB->pose.orientation.x;
 				(abs(RefPose[3] - abs(camPose[3])) <= A_eps)				
 				)
 			{
-				ROS_INFO("Y & THETA HAVE FIXED SUCCESSFULLY, MOVING STRAIGHT AHEAD ... \n");
+				ROS_INFO("y & theta have been fixed successfully, MOVING STRAIGHT AHEAD ... \n");
 				Controller(RefPose[2], camPose[2], RefPose[1], RefPose[1], RefPose[3], RefPose[3],.01);
 			}
 		}else
@@ -625,7 +631,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         		p_termYAW = S_Ang_P * curr_errorYAW;
         		i_termYAW = S_Ang_I * int_errorYAW;
         		d_termYAW = S_Ang_D * diffYAW;
-			ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
+			//ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
         	} else if (curr_errorYAW > ((CV_PI/180) * yaw_offset) || curr_errorYAW < ((CV_PI/180) * -yaw_offset)) //  err > +5 or err < -5
 		{
 			ROS_INFO_STREAM(" robot is OUTSIDE Reference boundry of " << yaw_offset << " deg. \n");			
@@ -633,7 +639,7 @@ void ImageConverter::Controller(double RefX, double MarPoseX, double RefY, doubl
         		p_termYAW = L_Ang_P * curr_errorYAW;
         		i_termYAW = L_Ang_I * int_errorYAW;
         		d_termYAW = L_Ang_D * diffYAW;
-			ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
+			//ROS_INFO_STREAM("prop_gainYAW = " << p_termYAW << ", integ_gainYAW = " << i_termYAW << " . \n");
 		}
         	// control signal
         	control_signalYAW = p_termYAW + i_termYAW + d_termYAW;
diff --git a/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f38c5438c1691b1c654f5f07db1491199100d0f9
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp
@@ -0,0 +1,218 @@
+#include <vector>
+#include <stdexcept>
+
+#include "ScenarioImage.h"
+
+#include "util/stringhelper.h"
+#include "qlearning/array_helper.h"
+
+#include "net/NeuralNet.h"
+
+using namespace std;
+
+#undef STATIC
+#define STATIC
+#undef VIRTUAL
+#define VIRTUAL
+
+ScenarioImage::ScenarioImage( int size, bool appleMoves ) :
+        net(0), // this is simply used by the showQRepresentation method
+        size(size),
+        appleMoves(appleMoves) {
+//    size = 7;
+    posX = 1;
+    posY = 1;
+    appleX = 2;
+    appleY = 2;
+    game = 0;
+    numMoves = 0;
+//    appleMoves = true; // does apple move when reset?
+
+    reset();
+    print();
+}
+void ScenarioImage::setNet( NeuralNet *net ) 
+{
+    this->net = net;
+}
+void ScenarioImage::printQRepresentation() 
+{
+    ScenarioImage *scenario = this;
+    cout << "q directions:" << endl;
+    int size = scenario->getPerceptionSize();
+    float *input = new float[ size * size * 2 ];
+    arrayZero( input, size * size * 2 );
+    input[ scenario->appleY * size + scenario->appleX ] = 1;
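+    // plane 0 carries the apple position; plane 1 is set per candidate
+    // hero cell inside the loops below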
+    
+    for( int y = 0; y < size; y++ )
+    {
+        string thisLine = "";
+        for( int x = 0; x < size; x++ )
+        {
+            float highestQ = 0;
+            int bestAction = 0;
+            input[ size * size + y * size + x ] = 1;
+            net->forward( input );
+            input[ size * size + y * size + x ] = 0;
+            float const*output = net->getOutput();
+            
+            for( int action = 0; action < 4; action++ )
+            {
+                float thisQ = output[action];
+                if( action == 0 || thisQ > highestQ ) {
+                    highestQ = thisQ;
+                    bestAction = action;
+                }
+            }
+            
+            if( bestAction == 0 )
+            {
+                thisLine += ">";
+            } else if( bestAction == 1 )
+            {  
+                thisLine += "<";
+            } else if( bestAction == 2 )
+            {
+                thisLine += "V";
+            } else
+            {
+                thisLine += "^";
+            }
+        }
+        cout << thisLine << endl;
+    }
+    delete[]input;
+}
+
+VIRTUAL void ScenarioImage::print() 
+{
+    for( int y = 0; y < size; y++ )
+    {
+        string line = "";
+        for( int x = 0; x < size; x++ )
+        {
+            if( x == posX && y == posY )
+            {
+                line += "X";
+            } else if( x == appleX && y == appleY )
+            {
+                line += "O";
+            } else
+            {
+                line += ".";
+            }
+        }
+        cout << line << endl;
+    }
+}
+
+VIRTUAL ScenarioImage::~ScenarioImage() 
+{
+
+}
+VIRTUAL int ScenarioImage::getNumActions()
+{
+    return 4;
+}
+
+VIRTUAL float ScenarioImage::act( int index )
+{ // returns reward
+    numMoves++;
+    int dx = 0;
+    int dy = 0;
+    
+    switch( index ) {
+        case 0:
+            dx = 1;
+            break;
+        case 1:
+            dx = -1;
+            break;
+        case 2:
+            dy = 1;
+            break;
+        case 3:
+            dy = -1;
+            break;
+    }
+    int newX = posX + dx;
+    int newY = posY + dy;
+    
+    if( newX < 0 || newX >= size || newY < 0 || newY >= size )
+    {
+        return -0.5f;
+    }
+    
+    if( newX == appleX && newY == appleY )
+    {
+        finished = true;
+        posX = newX;
+        posY = newY;
+        return 1;
+    } else 
+    {
+        posX = newX;
+        posY = newY;
+        return -0.1f;
+    }
+}
+
+VIRTUAL bool ScenarioImage::hasFinished()
+{
+    return finished;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionSize()
+{
+    return size;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionPlanes()
+{
+    return 2;
+}
+
+VIRTUAL void ScenarioImage::getPerception( float *perception )
+{
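+    // plane 0: apple position; plane 1: hero position (one-hot each)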
+    for( int i = 0; i < size * size * 2; i++ )
+    {
+        perception[i] = 0;
+    }
+    perception[appleY * size + appleX] = 1;
+    perception[size * size + posY * size + posX] = 1; 
+}
+
+VIRTUAL int ScenarioImage::getWorldSize()
+{
+    return size;
+}
+
+VIRTUAL void ScenarioImage::reset()
+{
+    if( net != 0 )
+    {
+        this->print();
+        this->printQRepresentation();
+        cout << "game: " << game << " moves: " << numMoves << endl;
+    }
+    if( appleMoves )
+    {
+        appleX = myrand() % size;
+        appleY = myrand() % size;
+    } else
+    {
+        appleX = appleY = size / 2;
+    }
+    finished = false;
+    bool sampledOnce = false;
+    
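+    // resample the hero's start position until it differs from the apple's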
+    while( !sampledOnce || ( posX == appleX && posY == appleY ) )
+    {
+        posX = myrand() % size;
+        posY = myrand() % size;
+        sampledOnce = true;
+    }
+    
+    game++;
+    numMoves = 0;
+}
diff --git a/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~
new file mode 100644
index 0000000000000000000000000000000000000000..aaae2c464443a41d078d164574d8875a55ce8077
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.cpp~
@@ -0,0 +1,216 @@
+#include <vector>
+#include <stdexcept>
+
+#include "ScenarioImage.h"
+
+#include "util/stringhelper.h"
+#include "qlearning/array_helper.h"
+
+#include "net/NeuralNet.h"
+
+using namespace std;
+
+#undef STATIC
+#define STATIC
+#undef VIRTUAL
+#define VIRTUAL
+
+ScenarioImage::ScenarioImage( int size, bool appleMoves ) :
+        net(0), // this is simply used by the showQRepresentation method
+        size(size),
+        appleMoves(appleMoves) {
+//    size = 7;
+    posX = 1;
+    posY = 1;
+    appleX = 2;
+    appleY = 2;
+    game = 0;
+    numMoves = 0;
+//    appleMoves = true; // does apple move when reset?
+
+    reset();
+    print();
+}
+void ScenarioImage::setNet( NeuralNet *net ) 
+{
+    this->net = net;
+}
+void ScenarioImage::printQRepresentation() 
+{
+    ScenarioImage *scenario = this;
+    cout << "q directions:" << endl;
+    int size = scenario->getPerceptionSize();
+    float *input = new float[ size * size * 2 ];
+    arrayZero( input, size * size * 2 );
+    input[ scenario->appleY * size + scenario->appleX ] = 1;
+    
+    for( int y = 0; y < size; y++ )
+    {
+        string thisLine = "";
+        for( int x = 0; x < size; x++ )
+        {
+            float highestQ = 0;
+            int bestAction = 0;
+            input[ size * size + y * size + x ] = 1;
+            net->forward( input );
+            input[ size * size + y * size + x ] = 0;
+            float const*output = net->getOutput();
+            
+            for( int action = 0; action < 4; action++ )
+            {
+                float thisQ = output[action];
+                if( action == 0 || thisQ > highestQ ) {
+                    highestQ = thisQ;
+                    bestAction = action;
+                }
+            }
+            
+            if( bestAction == 0 )
+            {
+                thisLine += ">";
+            } else if( bestAction == 1 )
+            {  
+                thisLine += "<";
+            } else if( bestAction == 2 )
+            {
+                thisLine += "V";
+            } else
+            {
+                thisLine += "^";
+            }
+        }
+        cout << thisLine << endl;
+    }
+    delete[]input;
+}
+
+VIRTUAL void ScenarioImage::print() 
+{
+    for( int y = 0; y < size; y++ )
+    {
+        string line = "";
+        for( int x = 0; x < size; x++ )
+        {
+            if( x == posX && y == posY )
+            {
+                line += "X";
+            } else if( x == appleX && y == appleY )
+            {
+                line += "O";
+            } else
+            {
+                line += ".";
+            }
+        }
+        cout << line << endl;
+    }
+}
+
+VIRTUAL ScenarioImage::~ScenarioImage() 
+{
+
+}
+VIRTUAL int ScenarioImage::getNumActions()
+{
+    return 4;
+}
+
+VIRTUAL float ScenarioImage::act( int index )
+{ // returns reward
+    numMoves++;
+    int dx = 0;
+    int dy = 0;
+    
+    switch( index ) {
+        case 0:
+            dx = 1;
+            break;
+        case 1:
+            dx = -1;
+            break;
+        case 2:
+            dy = 1;
+            break;
+        case 3:
+            dy = -1;
+            break;
+    }
+    int newX = posX + dx;
+    int newY = posY + dy;
+    
+    if( newX < 0 || newX >= size || newY < 0 || newY >= size )
+    {
+        return -0.5f;
+    }
+    
+    if( newX == appleX && newY == appleY )
+    {
+        finished = true;
+        posX = newX;
+        posY = newY;
+        return 1;
+    } else 
+    {
+        posX = newX;
+        posY = newY;
+        return -0.1f;
+    }
+}
+
+VIRTUAL bool ScenarioImage::hasFinished()
+{
+    return finished;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionSize()
+{
+    return size;
+}
+
+VIRTUAL int ScenarioImage::getPerceptionPlanes()
+{
+    return 2;
+}
+
+VIRTUAL void ScenarioImage::getPerception( float *perception )
+{
+    for( int i = 0; i < size * size * 2; i++ )
+    {
+        perception[i] = 0;
+    }
+    perception[appleY * size + appleX] = 1;
+    perception[size * size + posY * size + posX] = 1; 
+}
+
+VIRTUAL int ScenarioImage::getWorldSize()
+{
+    return size;
+}
+
+VIRTUAL void ScenarioImage::reset()
+{
+    if( net != 0 )
+    {
+        this->print();
+        this->printQRepresentation();
+        cout << "game: " << game << " moves: " << numMoves << endl;
+    }
+    if( appleMoves )
+    {
+        appleX = myrand() % size;
+        appleY = myrand() % size;
+    } else
+    {
+        appleX = appleY = size / 2;
+    }
+    finished = false;
+    bool sampledOnce = false;
+    while( !sampledOnce || ( posX == appleX && posY == appleY ) )
+    {
+        posX = myrand() % size;
+        posY = myrand() % size;
+        sampledOnce = true;
+    }
+    game++;
+    numMoves = 0;
+}
diff --git a/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h
new file mode 100644
index 0000000000000000000000000000000000000000..a9136e6e6278b3378fc6fda2876cc71e6366b77f
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/ScenarioImage.h
@@ -0,0 +1,68 @@
+#include <iostream>
+#include <string>
+#include <algorithm>
+#include <random>
+
+#include "qlearning/Scenario.h"
+
+class NeuralNet;
+
+#define STATIC static
+#define VIRTUAL virtual
+
+// represents a small square world, with one apple,
+// which is always in the centre (if appleMoves is false)
+// or at a random location (if appleMoves is true)
+// - our hero is at location (posX,posY)
+// - apple is at (appleX,appleY)
+// - size of world is 'size' (world is square)
+// two input planes are provided in the perception:
+// - plane 0 shows where the apple is (0 everywhere, 1 where the apple is)
+// - plane 1 shows where our hero is (0 everywhere, 1 where the hero is)
+// rewards given:
+// - hit wall: -0.5
+// - get apple: +1.0
+// - make a move: -0.1
+class ScenarioImage : public Scenario {
+public:
+    NeuralNet *net;
+    const int size;
+    bool appleMoves; // does apple move when reset?
+
+    int posX;
+    int posY;
+    int appleX;
+    int appleY;
+    int game;
+    int numMoves;
+
+    int width;
+    int height;
+    bool finished;
+    std::mt19937 myrand;
+
+//    char**world;
+    
+    // [[[cog
+    // import cog_addheaders
+    // cog_addheaders.add()
+    // ]]]
+    // generated, using cog:
+    ScenarioImage( int size, bool appleMoves );
+    void setNet( NeuralNet *net );
+    void printQRepresentation();
+    VIRTUAL void print();
+    VIRTUAL ~ScenarioImage();
+    VIRTUAL int getNumActions();
+    VIRTUAL float act( int index );  // returns reward
+    VIRTUAL bool hasFinished();
+    VIRTUAL int getPerceptionSize();
+    VIRTUAL int getPerceptionPlanes();
+    VIRTUAL void getPerception( float *perception );
+    VIRTUAL int getWorldSize();
+    VIRTUAL void reset();
+
+    // [[[end]]]
+};
+
+
diff --git a/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5b98260a9dbba6c314c7103874209126c206adf7
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp
@@ -0,0 +1,55 @@
+#include <iostream>
+#include <random>
+//#include <thread>
+//#include <chrono>
+#include <vector>
+
+#include "ScenarioImage.h"
+#include "DeepCL.h"
+#include "qlearning/array_helper.h"
+#include "qlearning/QLearner.h"
+
+using namespace std;
+
+int main( int argc, char *argv[] )
+{
+//    ScenarioImage scenario;
+
+    ScenarioImage *scenario = new ScenarioImage( 5, true);
+
+    EasyCL *cl = new EasyCL();
+    NeuralNet *net = new NeuralNet( cl );
+    SGD *sgd = SGD::instance( cl, 0.1f, 0.0f );
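+    // (the two floats are, presumably, the learning rate and momentum)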
+
+    const int size = scenario->getPerceptionSize();
+    const int planes = scenario->getPerceptionPlanes();
+    const int numActions = scenario->getNumActions();
+    
+    net->addLayer( InputLayerMaker::instance()->numPlanes(planes)->imageSize(size) );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(100)->biased() );
+    net->addLayer( ActivationMaker::instance()->tanh() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(numActions)->biased() );
+    net->addLayer( SquareLossMaker::instance() );
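+    // Q-network: 2 perception planes in, two 5x5 conv layers (8 filters
+    // each, relu), a 100-unit tanh layer, then one output per action,
+    // trained against a squared-error loss.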
+    net->print();
+
+    scenario->setNet( net ); // used by the printQRepresentation method
+
+    QLearner qLearner( sgd, scenario, net );
+    qLearner.run();
+    
+//    delete[] expectedOutputs;
+//    delete[] lastPerception;
+//    delete[] perception;
+    delete sgd;
+    delete net;
+    delete scenario;
+    delete cl;
+    
+    return 0;
+}
+
+
diff --git a/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~ b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~
new file mode 100644
index 0000000000000000000000000000000000000000..137526fe824cf16805e05c82d1a6427fa5bdb8a1
--- /dev/null
+++ b/MobileRobot/Machine_Learning/DeepCL/learnScenarioImage.cpp~
@@ -0,0 +1,54 @@
+#include <iostream>
+#include <random>
+//#include <thread>
+//#include <chrono>
+#include <vector>
+
+#include "ScenarioImage.h"
+#include "DeepCL.h"
+#include "qlearning/array_helper.h"
+#include "qlearning/QLearner.h"
+
+using namespace std;
+
+int main( int argc, char *argv[] )
+{
+//    ScenarioImage scenario;
+
+    ScenarioImage *scenario = new ScenarioImage( 5, true);
+
+    EasyCL *cl = new EasyCL();
+    NeuralNet *net = new NeuralNet( cl );
+    SGD *sgd = SGD::instance( cl, 0.1f, 0.0f );
+
+    const int size = scenario->getPerceptionSize();
+    const int planes = scenario->getPerceptionPlanes();
+    const int numActions = scenario->getNumActions();
+    net->addLayer( InputLayerMaker::instance()->numPlanes(planes)->imageSize(size) );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( ConvolutionalMaker::instance()->filterSize(5)->numFilters(8)->biased()->padZeros() );
+    net->addLayer( ActivationMaker::instance()->relu() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(100)->biased() );
+        net->addLayer( ActivationMaker::instance()->tanh() );
+    net->addLayer( FullyConnectedMaker::instance()->imageSize(1)->numPlanes(numActions)->biased() );
+    net->addLayer( SquareLossMaker::instance() );
+    net->print();
+
+    scenario->setNet( net ); // used by the printQRepresentation method
+
+    QLearner qLearner( sgd, scenario, net );
+    qLearner.run();
+    
+//    delete[] expectedOutputs;
+//    delete[] lastPerception;
+//    delete[] perception;
+    delete sgd;
+    delete net;
+    delete scenario;
+    delete cl;
+    
+    return 0;
+}
+
+
diff --git a/MobileRobot/Machine_Learning/Practice/a.out b/MobileRobot/Machine_Learning/Practice/a.out
new file mode 100755
index 0000000000000000000000000000000000000000..eed4cf56c11fae15fd8de4cb91d53e0026281bd3
Binary files /dev/null and b/MobileRobot/Machine_Learning/Practice/a.out differ
diff --git a/MobileRobot/Machine_Learning/Practice/rand_func.cpp b/MobileRobot/Machine_Learning/Practice/rand_func.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ef870dc1c658c671fdfcd8751d96e8327949dd4e
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/rand_func.cpp
@@ -0,0 +1,38 @@
+/* rand example: guess the number */
+#include <stdio.h>      /* printf, scanf, puts, NULL */
+#include <stdlib.h>     /* srand, rand */
+#include <time.h>       /* time */
+#include <cstdlib>
+
+
+
+int main ()
+{
+  double  Kp, Ki,Kd;
+
+  /* initialize random seed: */
+  srand (time(NULL));
+
+  /* generate random PID gains, scaled from [0,1]: */
+  Kp = ((double) rand() / (RAND_MAX)) * .12;
+  Ki = ((double) rand() / (RAND_MAX)) * .01;
+  Kd = ((double) rand() / (RAND_MAX)) * .001;
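+  /* rand() / RAND_MAX is uniform on [0,1], so Kp, Ki and Kd above fall in
+     [0,.12], [0,.01] and [0,.001] respectively */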
+  
+  printf("Kp : %1.12f \n",Kp);
+  printf("Ki : %1.12f \n",Ki);
+  printf("Kd : %1.12f \n",Kd);
+  /*do {
+    printf ("Guess the number (0 to 1): ");
+    scanf ("%lf",&iGuess);
+    if (iSecret<iGuess) puts ("The secret number is lower");
+    else if (iSecret>iGuess) puts ("The secret number is higher");
+  } while (iSecret!=iGuess);
+
+  puts ("Congratulations!");*/
+
+  return 0;
+}
diff --git a/MobileRobot/Machine_Learning/Practice/rand_func.cpp~ b/MobileRobot/Machine_Learning/Practice/rand_func.cpp~
new file mode 100644
index 0000000000000000000000000000000000000000..d66d2a1cf5d5adcea12b1114516f5db769c719b2
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/rand_func.cpp~
@@ -0,0 +1,38 @@
+/* rand example: guess the number */
+#include <stdio.h>      /* printf, scanf, puts, NULL */
+#include <stdlib.h>     /* srand, rand */
+#include <time.h>       /* time */
+#include <cstdlib>
+
+
+
+int main ()
+{
+  double  Kp, Ki,Kd;
+
+  /* initialize random seed: */
+  srand (time(NULL));
+
+  /* generate random PID gains, scaled from [0,1]: */
+  Kp = ((double) rand() / (RAND_MAX)) * .2;
+  Ki = ((double) rand() / (RAND_MAX)) * .01;
+  Kd = ((double) rand() / (RAND_MAX)) * .001;
+  
+  printf("Kp : %1.12f \n",Kp);
+  printf("Ki : %1.12f \n",Ki);
+  printf("Kd : %1.12f \n",Kd);
+  /*do {
+    printf ("Guess the number (0 to 1): ");
+    scanf ("%lf",&iGuess);
+    if (iSecret<iGuess) puts ("The secret number is lower");
+    else if (iSecret>iGuess) puts ("The secret number is higher");
+  } while (iSecret!=iGuess);
+
+  puts ("Congratulations!");*/
+  
+  
+  
+  
+  
+  return 0;
+}
diff --git a/MobileRobot/Machine_Learning/Practice/random b/MobileRobot/Machine_Learning/Practice/random
new file mode 100755
index 0000000000000000000000000000000000000000..e82aa5005931fe2ebc04c8922a3cad522a214cf0
Binary files /dev/null and b/MobileRobot/Machine_Learning/Practice/random differ
diff --git a/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2c86a9f6799af61e31720c569f4dd6e6a9f203c0
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp
@@ -0,0 +1,188 @@
+// Author:		John McCullock
+// Date:		11-05-05
+// Description:	Q-Learning Example 1.
+
+#include <iostream>
+#include <iomanip>
+#include <ctime>
+#include <cstdlib>
+
+using namespace std;
+
+const int qSize = 6;
+const double gamma = 0.8;
+const int iterations = 10;
+int initialStates[qSize] = {1, 3, 5, 2, 4, 0};
+
+int R[qSize][qSize] =  {{-1, -1, -1, -1, 0, -1},
+			{-1, -1, -1, 0, -1, 100},
+			{-1, -1, -1, 0, -1, -1},
+			{-1, 0, 0, -1, 0, -1},
+			{0, -1, -1, 0, -1, 100},
+			{-1, 0, -1, -1, 0, 100}};
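+// R is the reward/adjacency matrix: R[s][s'] = -1 means there is no direct
+// path from state s to state s', 0 marks a legal move, and 100 is the
+// reward for entering the goal state (state 5).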
+
+int Q[qSize][qSize];
+int currentState;
+
+void episode(int initialState);
+void chooseAnAction();
+int getRandomAction(int upperBound, int lowerBound);
+void initialize();
+int maximum(int state, bool returnIndexOnly);
+int reward(int action);
+
+int main()
+{
+
+	int newState;
+
+	initialize();
+
+    //Perform learning trials starting at all initial states.
+    for(int j = 0; j <= (iterations - 1); j++)
+    {
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+            episode(initialStates[i]);
+	} // i
+    } // j
+
+    //Print out Q matrix.
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+                for(int j = 0; j <= (qSize - 1); j++)
+                {
+                        cout << setw(5) << Q[i][j];
+			if(j < qSize - 1)
+			{
+				cout << ",";
+			}
+		} // j
+        cout << "\n";
+        } // i
+    cout << "\n";
+
+	//Perform tests, starting at all initial states.
+	for(int i = 0; i <= (qSize - 1); i++)
+	{
+                currentState = initialStates[i];
+                newState = 0;
+		do {
+                newState = maximum(currentState, true);
+                cout << currentState << ", ";
+                currentState = newState;
+                } while(currentState < 5);
+                cout << "5" << endl;
+	} // i
+
+    return 0;
+}
+
+void episode(int initialState)
+{
+
+    currentState = initialState;
+
+    //Travel from state to state until goal state is reached.
+	do {
+                chooseAnAction();
+	   } while(currentState != 5);
+
+    //When currentState == 5, run through the set once more
+    //for convergence.
+    for(int i = 0; i <= (qSize - 1); i++)
+    {
+        chooseAnAction();
+    } // i
+}
+
+void chooseAnAction()
+{
+
+	int possibleAction;
+
+    //Randomly choose a possible action connected to the current state.
+    possibleAction = getRandomAction(qSize, 0);
+
+	if(R[currentState][possibleAction] >= 0)
+	{
+                Q[currentState][possibleAction] = reward(possibleAction);
+                currentState = possibleAction;
+	}
+}
+
+int getRandomAction(int upperBound, int lowerBound){
+
+	int action;
+	bool choiceIsValid = false;
+	int range = upperBound - lowerBound;	// valid actions are 0..qSize-1
+
+    //Randomly choose a possible action connected to the current state.
+    do {
+        //Get a random value between 0 and 5.
+        action = lowerBound + int(range * rand() / (RAND_MAX + 1.0));
+		if(R[currentState][action] > -1)
+		{
+                        choiceIsValid = true;
+		}
+    } while(choiceIsValid == false);
+
+    return action;
+}
+
+void initialize(){
+
+	srand((unsigned)time(0));
+
+    for(int i = 0; i <= (qSize - 1); i++){
+        for(int j = 0; j <= (qSize - 1); j++){
+            Q[i][j] = 0;
+		} // j
+	} // i
+}
+
+int maximum(int state, bool returnIndexOnly)
+{
+// if returnIndexOnly = true, a Q matrix index is returned.
+// if returnIndexOnly = false, a Q matrix element is returned.
+
+	int winner;
+	bool foundNewWinner;
+	bool done = false;
+        winner = 0;
+    
+        do {
+                foundNewWinner = false;
+                for(int i = 0; i <= (qSize - 1); i++)
+                {
+                        if((i < winner) || (i > winner))
+                        {     //Avoid self-comparison.
+		                if(Q[state][i] > Q[state][winner])
+		                {
+                                        winner = i;
+                                        foundNewWinner = true;
+			        }
+		        }
+	        } // i
+
+	        if(foundNewWinner == false)
+	        {
+                        done = true;
+	        }
+
+        } while(done == false);
+
+	if(returnIndexOnly == true)
+	{
+		return winner;
+	}else
+	{
+		return Q[state][winner];
+	}
+}
+
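+// reward() applies the Q-learning update used throughout this example:
+//     Q(s,a) = R(s,a) + gamma * max_a' Q(s',a')
+// i.e. the immediate reward plus the discounted value of the best next action.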
+int reward(int action){
+				
+    return static_cast<int>(R[currentState][action] + (gamma * maximum(action, false)));
+}
+
diff --git a/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~ b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~
new file mode 100644
index 0000000000000000000000000000000000000000..004927f3dbaa59b5d1e5d413bee42068d3e4e00b
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/reinforcement_Learning.cpp~
@@ -0,0 +1,189 @@
+// Author:		John McCullock
+// Date:		11-05-05
+// Description:	Q-Learning Example 1.
+
+#include <iostream>
+#include <iomanip>
+#include <ctime>
+#include <cstdlib>
+
+using namespace std;
+
+const int qSize = 6;
+const double gamma = 0.8;
+const int iterations = 10;
+int initialStates[qSize] = {1, 3, 5, 2, 4, 0};
+
+int R[qSize][qSize] =  {{-1, -1, -1, -1, 0, -1},
+			{-1, -1, -1, 0, -1, 100},
+			{-1, -1, -1, 0, -1, -1},
+			{-1, 0, 0, -1, 0, -1},
+			{0, -1, -1, 0, -1, 100},
+			{-1, 0, -1, -1, 0, 100}};
+
+int Q[qSize][qSize];
+int currentState;
+
+void episode(int initialState);
+void chooseAnAction();
+int getRandomAction(int upperBound, int lowerBound);
+void initialize();
+int maximum(int state, bool returnIndexOnly);
+int reward(int action);
+
+int main()
+{
+
+	int newState;
+
+	initialize();
+
+    //Perform learning trials starting at all initial states.
+    for(int j = 0; j <= (iterations - 1); j++)
+    {
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+            episode(initialStates[i]);
+	} // i
+    } // j
+
+    //Print out Q matrix.
+        for(int i = 0; i <= (qSize - 1); i++)
+        {
+                for(int j = 0; j <= (qSize - 1); j++)
+                {
+                        cout << setw(5) << Q[i][j];
+			if(j < qSize - 1)
+			{
+				cout << ",";
+			}
+		} // j
+        cout << "\n";
+        } // i
+    cout << "\n";
+
+	//Perform tests, starting at all initial states.
+	for(int i = 0; i <= (qSize - 1); i++)
+	{
+                currentState = initialStates[i];
+                newState = 0;
+		do {
+                newState = maximum(currentState, true);
+                cout << currentState << ", ";
+                currentState = newState;
+                } while(currentState < 5);
+                cout << "5" << endl;
+	} // i
+
+    return 0;
+}
+
+void episode(int initialState)
+{
+
+    currentState = initialState;
+
+    //Travel from state to state until goal state is reached.
+	do {
+                chooseAnAction();
+	   } while(currentState != 5);
+
+    //When currentState == 5, run through the set once more
+    //for convergence.
+    for(int i = 0; i <= (qSize - 1); i++)
+    {
+        chooseAnAction();
+    } // i
+}
+
+void chooseAnAction()
+{
+
+	int possibleAction;
+
+    //Randomly choose a possible action connected to the current state.
+    possibleAction = getRandomAction(qSize, 0);
+
+	if(R[currentState][possibleAction] >= 0)
+	{
+                Q[currentState][possibleAction] = reward(possibleAction);
+                currentState = possibleAction;
+	}
+}
+
+int getRandomAction(int upperBound, int lowerBound){
+
+	int action;
+	bool choiceIsValid = false;
+	int range = upperBound - lowerBound;	// valid actions are 0..qSize-1
+
+    //Randomly choose a possible action connected to the current state.
+    do {
+        //Get a random value between 0 and 5.
+        action = lowerBound + int(range * rand() / (RAND_MAX + 1.0));
+		if(R[currentState][action] > -1)
+		{
+                        choiceIsValid = true;
+		}
+    } while(choiceIsValid == false);
+
+    return action;
+}
+
+void initialize(){
+
+	srand((unsigned)time(0));
+
+    for(int i = 0; i <= (qSize - 1); i++){
+        for(int j = 0; j <= (qSize - 1); j++){
+            Q[i][j] = 0;
+		} // j
+	} // i
+}
+
+int maximum(int state, bool returnIndexOnly)
+{
+// if returnIndexOnly = true, a Q matrix index is returned.
+// if returnIndexOnly = false, a Q matrix element is returned.
+
+	int winner;
+	bool foundNewWinner;
+	bool done = false;
+
+        winner = 0;
+    
+        do {
+                foundNewWinner = false;
+                for(int i = 0; i <= (qSize - 1); i++)
+                {
+                        if((i < winner) || (i > winner))
+                        {     //Avoid self-comparison.
+		                if(Q[state][i] > Q[state][winner])
+		                {
+                                        winner = i;
+                                        foundNewWinner = true;
+			        }
+		        }
+	        } // i
+
+	        if(foundNewWinner == false)
+	        {
+                        done = true;
+	        }
+
+        } while(done == false);
+
+	if(returnIndexOnly == true)
+	{
+		return winner;
+	}else
+	{
+		return Q[state][winner];
+	}
+}
+
+int reward(int action){
+				
+    return static_cast<int>(R[currentState][action] + (gamma * maximum(action, false)));
+}
+
diff --git a/MobileRobot/Machine_Learning/Practice/timer b/MobileRobot/Machine_Learning/Practice/timer
new file mode 100755
index 0000000000000000000000000000000000000000..8d748460054d39cbb7a5c5d97dd16e0e895ba64f
Binary files /dev/null and b/MobileRobot/Machine_Learning/Practice/timer differ
diff --git a/MobileRobot/Machine_Learning/Practice/timer.cpp b/MobileRobot/Machine_Learning/Practice/timer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..630428894c7e25728e718ddeff26fd921e5afc0b
--- /dev/null
+++ b/MobileRobot/Machine_Learning/Practice/timer.cpp
@@ -0,0 +1,49 @@
+#include <iostream>
+#include <unistd.h>
+
+using namespace std;
+void newline(); 
+
+int main() {
+    
+int msec = 0;
+int sec = 0;
+int min = 0;
+int hr = 0;
+
+
+//cout << "Press any key to start:";
+//char start = _getch();
+
+for (;;)
+{
+        newline();
+                if(msec == 10)
+                {
+                        ++sec;
+                        msec = 0;
+                }
+                if(sec == 60)
+                {
+                        ++min;
+                        sec = 0; 
+                }
+                if(min == 60)
+                {
+                        ++hr;
+                        min = 0;
+                }
+        cout << hr << " : " << min << " : " << sec << " . " << msec << endl;
+        ++msec;
+        usleep(100000); 
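+        // 100000 us = 100 ms per iteration, so "msec" counts tenths of a second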
+
+}
+
+    return 0;
+}
+
+void newline()
+{
+        cout << "\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n";
+}
+