diff --git a/README.md b/README.md
index ce4a929c3ee78efd00d40bd9cdfbaed1dcd690b1..346d9d92eac1f4b58654682bc219cf978915cf2d 100644
--- a/README.md
+++ b/README.md
@@ -83,7 +83,7 @@ In the examples directory.
 - pinocchio for robot dynamics calculations
 - meshcat for visualization (has nice integration with pinocchio)
 - crocoddyl for optimal control
-- qpsolvers and ecos for QP formulation of IK
+- qpsolvers and ecos for the QP formulation of IK -> will add pink because it's more convenient (see the sketch below this list)
 - Dockerized ur simulator as a simulator -DOES NOT WORK ATM (it's a bad simulator, but it uses the same robot-communication API 
   as the real robot, making coding and testing easier)
 - Docker is used for shipping 
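+
+A minimal sketch of what the QP formulation of velocity IK boils down to with qpsolvers + ecos
+(illustrative only - the frame id, damping and limits are made up, this is not the implementation in this repo):
+
+```python
+import numpy as np
+import pinocchio as pin
+from qpsolvers import solve_qp
+
+def qp_ik_step(model, data, q, ee_frame_id, v_des, dt):
+    # min_dq ||J dq - v_des||^2  subject to joint velocity bounds
+    J = pin.computeFrameJacobian(model, data, q, ee_frame_id,
+                                 pin.ReferenceFrame.LOCAL_WORLD_ALIGNED)
+    P = J.T @ J + 1e-6 * np.eye(model.nv)  # damping keeps P positive definite
+    g = -J.T @ v_des
+    dq = solve_qp(P, g, lb=-model.velocityLimit, ub=model.velocityLimit, solver="ecos")
+    return pin.integrate(model, q, dq * dt)  # next configuration
+```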
diff --git a/TODOS b/TODOS
index b57c0658eeb9d0ffc17df2e5a87ef04b415786af..973694db546a4deb03c53fa2f4fa7eb2e1dcc4b6 100644
--- a/TODOS
+++ b/TODOS
@@ -14,16 +14,15 @@ goal 2: challenge
 --------------------------------
 mpc for path following
 ------------------------
-1. just solve the ocp to move to a point
-	1.1. start ROS1 in docker, make it print out heron's urdf into a file, 
-         pass that file to pinocchio to construct the robot
-         (does not have to be from the same program, idgaf, just get it to work asap)
-	1.2. formulate the OCP to just go to a point -> done on almost the right model
-DEADLINE: 2024-11-04
-	1.3. prepare wrappers for HAL which will make it runnable on the real thing
-2. include the cart in the model, formulate OCP with that to move to a point
-	2.1. there's some arm constraints in pink, look how it's done there
-		 --> noup
+1. you need to manually make underactuation work in crocoddyl (action model updating etc.)
+2. turn mpc from point-to-point to path-following
+3. prepare wrappers for HAL which will make it runnable on the real thing
+
+CART HANDLING
+----------------
+OPTION 1) FULL DYNAMIC MODELLING --> TOO DIFFICULT TO DO MODELLING-WISE IN SOFTWARE
+----------
+include the cart in the model, formulate OCP with that to move to a point
 	2.2. add a planar joint to one of the arms. it's a big box and that approximates
          the cart reasonably well. alternatively you can define it as an individual thing,
          and then attach the gripper to that. in any event, it's obviously a planar joint,
@@ -58,14 +57,29 @@ DEADLINE: 2024-11-04
          calc/extract wrenches and update them and the cart movement,
          where the dynamic model is blind to the existence of the cart.
          this gets rid of some nice properties, but it might be a decent hack.
-	2.3. you need to manually make underactuation work in crocoddyl
 		 --> start with action model in python, just like the acrobot example
 		 --> port that to cpp if it's too slow (almost certainly will be lmao)
-3. formulate the path following ocp with robot-cart model, put it in a while loop --> that's your mpc.
-   do this on a testing fixed path 
 4. integrate 3. with actual received path from node running albin's planner 
+	--> this boils down to just passing the correct references
 	DEADLINE: 2024-11-08 -> push to 2024-11-15
 5. run that on real heron, tune until it's not broken and ONLY up to that point.
+
+OPTION 2) DISREGARDING THE CART, JUST FOLLOWING END-EFFECTOR REFERENCES (path is defined for the cart)
+--------------
+1. make a cost for path following. you have a cost for a single final point,
+   now make it path tracking for end-effector positions, where the cost
+   is not the distance to the final point but overall path tracking.
+   if you have the same time-scaling for the path as for the nodes in the ocp,
+   then you can just take the distance between each node and its
+   corresponding path point (for each ee individually)
+ALTERNATIVE TO CREATING A ResidualModelPathPoseTracking
+could be to set (and update) the reference of the frame-placement residual
+at each running model that comprises the problem.
+in fact that might be the correct way to go about it (see the sketch below)
+2. do a constraint on the ee poses so that they are in the plane defined by the handlebar
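+   sketch of that alternative (it's what CrocoPathFollowingMPCControlLoop in
+   crocoddyl_mpc.py already does; assumes one "gripperPose<k>" cost per knot,
+   a path sampled at the knot times, and n_knots = args.n_knots):
+	for k, runningModel in enumerate(solver.problem.runningModels):
+		runningModel.differential.costs.costs["gripperPose" + str(k)].cost.residual.reference = path[k]
+	solver.problem.terminalModel.differential.costs.costs["gripperPose" + str(n_knots)].cost.residual.reference = path[-1]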
+
+BEHAVIOUR TREE
+------------------
 6. make a function that implements the get-cart-to-station behaviour tree.
    this is a control loop that select what control loop to run based on some 
    logic flags (smaller number = higher priority). simple select loop of the highest priority.
diff --git a/TODOS_2024_09_19 b/TODOS_2024_09_19
deleted file mode 100644
index 6adcdc6b3f42573b9b145752daa77d236fa652a6..0000000000000000000000000000000000000000
--- a/TODOS_2024_09_19
+++ /dev/null
@@ -1,94 +0,0 @@
-goal 1: starting points for student projects
----------------------------------------------
-1. PUSHING GROUP: some multiprocessing to get camera outputs 
-   available in the control loop
-   (process puts its result on some queue managed by robot_manager,
-    and this is popped in the control loop or something. 
-    think how to do this and then make a skeleton for how to use this).
-2. JENGA GROUP: create a starting point and example with pinocchio.casadi
-   optimal control (copy something from tutorial, and use the PD controller
-   to track the reference)
-2.1. FIGURE OUT ACTUAL UR5 TORQUE LIMITS TO HAVE FOLLOWABLE TRAJECTORIES FROM OCP
-
-goal 2: challenge
---------------------------------
-mpc for path following
-------------------------
-1. just solve the ocp to move to a point
-	1.1. start ROS1 in docker, make it print out heron's urdf into a file, 
-         pass that file to pinocchio to construct the robot
-         (does not have to be from the same program, idgaf, just get it to work asap)
-	1.2. formulate the OCP to just go to a point
-DEADLINE: 2024-11-04
-	1.3. prepare wrappers for HAL which will make it runnable on the real thing
-2. include the cart in the model, formulate OCP with that to move to a point
-3. formulate the path following ocp with robot-cart model, put it in a while loop --> that's your mpc.
-   do this on a testing fixed path 
-4. integrate 3. with actual received path from node running albin's planner 
-	DEADLINE: 2024-11-08
-5. run that on real heron, tune until it's not broken and ONLY up to that point.
-6. make a function that implements the get-cart-to-station behaviour tree.
-   this is a control loop that select what control loop to run based on some 
-   logic flags (smaller number = higher priority). simple select loop of the highest priority.
-   for every flag there is a process (node + topic) which determines whether to set it or unset it.
-	- PRIORITY 1: if obstacle flag, then run stopLoop (which is just make all velocities 0). flag on 
-                  only if there are people around.
-	- PRIORITY 2: if handle_bar flag, run loop to grasp the handle bar. if handle bar is grasped
-                  (vishnu's vision sets this flag) then the flag is unset. if not grasped (gripper
-                   status (maybe) + vision decides) then set the flag.
-	- PRIORITY 3: pull cart along prescribed path - runs MPC path-following loop. flag on until goal is reached
-	- PRIORITY 4: dock cart. flag on until cart is docked. i guess vision should check that but idk honestly.
-                  if this flag is set down, the program is down - shut down.
-	run this on the real thing
-	DEADLINE: 2024-11-15
-7. do all this but on mobile yumi in sim 
-	DEADLINE: 2024-11-23
-8. tune yumi solution
-	DEADLINE: 2024-11-30
-
-goal 3: usability, verifiability
-----------------------------------
-1. write some tests just too see that:
-	a) various parameter combinations work
-    b) controllers converge in situations they should converge in
-    c) most basic unit tests on functions
-    d) preferably some of this is runnable on the real robot,
-       or at least sim with --start-from-current pose flag
-	e) a speedrun of files in examples on the real robot,
-       again just to verify everything works 
-2. it would be nice to have a function to conveniently define points, namely,
-   some way to programatically (or on keyboard input) save the joint and end-effector
-   positions while manually guiding the robot in freedrive or with compliance control.
-   this obviously won't generate the best possibly trajectories according to literally any
-   metric other than convenience of use.
-3. implement a way to simulate interaction with the environment.
-   it will boil down to programatically putting in wrench readings.
-   it can be the just a step to put the wrench at 10N or whatever,
-   which is of course non-physical, but it will be good enough just to
-   verify that the code works. this is easier than putting the robot
-   in a simulator.
-4. use python's logging instead of printing
-5. put the robot in a simulator and use the simulator.
-6. think of some basic metrics to calculate along 
-   trajectories (print out some info on the performance of point-to-point and traj-following
-   runs, including comparing to same simulate runs etc. plots should be 
-   fine to start, but having running rms or something sounds like a good idea).
-   also make sure x-axis are labelled correctly (wall-clock time)
-
-goal 4: more controllers
-------------------------
-1. object regrasping with crocoddyl
-2. finish adding all ik algorithms
-3. casadi or crocoddyl optimal control with obstacle avoidance (manipulator itself + table/floor is enough)
-4. [hard] adjusting the dmp to get back on the path despite external forces 
-   (fix the problem of writing on a non-flat surface/whatever) --> publishable side-project
-
-
-
-goal 5: panda/yumi
-----------------
-finally, do what you promised and put this on another robot,
-thereby rendering this something publishable in joss
-
-1. transfer the library to panda or yumi or both
-
diff --git a/python/README.md b/python/README.md
index d638db88e17a37bbf7a7da9bcf6993a449be146d..a4d03eab6813b205fb9846d578d1fb0457f7c77a 100644
--- a/python/README.md
+++ b/python/README.md
@@ -1,6 +1,5 @@
 # TODO:
 ---------
-- go to a previous version, save basic version of click where everything is in one file
 - write test for every algorithm - just run it on sample data on simulation and pinocchio.
   using things on the real robot needs to be tested manually, but simulation in URsimulator should
   take care of that because the only difference is the IP address in that case.
@@ -10,13 +9,7 @@
   due to a misclick while typing out arguments
 - try to get reading and setting the payload from code. also figure out why doesn't it zero the f/t sensor
   --> works, but you need to change ur_rtde code, recompile, and install both c++ and python from there.
-so not terribly convenient. it will get fixed in a future ur_rtde release tho
-- merge existing visualization with current solution. no, it can't real real-time with the robot
-  because it really is that bad, but if you run it in a separate process and update it sporadicly,
-  it will be ok. it already has a bunch of the goodies you want so it's certainly easier to get
-  this to work than to re-do the same functionality from scratch. 
-  and you won't really be using it too much anyway let's be real. AND ALSO,
-  if you ditch your shitty integration for pinocchio's it will be faster.
+      so not terribly convenient. it will get fixed in a future ur_rtde release tho
 - use logging instead of a debug flag (you're reinventing the wheel and it's also ugly)
 - add types to everything, check them with mypy
 
@@ -30,13 +23,18 @@ from this directory. now the package is editable, which there's a chance you'd w
 
 # description
 ---------
-- organized as a library called ur_simple_control, made into a python package. the hope is that if this
+- organized as a library called ur_simple_control (should be Simple Manipulator Control), 
+  made into a python package. the hope is that if this
   is done well enough, you could mix-and-match different components
   and just have them work as intended. on the other hand,
   the code should still be simple enough to afford the quickest possible prototyping,
   which is the point of having it all in python anyway
 - initial python solution is age-old code which needs to be remapped into the 
   libraries used here. it will sit here until everything it offers has been translated.
+  this primarily concerns the inverse kinematics algorithms in there,
+  the QP-based ones in particular
+    --> those could be outsourced to the pink library as it's literally QP ik,
+        but written competently and tested (see the sketch below)
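+
+a rough sketch of what that would look like with pink (API names recalled from
+pink's docs - treat them as assumptions and check the installed version;
+robot, T_w_goal and dt are placeholders):
+
+```python
+import pink
+from pink import solve_ik
+from pink.tasks import FrameTask
+
+configuration = pink.Configuration(robot.model, robot.data, robot.getQ())
+ee_task = FrameTask("tool0", position_cost=1.0, orientation_cost=1.0)
+ee_task.set_target(T_w_goal)  # target is a pin.SE3
+v = solve_ik(configuration, [ee_task], dt, solver="ecos")  # any installed qpsolvers backend
+configuration.integrate_inplace(v, dt)
+```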
 
 # runnable things
 ---------------
diff --git a/python/convenience_tool_box/measuring_stick.py b/python/convenience_tool_box/measuring_stick.py
index ede2b40b02bcd5467044dd1ab1db5eebca58b491..a76be3d7b9b8b3b88b926c37ef3f69d273fb3b95 100644
--- a/python/convenience_tool_box/measuring_stick.py
+++ b/python/convenience_tool_box/measuring_stick.py
@@ -10,7 +10,7 @@ import os
 import copy
 import signal
 from ur_simple_control.managers import RobotManager
-from ur_simple_control.clik.clik_point_to_point import get_args
+from ur_simple_control.clik.clik import getClikArgs
 
 def handler(signum, frame):
     print('i will end freedrive and exit')
diff --git a/python/examples/camera_no_lag.py b/python/examples/camera_no_lag.py
index 180c52e8a8b151d4e6b08870ace21b8c42655415..122f08694037d7d4bb541a42305be3969954b630 100644
--- a/python/examples/camera_no_lag.py
+++ b/python/examples/camera_no_lag.py
@@ -1,8 +1,8 @@
 import cv2
 
-camera = cv2.VideoCapture(0)
+#camera = cv2.VideoCapture(0)
 # if you have ip_webcam set-up you can do this (but you have put in the correct ip)
-#camera = cv2.VideoCapture("http://192.168.219.153:8080/video")
+camera = cv2.VideoCapture("http://192.168.219.153:8080/video")
 
 while True:
     (grabbed, frame) = camera.read()
diff --git a/python/examples/data/latest_run b/python/examples/data/latest_run
index 4847c67d6795dc1aaf0da33991c1bbb123c3e83d..047b738479cb3ea89908733341a9abe6745ddf63 100644
Binary files a/python/examples/data/latest_run and b/python/examples/data/latest_run differ
diff --git a/python/examples/path_following_mpc.py b/python/examples/path_following_mpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..5387c0173562bc75beddf72e9861beb6f1114d9f
--- /dev/null
+++ b/python/examples/path_following_mpc.py
@@ -0,0 +1,82 @@
+import numpy as np
+import time
+import argparse
+from functools import partial
+from ur_simple_control.managers import getMinimalArgParser, ControlLoopManager, RobotManager
+from ur_simple_control.optimal_control.crocoddyl_optimal_control import get_OCP_args, createCrocoPathFollowingOCP1, solveCrocoOCP
+from ur_simple_control.optimal_control.crocoddyl_mpc import CrocoPathFollowingMPCControlLoop, CrocoPathFollowingMPC
+from ur_simple_control.basics.basics import followKinematicJointTrajP
+from ur_simple_control.util.logging_utils import LogManager
+from ur_simple_control.visualize.visualize import plotFromDict
+from ur_simple_control.clik.clik import getClikArgs
+import pinocchio as pin
+import crocoddyl
+import mim_solvers
+
+
+def get_args():
+    parser = getMinimalArgParser()
+    parser = get_OCP_args(parser)
+    parser = getClikArgs(parser) # literally just for goal error
+    args = parser.parse_args()
+    return args
+
+
+if __name__ == "__main__": 
+    args = get_args()
+    robot = RobotManager(args)
+    # TODO: put this back for nicer demos
+    #Mgoal = robot.defineGoalPointCLI()
+    #Mgoal = pin.SE3.Random()
+
+    #robot.Mgoal = Mgoal.copy()
+    #if args.visualize_manipulator:
+    #    # TODO document this somewhere
+    #    robot.manipulator_visualizer_queue.put(
+    #            {"Mgoal" : Mgoal})
+    # build a dummy path (the current end-effector pose repeated at every knot)
+    # and construct the path-following OCP for it.
+    # the commented-out block below is the alternative single-solve approach:
+    # solveCrocoOCP returns a position/velocity reference (as a dictionary)
+    # and the crocoddyl solver object, and that reference can then be tracked.
+    # starting state
+    x0 = np.concatenate([robot.getQ(), robot.getQd()])
+    robot._step()
+    T_w_e = robot.getT_w_e()
+    path = [T_w_e] * args.n_knots
+
+    problem = createCrocoPathFollowingOCP1(args, robot, x0, path)
+
+    # NOTE: this might be useful if we solve with a large time horizon,
+    # lower frequency, and then follow the predicted trajectory with velocity p-control
+    # this shouldn't really depend on x0 but i can't be bothered
+    #reference, solver = solveCrocoOCP(args, robot, problem, x0)
+    #if args.solver == "boxfddp":
+    #    log = solver.getCallbacks()[1]
+    #    crocoddyl.plotOCSolution(log.xs, log.us, figIndex=1, show=True)
+    #if args.solver == "csqp":
+    #    log = solver.getCallbacks()[1]
+    #    mim_solvers.plotOCSolution(log.xs, log.us, figIndex=1, show=True)
+
+    # we need a way to follow the reference trajectory,
+    # both because there can be disturbances,
+    # and because it is sampled at a much lower frequency
+    #followKinematicJointTrajP(args, robot, reference)
+
+    CrocoPathFollowingMPC(args, robot, x0, path)
+
+    print("final position:")
+    print(robot.getT_w_e())
+
+    if args.save_log:
+        robot.log_manager.plotAllControlLoops()
+
+    if not args.pinocchio_only:
+        robot.stopRobot()
+
+    if args.visualize_manipulator:
+        robot.killManipulatorVisualizer()
+    
+    if args.save_log:
+        robot.log_manager.saveLog()
+    #loop_manager.stopHandler(None, None)
+
diff --git a/python/examples/path_in_pixels.csv b/python/examples/path_in_pixels.csv
index 295ab5a4b74aaaa7dfb5c201039c6308e0cc469c..cfeb519c5eb52749fc2063c08f8e0d6af7931a98 100644
--- a/python/examples/path_in_pixels.csv
+++ b/python/examples/path_in_pixels.csv
@@ -1,80 +1,63 @@
-0.03192,0.33409
-0.03415,0.33409
-0.03415,0.33409
-0.03637,0.33409
-0.04303,0.33713
-0.04525,0.33713
-0.05191,0.33865
-0.05413,0.33865
-0.07190,0.34321
-0.07412,0.34321
-0.08078,0.34321
-0.09411,0.34473
-0.10743,0.34928
-0.12076,0.35232
-0.18516,0.35840
-0.19849,0.36143
-0.21182,0.36143
-0.22514,0.36447
-0.28510,0.37662
-0.29621,0.37662
-0.31175,0.37966
-0.36061,0.38574
-0.38282,0.38877
-0.44501,0.39485
-0.46055,0.39789
-0.47388,0.39789
-0.49386,0.40245
-0.53828,0.40700
-0.55161,0.40700
-0.56715,0.41004
-0.62268,0.41612
-0.62490,0.41612
-0.63156,0.41612
-0.64710,0.41915
-0.64710,0.41915
-0.65377,0.41915
-0.65377,0.42067
-0.65599,0.42067
-0.65599,0.42219
-0.65599,0.42219
-0.65599,0.42371
-0.65599,0.42523
-0.65377,0.42675
-0.64933,0.43738
-0.64488,0.44042
-0.64044,0.45105
-0.61823,0.47687
-0.61379,0.48143
-0.60269,0.49358
-0.58270,0.51637
-0.57826,0.52092
-0.56937,0.53156
-0.54939,0.55282
-0.54717,0.55738
-0.53384,0.56953
-0.52052,0.58776
-0.51829,0.58928
-0.51607,0.59535
-0.51607,0.59535
-0.51385,0.59839
-0.51385,0.59687
-0.51385,0.59687
-0.51385,0.59535
-0.51385,0.59080
-0.51385,0.58168
-0.50719,0.57105
-0.50497,0.56345
-0.49831,0.54067
-0.46499,0.47384
-0.45833,0.44801
-0.44501,0.42675
-0.42280,0.35991
-0.41836,0.34473
-0.41169,0.33561
-0.41169,0.32346
-0.40503,0.29916
-0.40503,0.30068
-0.40503,0.29916
-0.40503,0.29916
-0.40503,0.29916
+0.53498,0.68831
+0.53498,0.68698
+0.53498,0.68565
+0.53498,0.68432
+0.53350,0.67766
+0.52165,0.65634
+0.50091,0.63370
+0.47720,0.61505
+0.45202,0.58308
+0.40906,0.52581
+0.39721,0.51382
+0.35573,0.44056
+0.35128,0.43523
+0.32758,0.40193
+0.32314,0.39394
+0.32017,0.38994
+0.31869,0.38728
+0.31721,0.38595
+0.31721,0.38462
+0.31721,0.38328
+0.33351,0.38728
+0.37499,0.39660
+0.41647,0.39927
+0.46239,0.40193
+0.52017,0.40326
+0.55868,0.40326
+0.58831,0.40326
+0.61201,0.40193
+0.65201,0.40060
+0.66090,0.40060
+0.67572,0.39794
+0.69201,0.39794
+0.70534,0.39660
+0.71571,0.39527
+0.71868,0.39527
+0.71868,0.39527
+0.72016,0.39527
+0.72460,0.39927
+0.72757,0.41392
+0.71720,0.43656
+0.70090,0.45654
+0.68609,0.47386
+0.65942,0.49917
+0.62535,0.53114
+0.60609,0.55511
+0.59127,0.57642
+0.56461,0.61905
+0.55572,0.63104
+0.54387,0.64302
+0.54091,0.64569
+0.54091,0.64702
+0.53942,0.64835
+0.53794,0.64968
+0.53646,0.65102
+0.53054,0.66167
+0.52757,0.67499
+0.52461,0.68565
+0.52165,0.69364
+0.52017,0.69897
+0.51868,0.70163
+0.51868,0.70296
+0.51868,0.70430
+0.51868,0.70430
diff --git a/python/examples/pin_contact3d.py b/python/examples/pin_contact3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e3b159b04f7efc761dada69777d884f07b0d33c
--- /dev/null
+++ b/python/examples/pin_contact3d.py
@@ -0,0 +1,120 @@
+import pinocchio as pin
+from pinocchio.visualize import MeshcatVisualizer
+import numpy as np
+import time
+import argparse
+from functools import partial
+from ur_simple_control.managers import getMinimalArgParser, ControlLoopManager, RobotManager
+
+def get_args():
+    parser = getMinimalArgParser()
+    parser.description = 'trying to get 3dcontact to do something'
+    # add more arguments here from different Simple Manipulator Control modules
+    args = parser.parse_args()
+    return args
+
+if __name__ == "__main__": 
+    args = get_args()
+    robot = RobotManager(args)
+
+    # i'm not using a control loop because i'm copy pasting code
+    # so let's go pinocchio all the way
+    viz = MeshcatVisualizer(robot.model, robot.collision_model, robot.visual_model)
+    viz.initViewer(open=True)
+    viz.loadViewerModel()
+
+    q0 = pin.neutral(robot.model)
+    q0 = pin.randomConfiguration(robot.model)
+    viz.display(q0)
+
+    ee_constraint_placement = pin.SE3.Identity()
+    #ee_constraint_placement.translation = np.array([0.3,0.3,0.3])
+    ee_constraint_placement.translation = np.array([0.0,0.0,0.5])
+
+    # TODO
+    # 1. TODO
+    #    try the thing with 2 joints (same class pin.RigidConstraintModel)
+    #    to connect cart and hand
+    # 2. TODO
+    #    figure  out what the reference frame argument here means
+    # 3. TODO
+    #    figure out what different reference frames do in this context 
+    constraint_model = pin.RigidConstraintModel(
+        pin.ContactType.CONTACT_3D,
+        robot.model,
+        robot.JOINT_ID,
+        #pin.ReferenceFrame.LOCAL # but i have no idea what this is really (is it in ee frame?)
+        ee_constraint_placement # but i have no idea what this is really (is it in ee frame?)
+    )
+    constraint_data = constraint_model.createData()
+    constraint_dim = constraint_model.size()
+
+    # inverse geometry: find a configuration that actually satisfies the constraint
+    # before simulating (this looks copied from pinocchio's closed-kinematic-chain example)
+    rho = 1e-10 # regularization of the hessian (data.M is overwritten with rho * I below)
+    mu = 1e-4  # proximal term used in the kkt solve
+    eps = 1e-10 # convergence tolerance on primal/dual feasibility
+    y = np.ones((constraint_dim))
+    robot.data.M = np.eye(robot.model.nv) * rho
+    kkt_constraint = pin.ContactCholeskyDecomposition(robot.model, [constraint_model])
+    # ContactCholeskyDecomposition factorizes the kkt matrix of the constrained problem.
+    # here it's used to compute gauss-newton steps for the inverse-geometry iteration below,
+    # i.e. to find a feasible initial configuration for the simulation
+    q = pin.randomConfiguration(robot.model)
+    for i in range(1000):
+        pin.computeJointJacobians(robot.model, robot.data, q)
+        kkt_constraint.compute(robot.model, robot.data, [constraint_model], [constraint_data], mu)
+        # c1Mc2 is the relative placement between the two contact frames,
+        # so its translation is the 3d constraint violation (not a force)
+        constraint_value = constraint_data.c1Mc2.translation
+        J = pin.getFrameJacobian(robot.model, robot.data, constraint_model.joint1_id, constraint_model.joint1_placement, constraint_model.reference_frame)[:3, :] # only the linear rows - CONTACT_3D constrains translation only
+        primal_feas = np.linalg.norm(constraint_value, np.inf) # inf norm = worst-case violation
+        dual_feas = np.linalg.norm(J.T.dot(constraint_value + y), np.inf)
+        if primal_feas < eps and dual_feas < eps:
+            print("converged")
+            break
+        rhs = np.concatenate([-constraint_value - y * mu, np.zeros(robot.model.nv)])
+        # the kkt solve returns the stacked [dual; primal] step, hence the split below
+        dz = kkt_constraint.solve(rhs)
+        dy = dz[:constraint_dim]
+        dq = dz[constraint_dim:]
+
+        alpha = 1.0
+        q = pin.integrate(robot.model, q, -alpha * dq)
+        y -= alpha * (-dy + y)
+
+    v = np.zeros(robot.model.nv)
+    tau = np.zeros(robot.model.nv)
+    dt = 5e-3
+    T_sim = 1000
+    t = 0
+    mu_sim = 1e-10
+    constraint_model.corrector.Kp[:] = 10
+    constraint_model.corrector.Kd[:] = 2.0 * np.sqrt(constraint_model.corrector.Kp)
+    pin.initConstraintDynamics(robot.model, robot.data, [constraint_model])
+    prox_settings = pin.ProximalSettings(1e-8, mu_sim, 10)
+
+
+    while t <= T_sim:
+        a = pin.constraintDynamics(
+            robot.model, robot.data, q, v, tau, [constraint_model], [constraint_data], prox_settings
+        )
+        v += a * dt
+        q = pin.integrate(robot.model, q, v * dt)
+        viz.display(q)
+        time.sleep(dt)
+        t += dt
+
+    # get expected behaviour here (library can't know what the end is - you have to do this here)
+    if not args.pinocchio_only:
+        robot.stopRobot()
+
+    if args.save_log:
+        robot.log_manager.plotAllControlLoops()
+
+    if args.visualize_manipulator:
+        robot.killManipulatorVisualizer()
+    
+    if args.save_log:
+        robot.log_manager.saveLog()
+    #loop_manager.stopHandler(None, None)
diff --git a/python/ur_simple_control/__pycache__/managers.cpython-312.pyc b/python/ur_simple_control/__pycache__/managers.cpython-312.pyc
index 0a834918a14e09f0bd0fb57d66922f2d7dd23576..cd749a0e949021d4abe04b5ee549106e839b4e09 100644
Binary files a/python/ur_simple_control/__pycache__/managers.cpython-312.pyc and b/python/ur_simple_control/__pycache__/managers.cpython-312.pyc differ
diff --git a/python/ur_simple_control/clik/clik.py b/python/ur_simple_control/clik/clik.py
index f5e0926553fb34fa0eba892b80f22fce73f3740b..3b6a437805cde5d30016b886469e36a23fe45abe 100644
--- a/python/ur_simple_control/clik/clik.py
+++ b/python/ur_simple_control/clik/clik.py
@@ -193,6 +193,7 @@ def moveUntilContactControlLoop(contact_detecting_force, speed, robot : RobotMan
     """
     breakFlag = False
     # know where you are, i.e. do forward kinematics
+    log_item = {}
     q = robot.getQ()
     # break if wrench is nonzero basically
     #wrench = robot.getWrench()
@@ -215,7 +216,8 @@ def moveUntilContactControlLoop(contact_detecting_force, speed, robot : RobotMan
     # compute the joint velocities.
     qd = clik_controller(J, speed)
     robot.sendQd(qd)
-    return breakFlag, {}, {}
+    log_item['wrench'] = wrench.reshape((6,))
+    return breakFlag, {}, log_item
 
 def moveUntilContact(args, robot, speed):
     """
@@ -228,7 +230,8 @@ def moveUntilContact(args, robot, speed):
     # TODO: just make the manager pass the robot or something, this is weird man
     controlLoop = partial(moveUntilContactControlLoop, args.contact_detecting_force, speed, robot, clik_controller)
     # we're not using any past data or logging, hence the empty arguments
-    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, {})
+    log_item = {'wrench' : np.zeros(6)}
+    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
     loop_manager.run()
     # TODO: remove, this isn't doing anything
     time.sleep(0.01)
diff --git a/python/ur_simple_control/managers.py b/python/ur_simple_control/managers.py
index afacc9c87f178f5b16d896efd355eaf68ca5207e..49525cbe3832b0c5e43a750c88e054f08712579e 100644
--- a/python/ur_simple_control/managers.py
+++ b/python/ur_simple_control/managers.py
@@ -10,7 +10,7 @@ from ur_simple_control.util.grippers.robotiq.robotiq_gripper import RobotiqGripp
 from ur_simple_control.util.grippers.on_robot.twofg import TWOFG
 import copy
 import signal
-from ur_simple_control.util.get_model import get_model, heron_approximation
+from ur_simple_control.util.get_model import get_model, heron_approximation, getGripperlessUR5e
 from collections import deque
 from ur_simple_control.visualize.visualize import plotFromDict, realTimePlotter, manipulatorVisualizer
 from ur_simple_control.util.logging_utils import LogManager
@@ -71,7 +71,7 @@ def getMinimalArgParser():
     #################################################
     parser.add_argument('--robot', type=str, \
             help="which robot you're running or simulating", default="ur5e", \
-            choices=['ur5e', 'heron'])
+            choices=['ur5e', 'heron', 'gripperlessur5e'])
     parser.add_argument('--simulation', action=argparse.BooleanOptionalAction, \
             help="whether you are running the UR simulator", default=False)
     parser.add_argument('--robot-ip', type=str, 
@@ -433,6 +433,9 @@ class RobotManager:
         if self.robot_name == "heron":
             self.model, self.collision_model, self.visual_model, self.data = \
                  heron_approximation()
+        if self.robot_name == "gripperlessur5e":
+            self.model, self.collision_model, self.visual_model, self.data = \
+                 getGripperlessUR5e()
 
         # start visualize manipulator process if selected.
         # has to be started here because it lives throughout the whole run
@@ -445,7 +448,7 @@ class RobotManager:
                                                      args=(self.args, self.model, self.collision_model, self.visual_model, self.manipulator_visualizer_queue, ))
             self.manipulator_visualizer_process.start()
             # give it time to start
-            time.sleep(3)
+            time.sleep(5)
             if args.debug_prints:
                 print("ROBOT_MANAGER: manipulator_visualizer_process started")
             self.manipulator_visualizer_queue.put(np.zeros(self.model.nq))
diff --git a/python/ur_simple_control/optimal_control/crocoddyl_mpc.py b/python/ur_simple_control/optimal_control/crocoddyl_mpc.py
index d766a1b24bd2062a496a1d0a71a2cd330b09385b..abad479ee42044422e4da9ac11e249ceed9e5b9f 100644
--- a/python/ur_simple_control/optimal_control/crocoddyl_mpc.py
+++ b/python/ur_simple_control/optimal_control/crocoddyl_mpc.py
@@ -1,5 +1,5 @@
 from ur_simple_control.managers import getMinimalArgParser, ControlLoopManager, RobotManager
-from ur_simple_control.optimal_control.crocoddyl_optimal_control import createCrocoIKOCP
+from ur_simple_control.optimal_control.crocoddyl_optimal_control import createCrocoIKOCP, createCrocoPathFollowingOCP1
 import pinocchio as pin
 import crocoddyl
 import mim_solvers
@@ -89,3 +89,89 @@ def CrocoIKMPC(args, robot, x0, goal):
     save_past_dict = {}
     loop_manager = ControlLoopManager(robot, controlLoop, args, save_past_dict, log_item)
     loop_manager.run()
+
+def CrocoPathFollowingMPCControlLoop(args, robot : RobotManager, solver : crocoddyl.SolverBoxFDDP, path, i, past_data):
+    """
+    CrocoPathFollowingMPCControlLoop
+    -----------------------------
+    end-effector(s) follow their path(s)
+    """
+    breakFlag = False
+    log_item = {}
+    save_past_dict = {}
+
+    q = robot.getQ()
+    wrench = robot.getWrench()
+
+    # what should happen, more or less: the path changes over time
+    # and should be updated from a different thread,
+    # so that it's ready by the time this loop runs
+    # TODO: implement
+    #path = robot.getPath()
+    
+    # generate bullshit just to see that it works
+    T_w_e = robot.getT_w_e()
+    translation = np.array([0.01, 0.0, 0.0])
+    path = []
+    # use k so we don't shadow the control loop's iteration counter i
+    for k in range(args.n_knots):
+        new = T_w_e.copy()
+        new.translation = new.translation + k * translation
+        path.append(new)
+
+    x0 = np.concatenate([robot.getQ(), robot.getQd()])
+    solver.problem.x0 = x0
+    # warmstart solver with previous solution
+    xs_init = list(solver.xs[1:]) + [solver.xs[-1]]
+    xs_init[0] = x0
+    us_init = list(solver.us[1:]) + [solver.us[-1]]
+
+    for k, runningModel in enumerate(solver.problem.runningModels):
+        runningModel.differential.costs.costs['gripperPose' + str(k)].cost.residual.reference = path[k]
+
+    # idk if that's necessary
+    solver.problem.terminalModel.differential.costs.costs['gripperPose'+str(args.n_knots)].cost.residual.reference = path[-1]
+
+    solver.solve(xs_init, us_init, args.max_solver_iter)
+    xs = np.array(solver.xs)
+    us = np.array(solver.us)
+    vel_cmds = xs[1, robot.model.nq:]
+
+    robot.sendQd(vel_cmds)
+    
+    log_item['qs'] = q.reshape((robot.model.nq,))
+    log_item['dqs'] = robot.getQd().reshape((robot.model.nv,))
+    return breakFlag, save_past_dict, log_item
+
+
+# NOTE: not using actual path rn
+def CrocoPathFollowingMPC(args, robot, x0, path):
+    """
+    CrocoPathFollowingMPC
+    ---------------------
+    run mpc for end-effector path following.
+    note that the actual problem is solved on
+    a dynamics level, and the velocities we command
+    are actually extracted from the state x(q,dq)
+    """
+    problem = createCrocoPathFollowingOCP1(args, robot, x0, path)
+    if args.solver == "boxfddp":
+        solver = crocoddyl.SolverBoxFDDP(problem)
+    if args.solver == "csqp":
+        solver = mim_solvers.SolverCSQP(problem)
+
+    # technically should be done in controlloop because now
+    # it's solved 2 times before the first command,
+    # but we don't have time for details rn
+    x0 = np.concatenate([robot.getQ(), robot.getQd()])
+    xs_init = [x0] * (solver.problem.T + 1)
+    us_init = solver.problem.quasiStatic([x0] * solver.problem.T)
+    solver.solve(xs_init, us_init, args.max_solver_iter)
+
+    controlLoop = partial(CrocoPathFollowingMPCControlLoop, args, robot, solver, path)
+    log_item = {
+            'qs' : np.zeros(robot.model.nq),
+            'dqs' : np.zeros(robot.model.nv)
+        }
+    save_past_dict = {}
+    loop_manager = ControlLoopManager(robot, controlLoop, args, save_past_dict, log_item)
+    loop_manager.run()
diff --git a/python/ur_simple_control/optimal_control/crocoddyl_optimal_control.py b/python/ur_simple_control/optimal_control/crocoddyl_optimal_control.py
index 850c8452b1ec28f7bdebc604521b0c61316c1878..eb07d998ce05a5e57e738302fdac260be6fc7c65 100644
--- a/python/ur_simple_control/optimal_control/crocoddyl_optimal_control.py
+++ b/python/ur_simple_control/optimal_control/crocoddyl_optimal_control.py
@@ -33,6 +33,7 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
     #robot.model.effortLimit = robot.model.effortLimit * (args.acceleration / robot.MAX_ACCELERATION)
     #robot.model.effortLimit = robot.model.effortLimit * 0.5
     #robot.data = robot.model.createData()
+    # TODO: make it underactuated in mobile base's y-direction
     state = crocoddyl.StateMultibody(robot.model)
     # command input IS torque 
     # TODO: consider ActuationModelFloatingBaseTpl for heron
@@ -54,6 +55,10 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
     xResidual = crocoddyl.ResidualModelState(state, x0, state.nv)
     xRegCost = crocoddyl.CostModelResidual(state, xResidual)
     # cost 3) distance to goal -> SE(3) error
+    # TODO: make this follow a path.
+    # to do so, you need to implement a residualmodel for that in crocoddyl.
+    # there's an example of extending crocoddyl in the colmpc repo
+    # (look at that to see how to compile, make python bindings etc)
     framePlacementResidual = crocoddyl.ResidualModelFramePlacement(
             state,
             # TODO: check if this is the frame we actually want (ee tip)
@@ -116,6 +121,8 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
         xlb = xlb[1:]
         xub = xub[1:]
 
+    # TODO: make these constraints-turned-to-objectives for end-effector following
+    # the handlebar position
     if args.solver == "boxfddp":
         bounds = crocoddyl.ActivationBounds(xlb, xub, 1.0)
         xLimitResidual = crocoddyl.ResidualModelState(state, x0, state.nv)
@@ -226,6 +233,190 @@ def solveCrocoOCP(args, robot, problem, x0):
     reference = {'qs' : qs, 'vs' : vs, 'dt' : args.ocp_dt}
     return reference, solver
 
+
+
+def createCrocoPathFollowingOCP1(args, robot : RobotManager, x0, path : list[pin.SE3]):
+    """
+    createCrocoPathFollowingOCP1
+    -------------------------------
+    follows the provided path.
+    NOTE: the path MUST be time-indexed with the SAME timestep as the one used between the knots
+    """
+    # create torque bounds which correspond to percentage
+    # of maximum allowed acceleration 
+    # NOTE: idk if this makes any sense, but let's go for it
+    #print(robot.model.effortLimit)
+    #exit()
+    #robot.model.effortLimit = robot.model.effortLimit * (args.acceleration / robot.MAX_ACCELERATION)
+    #robot.model.effortLimit = robot.model.effortLimit * 0.5
+    #robot.data = robot.model.createData()
+    # TODO: make it underactuated in mobile base's y-direction
+    state = crocoddyl.StateMultibody(robot.model)
+    # command input IS torque 
+    # TODO: consider ActuationModelFloatingBaseTpl for heron
+    # TODO: create a different actuation model (or whatever)
+    # for the mobile base - basically just remove the y movement in the base
+    # and update the corresponding derivates to 0
+    # there's python examples for this, ex. acrobot.
+    # you might want to implement the entire action model too idk what's really necessary here
+    actuation = crocoddyl.ActuationModelFull(state)
+
+    # we will be summing 4 different costs
+    # first 3 are for tracking, state and control regulation
+    terminalCostModel = crocoddyl.CostModelSum(state)
+    # cost 1) u residual (actuator cost)
+    uResidual = crocoddyl.ResidualModelControl(state, state.nv)
+    uRegCost = crocoddyl.CostModelResidual(state, uResidual)
+    # cost 2) x residual (overall amount of movement)
+    xResidual = crocoddyl.ResidualModelState(state, x0, state.nv)
+    xRegCost = crocoddyl.CostModelResidual(state, xResidual)
+    # cost 4) final ee velocity is 0 (can't directly formulate joint velocity,
+    #         because that's a part of the state, and we don't know final joint positions)
+    #frameVelocityResidual = crocoddyl.ResidualModelFrameVelocity(
+    #        state,
+    #        robot.model.getFrameId("tool0"),
+    #        pin.Motion(np.zeros(6)),
+    #        pin.ReferenceFrame.WORLD)
+    #frameVelocityCost = crocoddyl.CostModelResidual(state, frameVelocityResidual)
+
+    # the running costs (including the per-knot pose tracking cost)
+    # are added in the loop over the knots below;
+    # here we only populate the terminal cost model
+    # NOTE: shouldn't there be a final velocity = 0 cost here?
+    terminalCostModel.addCost("uReg", uRegCost, 1e3)
+    #terminalCostModel.addCost("velFinal", frameVelocityCost, 1e3)
+
+    ###########################################################################
+    # state constraints:
+    # - added to costs via barrier functions for boxfddp
+    # - added as actual constraints via crocoddyl.ConstraintModelManager for csqp
+    ###########################################################################
+    
+
+    # the 4th cost is for defining bounds via costs
+    # NOTE: could have gotten the same info from state.lb and state.ub.
+    # the first state is unlimited there idk what that means really,
+    # but the arm's base isn't doing a full rotation anyway, let alone 2 or more
+    xlb = np.concatenate([
+        robot.model.lowerPositionLimit,
+        -1 * robot.model.velocityLimit])
+    xub = np.concatenate([
+        robot.model.upperPositionLimit,
+        robot.model.velocityLimit])
+    # we have no limits on the mobile base.
+    # the mobile base is a planar joint.
+    # since it is represented as [x,y,cos(theta),sin(theta)], there is no point
+    # to limiting cos(theta),sin(theta) even if we wanted limits,
+    # because we would then limit theta, or limit ct and st jointly.
+    # in any event, xlb and xub are 1 number too long --
+    # the residual has to be [x,y,theta] because it is in the tangent space - 
+    # the difference between two points on a manifold in pinocchio is defined
+    # as the velocity which if parallel transported for 1 unit of "time" 
+    # takes you from one point to the other.
+    # the activation input and the residual need to be of the same length obviously,
+    # and this should be 2 * model.nv the way things are defined here.
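+    # quick dimension check for the planar joint (repl sketch, not executed here):
+    #   m = pin.Model(); m.addJoint(0, pin.JointModelPlanar(), pin.SE3.Identity(), "planar")
+    #   m.nq, m.nv  ->  (4, 3): q = [x, y, cos(theta), sin(theta)], v = [vx, vy, omega_z]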
+
+
+    if robot.robot_name == "heron":
+        xlb = xlb[1:]
+        xub = xub[1:]
+
+    # TODO: make these constraints-turned-to-objectives for end-effector following
+    # the handlebar position
+    if args.solver == "boxfddp":
+        bounds = crocoddyl.ActivationBounds(xlb, xub, 1.0)
+        xLimitResidual = crocoddyl.ResidualModelState(state, x0, state.nv)
+        xLimitActivation = crocoddyl.ActivationModelQuadraticBarrier(bounds)
+
+        limitCost = crocoddyl.CostModelResidual(state, xLimitActivation, xLimitResidual)
+        terminalCostModel.addCost("limitCost", limitCost, 1e3)
+
+    # NOTE: actual box constraints (crocoddyl.ConstraintModelResidual) on the state
+    # and the control input are only used with mim_solvers' csqp (added below);
+    # for boxfddp the state bounds enter as the barrier cost above and the
+    # control bounds as u_lb/u_ub on the integrated action models
+
+    if args.solver == "csqp":
+        # this just store all the constraints
+        constraints = crocoddyl.ConstraintModelManager(state, robot.model.nv)
+        u_constraint = crocoddyl.ConstraintModelResidual(
+                state,
+                uResidual, 
+                -1 * robot.model.effortLimit * 0.1,
+                robot.model.effortLimit  * 0.1)
+        constraints.addConstraint("u_box_constraint", u_constraint)
+        x_constraint = crocoddyl.ConstraintModelResidual(
+                state,
+                xResidual, 
+                xlb,
+                xub)
+        constraints.addConstraint("x_box_constraint", x_constraint)
+
+
+    # Next, we need to create an action model for the running and terminal knots.
+    # the dynamics are enforced by DifferentialActionModelFreeInvDynamics
+    # (inverse dynamics formulation) below.
+
+    runningModels = []
+    for i in range(args.n_knots):
+        runningCostModel = crocoddyl.CostModelSum(state)
+        runningCostModel.addCost("xReg", xRegCost, 1e-3)
+        runningCostModel.addCost("uReg", uRegCost, 1e-3)
+        if args.solver == "boxfddp":
+            runningCostModel.addCost("limitCost", limitCost, 1e3)
+        framePlacementResidual = crocoddyl.ResidualModelFramePlacement(
+                state,
+                robot.model.getFrameId("tool0"),
+                path[i], # this better be sampled with the same time steps as the knots
+                         # NOTE: this should be done outside of this function for clarity
+                state.nv)
+        goalTrackingCost = crocoddyl.CostModelResidual(state, framePlacementResidual)
+        runningCostModel.addCost("gripperPose" + str(i), goalTrackingCost, 1e2)
+
+        if args.solver == "boxfddp":
+            runningModel = crocoddyl.IntegratedActionModelEuler(
+                crocoddyl.DifferentialActionModelFreeInvDynamics(
+                    state, actuation, runningCostModel
+                ),
+                args.ocp_dt,
+            )
+            runningModel.u_lb = -1 * robot.model.effortLimit * 0.1
+            runningModel.u_ub = robot.model.effortLimit  * 0.1
+        if args.solver == "csqp":
+            runningModel = crocoddyl.IntegratedActionModelEuler(
+                crocoddyl.DifferentialActionModelFreeInvDynamics(
+                    state, actuation, runningCostModel, constraints
+                ),
+                args.ocp_dt,
+            )
+        runningModels.append(runningModel)
+
+    # terminal model
+    if args.solver == "boxfddp":
+        terminalModel = crocoddyl.IntegratedActionModelEuler(
+            crocoddyl.DifferentialActionModelFreeInvDynamics(
+                state, actuation, terminalCostModel
+            ),
+            0.0,
+        )
+        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1 
+        terminalModel.u_ub = robot.model.effortLimit  * 0.1
+    if args.solver == "csqp":
+            terminalModel = crocoddyl.IntegratedActionModelEuler(
+                crocoddyl.DifferentialActionModelFreeInvDynamics(
+                    state, actuation, terminalCostModel, constraints
+                ),
+                0.0,
+            )
+
+    terminalCostModel.addCost("gripperPose" + str(args.n_knots), goalTrackingCost, 1e2)
+
+    # now we define the problem
+    problem = crocoddyl.ShootingProblem(x0, runningModels, terminalModel)
+    return problem 
+
 if __name__ == "__main__":
     parser = getMinimalArgParser()
     parser = get_OCP_args(parser)
diff --git a/python/ur_simple_control/optimal_control/notes.md b/python/ur_simple_control/optimal_control/notes.md
index 83489752a753b32e17450d09c79a0fe301bf164e..785f23289188d7a6d181ac2797aa20dba3de58b8 100644
--- a/python/ur_simple_control/optimal_control/notes.md
+++ b/python/ur_simple_control/optimal_control/notes.md
@@ -4,5 +4,11 @@ use crocoddyl to compute a whole trajectory in advance.
 the trajectory can be followed with some other controller,
 with controls here being feed-forward or whatever
 
+## alternative, easier for students
+-------------------------------------
+is to use casadi with pinocchio.casadi
+as it's way more transparent.
+examples of this stack are available in the tutorials; hopefully the students map
+them onto Simple Manipulator Control themselves (a basic sketch is below)
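+
+a minimal sketch of that stack for a fixed-base arm (model, ee_frame_id and p_goal
+are placeholders; for heron's planar joint the configuration can't just be boxed like this):
+
+```python
+import casadi
+from pinocchio import casadi as cpin
+
+cmodel = cpin.Model(model)      # casadi-typed copy of the pinocchio model
+cdata = cmodel.createData()
+cq = casadi.SX.sym("q", model.nq)
+cpin.framesForwardKinematics(cmodel, cdata, cq)
+ee_pos = casadi.Function("ee_pos", [cq], [cdata.oMf[ee_frame_id].translation])
+
+opti = casadi.Opti()
+q = opti.variable(model.nq)
+opti.minimize(casadi.sumsqr(ee_pos(q) - p_goal))
+opti.subject_to(opti.bounded(model.lowerPositionLimit, q, model.upperPositionLimit))
+opti.solver("ipopt")
+sol = opti.solve()              # sol.value(q) is the optimized configuration
+```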
 
 
diff --git a/python/ur_simple_control/util/__pycache__/calib_board_hacks.cpython-312.pyc b/python/ur_simple_control/util/__pycache__/calib_board_hacks.cpython-312.pyc
index 9550e0a266df8ffbbcd7f29cd2dc60a649e809dd..16df281eec0a6cab5a3f34dfbf701488a8b725fe 100644
Binary files a/python/ur_simple_control/util/__pycache__/calib_board_hacks.cpython-312.pyc and b/python/ur_simple_control/util/__pycache__/calib_board_hacks.cpython-312.pyc differ
diff --git a/python/ur_simple_control/util/__pycache__/get_model.cpython-312.pyc b/python/ur_simple_control/util/__pycache__/get_model.cpython-312.pyc
index 172fcd77d0c1c699029769b4d378712d1d374d19..ff30b90edef8b10d19dfe9251aa3735474ea1fab 100644
Binary files a/python/ur_simple_control/util/__pycache__/get_model.cpython-312.pyc and b/python/ur_simple_control/util/__pycache__/get_model.cpython-312.pyc differ
diff --git a/python/ur_simple_control/util/calib_board_hacks.py b/python/ur_simple_control/util/calib_board_hacks.py
index 1d7887c99474bd17a18b30043a0628b05053a458..92bd1773ca839c3e1618d6ac572431284646e5b6 100644
--- a/python/ur_simple_control/util/calib_board_hacks.py
+++ b/python/ur_simple_control/util/calib_board_hacks.py
@@ -225,17 +225,7 @@ def getSpeedInDirectionOfN(R, board_axis):
     print("going in", speed, "direction")
     return speed
 
-# TODO
-# be internally consistent! this will also make this code modular,
-# and robot-agnostic!
-# drop all stupid ur code, and use your own implementation
-# of both movel and speedj.
-# --> then you don't even need to use the stupid rpy,
-# but instead rock only rotation matrices the way it should be.
-# TODO: replace all moveUntilContacts with your own implementation
-# and of course replace the stupid speeds
-# TODO move width and height, rock just args
-def calibratePlane(args, robot, plane_width, plane_height, n_tests):
+def calibratePlane(args, robot : RobotManager, plane_width, plane_height, n_tests):
     # i don't care which speed slider you have,
     # because 0.4 is the only reasonable one here
 #    old_speed_slider = robot.speed_slider
@@ -248,18 +238,13 @@ def calibratePlane(args, robot, plane_width, plane_height, n_tests):
     init_pose = copy.deepcopy(Mtool)
     new_pose = copy.deepcopy(init_pose)
 
-    # you just did it dawg.
-    # i mean i know it's in the robot
-    # TODO: don't manage forward kinematics yourself,
-    # just create a RobotManager.step() function, update everything there
-    #q = robot.getQ()
-    #pin.forwardKinematics(robot.model, robot.data, q)
-    # this apsolutely has to be deepcopied aka copy-constructed
-    #R_initial_estimate = copy.deepcopy(robot.data.oMi[robot.JOINT_ID].rotation)
     R_initial_estimate = copy.deepcopy(Mtool.rotation)
     print("R_initial_estimate", R_initial_estimate)
     R = copy.deepcopy(R_initial_estimate)
 
+    go_up_transf = pin.SE3.Identity()
+    go_up_transf.translation[2] = -0.1
+
     # go in the TCP z direction of the end-effector
     # our goal is to align that with board z
     #speed = getSpeedInDirectionOfN(R_initial_estimate, args.board_axis)
@@ -274,28 +259,28 @@ def calibratePlane(args, robot, plane_width, plane_height, n_tests):
         print("iteration number:", i)
         #robot.rtde_control.moveUntilContact(speed)
         moveUntilContact(args, robot, speed)
-        # TODO: replace forwardkinematics call with robot.step()
+        # _step() is called manually here because this isn't wrapped by ControlLoopManager
+        robot._step()
         q = robot.getQ()
-        pin.forwardKinematics(robot.model, robot.data, np.array(q))
-        print("pin:", *robot.data.oMi[robot.JOINT_ID].translation.round(4), \
-                *pin.rpy.matrixToRpy(robot.data.oMi[robot.JOINT_ID].rotation).round(4))
+        T_w_e = robot.getT_w_e()
+        print("pin:", *T_w_e.translation.round(4), \
+                *pin.rpy.matrixToRpy(T_w_e.rotation).round(4))
         print("ur5:", *np.array(robot.rtde_receive.getActualTCPPose()).round(4))
 
-        positions.append(copy.deepcopy(robot.data.oMi[robot.JOINT_ID].translation))
+        positions.append(copy.deepcopy(T_w_e.translation))
         if i < n_tests -1:
             current_pose = robot.getT_w_e()
             new_pose = copy.deepcopy(current_pose)
             # go back up (assuming top-left is highest incline)
             # TODO: make this assumption an argument, or print it at least
             # THIS IS Z
-            if args.board_axis == 'z':
-                new_pose.translation[2] = init_pose.translation[2]
-            # THIS IS Y
-            if args.board_axis == 'y':
-                new_pose.translation[1] = init_pose.translation[1]
+            #if args.board_axis == 'z':
+            #    new_pose.translation[2] = init_pose.translation[2]
+            ## THIS IS Y
+            #if args.board_axis == 'y':
+            #    new_pose.translation[1] = init_pose.translation[1]
+            new_pose = new_pose.act(go_up_transf)
             moveL(args, robot, new_pose)
-            q = robot.getQ()
-            pin.forwardKinematics(robot.model, robot.data, np.array(q))
             # TODO: make this not base-orientation dependent,
             # this is also an ugly ugly hack
             new_pose.translation[0] = init_pose.translation[0] + np.random.random() * plane_width
@@ -309,6 +294,7 @@ def calibratePlane(args, robot, plane_width, plane_height, n_tests):
             moveL(args, robot, new_pose)
             # fix orientation
             new_pose.rotation = R
+            print("updating orientation")
             moveL(args, robot, new_pose)
         # skip the first one
         if i > 2:
diff --git a/python/ur_simple_control/util/get_model.py b/python/ur_simple_control/util/get_model.py
index ce274d50aaf896af13d3306a6f6c636e338ede11..884298196ffcda2b08ebecbb847b0bb11ef3f104 100644
--- a/python/ur_simple_control/util/get_model.py
+++ b/python/ur_simple_control/util/get_model.py
@@ -10,6 +10,7 @@ import sys
 import os
 from importlib.resources import files
 import hppfcl as fcl
+import example_robot_data
 
 # can't get the urdf reading with these functions to save my life, idk what or why
 
@@ -84,6 +85,11 @@ def get_model():
 
     return model, collision_model, visual_model, data
 
+def getGripperlessUR5e():
+    robot = example_robot_data.load("ur5")
+    return robot.model, robot.collision_model, robot.visual_model, robot.data
+
+
 # this gives me a flying joint for the camera,
 # and a million joints for wheels -> it's unusable
 # TODO: look what's done in pink, see if this can be usable