diff --git a/python/examples/camera_no_lag.py b/python/examples/camera_no_lag.py
index c106e437409c45295ded85c6b879d75336963939..b03107c49cf8e16508335f7ce8b9fb4a78692040 100644
--- a/python/examples/camera_no_lag.py
+++ b/python/examples/camera_no_lag.py
@@ -1,11 +1,10 @@
 from smc.vision.vision import processCamera
-from smc.managers import (
-    getMinimalArgParser,
-    ControlLoopManager,
-    RobotManager,
-    ProcessManager,
-)
-import argcomplete, argparse
+from smc import getMinimalArgParser
+from smc.control.control_loop_manager import ControlLoopManager
+from smc.multiprocessing.process_manager import ProcessManager
+from smc.robots.robotmanager_abstract import AbstractRobotManager
+from smc.robots.utils import getRobotFromArgs
+import argparse
 import numpy as np
 import time
 import pinocchio as pin
@@ -16,13 +15,12 @@ def get_args():
     parser = getMinimalArgParser()
     parser.description = "dummy camera output, but being processed \
                           in a different process"
-    argcomplete.autocomplete(parser)
     args = parser.parse_args()
     return args
 
 
 def controlLoopWithCamera(
-    camera_manager: ProcessManager, args, robot: RobotManager, i, past_data
+    camera_manager: ProcessManager, args, robot: AbstractRobotManager, i, past_data
 ):
     """
     controlLoopWithCamera
@@ -32,24 +30,24 @@ def controlLoopWithCamera(
     breakFlag = False
     log_item = {}
     save_past_dict = {}
-    q = robot.getQ()
+    q = robot.q
 
     camera_output = camera_manager.getData()
     # print(camera_output)
 
     qd_cmd = np.zeros(robot.model.nv)
 
-    robot.sendQd(qd_cmd)
+    robot.sendVelocityCommand(qd_cmd)
 
     log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+    log_item["dqs"] = robot.v
     log_item["camera_output"] = np.array([camera_output["x"], camera_output["y"]])
     return breakFlag, save_past_dict, log_item
 
 
 if __name__ == "__main__":
     args = get_args()
-    robot = RobotManager(args)
+    robot = getRobotFromArgs(args)
 
     # cv2 camera device 0
     device = 0
@@ -68,12 +66,12 @@ if __name__ == "__main__":
     camera_manager.terminateProcess()
 
     # get expected behaviour here (library can't know what the end is - you have to do this here)
-    if not args.pinocchio_only:
+    if args.real:
         robot.stopRobot()
 
-    if args.visualize_manipulator:
+    if args.visualizer:
         robot.killManipulatorVisualizer()
 
     if args.save_log:
-        robot.log_manager.saveLog()
-        robot.log_manager.plotAllControlLoops()
+        robot._log_manager.saveLog()
+        robot._log_manager.plotAllControlLoops()
diff --git a/python/examples/cart_pulling/starify_nav2_map.py b/python/examples/cart_pulling/starify_nav2_map.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/python/examples/clik.py b/python/examples/clik.py
index b67853e4bb9d341e10f0838d5c33cfa28e507296..9383a64b82e57e556441321d9b6ccc0cf2c1ce32 100644
--- a/python/examples/clik.py
+++ b/python/examples/clik.py
@@ -1,17 +1,14 @@
 # PYTHON_ARGCOMPLETE_OK
-import numpy as np
-import pinocchio as pin
-import argcomplete, argparse
-from smc.managers import getMinimalArgParser, RobotManager
-from smc.clik.clik import (
-    getClikArgs,
-    getClikController,
-    controlLoopClik,
-    moveL,
-    compliantMoveL,
-    moveLDualArm,
-)
+from smc import getMinimalArgParser, getRobotFromArgs
+from smc.control.cartesian_space import getClikArgs
 from smc.util.define_random_goal import getRandomlyGeneratedGoal
+from smc.robots.utils import defineGoalPointCLI
+from smc.control.cartesian_space.cartesian_space_point_to_point import moveL
+
+import numpy as np
+
+# import pinocchio as pin
+import argparse
 
 
 def get_args():
@@ -25,36 +22,35 @@ def get_args():
         default=False,
         help="if true, rand generate a goal, if false you type it in via text input",
     )
-    argcomplete.autocomplete(parser)
     args = parser.parse_args()
     return args
 
 
 if __name__ == "__main__":
     args = get_args()
-    robot = RobotManager(args)
+    robot = getRobotFromArgs(args)
     if args.randomly_generate_goal:
         Mgoal = getRandomlyGeneratedGoal(args)
         if args.visualizer:
             robot.visualizer_manager.sendCommand({"Mgoal": Mgoal})
     else:
-        Mgoal = robot.defineGoalPointCLI()
+        Mgoal = defineGoalPointCLI(robot)
         Mgoal.rotation = np.eye(3)
     # compliantMoveL(args, robot, Mgoal)
     if robot.robot_name != "yumi":
         moveL(args, robot, Mgoal)
-    else:
-        goal_transform = pin.SE3.Identity()
-        goal_transform.translation[1] = 0.1
-        moveLDualArm(args, robot, Mgoal, goal_transform)
+    #    else:
+    #        goal_transform = pin.SE3.Identity()
+    #        goal_transform.translation[1] = 0.1
+    #        moveLDualArm(args, robot, Mgoal, goal_transform)
     robot.closeGripper()
     robot.openGripper()
-    if not args.pinocchio_only:
+    if args.real:
         robot.stopRobot()
 
     if args.visualizer:
         robot.killManipulatorVisualizer()
 
     if args.save_log:
-        robot.log_manager.saveLog()
+        robot._log_manager.saveLog()
     # loop_manager.stopHandler(None, None)
diff --git a/python/examples/point_impedance_control.py b/python/examples/point_impedance_control.py
index 641b4359ad076be65e71a04b0898cb185f1b6ab2..a21977556931b67cd410ca8ce19c9257d74ddc25 100644
--- a/python/examples/point_impedance_control.py
+++ b/python/examples/point_impedance_control.py
@@ -1,13 +1,8 @@
-# PYTHON_ARGCOMPLETE_OK
 import pinocchio as pin
 import numpy as np
 import copy
-import argparse, argcomplete
-import time
+import argparse
 from functools import partial
-from smc.visualize.visualize import plotFromDict
-from smc.util.draw_path import drawPath
-from smc.dmp.dmp import DMP, NoTC, TCVelAccConstrained
 from smc.clik.clik import (
     getClikArgs,
     getClikController,
diff --git a/python/smc/__init__.py b/python/smc/__init__.py
index d65ce69d29bf97e05e24deebb21056ec351711a8..d8adcf85076ac6c5d59cca3cf325fb96ef29bf8c 100644
--- a/python/smc/__init__.py
+++ b/python/smc/__init__.py
@@ -6,13 +6,14 @@ from smc import (
     visualization,
     vision,
     path_generation,
-    logging
+    logging,
 )
 
-from smc.robots.utils import getMinimalArgParser
+from smc.robots.utils import getMinimalArgParser, getRobotFromArgs
 
 __all__ = [
     "getMinimalArgParser",
+    "getRobotFromArgs",
     "robots",
     "control",
     "multiprocessing",
diff --git a/python/smc/control/cartesian_space/__init__.py b/python/smc/control/cartesian_space/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..374a25bf46e43fed5be82c41b0a5b0f2a0eadceb
--- /dev/null
+++ b/python/smc/control/cartesian_space/__init__.py
@@ -0,0 +1,7 @@
+from .cartesian_space_compliant_control import *
+from .cartesian_space_point_to_point import *
+from .cartesian_space_trajectory_following import *
+
+from .utils import getClikArgs
+
+# from .whole_body_clik import *
diff --git a/python/smc/control/cartesian_space/cartesian_space_compliant_control.py b/python/smc/control/cartesian_space/cartesian_space_compliant_control.py
new file mode 100644
index 0000000000000000000000000000000000000000..745d9edf16528d9e556dac333c1b3018054e634d
--- /dev/null
+++ b/python/smc/control/cartesian_space/cartesian_space_compliant_control.py
@@ -0,0 +1,106 @@
+from smc.control.control_loop_manager import ControlLoopManager
+from smc.robots.interfaces.force_torque_sensor_interface import (
+    ForceTorqueOnSingleArmWrist,
+)
+from smc.control.cartesian_space.ik_solvers import *
+from functools import partial
+import pinocchio as pin
+import numpy as np
+import copy
+from argparse import Namespace
+
+
+def controlLoopCompliantClik(
+    args, robot: ForceTorqueOnSingleArmWrist, ik_solver, T_w_goal: pin.SE3, i, past_data
+):
+    """
+    controlLoopCompliantClik
+    ---------------
+    point-to-point clik control loop with compliance: the commanded Cartesian
+    velocity is the position error plus low-pass-filtered force/torque
+    feedback from the wrist sensor.
+    """
+    breakFlag = False
+    log_item = {}
+    save_past_dict = {}
+    q = robot.q
+    T_w_e = robot.T_w_e
+    wrench = robot.wrench
+    # we need to overcome noise if we want to converge
+    if np.linalg.norm(wrench) < args.minimum_detectable_force_norm:
+        wrench = np.zeros(6)
+    save_past_dict["wrench"] = copy.deepcopy(wrench)
+    wrench = args.beta * wrench + (1 - args.beta) * np.average(
+        np.array(past_data["wrench"]), axis=0
+    )
+    if not args.z_only:
+        Z = np.diag(np.array([1.0, 1.0, 1.0, 10.0, 10.0, 10.0]))
+    else:
+        Z = np.diag(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0]))
+    wrench = Z @ wrench
+    # first check whether we're at the goal
+    SEerror = T_w_e.actInv(T_w_goal)
+    err_vector = pin.log6(SEerror).vector
+    if np.linalg.norm(err_vector) < robot.args.goal_error:
+        breakFlag = True
+    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
+    # compute the joint velocities.
+    # just plug and play different ones
+    v_cartesian_body_cmd = args.kp * err_vector + args.alpha * wrench
+    v_cmd = ik_solver(J, v_cartesian_body_cmd)
+    # qd = (
+    #    J.T
+    #    @ np.linalg.inv(J @ J.T + np.eye(J.shape[0], J.shape[0]) * args.tikhonov_damp)
+    #    @ err_vector
+    # )
+    # tau = J.T @ wrench
+    # tau = tau[:6].reshape((6,1))
+    # qd += args.alpha * tau
+    robot.sendVelocityCommand(v_cmd)
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    # get actual velocity, not the commanded one.
+    # although that would be fun to compare i guess
+    # TODO: have both, but call the compute control signal like it should be
+    log_item["dqs"] = robot.v
+    log_item["wrench"] = robot.wrench.reshape((6,))
+    log_item["wrench_used"] = wrench.reshape((6,))
+    # log_item["tau"] = tau.reshape((robot.model.nv,))
+    # the raw wrench is saved so the low-pass filter above can average
+    # over past samples on the next iterations
+    return breakFlag, save_past_dict, log_item
+
+
+# add a threshold for the wrench
+def compliantMoveL(
+    args: Namespace, robot: ForceTorqueOnSingleArmWrist, T_w_goal: pin.SE3, run=True
+):
+    """
+    compliantMoveL
+    -----
+    does a moveL, but with compliance achieved through f/t feedback.
+    send an SE3 object as the goal point.
+    if you don't care about rotation, make it np.zeros((3,3))
+    """
+    #    assert type(goal_point) == pin.pinocchio_pywrap.SE3
+    ik_solver = getIKSolver(args, robot)
+    controlLoop = partial(controlLoopCompliantClik, args, robot, ik_solver, T_w_goal)
+    # the wrench is kept as past data for the low-pass filter,
+    # and joint state plus wrenches are logged
+    log_item = {
+        "qs": np.zeros(robot.model.nq),
+        "wrench": np.zeros(6),
+        "wrench_used": np.zeros(6),
+        #        "tau": np.zeros(robot.model.nv),
+        "dqs": np.zeros(robot.model.nv),
+    }
+    save_past_dict = {
+        "wrench": np.zeros(6),
+    }
+    loop_manager = ControlLoopManager(
+        robot, controlLoop, args, save_past_dict, log_item
+    )
+    if run:
+        loop_manager.run()
+    else:
+        return loop_manager
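+
+
+# The sketch below only illustrates how compliantMoveL is meant to be called;
+# it is not used anywhere in the library. It assumes args came from a parser
+# built with getMinimalArgParser() + getClikArgs() and that robot came from
+# getRobotFromArgs() and has an f/t sensor; the 5 cm offset is an arbitrary
+# example value.
+def _example_compliant_press_down(
+    args: Namespace, robot: ForceTorqueOnSingleArmWrist
+) -> None:
+    # push 5 cm along -z from the current end-effector pose,
+    # letting the force feedback soften the contact
+    T_w_goal = robot.T_w_e.copy()
+    T_w_goal.translation[2] -= 0.05
+    compliantMoveL(args, robot, T_w_goal)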
diff --git a/python/smc/control/cartesian_space/cartesian_space_point_to_point.py b/python/smc/control/cartesian_space/cartesian_space_point_to_point.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa84d9231c8567f25267d41c0104e079a828f7c2
--- /dev/null
+++ b/python/smc/control/cartesian_space/cartesian_space_point_to_point.py
@@ -0,0 +1,386 @@
+from smc.control.control_loop_manager import ControlLoopManager
+from smc.robots.interfaces.single_arm_interface import SingleArmInterface
+from smc.robots.interfaces.force_torque_sensor_interface import (
+    ForceTorqueOnSingleArmWrist,
+)
+from smc.control.cartesian_space.ik_solvers import *
+from functools import partial
+import pinocchio as pin
+import numpy as np
+from argparse import Namespace
+
+
+def controlLoopClik(robot: SingleArmInterface, ik_solver, T_w_goal: pin.SE3, _, __):
+    """
+    controlLoopClik
+    ---------------
+    generic control loop for clik (handling error to final point etc).
+    in some version of the universe this could be extended to a generic
+    point-to-point motion control loop.
+    """
+    breakFlag = False
+    log_item = {}
+    save_past_item = {}
+    q = robot.q
+    T_w_e = robot.T_w_e
+    # first check whether we're at the goal
+    SEerror = T_w_e.actInv(T_w_goal)
+    err_vector = pin.log6(SEerror).vector
+    if np.linalg.norm(err_vector) < robot.args.goal_error:
+        breakFlag = True
+    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
+    # compute the joint velocities based on controller you passed
+    # qd = ik_solver(J, err_vector, past_qd=past_data['dqs_cmd'][-1])
+    qd = ik_solver(J, err_vector)
+    if qd is None:
+        print("the controller you chose didn't work, using dampedPseudoinverse instead")
+        qd = dampedPseudoinverse(1e-2, J, err_vector)
+    robot.sendVelocityCommand(qd)
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["dqs"] = robot.v
+    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
+    log_item["err_norm"] = np.linalg.norm(err_vector).reshape((1,))
+    # save the commanded joint velocities so they are available
+    # as past data in the next iterations
+    save_past_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
+    return breakFlag, save_past_item, log_item
+
+
+def moveL(args: Namespace, robot: SingleArmInterface, T_w_goal: pin.SE3):
+    """
+    moveL
+    -----
+    moves the end-effector to T_w_goal with closed-loop inverse kinematics.
+    send an SE3 object as the goal point.
+    if you don't care about rotation, make it np.zeros((3,3))
+    """
+    # assert type(goal_point) == pin.pinocchio_pywrap.SE3
+    ik_solver = getIKSolver(args, robot)
+    controlLoop = partial(controlLoopClik, robot, ik_solver, T_w_goal)
+    # initialize the log and past-data buffers with correctly shaped zeros
+    log_item = {
+        "qs": np.zeros(robot.model.nq),
+        "dqs": np.zeros(robot.model.nv),
+        "dqs_cmd": np.zeros(robot.model.nv),
+        "err_norm": np.zeros(1),
+    }
+    save_past_dict = {
+        "dqs_cmd": np.zeros(robot.model.nv),
+    }
+    loop_manager = ControlLoopManager(
+        robot, controlLoop, args, save_past_dict, log_item
+    )
+    loop_manager.run()
+
+
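+# The sketch below only illustrates the intended call pattern of moveL; it is
+# not used anywhere in the library. It assumes args came from a parser built
+# with getMinimalArgParser() + getClikArgs() and that robot came from
+# getRobotFromArgs(); the 5 cm offset is an arbitrary example value.
+def _example_moveL_small_lift(args: Namespace, robot: SingleArmInterface) -> None:
+    # lift the end-effector 5 cm straight up from its current pose,
+    # keeping the current orientation
+    T_w_goal = robot.T_w_e.copy()
+    T_w_goal.translation[2] += 0.05
+    moveL(args, robot, T_w_goal)
+
+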
+# TODO: implement
+def moveLFollowingLine(args: Namespace, robot, goal_point):
+    """
+    moveLFollowingLine
+    ------------------
+    make a path from current to goal position, i.e.
+    just a straight line between them.
+    the question is what to do with orientations.
+    i suppose it makes sense to have one function that enforces/assumes
+    that the start and end positions have the same orientation.
+    then another version goes in a line and linearly updates the orientation
+    as it goes
+    """
+    pass
+
+
+def moveUntilContactControlLoop(
+    args: Namespace,
+    robot: ForceTorqueOnSingleArmWrist,
+    speed: np.ndarray,
+    ik_solver,
+    i,
+    past_data,
+):
+    """
+    moveUntilContactControlLoop
+    ---------------
+    move the end-effector with a constant body-frame twist until the
+    f/t sensor reports a wrench above args.contact_detecting_force.
+    """
+    breakFlag = False
+    # know where you are, i.e. do forward kinematics
+    log_item = {}
+    q = robot.q
+    # break if wrench is nonzero basically
+    # wrench = robot.getWrench()
+    # you're already giving the speed in the EE i.e. body frame
+    # so it only makes sense to have the wrench in the same frame
+    # wrench = robot._getWrenchInEE()
+    wrench = robot.wrench
+    # and furthermore it's a reasonable assumption that you'll hit the thing
+    # in the direction you're going in.
+    # thus we only care about wrenches in those direction coordinates
+    mask = speed != 0.0
+    # NOTE: contact_detecting_force is a magic number.
+    # it is 100% empirical, with the goal being that it's just above noise.
+    # so far it's worked fine, and it's pretty soft too.
+    if np.linalg.norm(wrench[mask]) > args.contact_detecting_force:
+        print("hit with", np.linalg.norm(wrench[mask]))
+        breakFlag = True
+        robot.sendVelocityCommand(np.zeros(robot.model.nv))
+    if (not args.real) and (i > 500):
+        print("let's say you hit something lule")
+        breakFlag = True
+    # pin.computeJointJacobian is much different than the C++ version lel
+    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
+    # compute the joint velocities.
+    qd = ik_solver(J, speed)
+    robot.sendVelocityCommand(qd)
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["wrench"] = wrench.reshape((6,))
+    return breakFlag, {}, log_item
+
+
+def moveUntilContact(
+    args: Namespace, robot: ForceTorqueOnSingleArmWrist, speed: np.ndarray
+):
+    """
+    moveUntilContact
+    -----
+    does clik until it feels something with the f/t sensor
+    """
+    assert type(speed) == np.ndarray
+    ik_solver = getIKSolver(args, robot)
+    controlLoop = partial(moveUntilContactControlLoop, args, robot, speed, ik_solver)
+    # no past data is needed here; joint positions and the wrench are logged
+    log_item = {"wrench": np.zeros(6)}
+    log_item["qs"] = np.zeros((robot.model.nq,))
+    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
+    loop_manager.run()
+    print("Collision detected!!")
+
+
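+# The sketch below only illustrates how moveUntilContact is meant to be called;
+# it is not used anywhere in the library. speed is a 6D twist in the EE (body)
+# frame, as the control loop above expects; the -0.02 m/s value and the choice
+# of the z axis are arbitrary example values.
+def _example_probe_down_until_contact(
+    args: Namespace, robot: ForceTorqueOnSingleArmWrist
+) -> None:
+    speed = np.zeros(6)
+    speed[2] = -0.02  # push slowly along -z until the f/t sensor reports contact
+    moveUntilContact(args, robot, speed)
+
+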
+# NOTE: this shit makes no sense, it's old and broken
+# but you will want to look at it for dual arm stuff so i'm leaving it here for the time being
+"""
+def controlLoopClikDualArmsOnly(
+    robot: AbstractRobotManager, ik_solver, goal_transform, i, past_data
+):
+
+    controlLoopClikDualArmsOnly
+    ---------------
+
+    breakFlag = False
+    log_item = {}
+    q = robot.q
+    T_w_e_left, T_w_e_right = robot.getT_w_e()
+    #
+    Mgoal_left = robot.Mgoal.act(goal_transform)
+    Mgoal_right = robot.Mgoal.act(goal_transform.inverse())
+
+    SEerror_left = T_w_e_left.actInv(Mgoal_left)
+    SEerror_right = T_w_e_right.actInv(Mgoal_right)
+
+    err_vector_left = pin.log6(SEerror_left).vector
+    err_vector_right = pin.log6(SEerror_right).vector
+
+    if (np.linalg.norm(err_vector_left) < robot.args.goal_error) and (
+        np.linalg.norm(err_vector_right) < robot.args.goal_error
+    ):
+        breakFlag = True
+    J_left = pin.computeFrameJacobian(robot.model, robot.data, q, robot.l_ee_frame_id)
+    J_left = J_left[:, 3:]
+    J_right = pin.computeFrameJacobian(robot.model, robot.data, q, robot.r_ee_frame_id)
+    J_right = J_right[:, 3:]
+
+    # compute the joint velocities based on controller you passed
+    qd_left = ik_solver(J_left, err_vector_left)
+    qd_right = ik_solver(J_right, err_vector_right)
+    # lb, ub = (-0.05 * robot.model.robot.model.velocityLimit, 0.05 * robot.model.robot.model.velocityLimit)
+    # qd_left = invKinmQP(J_left, err_vector_left, lb=lb[3:], ub=ub[3:])
+    # qd_right = invKinmQP(J_right, err_vector_right, lb=lb[3:], ub=ub[3:])
+    qd = qd_left + qd_right
+    qd = np.hstack((np.zeros(3), qd))
+    robot.sendVelocityCommand(qd)
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
+    # we're not saving here, but need to respect the API,
+    # hence the empty dict
+    return breakFlag, {}, log_item
+"""
+
+
+# NOTE: this shit makes no sense, it's old and broken
+# but you will want to look at it for dual arm stuff so i'm leaving it here for the time being
+"""
+def moveLDualArmsOnly(
+    args, robot: AbstractRobotManager, goal_point, goal_transform, run=True
+):
+    ===
+    moveL
+    -----
+    does moveL.
+    send a SE3 object as goal point.
+    if you don't care about rotation, make it np.zeros((3,3))
+    ===
+    # assert type(goal_point) == pin.pinocchio_pywrap.SE3
+    ik_solver = getIKSolver(args, robot)
+    controlLoop = partial(controlLoopClikDualArmsOnly, robot, ik_solver, goal_transform)
+    # we're not using any past data or logging, hence the empty arguments
+    log_item = {
+        "qs": np.zeros(robot.model.nq),
+        "dqs": np.zeros(robot.model.nv),
+        "dqs_cmd": np.zeros(robot.model.nv),
+    }
+    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
+    if run:
+        loop_manager.run()
+    else:
+        return loop_manager
+"""
+
+# NOTE: this shit makes no sense, it's old and broken
+# but you will want to look at it for dual arm stuff so i'm leaving it here for the time being
+"""
+def controlLoopClikDualArm(
+    robot: DualArmInterface, ik_solver, goal_transform, i, past_data
+):
+#    controlLoopClikDualArm
+#    ---------------
+#    do point to point motion for each arm and its goal.
+#    that goal is generated from a single goal that you pass,
+#    and a transformation on the goal you also pass.
+#    the transformation is done in goal's frame (goal is and has
+#    to be an SE3 object).
+#    the transform is designed for the left arm,
+#    and its inverse is applied for the right arm.
+
+
+
+
+    breakFlag = False
+    log_item = {}
+    q = robot.q
+    T_w_e_left, T_w_e_right = robot.getT_w_e()
+    #
+    Mgoal_left = goal_transform.act(robot.Mgoal)
+    Mgoal_right = goal_transform.inverse().act(robot.Mgoal)
+    # print("robot.Mgoal", robot.Mgoal)
+    # print("Mgoal_left", Mgoal_left)
+    # print("Mgoal_right", Mgoal_right)
+
+    SEerror_left = T_w_e_left.actInv(Mgoal_left)
+    SEerror_right = T_w_e_right.actInv(Mgoal_right)
+
+    err_vector_left = pin.log6(SEerror_left).vector
+    err_vector_right = pin.log6(SEerror_right).vector
+
+    if (np.linalg.norm(err_vector_left) < robot.args.goal_error) and (
+        np.linalg.norm(err_vector_right) < robot.args.goal_error
+    ):
+        breakFlag = True
+    J_left = pin.computeFrameJacobian(robot.model, robot.data, q, robot.l_ee_frame_id)
+    J_right = pin.computeFrameJacobian(robot.model, robot.data, q, robot.r_ee_frame_id)
+    # compute the joint velocities based on controller you passed
+    # this works exactly the way you think it does:
+    # the velocities for the other arm are 0
+    # what happens to the base hasn't been investigated but it seems ok
+    qd_left = ik_solver(J_left, err_vector_left)
+    qd_right = ik_solver(J_right, err_vector_right)
+    qd = qd_left + qd_right
+    robot.sendVelocityCommand(qd)
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
+    # we're not saving here, but need to respect the API,
+    # hence the empty dict
+    return breakFlag, {}, log_item
+"""
+
+
+# NOTE: this shit makes no sense, it's old and broken
+# but you will want to look at it for dual arm stuff so i'm leaving it here for the time being
+"""
+def clikDualArm(args, robot, goal, goal_transform, run=True):
+#   
+#   clikDualArm
+#   -----------
+#   
+   robot.Mgoal = copy.deepcopy(goal)
+   ik_solver = getClikController(args, robot)
+   controlLoop = partial(controlLoopClikDualArm, robot, ik_solver, goal_transform)
+   # we're not using any past data or logging, hence the empty arguments
+   log_item = {
+           'qs' : np.zeros(robot.model.nq),
+           'dqs' : np.zeros(robot.model.nv),
+           'dqs_cmd' : np.zeros(robot.model.nv),
+       }
+   save_past_dict = {}
+   loop_manager = ControlLoopManager(robot, controlLoop, args, save_past_dict, log_item)
+   if run:
+       loop_manager.run()
+   else:
+       return loop_manager
+"""
+
+
+# NOTE: this shit makes no sense, it's old and broken
+# but you will want to look at it for dual arm stuff so i'm leaving it here for the time being
+"""
+def controlLoopClikArmOnly(robot, ik_solver, _, _):
+    breakFlag = False
+    log_item = {}
+    q = robot.q
+    T_w_e = robot.getT_w_e()
+    # first check whether we're at the goal
+    SEerror = T_w_e.actInv(robot.Mgoal)
+    err_vector = pin.log6(SEerror).vector
+    if np.linalg.norm(err_vector) < robot.args.goal_error:
+        breakFlag = True
+    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
+    # cut off base from jacobian
+    J = J[:, 3:]
+    # compute the joint velocities based on controller you passed
+    qd = ik_solver(J, err_vector)
+    # add the missing velocities back
+    qd = np.hstack((np.zeros(3), qd))
+    robot.sendVelocityCommand(qd)
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
+    # we're not saving here, but need to respect the API,
+    # hence the empty dict
+    return breakFlag, {}, log_item
+"""
+
+# NOTE: this shit makes no sense, it's old and broken
+# but you will want to look at it for dual arm stuff so i'm leaving it here for the time being
+"""
+def moveLDualArm(
+    args, robot: AbstractRobotManager, goal_point, goal_transform, run=True
+):
+
+    moveL
+    -----
+    does moveL.
+    send a SE3 object as goal point.
+    if you don't care about rotation, make it np.zeros((3,3))
+
+    # assert type(goal_point) == pin.pinocchio_pywrap.SE3
+    robot.Mgoal = copy.deepcopy(goal_point)
+    ik_solver = getIKSolver(args, robot)
+    controlLoop = partial(controlLoopClikDualArm, robot, ik_solver, goal_transform)
+    # we're not using any past data or logging, hence the empty arguments
+    log_item = {
+        "qs": np.zeros(robot.model.nq),
+        "dqs": np.zeros(robot.model.nv),
+        "dqs_cmd": np.zeros(robot.model.nv),
+    }
+    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
+    if run:
+        loop_manager.run()
+    else:
+        return loop_manager
+"""
diff --git a/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py b/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py
new file mode 100644
index 0000000000000000000000000000000000000000..3626c7c86679a74a6bf42ee7dac5488fafaa78b3
--- /dev/null
+++ b/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py
@@ -0,0 +1,91 @@
+from smc.control.control_loop_manager import ControlLoopManager
+from smc.robots.robotmanager_abstract import AbstractRobotManager
+from smc.multiprocessing.process_manager import ProcessManager
+from smc.control.cartesian_space.ik_solvers import dampedPseudoinverse
+from functools import partial
+import pinocchio as pin
+import numpy as np
+
+from importlib.util import find_spec
+
+if find_spec("shapely"):
+    from smc.path_generation.planner import (
+        path2D_to_timed_SE3,
+        pathPointFromPathParam,
+    )
+
+
+def cartesianPathFollowingWithPlannerControlLoop(
+    args, robot: AbstractRobotManager, path_planner: ProcessManager, i, past_data
+):
+    """
+    cartesianPathFollowingWithPlanner
+    -----------------------------
+    end-effector(s) follow their path(s) according to what a 2D path-planner spits out
+    """
+    breakFlag = False
+    log_item = {}
+    save_past_dict = {}
+
+    q = robot.q
+    T_w_e = robot.T_w_e
+    p = T_w_e.translation[:2]
+    path_planner.sendFreshestCommand({"p": p})
+
+    # NOTE: it's pointless to recalculate the path every time
+    # if it's the same 2D path
+    data = path_planner.getData()
+    if data is None:
+        if args.debug_prints:
+            print("got no path so not doing anything")
+        robot.sendVelocityCommand(np.zeros(robot.model.nv))
+        log_item["qs"] = q.reshape((robot.model.nq,))
+        log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+        return breakFlag, save_past_dict, log_item
+    if data == "done":
+        breakFlag = True
+
+    path_pol, path2D_untimed = data
+    path2D_untimed = np.array(path2D_untimed).reshape((-1, 2))
+    # should be precomputed somewhere but this is nowhere near the biggest problem
+    max_base_v = np.linalg.norm(robot.model.robot.model.velocityLimit[:2])
+
+    # 1) make 6D path out of [[x0,y0],...]
+    # that represents the center of the cart
+    pathSE3 = path2D_to_timed_SE3(args, path_pol, path2D_untimed, max_base_v)
+    # print(path2D_untimed)
+    # for pp in pathSE3:
+    #    print(pp.translation)
+    # TODO: EVIL AND HAS TO BE REMOVED FROM HERE
+    if args.visualizer:
+        robot.visualizer_manager.sendCommand({"path": pathSE3})
+
+    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
+    # NOTE: obviously not path following but i want to see things working here
+    SEerror = T_w_e.actInv(pathSE3[-1])
+    err_vector = pin.log6(SEerror).vector
+    lb = -1 * robot.model.robot.model.velocityLimit
+    lb[1] = -0.001
+    ub = robot.model.robot.model.velocityLimit
+    ub[1] = 0.001
+    # vel_cmd = invKinmQP(J, err_vector, lb=lb, ub=ub)
+    vel_cmd = dampedPseudoinverse(0.002, J, err_vector)
+    robot.sendVelocityCommand(vel_cmd)
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+    # time.sleep(0.01)
+    return breakFlag, save_past_dict, log_item
+
+
+def cartesianPathFollowingWithPlanner(
+    args, robot: AbstractRobotManager, path_planner: ProcessManager
+):
+    controlLoop = partial(
+        cartesianPathFollowingWithPlannerControlLoop, args, robot, path_planner
+    )
+    log_item = {"qs": np.zeros(robot.model.nq), "dqs": np.zeros(robot.model.nv)}
+    save_past_dict = {}
+    loop_manager = ControlLoopManager(
+        robot, controlLoop, args, save_past_dict, log_item
+    )
+    loop_manager.run()
diff --git a/python/smc/control/clik/traiettoria.py b/python/smc/control/cartesian_space/experimental/traiettoria.py
similarity index 100%
rename from python/smc/control/clik/traiettoria.py
rename to python/smc/control/cartesian_space/experimental/traiettoria.py
diff --git a/python/smc/control/clik/whole_body_clik.py b/python/smc/control/cartesian_space/experimental/whole_body_clik.py
similarity index 100%
rename from python/smc/control/clik/whole_body_clik.py
rename to python/smc/control/cartesian_space/experimental/whole_body_clik.py
diff --git a/python/smc/control/cartesian_space/ik_solvers.py b/python/smc/control/cartesian_space/ik_solvers.py
new file mode 100644
index 0000000000000000000000000000000000000000..f517d9b30a99bbc24356c76bf3986841b4d3a794
--- /dev/null
+++ b/python/smc/control/cartesian_space/ik_solvers.py
@@ -0,0 +1,240 @@
+import numpy as np
+from qpsolvers import solve_qp
+import proxsuite
+from functools import partial
+
+
+def getIKSolver(args, robot):
+    """
+    getIKSolver
+    -----------------
+    A string argument is used to select one of the solvers below.
+    It's a bit ugly, but a totally functional and OK solution.
+    we want all of them to accept the same arguments, i.e. the jacobian and the error vector.
+    if they have extra stuff, just map it in the beginning with partial
+    NOTE: this could be changed to something else if it proves inappropriate later
+    TODO: write out other algorithms
+    """
+    if args.ik_solver == "dampedPseudoinverse":
+        return partial(dampedPseudoinverse, args.tikhonov_damp)
+    if args.ik_solver == "jacobianTranspose":
+        return jacobianTranspose
+    # TODO implement and add in the rest
+    # if controller_name == "invKinmQPSingAvoidE_kI":
+    #    return invKinmQPSingAvoidE_kI
+    # if controller_name == "invKinm_PseudoInv":
+    #    return invKinm_PseudoInv
+    # if controller_name == "invKinm_PseudoInv_half":
+    #    return invKinm_PseudoInv_half
+    if args.ik_solver == "invKinmQP":
+        lb = -1 * np.array(robot.model.velocityLimit, dtype="double")
+        # we do additional clipping
+        lb = np.clip(lb, -1 * robot.max_qd, robot.max_qd)
+        ub = np.array(robot.model.velocityLimit, dtype="double")
+        ub = np.clip(ub, -1 * robot.max_qd, robot.max_qd)
+        return partial(invKinmQP, lb=lb, ub=ub)
+    if args.ik_solver == "QPproxsuite":
+        H = np.eye(robot.model.nv)
+        g = np.zeros(robot.model.nv)
+        G = np.eye(robot.model.nv)
+        A = np.eye(6, robot.model.nv)
+        b = np.ones(6) * 0.1
+        # proxsuite does lb <= Cx <= ub
+        C = np.eye(robot.model.nv)
+        lb = -1 * np.array(robot.model.velocityLimit, dtype="double")
+        # we do additional clipping
+        lb = np.clip(lb, -1 * robot.max_qd, robot.max_qd)
+        ub = np.array(robot.model.velocityLimit, dtype="double")
+        ub = np.clip(ub, -1 * robot.max_qd, robot.max_qd)
+        qp = proxsuite.proxqp.dense.QP(robot.model.nv, 6, robot.model.nv)
+        qp.init(H, g, A, b, G, lb, ub)
+        qp.solve()
+        return partial(QPproxsuite, qp, lb, ub)
+
+    # if controller_name == "invKinmQPSingAvoidE_kI":
+    #    return invKinmQPSingAvoidE_kI
+    # if controller_name == "invKinmQPSingAvoidE_kM":
+    #    return invKinmQPSingAvoidE_kM
+    # if controller_name == "invKinmQPSingAvoidManipMax":
+    #    return invKinmQPSingAvoidManipMax
+
+    # default
+    return partial(dampedPseudoinverse, args.tikhonov_damp)
+
+
+def dampedPseudoinverse(tikhonov_damp, J, err_vector):
+    qd = (
+        J.T
+        @ np.linalg.inv(J @ J.T + np.eye(J.shape[0], J.shape[0]) * tikhonov_damp)
+        @ err_vector
+    )
+    return qd
+
+
+def jacobianTranspose(J, err_vector):
+    qd = J.T @ err_vector
+    return qd
+
+
+# TODO: put something into q of the QP
+# also, put in lb and ub
+# this one is with qpsolvers
+def invKinmQP(J, err_vector, lb=None, ub=None, past_qd=None):
+    """
+    invKinmQP
+    ---------
+    generic QP:
+        minimize 1/2 x^T P x + q^T x
+        subject to
+                 G x \\leq h
+                 A x = b
+                 lb <= x <= ub
+    inverse kinematics QP:
+        minimize 1/2 qd^T P qd
+                    + q^T qd (optional secondary objective)
+        subject to
+                 G qd \\leq h (optional)
+                 J qd = b    (mandatory)
+                 lb <= qd <= ub (optional)
+    """
+    P = np.eye(J.shape[1], dtype="double")
+    # secondary objective is given via q
+    # we set it to 0 here, but we should give a sane default here
+    q = np.array([0] * J.shape[1], dtype="double")
+    G = None
+    b = err_vector
+    A = J
+    # TODO: you probably want limits here
+    # lb = None
+    # ub = None
+    # lb *= 20
+    # ub *= 20
+    h = None
+    # (n_vars, n_eq_constraints, n_ineq_constraints)
+    # qp.init(H, g, A, b, C, l, u)
+    # print(J.shape)
+    # print(q.shape)
+    # print(A.shape)
+    # print(b.shape)
+    # NOTE: you want to pass the previous solver, not recreate it every time
+    ######################
+    # solve it
+    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="ecos")
+    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog", verbose=True, initvals=np.ones(len(lb)))
+    # if not (past_qd is None):
+    #    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp", verbose=False, initvals=past_qd)
+    # else:
+    #    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp", verbose=False, initvals=J.T@err_vector)
+    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp", verbose=True, initvals=0.01*J.T@err_vector)
+    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog", verbose=False, initvals=0.01*J.T@err_vector)
+    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog", verbose=False)
+    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp")
+    return qd
+
+
+def QPproxsuite(qp, lb, ub, J, err_vector):
+    # proxsuite does lb <= Cx <= ub
+    qp.settings.initial_guess = (
+        proxsuite.proxqp.InitialGuess.WARM_START_WITH_PREVIOUS_RESULT
+    )
+    # qp.update(g=q, A=A, b=h, l=lb, u=ub)
+    qp.update(A=J, b=err_vector)
+    qp.solve()
+    qd = qp.results.x
+
+    if qp.results.info.status == proxsuite.proxqp.PROXQP_PRIMAL_INFEASIBLE:
+        # if np.abs(qp.results.info.duality_gap) > 0.1:
+        print("didn't solve shit")
+        qd = None
+    return qd
+
+
+# TODO: calculate nice q (in QP) as the secondary objective
+# this requires getting the forward kinematics hessian,
+# a.k.a jacobian derivative w.r.t. joint positions  dJ/dq .
+# the ways to do it are as follows:
+#   1) shitty and sad way to do it by computing dJ/dq \cdot \dot{q}
+#      with unit velocities (qd) and then stacking that (very easy tho)
+#   2) there is a function in c++ pinocchio for it getKinematicsHessian and you could write the pybind
+#   3) you can write it yourself following peter corke's guide (he has a tutorial on fwd kinm derivatives)
+#   4) figure out what pin.computeForwardKinematicDerivatives and pin.getJointAccelerationDerivatives
+#      actually do and use that
+# HA! found it in a winter school
+# use this
+# and test it with finite differencing!
+"""
+class CostManipulability:
+    def __init__(self,jointIndex=None,frameIndex=None):
+        if frameIndex is not None:
+            jointIndex = robot.model.frames[frameIndex].parent
+        self.jointIndex = jointIndex if jointIndex is not None else robot.model.njoints-1
+    def calc(self,q):
+        J = self.J=pin.computeJointJacobian(robot.model,robot.data,q,self.jointIndex)
+        return np.sqrt(det(J@J.T))
+    def calcDiff(self,q):
+        Jp = pinv(pin.computeJointJacobian(robot.model,robot.data,q,self.jointIndex))
+        res = np.zeros(robot.model.nv)
+        v0 = np.zeros(robot.model.nv)
+        for k in range(6):
+            pin.computeForwardKinematicsDerivatives(robot.model,robot.data,q,Jp[:,k],v0)
+            JqJpk = pin.getJointVelocityDerivatives(robot.model,robot.data,self.jointIndex,pin.LOCAL)[0]
+            res += JqJpk[k,:]
+        res *= self.calc(q)
+        return res
+
+# use this as a starting point for finite differencing
+def numdiff(func, x, eps=1e-6):
+    f0 = copy.copy(func(x))
+    xe = x.copy()
+    fs = []
+    for k in range(len(x)):
+        xe[k] += eps
+        fs.append((func(xe) - f0) / eps)
+        xe[k] -= eps
+    if isinstance(f0, np.ndarray) and len(f0) > 1:
+        return np.stack(fs,axis=1)
+    else:
+        return np.matrix(fs)
+
+# and here's example usage
+# Tdiffq is used to compute the tangent application in the configuration space.
+Tdiffq = lambda f,q: Tdiff1(f,lambda q,v:pin.integrate(robot.model,q,v),robot.model.nv,q)
+c=costManipulability
+Tg = costManipulability.calcDiff(q)
+Tgn = Tdiffq(costManipulability.calc,q)
+#assert( norm(Tg-Tgn)<1e-4)
+"""
+
+
+def QPManipMax(J, err_vector, lb=None, ub=None):
+    """
+    QPManipMax
+    ----------
+    like invKinmQP, but intended to carry a manipulability-maximizing
+    secondary objective in q (see the TODO above; not implemented yet).
+    generic QP:
+        minimize 1/2 x^T P x + q^T x
+        subject to
+                 G x \\leq h
+                 A x = b
+                 lb <= x <= ub
+    inverse kinematics QP:
+        minimize 1/2 qd^T P qd
+                    + q^T qd (optional secondary objective)
+        subject to
+                 G qd \\leq h (optional)
+                 J qd = b    (mandatory)
+                 lb <= qd <= ub (optional)
+    """
+    P = np.eye(J.shape[1], dtype="double")
+    # secondary objective is given via q
+    # we set it to 0 here, but we should give a sane default here
+    q = np.array([0] * J.shape[1], dtype="double")
+    G = None
+    b = err_vector
+    A = J
+    # TODO: you probably want limits here
+    # lb = None
+    # ub = None
+    h = None
+    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="ecos")
+    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog")
+    return qd
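+
+
+if __name__ == "__main__":
+    # Minimal usage sketch / smoke test, not part of the library API.
+    # All solvers share the same interface: jacobian and 6D error twist in,
+    # joint velocities out (getIKSolver only maps args onto one of them via
+    # functools.partial). The random 6x7 jacobian stands in for a 7-DoF arm.
+    rng = np.random.default_rng(0)
+    J = rng.random((6, 7))
+    err_vector = rng.random(6)
+    print("dampedPseudoinverse:", dampedPseudoinverse(1e-3, J, err_vector))
+    print("jacobianTranspose:  ", jacobianTranspose(J, err_vector))
+    print("invKinmQP:          ", invKinmQP(J, err_vector))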
diff --git a/python/smc/control/cartesian_space/utils.py b/python/smc/control/cartesian_space/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f684489497e1976d20493acc42a94d7e8aa91de6
--- /dev/null
+++ b/python/smc/control/cartesian_space/utils.py
@@ -0,0 +1,87 @@
+import argparse
+
+
+def getClikArgs(parser: argparse.ArgumentParser):
+    """
+    getClikArgs
+    ------------
+    Every clik-related magic number, flag and setting is put in here.
+    This way the rest of the code is clean.
+    Force filtering is considered a core part of these control loops
+    because it's not necessarily the same as elsewhere.
+    If you want to know what these various arguments do, just grep
+    through the code to find them (but replace '-' with '_' in multi-word arguments).
+    All the sane defaults are here, and you can change any number of them as an argument
+    when running your program. If you want to change a drastic amount of them, and
+    not have to run super long commands, just copy-paste this function and put your
+    own parameters as default ones.
+    """
+    # parser = argparse.ArgumentParser(description='Run closed loop inverse kinematics \
+    #        of various kinds. Make sure you know what the goal is before you run!',
+    #        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    ####################
+    #  clik arguments  #
+    ####################
+    parser.add_argument(
+        "--goal-error",
+        type=float,
+        help="the final position error you are happy with",
+        default=1e-2,
+    )
+    parser.add_argument(
+        "--tikhonov-damp",
+        type=float,
+        help="damping scalar in tikhonov regularization",
+        default=1e-3,
+    )
+    # TODO add the rest
+    parser.add_argument(
+        "--ik-solver",
+        type=str,
+        help="select which click algorithm you want",
+        default="dampedPseudoinverse",
+        choices=[
+            "dampedPseudoinverse",
+            "jacobianTranspose",
+            "invKinmQP",
+            "QPproxsuite",
+        ],
+    )
+
+    ###########################################
+    #  force sensing and feedback parameters  #
+    ###########################################
+    parser.add_argument(
+        "--alpha",
+        type=float,
+        help="force feedback proportional coefficient",
+        default=0.01,
+    )
+    parser.add_argument(
+        "--beta", type=float, help="low-pass filter beta parameter", default=0.01
+    )
+
+    ###############################
+    #  path following parameters  #
+    ###############################
+    parser.add_argument(
+        "--max-init-clik-iterations",
+        type=int,
+        help="number of max clik iterations to get to the first point",
+        default=10000,
+    )
+    parser.add_argument(
+        "--max-running-clik-iterations",
+        type=int,
+        help="number of max clik iterations between path points",
+        default=1000,
+    )
+    parser.add_argument(
+        "--viz-test-path",
+        action=argparse.BooleanOptionalAction,
+        help="number of max clik iterations between path points",
+        default=False,
+    )
+
+    return parser
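+
+
+if __name__ == "__main__":
+    # Usage sketch, not part of the library API: getClikArgs only extends an
+    # existing parser, so it composes with whatever base parser you use
+    # (normally getMinimalArgParser(); a bare ArgumentParser keeps this sketch
+    # self-contained). Parsing an empty argv prints the defaults defined above.
+    parser = getClikArgs(argparse.ArgumentParser())
+    print(parser.parse_args([]))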
diff --git a/python/smc/control/clik/__init__.py b/python/smc/control/clik/__init__.py
deleted file mode 100644
index 05eae0dbb013f9dba70f1d1c9b8142b24bd900d9..0000000000000000000000000000000000000000
--- a/python/smc/control/clik/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .clik import *
-#from .whole_body_clik import *
diff --git a/python/smc/control/clik/clik.py b/python/smc/control/clik/clik.py
deleted file mode 100644
index 4a7141d8d6a2ad21378db68f5ac340feb4f6826f..0000000000000000000000000000000000000000
--- a/python/smc/control/clik/clik.py
+++ /dev/null
@@ -1,957 +0,0 @@
-from smc.control.control_loop_manager import ControlLoopManager
-from smc.robots.robotmanager_abstract import AbstractRobotManager
-from smc.multiprocessing.process_manager import ProcessManager
-
-import pinocchio as pin
-import numpy as np
-import copy
-import argparse
-from functools import partial
-from qpsolvers import solve_qp
-import argparse
-import importlib
-import proxsuite
-
-if importlib.util.find_spec("shapely"):
-    from smc.path_generation.planner import (
-        path2D_to_timed_SE3,
-        pathPointFromPathParam,
-    )
-
-
-def getClikArgs(parser):
-    """
-    getClikArgs
-    ------------
-    Every clik-related magic number, flag and setting is put in here.
-    This way the rest of the code is clean.
-    Force filtering is considered a core part of these control loops
-    because it's not necessarily the same as elsewhere.
-    If you want to know what these various arguments do, just grep
-    though the code to find them (but replace '-' with '_' in multi-word arguments).
-    All the sane defaults are here, and you can change any number of them as an argument
-    when running your program. If you want to change a drastic amount of them, and
-    not have to run a super long commands, just copy-paste this function and put your
-    own parameters as default ones.
-    """
-    # parser = argparse.ArgumentParser(description='Run closed loop inverse kinematics \
-    #        of various kinds. Make sure you know what the goal is before you run!',
-    #        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
-    ####################
-    #  clik arguments  #
-    ####################
-    parser.add_argument(
-        "--goal-error",
-        type=float,
-        help="the final position error you are happy with",
-        default=1e-2,
-    )
-    parser.add_argument(
-        "--tikhonov-damp",
-        type=float,
-        help="damping scalar in tikhonov regularization",
-        default=1e-3,
-    )
-    # TODO add the rest
-    parser.add_argument(
-        "--clik-controller",
-        type=str,
-        help="select which click algorithm you want",
-        default="dampedPseudoinverse",
-        choices=[
-            "dampedPseudoinverse",
-            "jacobianTranspose",
-            "invKinmQP",
-            "QPproxsuite",
-        ],
-    )
-
-    ###########################################
-    #  force sensing and feedback parameters  #
-    ###########################################
-    parser.add_argument(
-        "--alpha",
-        type=float,
-        help="force feedback proportional coefficient",
-        default=0.01,
-    )
-    parser.add_argument(
-        "--beta", type=float, help="low-pass filter beta parameter", default=0.01
-    )
-
-    ###############################
-    #  path following parameters  #
-    ###############################
-    parser.add_argument(
-        "--max-init-clik-iterations",
-        type=int,
-        help="number of max clik iterations to get to the first point",
-        default=10000,
-    )
-    parser.add_argument(
-        "--max-running-clik-iterations",
-        type=int,
-        help="number of max clik iterations between path points",
-        default=1000,
-    )
-    parser.add_argument(
-        "--viz-test-path",
-        action=argparse.BooleanOptionalAction,
-        help="number of max clik iterations between path points",
-        default=False,
-    )
-
-    return parser
-
-
-#######################################################################
-#                             controllers                             #
-#######################################################################
-"""
-controllers general
------------------------
-really trully just the equation you are running.
-ideally, everything else is a placeholder,
-meaning you can swap these at will.
-if a controller has some additional arguments,
-those are put in via functools.partial in getClikController,
-so that you don't have to worry about how your controller is handled in
-the actual control loop after you've put it in getClikController,
-which constructs the controller from an argument to the file.
-"""
-
-
-def dampedPseudoinverse(tikhonov_damp, J, err_vector):
-    qd = (
-        J.T
-        @ np.linalg.inv(J @ J.T + np.eye(J.shape[0], J.shape[0]) * tikhonov_damp)
-        @ err_vector
-    )
-    return qd
-
-
-def jacobianTranspose(J, err_vector):
-    qd = J.T @ err_vector
-    return qd
-
-
-# TODO: put something into q of the QP
-# also, put in lb and ub
-# this one is with qpsolvers
-def invKinmQP(J, err_vector, lb=None, ub=None, past_qd=None):
-    """
-    invKinmQP
-    ---------
-    generic QP:
-        minimize 1/2 x^T P x + q^T x
-        subject to
-                 G x \leq h
-                 A x = b
-                 lb <= x <= ub
-    inverse kinematics QP:
-        minimize 1/2 qd^T P qd
-                    + q^T qd (optional secondary objective)
-        subject to
-                 G qd \leq h (optional)
-                 J qd = b    (mandatory)
-                 lb <= qd <= ub (optional)
-    """
-    P = np.eye(J.shape[1], dtype="double")
-    # secondary objective is given via q
-    # we set it to 0 here, but we should give a sane default here
-    q = np.array([0] * J.shape[1], dtype="double")
-    G = None
-    b = err_vector
-    A = J
-    # TODO: you probably want limits here
-    # lb = None
-    # ub = None
-    # lb *= 20
-    # ub *= 20
-    h = None
-    # (n_vars, n_eq_constraints, n_ineq_constraints)
-    # qp.init(H, g, A, b, C, l, u)
-    # print(J.shape)
-    # print(q.shape)
-    # print(A.shape)
-    # print(b.shape)
-    # NOTE: you want to pass the previous solver, not recreate it every time
-    ######################
-    # solve it
-    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="ecos")
-    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog", verbose=True, initvals=np.ones(len(lb)))
-    # if not (past_qd is None):
-    #    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp", verbose=False, initvals=past_qd)
-    # else:
-    #    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp", verbose=False, initvals=J.T@err_vector)
-    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp", verbose=True, initvals=0.01*J.T@err_vector)
-    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog", verbose=False, initvals=0.01*J.T@err_vector)
-    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog", verbose=False)
-    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="proxqp")
-    return qd
-
-
-def QPproxsuite(qp, lb, ub, J, err_vector):
-    # proxsuite does lb <= Cx <= ub
-    qp.settings.initial_guess = (
-        proxsuite.proxqp.InitialGuess.WARM_START_WITH_PREVIOUS_RESULT
-    )
-    # qp.update(g=q, A=A, b=h, l=lb, u=ub)
-    qp.update(A=J, b=err_vector)
-    qp.solve()
-    qd = qp.results.x
-
-    if qp.results.info.status == proxsuite.proxqp.PROXQP_PRIMAL_INFEASIBLE:
-        # if np.abs(qp.results.info.duality_gap) > 0.1:
-        print("didn't solve shit")
-        qd = None
-    return qd
-
-
-# TODO: calculate nice q (in QP) as the secondary objective
-# this requires getting the forward kinematics hessian,
-# a.k.a jacobian derivative w.r.t. joint positions  dJ/dq .
-# the ways to do it are as follows:
-#   1) shitty and sad way to do it by computing dJ/dq \cdot \dot{q}
-#      with unit velocities (qd) and then stacking that (very easy tho)
-#   2) there is a function in c++ pinocchio for it getKinematicsHessian and you could write the pybind
-#   3) you can write it yourself following peter corke's quide (he has a tutorial on fwd kinm derivatives)
-#   4) figure out what pin.computeForwardKinematicDerivatives and pin.getJointAccelerationDerivatives
-#      actually do and use that
-# HA! found it in a winter school
-# use this
-# and test it with finite differencing!
-"""
-class CostManipulability:
-    def __init__(self,jointIndex=None,frameIndex=None):
-        if frameIndex is not None:
-            jointIndex = robot.model.frames[frameIndex].parent
-        self.jointIndex = jointIndex if jointIndex is not None else robot.model.njoints-1
-    def calc(self,q):
-        J = self.J=pin.computeJointJacobian(robot.model,robot.data,q,self.jointIndex)
-        return np.sqrt(det(J@J.T))
-    def calcDiff(self,q):
-        Jp = pinv(pin.computeJointJacobian(robot.model,robot.data,q,self.jointIndex))
-        res = np.zeros(robot.model.nv)
-        v0 = np.zeros(robot.model.nv)
-        for k in range(6):
-            pin.computeForwardKinematicsDerivatives(robot.model,robot.data,q,Jp[:,k],v0)
-            JqJpk = pin.getJointVelocityDerivatives(robot.model,robot.data,self.jointIndex,pin.LOCAL)[0]
-            res += JqJpk[k,:]
-        res *= self.calc(q)
-        return res
-
-# use this as a starting point for finite differencing
-def numdiff(func, x, eps=1e-6):
-    f0 = copy.copy(func(x))
-    xe = x.copy()
-    fs = []
-    for k in range(len(x)):
-        xe[k] += eps
-        fs.append((func(xe) - f0) / eps)
-        xe[k] -= eps
-    if isinstance(f0, np.ndarray) and len(f0) > 1:
-        return np.stack(fs,axis=1)
-    else:
-        return np.matrix(fs)
-
-# and here's example usage
-# Tdiffq is used to compute the tangent application in the configuration space.
-Tdiffq = lambda f,q: Tdiff1(f,lambda q,v:pin.integrate(robot.model,q,v),robot.model.nv,q)
-c=costManipulability
-Tg = costManipulability.calcDiff(q)
-Tgn = Tdiffq(costManipulability.calc,q)
-#assert( norm(Tg-Tgn)<1e-4)
-"""
-
-
-def QPManipMax(J, err_vector, lb=None, ub=None):
-    """
-    invKinmQP
-    ---------
-    generic QP:
-        minimize 1/2 x^T P x + q^T x
-        subject to
-                 G x \leq h
-                 A x = b
-                 lb <= x <= ub
-    inverse kinematics QP:
-        minimize 1/2 qd^T P qd
-                    + q^T qd (optional secondary objective)
-        subject to
-                 G qd \leq h (optional)
-                 J qd = b    (mandatory)
-                 lb <= qd <= ub (optional)
-    """
-    P = np.eye(J.shape[1], dtype="double")
-    # secondary objective is given via q
-    # we set it to 0 here, but we should give a sane default here
-    q = np.array([0] * J.shape[1], dtype="double")
-    G = None
-    b = err_vector
-    A = J
-    # TODO: you probably want limits here
-    # lb = None
-    # ub = None
-    h = None
-    # qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="ecos")
-    qd = solve_qp(P, q, G, h, A, b, lb, ub, solver="quadprog")
-    return qd
-
-
-def getClikController(args, robot):
-    """
-    getClikController
-    -----------------
-    A string argument is used to select one of these.
-    It's a bit ugly, bit totally functional and OK solution.
-    we want all of theme to accept the same arguments, i.e. the jacobian and the error vector.
-    if they have extra stuff, just map it in the beginning with partial
-    NOTE: this could be changed to something else if it proves inappropriate later
-    TODO: write out other algorithms
-    """
-    if args.clik_controller == "dampedPseudoinverse":
-        return partial(dampedPseudoinverse, args.tikhonov_damp)
-    if args.clik_controller == "jacobianTranspose":
-        return jacobianTranspose
-    # TODO implement and add in the rest
-    # if controller_name == "invKinmQPSingAvoidE_kI":
-    #    return invKinmQPSingAvoidE_kI
-    # if controller_name == "invKinm_PseudoInv":
-    #    return invKinm_PseudoInv
-    # if controller_name == "invKinm_PseudoInv_half":
-    #    return invKinm_PseudoInv_half
-    if args.clik_controller == "invKinmQP":
-        lb = -1 * np.array(robot.model.velocityLimit, dtype="double")
-        # we do additional clipping
-        lb = np.clip(lb, -1 * robot.max_qd, robot.max_qd)
-        ub = np.array(robot.model.velocityLimit, dtype="double")
-        ub = np.clip(ub, -1 * robot.max_qd, robot.max_qd)
-        return partial(invKinmQP, lb=lb, ub=ub)
-    if args.clik_controller == "QPproxsuite":
-        H = np.eye(robot.model.nv)
-        g = np.zeros(robot.model.nv)
-        G = np.eye(robot.model.nv)
-        A = np.eye(6, robot.model.nv)
-        b = np.ones(6) * 0.1
-        # proxsuite does lb <= Cx <= ub
-        C = np.eye(robot.model.nv)
-        lb = -1 * np.array(robot.model.velocityLimit, dtype="double")
-        # we do additional clipping
-        lb = np.clip(lb, -1 * robot.max_qd, robot.max_qd)
-        ub = np.array(robot.model.velocityLimit, dtype="double")
-        ub = np.clip(ub, -1 * robot.max_qd, robot.max_qd)
-        qp = proxsuite.proxqp.dense.QP(robot.model.nv, 6, robot.model.nv)
-        qp.init(H, g, A, b, G, lb, ub)
-        qp.solve()
-        return partial(QPproxsuite, qp, lb, ub)
-
-    # if controller_name == "invKinmQPSingAvoidE_kI":
-    #    return invKinmQPSingAvoidE_kI
-    # if controller_name == "invKinmQPSingAvoidE_kM":
-    #    return invKinmQPSingAvoidE_kM
-    # if controller_name == "invKinmQPSingAvoidManipMax":
-    #    return invKinmQPSingAvoidManipMax
-
-    # default
-    return partial(dampedPseudoinverse, args.tikhonov_damp)
-
-
-def controlLoopClik(robot: AbstractRobotManager, clik_controller, i, past_data):
-    """
-    controlLoopClik
-    ---------------
-    generic control loop for clik (handling error to final point etc).
-    in some version of the universe this could be extended to a generic
-    point-to-point motion control loop.
-    """
-    breakFlag = False
-    log_item = {}
-    save_past_item = {}
-    q = robot.getQ()
-    T_w_e = robot.getT_w_e()
-    # first check whether we're at the goal
-    SEerror = T_w_e.actInv(robot.Mgoal)
-    err_vector = pin.log6(SEerror).vector
-    if np.linalg.norm(err_vector) < robot.args.goal_error:
-        breakFlag = True
-    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
-    # compute the joint velocities based on controller you passed
-    # qd = clik_controller(J, err_vector, past_qd=past_data['dqs_cmd'][-1])
-    qd = clik_controller(J, err_vector)
-    if qd is None:
-        print("the controller you chose didn't work, using dampedPseudoinverse instead")
-        qd = dampedPseudoinverse(1e-2, J, err_vector)
-    robot.sendQd(qd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
-    log_item["err_norm"] = np.linalg.norm(err_vector).reshape((1,))
-    # save the commanded velocities so the next iteration can reuse them
-    save_past_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
-    return breakFlag, save_past_item, log_item
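
A minimal sketch of the SE(3) error computation the loop above relies on: express the goal in the end-effector frame and map it to a 6D twist with log6. The poses here are hypothetical:

import numpy as np
import pinocchio as pin

T_w_e = pin.SE3(np.eye(3), np.array([0.3, 0.0, 0.5]))      # current end-effector pose
T_w_goal = pin.SE3(np.eye(3), np.array([0.4, 0.1, 0.5]))   # goal pose

SEerror = T_w_e.actInv(T_w_goal)         # goal expressed in the end-effector frame
err_vector = pin.log6(SEerror).vector    # 6D (linear, angular) error
print(np.linalg.norm(err_vector))        # compared against args.goal_error in the loop
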
-
-
-def controlLoopClikDualArm(
-    robot: AbstractRobotManager, clik_controller, goal_transform, i, past_data
-):
-    """
-    controlLoopClikDualArm
-    ---------------
-    do point to point motion for each arm and its goal.
-    that goal is generated from a single goal that you pass,
-    and a transformation on the goal you also pass.
-    the transformation is done in the goal's frame
-    (the goal must be an SE3 object).
-    the transform is designed for the left arm,
-    and its inverse is applied for the right arm.
-    """
-    breakFlag = False
-    log_item = {}
-    q = robot.getQ()
-    T_w_e_left, T_w_e_right = robot.getT_w_e()
-    #
-    Mgoal_left = goal_transform.act(robot.Mgoal)
-    Mgoal_right = goal_transform.inverse().act(robot.Mgoal)
-    # print("robot.Mgoal", robot.Mgoal)
-    # print("Mgoal_left", Mgoal_left)
-    # print("Mgoal_right", Mgoal_right)
-
-    SEerror_left = T_w_e_left.actInv(Mgoal_left)
-    SEerror_right = T_w_e_right.actInv(Mgoal_right)
-
-    err_vector_left = pin.log6(SEerror_left).vector
-    err_vector_right = pin.log6(SEerror_right).vector
-
-    if (np.linalg.norm(err_vector_left) < robot.args.goal_error) and (
-        np.linalg.norm(err_vector_right) < robot.args.goal_error
-    ):
-        breakFlag = True
-    J_left = pin.computeFrameJacobian(robot.model, robot.data, q, robot.l_ee_frame_id)
-    J_right = pin.computeFrameJacobian(robot.model, robot.data, q, robot.r_ee_frame_id)
-    # compute the joint velocities based on controller you passed
-    # this works exactly the way you think it does:
-    # the velocities for the other arm are 0
-    # what happens to the base hasn't been investigated but it seems ok
-    qd_left = clik_controller(J_left, err_vector_left)
-    qd_right = clik_controller(J_right, err_vector_right)
-    qd = qd_left + qd_right
-    robot.sendQd(qd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
-    # we're not saving here, but need to respect the API,
-    # hence the empty dict
-    return breakFlag, {}, log_item
-
-
-# def clikDualArm(args, robot, goal, goal_transform, run=True):
-#    """
-#    clikDualArm
-#    -----------
-#    """
-#    robot.Mgoal = copy.deepcopy(goal)
-#    clik_controller = getClikController(args, robot)
-#    controlLoop = partial(controlLoopClikDualArm, robot, clik_controller, goal_transform)
-#    # we're not using any past data or logging, hence the empty arguments
-#    log_item = {
-#            'qs' : np.zeros(robot.model.nq),
-#            'dqs' : np.zeros(robot.model.nv),
-#            'dqs_cmd' : np.zeros(robot.model.nv),
-#        }
-#    save_past_dict = {}
-#    loop_manager = ControlLoopManager(robot, controlLoop, args, save_past_dict, log_item)
-#    if run:
-#        loop_manager.run()
-#    else:
-#        return loop_manager
-
-
-def controlLoopClikArmOnly(robot, clik_controller, i, past_data):
-    breakFlag = False
-    log_item = {}
-    q = robot.getQ()
-    T_w_e = robot.getT_w_e()
-    # first check whether we're at the goal
-    SEerror = T_w_e.actInv(robot.Mgoal)
-    err_vector = pin.log6(SEerror).vector
-    if np.linalg.norm(err_vector) < robot.args.goal_error:
-        breakFlag = True
-    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
-    # cut off base from jacobian
-    J = J[:, 3:]
-    # compute the joint velocities based on controller you passed
-    qd = clik_controller(J, err_vector)
-    # add the missing velocities back
-    qd = np.hstack((np.zeros(3), qd))
-    robot.sendQd(qd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
-    # we're not saving here, but need to respect the API,
-    # hence the empty dict
-    return breakFlag, {}, log_item
-
-
-def moveUntilContactControlLoop(
-    args, robot: AbstractRobotManager, speed, clik_controller, i, past_data
-):
-    """
-    moveUntilContactControlLoop
-    ---------------
-    commands a constant end-effector velocity via clik and stops
-    as soon as the f/t sensor registers contact.
-    """
-    breakFlag = False
-    # know where you are, i.e. do forward kinematics
-    log_item = {}
-    q = robot.getQ()
-    # break if wrench is nonzero basically
-    # wrench = robot.getWrench()
-    # you're already giving the speed in the EE i.e. body frame
-    # so it only makes sense to have the wrench in the same frame
-    # wrench = robot._getWrenchInEE()
-    wrench = robot.getWrench()
-    # and furthermore it's a reasonable assumption that you'll hit the thing
-    # in the direction you're going in.
-    # thus we only care about wrenches in those direction coordinates
-    mask = speed != 0.0
-    # NOTE: the contact-detecting force threshold is a magic number:
-    # it is 100% empirical, chosen to sit just above sensor noise.
-    # so far it's worked fine, and it's pretty soft too.
-    if np.linalg.norm(wrench[mask]) > args.contact_detecting_force:
-        print("hit with", np.linalg.norm(wrench[mask]))
-        breakFlag = True
-        robot.sendQd(np.zeros(robot.model.nv))
-    if (args.pinocchio_only) and (i > 500):
-        print("let's say you hit something lule")
-        breakFlag = True
-    # pin.computeJointJacobian is quite different from the C++ version
-    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
-    # compute the joint velocities.
-    qd = clik_controller(J, speed)
-    robot.sendQd(qd)
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["wrench"] = wrench.reshape((6,))
-    return breakFlag, {}, log_item
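
The contact test above only looks at wrench components along the commanded motion directions; a tiny sketch with made-up numbers:

import numpy as np

speed = np.array([0.0, 0.0, -0.05, 0.0, 0.0, 0.0])   # pushing down along z
wrench = np.array([0.5, -0.3, 4.2, 0.0, 0.1, 0.0])   # measured f/t reading
contact_detecting_force = 3.0                         # threshold just above sensor noise

mask = speed != 0.0
if np.linalg.norm(wrench[mask]) > contact_detecting_force:
    print("contact detected:", np.linalg.norm(wrench[mask]))
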
-
-
-def moveUntilContact(args, robot, speed):
-    """
-    moveUntilContact
-    -----
-    does clik until it feels something with the f/t sensor
-    """
-    assert type(speed) == np.ndarray
-    clik_controller = getClikController(args, robot)
-    controlLoop = partial(
-        moveUntilContactControlLoop, args, robot, speed, clik_controller
-    )
-    # we're not using any past data or logging, hence the empty arguments
-    log_item = {"wrench": np.zeros(6)}
-    log_item["qs"] = np.zeros((robot.model.nq,))
-    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
-    loop_manager.run()
-    print("Collision detected!!")
-
-
-def moveL(args, robot: AbstractRobotManager, goal_point):
-    """
-    moveL
-    -----
-    does moveL.
-    send a SE3 object as goal point.
-    if you don't care about rotation, make it np.zeros((3,3))
-    """
-    # assert type(goal_point) == pin.pinocchio_pywrap.SE3
-    robot.Mgoal = copy.deepcopy(goal_point)
-    clik_controller = getClikController(args, robot)
-    controlLoop = partial(controlLoopClik, robot, clik_controller)
-    # set up the log buffers and the past-data entries this loop needs
-    log_item = {
-        "qs": np.zeros(robot.model.nq),
-        "dqs": np.zeros(robot.model.nv),
-        "dqs_cmd": np.zeros(robot.model.nv),
-        "err_norm": np.zeros(1),
-    }
-    save_past_dict = {
-        "dqs_cmd": np.zeros(robot.model.nv),
-    }
-    loop_manager = ControlLoopManager(
-        robot, controlLoop, args, save_past_dict, log_item
-    )
-    loop_manager.run()
-
-
-def moveLDualArm(
-    args, robot: AbstractRobotManager, goal_point, goal_transform, run=True
-):
-    """
-    moveLDualArm
-    ------------
-    does a dual-arm moveL: each arm gets its own goal derived from goal_point and goal_transform.
-    send a SE3 object as goal point.
-    if you don't care about rotation, make it np.zeros((3,3))
-    """
-    # assert type(goal_point) == pin.pinocchio_pywrap.SE3
-    robot.Mgoal = copy.deepcopy(goal_point)
-    clik_controller = getClikController(args, robot)
-    controlLoop = partial(
-        controlLoopClikDualArm, robot, clik_controller, goal_transform
-    )
-    # we're not using any past data or logging, hence the empty arguments
-    log_item = {
-        "qs": np.zeros(robot.model.nq),
-        "dqs": np.zeros(robot.model.nv),
-        "dqs_cmd": np.zeros(robot.model.nv),
-    }
-    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
-    if run:
-        loop_manager.run()
-    else:
-        return loop_manager
-
-
-# TODO: implement
-def moveLFollowingLine(args, robot, goal_point):
-    """
-    moveLFollowingLine
-    ------------------
-    make a path from current to goal position, i.e.
-    just a straight line between them.
-    the open question is what to do with orientations:
-    one version could enforce/assume that the start and end poses
-    share the same orientation, while another follows the line and
-    interpolates the orientation linearly along the way.
-    """
-    pass
-
-
-def cartesianPathFollowingWithPlannerControlLoop(
-    args, robot: AbstractRobotManager, path_planner: ProcessManager, i, past_data
-):
-    """
-    cartesianPathFollowingWithPlanner
-    -----------------------------
-    end-effector(s) follow their path(s) according to what a 2D path-planner spits out
-    """
-    breakFlag = False
-    log_item = {}
-    save_past_dict = {}
-
-    q = robot.getQ()
-    T_w_e = robot.getT_w_e()
-    p = T_w_e.translation[:2]
-    path_planner.sendFreshestCommand({"p": p})
-
-    # NOTE: it's pointless to recalculate the path every time
-    # if it's the same 2D path
-    data = path_planner.getData()
-    if data is None:
-        if args.debug_prints:
-            print("got no path so no doing anything")
-        robot.sendQd(np.zeros(robot.model.nv))
-        log_item["qs"] = q.reshape((robot.model.nq,))
-        log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-        return breakFlag, save_past_dict, log_item
-    if data == "done":
-        breakFlag = True
-
-    path_pol, path2D_untimed = data
-    path2D_untimed = np.array(path2D_untimed).reshape((-1, 2))
-    # should be precomputed somewhere but this is nowhere near the biggest problem
-    max_base_v = np.linalg.norm(robot.model.robot.model.velocityLimit[:2])
-
-    # 1) make 6D path out of [[x0,y0],...]
-    # that represents the center of the cart
-    pathSE3 = path2D_to_timed_SE3(args, path_pol, path2D_untimed, max_base_v)
-    # print(path2D_untimed)
-    # for pp in pathSE3:
-    #    print(pp.translation)
-    # TODO: EVIL AND HAS TO BE REMOVED FROM HERE
-    if args.visualizer:
-        robot.visualizer_manager.sendCommand({"path": pathSE3})
-
-    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
-    # NOTE: obviously not path following but i want to see things working here
-    SEerror = T_w_e.actInv(pathSE3[-1])
-    err_vector = pin.log6(SEerror).vector
-    lb = -1 * robot.model.robot.model.velocityLimit
-    lb[1] = -0.001
-    ub = robot.model.robot.model.velocityLimit
-    ub[1] = 0.001
-    # vel_cmd = invKinmQP(J, err_vector, lb=lb, ub=ub)
-    vel_cmd = dampedPseudoinverse(0.002, J, err_vector)
-    robot.sendQd(vel_cmd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    # time.sleep(0.01)
-    return breakFlag, save_past_dict, log_item
-
-
-def cartesianPathFollowingWithPlanner(
-    args, robot: AbstractRobotManager, path_planner: ProcessManager
-):
-    controlLoop = partial(
-        cartesianPathFollowingWithPlannerControlLoop, args, robot, path_planner
-    )
-    log_item = {"qs": np.zeros(robot.model.nq), "dqs": np.zeros(robot.model.nv)}
-    save_past_dict = {}
-    loop_manager = ControlLoopManager(
-        robot, controlLoop, args, save_past_dict, log_item
-    )
-    loop_manager.run()
-
-
-def controlLoopCompliantClik(args, robot: AbstractRobotManager, i, past_data):
-    """
-    controlLoopCompliantClik
-    ------------------------
-    generic control loop for clik (handling error to final point etc).
-    in some version of the universe this could be extended to a generic
-    point-to-point motion control loop.
-    """
-    breakFlag = False
-    log_item = {}
-    save_past_dict = {}
-    # know where you are, i.e. do forward kinematics
-    q = robot.getQ()
-    T_w_e = robot.getT_w_e()
-    wrench = robot.getWrench()
-    # we need to overcome noise if we want to converge
-    if np.linalg.norm(wrench) < args.minimum_detectable_force_norm:
-        wrench = np.zeros(6)
-    save_past_dict["wrench"] = copy.deepcopy(wrench)
-    wrench = args.beta * wrench + (1 - args.beta) * np.average(
-        np.array(past_data["wrench"]), axis=0
-    )
-    Z = np.diag(np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0]))
-    wrench = Z @ wrench
-    # first check whether we're at the goal
-    SEerror = T_w_e.actInv(robot.Mgoal)
-    err_vector = pin.log6(SEerror).vector
-    if np.linalg.norm(err_vector) < robot.args.goal_error:
-        breakFlag = True
-    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
-    # compute the joint velocities.
-    # just plug and play different ones
-    qd = (
-        J.T
-        @ np.linalg.inv(J @ J.T + np.eye(J.shape[0], J.shape[0]) * args.tikhonov_damp)
-        @ err_vector
-    )
-    tau = J.T @ wrench
-    # tau = tau[:6].reshape((6,1))
-    qd += args.alpha * tau
-    robot.sendQd(qd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    # get actual velocity, not the commanded one.
-    # although that would be fun to compare i guess
-    # TODO: have both, but call the compute control signal like it should be
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    log_item["wrench"] = wrench.reshape((6,))
-    log_item["tau"] = tau.reshape((robot.model.nv,))
-    # the raw wrench was saved above so future iterations can average over it
-    return breakFlag, save_past_dict, log_item
-
-
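
The wrench smoothing above is an exponential blend of the newest reading with the average of the recent history; a sketch with hypothetical values:

import numpy as np

beta = 0.01
wrench = np.array([1.0, 0.0, 5.0, 0.0, 0.0, 0.0])   # newest f/t reading
past_wrenches = np.zeros((100, 6))                   # stand-in for past_data["wrench"]
wrench_filtered = beta * wrench + (1 - beta) * np.average(past_wrenches, axis=0)
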
-# add a threshold for the wrench
-def compliantMoveL(args, robot, goal_point, run=True):
-    """
-    compliantMoveL
-    -----
-    does compliantMoveL - a moveL, but with compliance achieved
-    through f/t feedback
-    send a SE3 object as goal point.
-    if you don't care about rotation, make it np.zeros((3,3))
-    """
-    #    assert type(goal_point) == pin.pinocchio_pywrap.SE3
-    robot.Mgoal = copy.deepcopy(goal_point)
-    clik_controller = getClikController(args, robot)
-    controlLoop = partial(controlLoopCompliantClik, args, robot)
-    # set up the log buffers and the past-wrench entry this loop needs
-    log_item = {
-        "qs": np.zeros(robot.model.nq),
-        "wrench": np.zeros(6),
-        "tau": np.zeros(robot.model.nv),
-        "dqs": np.zeros(robot.model.nv),
-    }
-    save_past_dict = {
-        "wrench": np.zeros(6),
-    }
-    loop_manager = ControlLoopManager(
-        robot, controlLoop, args, save_past_dict, log_item
-    )
-    if run:
-        loop_manager.run()
-    else:
-        return loop_manager
-
-
-def clikCartesianPathIntoJointPath(
-    args, robot, path, clikController, q_init, plane_pose
-):
-    """
-    clikCartesianPathIntoJointPath
-    ------------------------------
-    functionality
-    ------------
-    Follows a provided Cartesian path and
-    creates a joint-space trajectory for it.
-    The output format and timing only work with what the dmp code expects,
-    because that is its only consumer,
-    and I never gave that code the lift-up it needs.
-
-    return
-    ------
-    - joint_space_trajectory to follow the given path.
-
-    arguments
-    ----------
-    - path: cartesian path given in task frame
-    """
-    # we don't know how many points there will be, so a growable list is
-    # the natural data structure here (append is O(1) amortized,
-    # and we aren't pressed for time when turning it into an array later)
-    qs = []
-    # let's use the functions we already have. to do so
-    # we need to create a new AbstractRobotManager with arguments for simulation,
-    # otherwise weird things will happen.
-    # we keep all the other args intact
-    sim_args = copy.deepcopy(args)
-    sim_args.pinocchio_only = True
-    sim_args.ctrl_freq = -1
-    sim_args.plotter = False
-    sim_args.visualizer = False
-    sim_args.save_log = False  # we're not using sim robot outside of this
-    sim_args.max_iterations = 10000  # more than enough
-    sim_robot = AbstractRobotManager(sim_args)
-    sim_robot.q = q_init.copy()
-    sim_robot._step()
-    for pose in path:
-        moveL(sim_args, sim_robot, pose)
-        # the loop's logs are a dict; dict keys preserve insertion order
-        new_q = sim_robot.q.copy()
-        if args.viz_test_path:
-            # look into visualize.py for details on what's available
-            T_w_e = sim_robot.getT_w_e()
-            robot.updateViz({"q": new_q, "T_w_e": T_w_e, "point": T_w_e.copy()})
-            # time.sleep(0.005)
-        qs.append(new_q[:6])
-        # plot this on the real robot
-
-    ##############################################
-    #  save the obtained joint-space trajectory  #
-    ##############################################
-    qs = np.array(qs)
-    # we're putting a dmp over this so we already have the timing ready
-    # TODO: make this general, you don't want to depend on other random
-    # arguments (make this one traj_time, then set tau0 = traj_time there)
-    t = np.linspace(0, args.tau0, len(qs)).reshape((len(qs), 1))
-    joint_trajectory = np.hstack((t, qs))
-    # TODO handle saving more consistently/intentionally
-    # (although this definitely works right now and isn't bad, just mid)
-    # os.makedir -p a data dir and save there, this is ugly
-    # TODO: check if we actually need this and if not remove the saving
-    # whatever code uses this is responsible to log it if it wants it,
-    # let's not have random files around.
-    np.savetxt("./joint_trajectory.csv", joint_trajectory, delimiter=",", fmt="%.5f")
-    return joint_trajectory
-
-
-def controlLoopClikDualArmsOnly(
-    robot: AbstractRobotManager, clik_controller, goal_transform, i, past_data
-):
-    """
-    controlLoopClikDualArmsOnly
-    ---------------
-    """
-    breakFlag = False
-    log_item = {}
-    q = robot.getQ()
-    T_w_e_left, T_w_e_right = robot.getT_w_e()
-    #
-    Mgoal_left = robot.Mgoal.act(goal_transform)
-    Mgoal_right = robot.Mgoal.act(goal_transform.inverse())
-
-    SEerror_left = T_w_e_left.actInv(Mgoal_left)
-    SEerror_right = T_w_e_right.actInv(Mgoal_right)
-
-    err_vector_left = pin.log6(SEerror_left).vector
-    err_vector_right = pin.log6(SEerror_right).vector
-
-    if (np.linalg.norm(err_vector_left) < robot.args.goal_error) and (
-        np.linalg.norm(err_vector_right) < robot.args.goal_error
-    ):
-        breakFlag = True
-    J_left = pin.computeFrameJacobian(robot.model, robot.data, q, robot.l_ee_frame_id)
-    J_left = J_left[:, 3:]
-    J_right = pin.computeFrameJacobian(robot.model, robot.data, q, robot.r_ee_frame_id)
-    J_right = J_right[:, 3:]
-
-    # compute the joint velocities based on controller you passed
-    qd_left = clik_controller(J_left, err_vector_left)
-    qd_right = clik_controller(J_right, err_vector_right)
-    # lb, ub = (-0.05 * robot.model.robot.model.velocityLimit, 0.05 * robot.model.robot.model.velocityLimit)
-    # qd_left = invKinmQP(J_left, err_vector_left, lb=lb[3:], ub=ub[3:])
-    # qd_right = invKinmQP(J_right, err_vector_right, lb=lb[3:], ub=ub[3:])
-    qd = qd_left + qd_right
-    qd = np.hstack((np.zeros(3), qd))
-    robot.sendQd(qd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    log_item["dqs_cmd"] = qd.reshape((robot.model.nv,))
-    # we're not saving here, but need to respect the API,
-    # hence the empty dict
-    return breakFlag, {}, log_item
-
-
-def moveLDualArmsOnly(
-    args, robot: AbstractRobotManager, goal_point, goal_transform, run=True
-):
-    """
-    moveLDualArmsOnly
-    -----------------
-    does a dual-arm moveL with the base kept still: each arm gets its own goal derived from goal_point and goal_transform.
-    send a SE3 object as goal point.
-    if you don't care about rotation, make it np.zeros((3,3))
-    """
-    # assert type(goal_point) == pin.pinocchio_pywrap.SE3
-    robot.Mgoal = copy.deepcopy(goal_point)
-    clik_controller = getClikController(args, robot)
-    controlLoop = partial(
-        controlLoopClikDualArmsOnly, robot, clik_controller, goal_transform
-    )
-    # we're not using any past data or logging, hence the empty arguments
-    log_item = {
-        "qs": np.zeros(robot.model.nq),
-        "dqs": np.zeros(robot.model.nv),
-        "dqs_cmd": np.zeros(robot.model.nv),
-    }
-    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
-    if run:
-        loop_manager.run()
-    else:
-        return loop_manager
-
-
-if __name__ == "__main__":
-    args = get_args()
-    robot = AbstractRobotManager(args)
-    Mgoal = robot.defineGoalPointCLI()
-    clik_controller = getClikController(args, robot)
-    controlLoop = partial(controlLoopClik, robot, clik_controller)
-    # we're not using any past data or logging, hence the empty arguments
-    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, {})
-    loop_manager.run()
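
Every control loop in this patch follows the same callback contract: the callback receives the iteration index and a dict of past data, and returns (breakFlag, save_past_dict, log_item), while ControlLoopManager handles rate keeping, logging and plotting. A minimal sketch of that contract, with the manager wiring left commented out since it needs a live robot and parsed args:

from functools import partial
import numpy as np

def controlLoopIdle(robot, i, past_data):
    # a do-nothing loop: command zero velocity, log the configuration,
    # and ask the manager to stop after 100 iterations
    breakFlag = i > 100
    robot.sendVelocityCommand(np.zeros(robot.model.nv))
    log_item = {"qs": robot.q.reshape((robot.model.nq,))}
    return breakFlag, {}, log_item

# loop_manager = ControlLoopManager(robot, partial(controlLoopIdle, robot), args,
#                                   {}, {"qs": np.zeros(robot.model.nq)})
# loop_manager.run()
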
diff --git a/python/smc/control/control_loop_manager.py b/python/smc/control/control_loop_manager.py
index c47b8757604880e37cc7059639146d7ddfa3a320..d85d12ffa3c6b2f4a2bac3ba1909c3e263fac69b 100644
--- a/python/smc/control/control_loop_manager.py
+++ b/python/smc/control/control_loop_manager.py
@@ -1,5 +1,7 @@
+from smc.robots.robotmanager_abstract import AbstractRobotManager
 from smc.multiprocessing.process_manager import ProcessManager
-from smc.visualize.visualize import realTimePlotter
+from smc.visualization.plotters import realTimePlotter
+from functools import partial
 
 import signal
 import time
@@ -52,7 +54,14 @@ class ControlLoopManager:
 
     """
 
-    def __init__(self, robot_manager, controlLoop, args, save_past_item, log_item):
+    def __init__(
+        self,
+        robot_manager: AbstractRobotManager,
+        controlLoop: partial,
+        args,
+        save_past_item,
+        log_item,
+    ):
         signal.signal(signal.SIGINT, self.stopHandler)
         self.pid = getpid()
         self.max_iterations = args.max_iterations
@@ -130,11 +139,11 @@ class ControlLoopManager:
                     self.robot_manager.visualizer_manager.sendCommand(
                         {
                             "q": self.robot_manager.q,
-                            "T_w_e": self.robot_manager.getT_w_e(),
+                            "T_w_e": self.robot_manager.T_w_e,
                         }
                     )
                 else:
-                    T_w_e_left, T_w_e_right = self.robot_manager.getT_w_e()
+                    T_w_e_left, T_w_e_right = self.robot_manager.T_w_e
                     self.robot_manager.visualizer_manager.sendCommand(
                         {"q": self.robot_manager.q, "T_w_e": T_w_e_left}
                     )
@@ -185,7 +194,7 @@ class ControlLoopManager:
         if self.args.plotter:
             self.plotter_manager.terminateProcess()
         if self.args.save_log:
-            self.robot_manager.log_manager.storeControlLoopRun(
+            self.robot_manager._log_manager.storeControlLoopRun(
                 self.log_dict, self.controlLoop.func.__name__, self.final_iteration
             )
         if i < self.max_iterations - 1:
@@ -211,26 +220,22 @@ class ControlLoopManager:
         # but if we exit immediately it's fine
         if getpid() != self.pid:
             return
+        # TODO: this shutdown logic should be defined on a per-robot basis,
+        # and put it into robot_manager.stopRobot()
         print("sending 300 speedjs full of zeros and exiting")
         for _ in range(300):
             vel_cmd = np.zeros(self.robot_manager.model.nv)
-            self.robot_manager.sendQd(vel_cmd)
+            self.robot_manager.sendVelocityCommand(vel_cmd)
 
-        # hopefully this actually stops it
-        if not self.args.pinocchio_only:
-            self.robot_manager.rtde_control.speedStop(1)
-            print("sending a stopj as well")
-            self.robot_manager.rtde_control.stopJ(1)
-            print("putting it to freedrive for good measure too")
-            self.robot_manager.rtde_control.freedriveMode()
+        self.robot_manager.stopRobot()
 
         if self.args.save_log:
             print("saving log")
             # this does not get run if you exited with ctrl-c
-            self.robot_manager.log_manager.storeControlLoopRun(
+            self.robot_manager._log_manager.storeControlLoopRun(
                 self.log_dict, self.controlLoop.func.__name__, self.final_iteration
             )
-            self.robot_manager.log_manager.saveLog()
+            self.robot_manager._log_manager.saveLog()
 
         if self.args.plotter:
             self.plotter_manager.terminateProcess()
@@ -238,9 +243,4 @@ class ControlLoopManager:
         if self.args.visualizer:
             self.robot_manager.visualizer_manager.terminateProcess()
 
-        # TODO: this obviously only makes sense if you're on ur robots
-        # so this functionality should be wrapped in robotmanager
-        if not self.args.pinocchio_only:
-            self.robot_manager.rtde_control.endFreedriveMode()
-
         exit()
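
The shutdown path above (flood the robot with zero-velocity commands, then hand off to the robot-specific stop) can be captured as a small signal-handler factory; robot here is a hypothetical stand-in exposing the same two methods:

import signal
import numpy as np

def makeStopHandler(robot, nv):
    def stopHandler(signum, frame):
        # ramp down by spamming zero velocities, then let the robot-specific
        # stop routine (brakes, freedrive, ...) take over
        for _ in range(300):
            robot.sendVelocityCommand(np.zeros(nv))
        robot.stopRobot()
        raise SystemExit
    return stopHandler

# signal.signal(signal.SIGINT, makeStopHandler(robot, robot.model.nv))
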
diff --git a/python/smc/control/dmp/dmp.py b/python/smc/control/dmp/dmp.py
index 0a14d9490e6f6450d109745231592980e279be0a..dbd1028325fdd855e71a1e3042f4691e48df5392 100644
--- a/python/smc/control/dmp/dmp.py
+++ b/python/smc/control/dmp/dmp.py
@@ -1,11 +1,13 @@
 import numpy as np
 import argparse
-from ur_simple_control.managers import RobotManager, ControlLoopManager
+from smc.robots.robotmanager_abstract import AbstractRobotManager
+from smc.control.control_loop_manager import ControlLoopManager
 from functools import partial
+
 # TODO:
 # 1. change the dimensions so that they make sense,
 #    i.e. shape = (N_points, dimension_of_points)
-# 2. change hand-written numerical differentiation 
+# 2. change hand-written numerical differentiation
 #    to numpy calls (for code style more than anything)
 # 3. separate x into z and s variables, this is unnecessarily complicated
 # 4. ask mentors if there's ever a reason not to use temporal coupling,
@@ -16,29 +18,46 @@ from functools import partial
 # see their effect. normally people set them so that you get critical damping
 # as the unconstrained system
 
+
 def getDMPArgs(parser):
     #############################
     #  dmp  specific arguments  #
     #############################
-    parser.add_argument('--temporal-coupling', action=argparse.BooleanOptionalAction, \
-            help="whether you want to use temporal coupling", default=True)
-    parser.add_argument('--tau0', type=float, \
-            help="total time needed for trajectory. if you use temporal coupling,\
-                  you can still follow the path even if it's too fast", \
-            default=10)
-    parser.add_argument('--gamma-nominal', type=float, \
-            help="positive constant for tuning temporal coupling: the higher,\
-            the fast the return rate to nominal tau", \
-            default=1.0)
-    parser.add_argument('--gamma-a', type=float, \
-            help="positive constant for tuning temporal coupling, potential term", \
-            default=0.5)
-    parser.add_argument('--eps-tc', type=float, \
-            help="temporal coupling term, should be small", \
-            default=0.001)
-            #default=0.05)
+    parser.add_argument(
+        "--temporal-coupling",
+        action=argparse.BooleanOptionalAction,
+        help="whether you want to use temporal coupling",
+        default=True,
+    )
+    parser.add_argument(
+        "--tau0",
+        type=float,
+        help="total time needed for trajectory. if you use temporal coupling,\
+                  you can still follow the path even if it's too fast",
+        default=10,
+    )
+    parser.add_argument(
+        "--gamma-nominal",
+        type=float,
+        help="positive constant for tuning temporal coupling: the higher,\
+            the faster the return rate to nominal tau",
+        default=1.0,
+    )
+    parser.add_argument(
+        "--gamma-a",
+        type=float,
+        help="positive constant for tuning temporal coupling, potential term",
+        default=0.5,
+    )
+    parser.add_argument(
+        "--eps-tc",
+        type=float,
+        help="temporal coupling term, should be small",
+        default=0.001,
+    )
+    # default=0.05)
     return parser
-    
+
 
 class DMP:
     def __init__(self, trajectory, k=100, d=20, a_s=1, n_bfs=100):
@@ -48,7 +67,7 @@ class DMP:
 
         # parameters
         # for whatever reason, k, d and a_s don't match the
-        # OG formulation: tau * z_dot = alpha_z * (beta_z * (g - y) - z) + f(x) 
+        # OG formulation: tau * z_dot = alpha_z * (beta_z * (g - y) - z) + f(x)
         # this formulation: tau * z_dot = k * (g - y) - d * z + f(x) = h (z, y, s)
         # i.e. k = beta_z * alpha_z
         #      d = alpha_z
@@ -58,23 +77,23 @@ class DMP:
         self.n_bfs = n_bfs
 
         # trajectory parameters
-        self.n = 0 # n of dofs
-        self.y0 = None # initial position
-        self.tau0 = None # final time
-        self.g = None # goal positions
-        self.tau = None # scaling factor, updated online to ensure traj is followed
+        self.n = 0  # n of dofs
+        self.y0 = None  # initial position
+        self.tau0 = None  # final time
+        self.g = None  # goal positions
+        self.tau = None  # scaling factor, updated online to ensure traj is followed
 
         # initialize basis functions for LWR
-        self.w = None # weights of basis functions
-        self.centers = None # centers of basis functions
-        self.widths = None # widths of basis functions
+        self.w = None  # weights of basis functions
+        self.centers = None  # centers of basis functions
+        self.widths = None  # widths of basis functions
 
         # state
         self.x = None
         self.theta = None
-        self.pos = None # position
-        self.vel = None # velocity 
-        self.acc = None # acceleration
+        self.pos = None  # position
+        self.vel = None  # velocity
+        self.acc = None  # acceleration
 
         # desired path
         self.path = None
@@ -89,8 +108,8 @@ class DMP:
 
     def load_trajectory_from_file(self, file_path):
         # load trajectory.  this is just joint positions.
-        trajectory = np.genfromtxt(file_path, delimiter=',')
-        self.time = trajectory [:, 0]
+        trajectory = np.genfromtxt(file_path, delimiter=",")
+        self.time = trajectory[:, 0]
         self.time = self.time.reshape(1, len(self.time))
         self.y = np.array(trajectory[:, 1:]).T
 
@@ -101,7 +120,7 @@ class DMP:
         self.y = np.array(trajectory[:, 1:]).T
 
     def reset(self):
-        self.x = np.vstack((np.zeros((self.n, 1)), self.y0, 1.))
+        self.x = np.vstack((np.zeros((self.n, 1)), self.y0, 1.0))
         self.tau = self.tau0
         self.theta = 0
         self.pos = self.y0
@@ -111,12 +130,12 @@ class DMP:
     def z(self, x=None):
         if x is None:
             x = self.x
-        return x[0:self.n]
+        return x[0 : self.n]
 
     def y_fun(self, x=None):
         if x is None:
             x = self.x
-        return x[self.n:2 * self.n]
+        return x[self.n : 2 * self.n]
 
     def s(self, x=None):
         if x is None:
@@ -133,13 +152,18 @@ class DMP:
         if i is not None and len(s) == 1:
             return np.exp(-1 / (2 * self.widths[i] ** 2) * (s - self.centers[i]) ** 2)
         if i is None:
-            return np.exp(-1 / (2 * self.widths ** 2) * (s - self.centers) ** 2)
+            return np.exp(-1 / (2 * self.widths**2) * (s - self.centers) ** 2)
 
     def h(self, x=None):
         if x is None:
             x = self.x
         psi = self.psi(self.s(x)).reshape((self.n_bfs, 1))
-        v = (self.w.dot(psi)) / np.maximum(np.sum(psi), 1e-8) * (self.g - self.y0) * self.s(x)
+        v = (
+            (self.w.dot(psi))
+            / np.maximum(np.sum(psi), 1e-8)
+            * (self.g - self.y0)
+            * self.s(x)
+        )
         h = self.k * (self.g - self.y_fun(x)) - self.d * self.z(x) + v
         return h
 
@@ -159,8 +183,8 @@ class DMP:
         self.vel = self.z() / self.tau
         self.acc = (self.vel - vel_prev) / dt
 
-# if you don't know what the letters mean,
-# look at the equation (which you need to know anyway)
+    # if you don't know what the letters mean,
+    # look at the equation (which you need to know anyway)
     def fit(self):
         # Set target trajectory parameters
         self.n = self.y.shape[0]
@@ -175,9 +199,13 @@ class DMP:
         self.widths = np.concatenate((widths, [widths[-1]]))
 
         # Calculate derivatives
-        yd_demo = (self.y[:, 1:] - self.y[:, :-1]) / (self.time[0, 1:] - self.time[0, :-1])
+        yd_demo = (self.y[:, 1:] - self.y[:, :-1]) / (
+            self.time[0, 1:] - self.time[0, :-1]
+        )
         yd_demo = np.concatenate((yd_demo, np.zeros((self.n, 1))), axis=1)
-        ydd_demo = (yd_demo[:, 1:] - yd_demo[:, :-1]) / (self.time[0, 1:] - self.time[0, :-1])
+        ydd_demo = (yd_demo[:, 1:] - yd_demo[:, :-1]) / (
+            self.time[0, 1:] - self.time[0, :-1]
+        )
         ydd_demo = np.concatenate((ydd_demo, np.zeros((self.n, 1))), axis=1)
 
         # Compute weights
@@ -187,8 +215,11 @@ class DMP:
             if abs(self.g[i] - self.y0[i]) < 1e-5:
                 continue
             f_gain = s_seq * (self.g[i] - self.y0[i])
-            f_target = self.tau0 ** 2 * ydd_demo[i, :] - self.k * (
-                        self.g[i] - self.y[i, :]) + self.d * self.tau0 * yd_demo[i, :]
+            f_target = (
+                self.tau0**2 * ydd_demo[i, :]
+                - self.k * (self.g[i] - self.y[i, :])
+                + self.d * self.tau0 * yd_demo[i, :]
+            )
             for j in range(self.n_bfs):
                 psi_j = self.psi(s_seq, j)
                 num = f_gain.dot((psi_j * f_target).T)
@@ -205,6 +236,7 @@ class NoTC:
     def update(self, dmp, dt):
         return 0
 
+
 class TCVelAccConstrained:
 
     def __init__(self, gamma_nominal, gamma_a, v_max, a_max, eps=0.001):
@@ -231,12 +263,12 @@ class TCVelAccConstrained:
         # Acceleration bounds
         i = np.squeeze(A < 0)
         if i.any():
-            taud_min_a = np.max(- (B[i] * dmp.tau ** 2 + C[i]) / A[i])
+            taud_min_a = np.max(-(B[i] * dmp.tau**2 + C[i]) / A[i])
         else:
             taud_min_a = -np.inf
         i = np.squeeze(A > 0)
         if i.any():
-            taud_max_a = np.min(- (B[i] * dmp.tau ** 2 + C[i]) / A[i])
+            taud_max_a = np.min(-(B[i] * dmp.tau**2 + C[i]) / A[i])
         else:
             taud_max_a = np.inf
         # Velocity bounds
@@ -264,10 +296,15 @@ class TCVelAccConstrained:
         # Base update law
         ydd_bar = dmp.h() / (dmp.tau**2 * self.a_max)
         if self.gamma_a > 0:
-            pot_a = self.gamma_a * np.sum(ydd_bar ** 2 / np.maximum(1 - ydd_bar ** 2, self.gamma_a * self.eps * np.ones((len(ydd_bar), 1))))
+            pot_a = self.gamma_a * np.sum(
+                ydd_bar**2
+                / np.maximum(
+                    1 - ydd_bar**2, self.gamma_a * self.eps * np.ones((len(ydd_bar), 1))
+                )
+            )
         else:
             pot_a = 0
-        #pot_a = self.gamma_a * np.amax(ydd_bar ** 2 / np.maximum(1 - ydd_bar ** 2, self.gamma_a * self.eps * np.ones((len(ydd_bar), 1))))
+        # pot_a = self.gamma_a * np.amax(ydd_bar ** 2 / np.maximum(1 - ydd_bar ** 2, self.gamma_a * self.eps * np.ones((len(ydd_bar), 1))))
         taud = self.gamma_nominal * (dmp.tau0 - dmp.tau) + dmp.tau * pot_a
 
         # Saturate
@@ -277,7 +314,7 @@ class TCVelAccConstrained:
         return taud
 
 
-def controlLoopDMP(args, robot : RobotManager, dmp : DMP, tc, i, past_data):
+def controlLoopDMP(args, robot: AbstractRobotManager, dmp: DMP, tc, i, past_data):
     """
     controlLoopDMP
     -----------------------------
@@ -287,41 +324,46 @@ def controlLoopDMP(args, robot : RobotManager, dmp : DMP, tc, i, past_data):
     log_item = {}
     save_past_dict = {}
 
-    q = robot.getQ()
+    q = robot.q
 
     dmp.step(robot.dt)
     tau_dmp = dmp.tau + tc.update(dmp, robot.dt) * robot.dt
     dmp.set_tau(tau_dmp)
 
-    vel_cmd = dmp.vel + args.kp * (dmp.pos - q[:6].reshape((6,1)))
+    vel_cmd = dmp.vel + args.kp * (dmp.pos - q.reshape((-1, 1)))
 
-    robot.sendQd(vel_cmd)
+    robot.sendVelocityCommand(vel_cmd)
 
     if (np.linalg.norm(dmp.vel) < 0.01) and (i > int(dmp.tau0 * 500)):
         breakFlag = True
-    
-    log_item['qs'] = q.reshape((robot.model.nq,))
-    log_item['dmp_qs'] = dmp.pos.reshape((6,))
-    log_item['dqs'] = robot.getQd().reshape((robot.model.nv,))
-    log_item['dmp_dqs'] = dmp.vel.reshape((6,))
+
+    log_item["qs"] = q.reshape((robot.model.nq,))
+    log_item["dmp_qs"] = dmp.pos.reshape((6,))
+    log_item["dqs"] = robot.v
+    log_item["dmp_dqs"] = dmp.vel.reshape((6,))
     return breakFlag, save_past_dict, log_item
 
-def followDMP(args, robot, qs : np.ndarray, tau0):
-    t = np.linspace(0, tau0, len(qs)).reshape((len(qs),1))
+
+def followDMP(args, robot, qs: np.ndarray, tau0):
+    t = np.linspace(0, tau0, len(qs)).reshape((len(qs), 1))
     joint_trajectory = np.hstack((t, qs))
     dmp = DMP(joint_trajectory)
     if not args.temporal_coupling:
         tc = NoTC()
     else:
-        v_max_ndarray = np.ones(robot.n_arm_joints) * robot.max_qd 
-        a_max_ndarray = np.ones(robot.n_arm_joints) * args.acceleration 
-        tc = TCVelAccConstrained(args.gamma_nominal, args.gamma_a, v_max_ndarray, a_max_ndarray, args.eps_tc)
+        v_max_ndarray = np.ones(robot.n_arm_joints) * robot.max_qd
+        a_max_ndarray = np.ones(robot.n_arm_joints) * args.acceleration
+        tc = TCVelAccConstrained(
+            args.gamma_nominal, args.gamma_a, v_max_ndarray, a_max_ndarray, args.eps_tc
+        )
     save_past_dict = {}
     log_item = {}
-    log_item['qs'] = np.zeros((robot.model.nq,))
-    log_item['dmp_qs'] = np.zeros((6,))
-    log_item['dqs'] = np.zeros((robot.model.nv,))
-    log_item['dmp_dqs'] = np.zeros((6,))
+    log_item["qs"] = np.zeros((robot.model.nq,))
+    log_item["dmp_qs"] = np.zeros((6,))
+    log_item["dqs"] = np.zeros((robot.model.nv,))
+    log_item["dmp_dqs"] = np.zeros((6,))
     controlLoop = partial(controlLoopDMP, args, robot, dmp, tc)
-    loop_manager = ControlLoopManager(robot, controlLoop, args, save_past_dict, log_item)
+    loop_manager = ControlLoopManager(
+        robot, controlLoop, args, save_past_dict, log_item
+    )
     loop_manager.run()
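
A usage sketch for followDMP above: build an (N, n_joints) array of joint configurations and hand it over together with a nominal duration tau0. The robot and args objects are assumed to come from getRobotFromArgs / getMinimalArgParser as in the examples, so the call itself is left commented:

import numpy as np
# from smc.control.dmp.dmp import followDMP

q_start = np.zeros(6)
q_goal = np.array([0.5, -0.4, 0.3, 0.0, 0.2, 0.0])
qs = np.linspace(q_start, q_goal, 200)   # straight-line joint-space path, shape (200, 6)
# followDMP(args, robot, qs, tau0=5.0)   # fits the DMP and runs the control loop
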
diff --git a/python/smc/control/joint_space/__init__.py b/python/smc/control/joint_space/__init__.py
index 5eff51317610b8d0f17e2404fa6675e21699882c..8c46f138e710e5ccc66770998f15f9c8040f47b3 100644
--- a/python/smc/control/joint_space/__init__.py
+++ b/python/smc/control/joint_space/__init__.py
@@ -1,3 +1,2 @@
-from .freedrive import *
 from .joint_space_point_to_point import *
 from .joint_space_trajectory_following import *
diff --git a/python/smc/control/optimal_control/crocoddyl_optimal_control.py b/python/smc/control/optimal_control/crocoddyl_optimal_control.py
index d5576b49c1dc3391896f1fb3d9fa85235c79820d..834466a4c9726d3eb55d89800facc404130bad6f 100644
--- a/python/smc/control/optimal_control/crocoddyl_optimal_control.py
+++ b/python/smc/control/optimal_control/crocoddyl_optimal_control.py
@@ -1,30 +1,36 @@
+from smc.control.joint_space.joint_space_trajectory_following import (
+    followKinematicJointTrajP,
+)
+from smc.visualization.plotters import plotFromDict
+from smc.robots.utils import getMinimalArgParser
+from smc.robots.robotmanager_abstract import AbstractRobotManager
+
 import numpy as np
 import pinocchio as pin
 import crocoddyl
-from ur_simple_control.managers import getMinimalArgParser, ControlLoopManager, RobotManager
-import argparse
-from ur_simple_control.basics.basics import followKinematicJointTrajP
-from ur_simple_control.visualize.visualize import plotFromDict
+from argparse import Namespace
 import example_robot_data
 import time
 import importlib.util
-if importlib.util.find_spec('mim_solvers'):
+
+if importlib.util.find_spec("mim_solvers"):
     import mim_solvers
 
-def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
+
+def createCrocoIKOCP(args: Namespace, robot: AbstractRobotManager, x0, goal: pin.SE3):
     if robot.robot_name == "yumi":
         goal_l, goal_r = goal
     # create torque bounds which correspond to percentage
-    # of maximum allowed acceleration 
+    # of maximum allowed acceleration
     # NOTE: idk if this makes any sense, but let's go for it
-    #print(robot.model.effortLimit)
-    #exit()
-    #robot.model.effortLimit = robot.model.effortLimit * (args.acceleration / robot.MAX_ACCELERATION)
-    #robot.model.effortLimit = robot.model.effortLimit * 0.5
-    #robot.data = robot.model.createData()
+    # print(robot.model.effortLimit)
+    # exit()
+    # robot.model.effortLimit = robot.model.effortLimit * (args.acceleration / robot.MAX_ACCELERATION)
+    # robot.model.effortLimit = robot.model.effortLimit * 0.5
+    # robot.data = robot.model.createData()
     # TODO: make it underactuated in mobile base's y-direction
     state = crocoddyl.StateMultibody(robot.model)
-    # command input IS torque 
+    # command input IS torque
     # TODO: consider ActuationModelFloatingBaseTpl for heron
     # TODO: create a different actuation model (or whatever)
     # for the mobile base - basically just remove the y movement in the base
@@ -50,53 +56,67 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
     # (look at that to see how to compile, make python bindings etc)
     if robot.robot_name != "yumi":
         framePlacementResidual = crocoddyl.ResidualModelFramePlacement(
-                state,
-                # TODO: check if this is the frame we actually want (ee tip)
-                # the id is an integer and that's what api wants
-                robot.model.getFrameId("tool0"),
-                goal.copy(),
-                state.nv)
+            state,
+            # TODO: check if this is the frame we actually want (ee tip)
+            # the id is an integer and that's what api wants
+            robot.model.getFrameId("tool0"),
+            goal.copy(),
+            state.nv,
+        )
         goalTrackingCost = crocoddyl.CostModelResidual(state, framePlacementResidual)
         # cost 4) final ee velocity is 0 (can't directly formulate joint velocity,
         #         because that's a part of the state, and we don't know final joint positions)
         frameVelocityResidual = crocoddyl.ResidualModelFrameVelocity(
-                state,
-                robot.model.getFrameId("tool0"),
-                pin.Motion(np.zeros(6)),
-                pin.ReferenceFrame.WORLD)
+            state,
+            robot.model.getFrameId("tool0"),
+            pin.Motion(np.zeros(6)),
+            pin.ReferenceFrame.WORLD,
+        )
         frameVelocityCost = crocoddyl.CostModelResidual(state, frameVelocityResidual)
         runningCostModel.addCost("gripperPose", goalTrackingCost, 1e2)
         terminalCostModel.addCost("gripperPose", goalTrackingCost, 1e2)
         terminalCostModel.addCost("velFinal", frameVelocityCost, 1e3)
     else:
         framePlacementResidual_l = crocoddyl.ResidualModelFramePlacement(
-                state,
-                # TODO: check if this is the frame we actually want (ee tip)
-                # the id is an integer and that's what api wants
-                robot.model.getFrameId("robl_joint_7"),
-                goal_l.copy(),
-                state.nv)
+            state,
+            # TODO: check if this is the frame we actually want (ee tip)
+            # the id is an integer and that's what api wants
+            robot.model.getFrameId("robl_joint_7"),
+            goal_l.copy(),
+            state.nv,
+        )
         framePlacementResidual_r = crocoddyl.ResidualModelFramePlacement(
-                state,
-                # TODO: check if this is the frame we actually want (ee tip)
-                # the id is an integer and that's what api wants
-                robot.model.getFrameId("robr_joint_7"),
-                goal_r.copy(),
-                state.nv)
-        goalTrackingCost_l = crocoddyl.CostModelResidual(state, framePlacementResidual_l)
-        goalTrackingCost_r = crocoddyl.CostModelResidual(state, framePlacementResidual_r)
+            state,
+            # TODO: check if this is the frame we actually want (ee tip)
+            # the id is an integer and that's what api wants
+            robot.model.getFrameId("robr_joint_7"),
+            goal_r.copy(),
+            state.nv,
+        )
+        goalTrackingCost_l = crocoddyl.CostModelResidual(
+            state, framePlacementResidual_l
+        )
+        goalTrackingCost_r = crocoddyl.CostModelResidual(
+            state, framePlacementResidual_r
+        )
         frameVelocityResidual_l = crocoddyl.ResidualModelFrameVelocity(
-                state,
-                robot.model.getFrameId("robl_joint_7"),
-                pin.Motion(np.zeros(6)),
-                pin.ReferenceFrame.WORLD)
+            state,
+            robot.model.getFrameId("robl_joint_7"),
+            pin.Motion(np.zeros(6)),
+            pin.ReferenceFrame.WORLD,
+        )
         frameVelocityResidual_r = crocoddyl.ResidualModelFrameVelocity(
-                state,
-                robot.model.getFrameId("robr_joint_7"),
-                pin.Motion(np.zeros(6)),
-                pin.ReferenceFrame.WORLD)
-        frameVelocityCost_l = crocoddyl.CostModelResidual(state, frameVelocityResidual_l)
-        frameVelocityCost_r = crocoddyl.CostModelResidual(state, frameVelocityResidual_r)
+            state,
+            robot.model.getFrameId("robr_joint_7"),
+            pin.Motion(np.zeros(6)),
+            pin.ReferenceFrame.WORLD,
+        )
+        frameVelocityCost_l = crocoddyl.CostModelResidual(
+            state, frameVelocityResidual_l
+        )
+        frameVelocityCost_r = crocoddyl.CostModelResidual(
+            state, frameVelocityResidual_r
+        )
         runningCostModel.addCost("gripperPose_l", goalTrackingCost_l, 1e2)
         runningCostModel.addCost("gripperPose_r", goalTrackingCost_r, 1e2)
         terminalCostModel.addCost("gripperPose_l", goalTrackingCost_l, 1e2)
@@ -117,33 +137,34 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
     # - added to costs via barrier functions for fddp
     # - added as actual constraints via crocoddyl.constraintmanager for csqp
     ###########################################################################
-    
 
     # the 4th cost is for defining bounds via costs
     # NOTE: could have gotten the same info from state.lb and state.ub.
     # the first state is unlimited there idk what that means really,
     # but the arm's base isn't doing a full rotation anyway, let alone 2 or more
-    xlb = np.concatenate([
-        robot.model.lowerPositionLimit,
-        -1 * robot.model.velocityLimit])
-    xub = np.concatenate([
-        robot.model.upperPositionLimit,
-        robot.model.velocityLimit])
+    xlb = np.concatenate(
+        [robot.model.lowerPositionLimit, -1 * robot.model.velocityLimit]
+    )
+    xub = np.concatenate([robot.model.upperPositionLimit, robot.model.velocityLimit])
     # we have no limits on the mobile base.
     # the mobile base is a planar joint.
     # since it is represented as [x,y,cos(theta),sin(theta)], there is no point
     # to limiting cos(theta),sin(theta) even if we wanted limits,
     # because we would then limit theta, or limit ct and st jointly.
     # in any event, xlb and xub are 1 number too long --
-    # the residual has to be [x,y,theta] because it is in the tangent space - 
+    # the residual has to be [x,y,theta] because it is in the tangent space -
     # the difference between two points on a manifold in pinocchio is defined
-    # as the velocity which if parallel transported for 1 unit of "time" 
+    # as the velocity which if parallel transported for 1 unit of "time"
     # from one point to the other.
     # point activation input and the residual need to be of the same length obviously,
     # and this should be 2 * model.nv the way things are defined here.
 
-
-    if robot.robot_name == "heron" or robot.robot_name == "heronros" or robot.robot_name == "mirros" or robot.robot_name == "yumi":
+    if (
+        robot.robot_name == "heron"
+        or robot.robot_name == "heronros"
+        or robot.robot_name == "mirros"
+        or robot.robot_name == "yumi"
+    ):
         xlb = xlb[1:]
         xub = xub[1:]
 
@@ -161,26 +182,22 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
     # TODO: try using crocoddyl.ConstraintModelResidual
     # instead to create actual box constraints (inequality constraints)
     # TODO: same goes for control input
-    # NOTE: i'm not sure whether any solver uses these tho lel 
+    # NOTE: i'm not sure whether any solver uses these tho lel
     # --> you only do that for mim_solvers' csqp!
 
     if args.solver == "csqp":
         # this just store all the constraints
         constraints = crocoddyl.ConstraintModelManager(state, robot.model.nv)
         u_constraint = crocoddyl.ConstraintModelResidual(
-                state,
-                uResidual, 
-                -1 * robot.model.effortLimit * 0.1,
-                robot.model.effortLimit  * 0.1)
+            state,
+            uResidual,
+            -1 * robot.model.effortLimit * 0.1,
+            robot.model.effortLimit * 0.1,
+        )
         constraints.addConstraint("u_box_constraint", u_constraint)
-        x_constraint = crocoddyl.ConstraintModelResidual(
-                state,
-                xResidual, 
-                xlb,
-                xub)
+        x_constraint = crocoddyl.ConstraintModelResidual(state, xResidual, xlb, xub)
         constraints.addConstraint("x_box_constraint", x_constraint)
 
-
     # Next, we need to create an action model for running and terminal knots. The
     # forward dynamics (computed using ABA) are implemented
     # inside DifferentialActionModelFreeFwdDynamics.
@@ -192,15 +209,15 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
             args.ocp_dt,
         )
         runningModel.u_lb = -1 * robot.model.effortLimit * 0.1
-        runningModel.u_ub = robot.model.effortLimit  * 0.1
+        runningModel.u_ub = robot.model.effortLimit * 0.1
         terminalModel = crocoddyl.IntegratedActionModelEuler(
             crocoddyl.DifferentialActionModelFreeInvDynamics(
                 state, actuation, terminalCostModel
             ),
             0.0,
         )
-        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1 
-        terminalModel.u_ub = robot.model.effortLimit  * 0.1
+        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1
+        terminalModel.u_ub = robot.model.effortLimit * 0.1
     if args.solver == "csqp":
         runningModel = crocoddyl.IntegratedActionModelEuler(
             crocoddyl.DifferentialActionModelFreeInvDynamics(
@@ -215,10 +232,12 @@ def createCrocoIKOCP(args, robot : RobotManager, x0, goal : pin.SE3):
             0.0,
         )
 
-
     # now we define the problem
-    problem = crocoddyl.ShootingProblem(x0, [runningModel] * args.n_knots, terminalModel)
-    return problem 
+    problem = crocoddyl.ShootingProblem(
+        x0, [runningModel] * args.n_knots, terminalModel
+    )
+    return problem
+
 
 # this shouldn't really depend on x0 but i can't be bothered
 def solveCrocoOCP(args, robot, problem, x0):
@@ -233,13 +252,15 @@ def solveCrocoOCP(args, robot, problem, x0):
         solver = crocoddyl.SolverBoxFDDP(problem)
     if args.solver == "csqp":
         solver = mim_solvers.SolverCSQP(problem)
-    #solver = mim_solvers.SolverSQP(problem)
-    #solver = crocoddyl.SolverIpopt(problem)
+    # solver = mim_solvers.SolverSQP(problem)
+    # solver = crocoddyl.SolverIpopt(problem)
     # TODO: remove / place elsewhere once it works (make it an argument)
     if args.solver == "boxfddp":
         solver.setCallbacks([crocoddyl.CallbackVerbose(), crocoddyl.CallbackLogger()])
     if args.solver == "csqp":
-        solver.setCallbacks([mim_solvers.CallbackVerbose(), mim_solvers.CallbackLogger()])
+        solver.setCallbacks(
+            [mim_solvers.CallbackVerbose(), mim_solvers.CallbackLogger()]
+        )
     # run solve
     # NOTE: there are some possible parameters here that I'm not using
     xs = [x0] * (solver.problem.T + 1)
@@ -250,18 +271,17 @@ def solveCrocoOCP(args, robot, problem, x0):
     end = time.time()
     print("solved in:", end - start, "seconds")
 
-    #solver.solve()
+    # solver.solve()
     # get reference (state trajectory)
     # we aren't using controls because we only have velocity inputs
     xs = np.array(solver.xs)
-    qs = xs[:, :robot.model.nq]
-    vs = xs[:, robot.model.nq:]
-    reference = {'qs' : qs, 'vs' : vs, 'dt' : args.ocp_dt}
+    qs = xs[:, : robot.model.nq]
+    vs = xs[:, robot.model.nq :]
+    reference = {"qs": qs, "vs": vs, "dt": args.ocp_dt}
     return reference, solver
 
 
-
-def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
+def createCrocoEEPathFollowingOCP(args, robot: AbstractRobotManager, x0):
     """
     createCrocoEEPathFollowingOCP
     -------------------------------
@@ -287,19 +307,19 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
     xRegCost = crocoddyl.CostModelResidual(state, xResidual)
     # cost 4) final ee velocity is 0 (can't directly formulate joint velocity,
     #         because that's a part of the state, and we don't know final joint positions)
-    #frameVelocityResidual = crocoddyl.ResidualModelFrameVelocity(
+    # frameVelocityResidual = crocoddyl.ResidualModelFrameVelocity(
     #        state,
     #        robot.model.getFrameId("tool0"),
     #        pin.Motion(np.zeros(6)),
     #        pin.ReferenceFrame.WORLD)
-    #frameVelocityCost = crocoddyl.CostModelResidual(state, frameVelocityResidual)
+    # frameVelocityCost = crocoddyl.CostModelResidual(state, frameVelocityResidual)
 
     # put these costs into the running costs
     # we put this one in later
     # and add the terminal cost, which is the distance to the goal
     # NOTE: shouldn't there be a final velocity = 0 here?
     terminalCostModel.addCost("uReg", uRegCost, 1e3)
-    #terminalCostModel.addCost("velFinal", frameVelocityCost, 1e3)
+    # terminalCostModel.addCost("velFinal", frameVelocityCost, 1e3)
 
     ######################################################################
     #  state constraints  #
@@ -307,18 +327,15 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
     # - added to costs via barrier functions for fddp
     # - added as actual constraints via crocoddyl.constraintmanager for csqp
     ###########################################################################
-    
 
     # the 4th cost is for defining bounds via costs
     # NOTE: could have gotten the same info from state.lb and state.ub.
     # the first state is unlimited there idk what that means really,
     # but the arm's base isn't doing a full rotation anyway, let alone 2 or more
-    xlb = np.concatenate([
-        robot.model.lowerPositionLimit,
-        -1 * robot.model.velocityLimit])
-    xub = np.concatenate([
-        robot.model.upperPositionLimit,
-        robot.model.velocityLimit])
+    xlb = np.concatenate(
+        [robot.model.lowerPositionLimit, -1 * robot.model.velocityLimit]
+    )
+    xub = np.concatenate([robot.model.upperPositionLimit, robot.model.velocityLimit])
 
     # we have no limits on the mobile base.
     # the mobile base is a planar joint.
@@ -326,13 +343,17 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
     # to limiting cos(theta),sin(theta) even if we wanted limits,
     # because we would then limit theta, or limit ct and st jointly.
     # in any event, xlb and xub are 1 number too long --
-    # the residual has to be [x,y,theta] because it is in the tangent space - 
+    # the residual has to be [x,y,theta] because it is in the tangent space -
     # the difference between two points on a manifold in pinocchio is defined
-    # as the velocity which if parallel transported for 1 unit of "time" 
+    # as the velocity which if parallel transported for 1 unit of "time"
     # from one point to the other.
     # point activation input and the residual need to be of the same length obviously,
     # and this should be 2 * model.nv the way things are defined here.
-    if robot.robot_name == "heron" or robot.robot_name == "heronros" or robot.robot_name == "mirros":
+    if (
+        robot.robot_name == "heron"
+        or robot.robot_name == "heronros"
+        or robot.robot_name == "mirros"
+    ):
         xlb = xlb[1:]
         xub = xub[1:]
 
@@ -351,19 +372,15 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
         # this just store all the constraints
         constraints = crocoddyl.ConstraintModelManager(state, robot.model.nv)
         u_constraint = crocoddyl.ConstraintModelResidual(
-                state,
-                uResidual, 
-                -1 * robot.model.effortLimit * 0.1,
-                robot.model.effortLimit  * 0.1)
+            state,
+            uResidual,
+            -1 * robot.model.effortLimit * 0.1,
+            robot.model.effortLimit * 0.1,
+        )
         constraints.addConstraint("u_box_constraint", u_constraint)
-        x_constraint = crocoddyl.ConstraintModelResidual(
-                state,
-                xResidual, 
-                xlb,
-                xub)
+        x_constraint = crocoddyl.ConstraintModelResidual(state, xResidual, xlb, xub)
         constraints.addConstraint("x_box_constraint", x_constraint)
 
-
     # Next, we need to create an action model for running and terminal knots. The
     # forward dynamics (computed using ABA) are implemented
     # inside DifferentialActionModelFreeFwdDynamics.
@@ -376,14 +393,15 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
         if args.solver == "boxfddp":
             runningCostModel.addCost("limitCost", limitCost, 1e3)
         framePlacementResidual = crocoddyl.ResidualModelFramePlacement(
-                state,
-                robot.model.getFrameId("tool0"),
-                path[i], # this better be done with the same time steps as the knots
-                         # NOTE: this should be done outside of this function to clarity
-                state.nv)
+            state,
+            robot.model.getFrameId("tool0"),
+            path[i],  # this had better be done with the same time steps as the knots
+            # NOTE: this should be done outside of this function for clarity
+            state.nv,
+        )
         goalTrackingCost = crocoddyl.CostModelResidual(state, framePlacementResidual)
         runningCostModel.addCost("gripperPose" + str(i), goalTrackingCost, 1e2)
-        #runningCostModel.addCost("gripperPose", goalTrackingCost, 1e2)
+        # runningCostModel.addCost("gripperPose", goalTrackingCost, 1e2)
 
         if args.solver == "boxfddp":
             runningModel = crocoddyl.IntegratedActionModelEuler(
@@ -393,7 +411,7 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
                 args.ocp_dt,
             )
             runningModel.u_lb = -1 * robot.model.effortLimit * 0.1
-            runningModel.u_ub = robot.model.effortLimit  * 0.1
+            runningModel.u_ub = robot.model.effortLimit * 0.1
         if args.solver == "csqp":
             runningModel = crocoddyl.IntegratedActionModelEuler(
                 crocoddyl.DifferentialActionModelFreeInvDynamics(
@@ -411,24 +429,25 @@ def createCrocoEEPathFollowingOCP(args, robot : RobotManager, x0):
             ),
             0.0,
         )
-        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1 
-        terminalModel.u_ub = robot.model.effortLimit  * 0.1
+        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1
+        terminalModel.u_ub = robot.model.effortLimit * 0.1
     if args.solver == "csqp":
-            terminalModel = crocoddyl.IntegratedActionModelEuler(
-                crocoddyl.DifferentialActionModelFreeInvDynamics(
-                    state, actuation, terminalCostModel, constraints
-                ),
-                0.0,
-            )
+        terminalModel = crocoddyl.IntegratedActionModelEuler(
+            crocoddyl.DifferentialActionModelFreeInvDynamics(
+                state, actuation, terminalCostModel, constraints
+            ),
+            0.0,
+        )
 
     terminalCostModel.addCost("gripperPose" + str(args.n_knots), goalTrackingCost, 1e2)
-    #terminalCostModel.addCost("gripperPose", goalTrackingCost, 1e2)
+    # terminalCostModel.addCost("gripperPose", goalTrackingCost, 1e2)
 
     # now we define the problem
     problem = crocoddyl.ShootingProblem(x0, runningModels, terminalModel)
-    return problem 
+    return problem
+
 
-def createBaseAndEEPathFollowingOCP(args, robot : RobotManager, x0):
+def createBaseAndEEPathFollowingOCP(args, robot: AbstractRobotManager, x0):
     """
     createBaseAndEEPathFollowingOCP
     -------------------------------
@@ -471,27 +490,29 @@ def createBaseAndEEPathFollowingOCP(args, robot : RobotManager, x0):
     # - added to costs via barrier functions for fddp (4th cost function)
     # - added as actual constraints via crocoddyl.constraintmanager for csqp
     ###########################################################################
-    xlb = np.concatenate([
-        robot.model.lowerPositionLimit,
-        -1 * robot.model.velocityLimit])
-    xub = np.concatenate([
-        robot.model.upperPositionLimit,
-        robot.model.velocityLimit])
+    xlb = np.concatenate(
+        [robot.model.lowerPositionLimit, -1 * robot.model.velocityLimit]
+    )
+    xub = np.concatenate([robot.model.upperPositionLimit, robot.model.velocityLimit])
     # we have no limits on the mobile base.
     # the mobile base is a planar joint.
     # since it is represented as [x,y,cos(theta),sin(theta)], there is no point
     # to limiting cos(theta),sin(theta) even if we wanted limits,
     # because we would then limit theta, or limit ct and st jointly.
     # in any event, xlb and xub are 1 number too long --
-    # the residual has to be [x,y,theta] because it is in the tangent space - 
+    # the residual has to be [x,y,theta] because it is in the tangent space -
     # the difference between two points on a manifold in pinocchio is defined
-    # as the velocity which if parallel transported for 1 unit of "time" 
+    # as the velocity which if parallel transported for 1 unit of "time"
     # from one point to the other.
     # point activation input and the residual need to be of the same length obviously,
     # and this should be 2 * model.nv the way things are defined here.
 
-
-    if robot.robot_name == "heron" or robot.robot_name == "heronros" or robot.robot_name == "mirros" or robot.robot_name == "yumi":
+    if (
+        robot.robot_name == "heron"
+        or robot.robot_name == "heronros"
+        or robot.robot_name == "mirros"
+        or robot.robot_name == "yumi"
+    ):
         xlb = xlb[1:]
         xub = xub[1:]
 
@@ -505,22 +526,18 @@ def createBaseAndEEPathFollowingOCP(args, robot : RobotManager, x0):
         limitCost = crocoddyl.CostModelResidual(state, xLimitActivation, xLimitResidual)
         terminalCostModel.addCost("limitCost", limitCost, 1e3)
 
-
     # csqp actually allows us to put in hard constraints so we do that
     if args.solver == "csqp":
         # this just store all the constraints
         constraints = crocoddyl.ConstraintModelManager(state, robot.model.nv)
         u_constraint = crocoddyl.ConstraintModelResidual(
-                state,
-                uResidual, 
-                -1 * robot.model.effortLimit * 0.1,
-                robot.model.effortLimit  * 0.1)
+            state,
+            uResidual,
+            -1 * robot.model.effortLimit * 0.1,
+            robot.model.effortLimit * 0.1,
+        )
         constraints.addConstraint("u_box_constraint", u_constraint)
-        x_constraint = crocoddyl.ConstraintModelResidual(
-                state,
-                xResidual, 
-                xlb,
-                xub)
+        x_constraint = crocoddyl.ConstraintModelResidual(state, xResidual, xlb, xub)
         constraints.addConstraint("x_box_constraint", x_constraint)
 
     # Next, we need to create an action model for running and terminal knots. The
@@ -539,42 +556,50 @@ def createBaseAndEEPathFollowingOCP(args, robot : RobotManager, x0):
         ##########################
         if robot.robot_name != "yumi":
             eePoseResidual = crocoddyl.ResidualModelFramePlacement(
-                    state,
-                    robot.model.getFrameId("tool0"),
-                    path_ee[i], # this better be done with the same time steps as the knots
-                             # NOTE: this should be done outside of this function to clarity
-                    state.nv)
+                state,
+                robot.model.getFrameId("tool0"),
+                path_ee[i],  # this had better be done with the same time steps as the knots
+                # NOTE: this should be done outside of this function for clarity
+                state.nv,
+            )
             eeTrackingCost = crocoddyl.CostModelResidual(state, eePoseResidual)
-            runningCostModel.addCost("ee_pose" + str(i), eeTrackingCost, args.ee_pose_cost)
+            runningCostModel.addCost(
+                "ee_pose" + str(i), eeTrackingCost, args.ee_pose_cost
+            )
         #########################
         #  dual arm references  #
         #########################
         else:
             l_eePoseResidual = crocoddyl.ResidualModelFramePlacement(
-                    state,
-                    robot.model.getFrameId("robl_joint_7"),
-                    # MASSIVE TODO: actually put in reference for left arm
-                    path_ee[i],
-                    state.nv)
+                state,
+                robot.model.getFrameId("robl_joint_7"),
+                # MASSIVE TODO: actually put in reference for left arm
+                path_ee[i],
+                state.nv,
+            )
             l_eeTrackingCost = crocoddyl.CostModelResidual(state, l_eePoseResidual)
-            runningCostModel.addCost("l_ee_pose" + str(i), l_eeTrackingCost, args.ee_pose_cost)
+            runningCostModel.addCost(
+                "l_ee_pose" + str(i), l_eeTrackingCost, args.ee_pose_cost
+            )
             r_eePoseResidual = crocoddyl.ResidualModelFramePlacement(
-                    state,
-                    robot.model.getFrameId("robr_joint_7"),
-                    # MASSIVE TODO: actually put in reference for left arm
-                    path_ee[i], 
-                    state.nv)
+                state,
+                robot.model.getFrameId("robr_joint_7"),
+                # MASSIVE TODO: actually put in reference for right arm
+                path_ee[i],
+                state.nv,
+            )
             r_eeTrackingCost = crocoddyl.CostModelResidual(state, r_eePoseResidual)
-            runningCostModel.addCost("r_ee_pose" + str(i), r_eeTrackingCost, args.ee_pose_cost)
-
+            runningCostModel.addCost(
+                "r_ee_pose" + str(i), r_eeTrackingCost, args.ee_pose_cost
+            )
 
         baseTranslationResidual = crocoddyl.ResidualModelFrameTranslation(
-                state,
-                robot.model.getFrameId("mobile_base"),
-                path_base[i],
-                state.nv)
+            state, robot.model.getFrameId("mobile_base"), path_base[i], state.nv
+        )
         baseTrackingCost = crocoddyl.CostModelResidual(state, baseTranslationResidual)
-        runningCostModel.addCost("base_translation" + str(i), baseTrackingCost, args.base_translation_cost)
+        runningCostModel.addCost(
+            "base_translation" + str(i), baseTrackingCost, args.base_translation_cost
+        )
 
         if args.solver == "boxfddp":
             runningModel = crocoddyl.IntegratedActionModelEuler(
@@ -584,7 +609,7 @@ def createBaseAndEEPathFollowingOCP(args, robot : RobotManager, x0):
                 args.ocp_dt,
             )
             runningModel.u_lb = -1 * robot.model.effortLimit * 0.1
-            runningModel.u_ub = robot.model.effortLimit  * 0.1
+            runningModel.u_ub = robot.model.effortLimit * 0.1
         if args.solver == "csqp":
             runningModel = crocoddyl.IntegratedActionModelEuler(
                 crocoddyl.DifferentialActionModelFreeInvDynamics(
@@ -602,35 +627,46 @@ def createBaseAndEEPathFollowingOCP(args, robot : RobotManager, x0):
             ),
             0.0,
         )
-        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1 
-        terminalModel.u_ub = robot.model.effortLimit  * 0.1
+        terminalModel.u_lb = -1 * robot.model.effortLimit * 0.1
+        terminalModel.u_ub = robot.model.effortLimit * 0.1
     if args.solver == "csqp":
-            terminalModel = crocoddyl.IntegratedActionModelEuler(
-                crocoddyl.DifferentialActionModelFreeInvDynamics(
-                    state, actuation, terminalCostModel, constraints
-                ),
-                0.0,
-            )
+        terminalModel = crocoddyl.IntegratedActionModelEuler(
+            crocoddyl.DifferentialActionModelFreeInvDynamics(
+                state, actuation, terminalCostModel, constraints
+            ),
+            0.0,
+        )
 
     if robot.robot_name != "yumi":
-        terminalCostModel.addCost("ee_pose" + str(args.n_knots), eeTrackingCost, args.ee_pose_cost)
+        terminalCostModel.addCost(
+            "ee_pose" + str(args.n_knots), eeTrackingCost, args.ee_pose_cost
+        )
     else:
-        terminalCostModel.addCost("l_ee_pose" + str(args.n_knots), l_eeTrackingCost, args.ee_pose_cost)
-        terminalCostModel.addCost("r_ee_pose" + str(args.n_knots), r_eeTrackingCost, args.ee_pose_cost)
-    terminalCostModel.addCost("base_translation" + str(args.n_knots), baseTrackingCost, args.base_translation_cost)
+        terminalCostModel.addCost(
+            "l_ee_pose" + str(args.n_knots), l_eeTrackingCost, args.ee_pose_cost
+        )
+        terminalCostModel.addCost(
+            "r_ee_pose" + str(args.n_knots), r_eeTrackingCost, args.ee_pose_cost
+        )
+    terminalCostModel.addCost(
+        "base_translation" + str(args.n_knots),
+        baseTrackingCost,
+        args.base_translation_cost,
+    )
 
     # now we define the problem
     problem = crocoddyl.ShootingProblem(x0, runningModels, terminalModel)
-    return problem 
+    return problem
+
 
 if __name__ == "__main__":
     parser = getMinimalArgParser()
     parser = get_OCP_args(parser)
     args = parser.parse_args()
     ex_robot = example_robot_data.load("ur5_gripper")
-    robot = RobotManager(args)
+    robot = AbstractRobotManager(args)
     # TODO: remove once things work
-    robot.max_qd = 3.14 
+    robot.max_qd = 3.14
     print("velocity limits", robot.model.velocityLimit)
     robot.q = pin.randomConfiguration(robot.model)
     robot.q[0] = 0.1
@@ -640,13 +676,15 @@ if __name__ == "__main__":
     goal.translation = np.random.random(3) * 0.4
     reference, solver = CrocoIKOCP(args, robot, goal)
     # we only work within -pi - pi (or 0-2pi idk anymore)
-    #reference['qs'] = reference['qs'] % (2*np.pi) - np.pi
+    # reference['qs'] = reference['qs'] % (2*np.pi) - np.pi
     if args.solver == "boxfddp":
         log = solver.getCallbacks()[1]
         crocoddyl.plotOCSolution(log.xs, log.us, figIndex=1, show=True)
-        crocoddyl.plotConvergence(log.costs, log.pregs, log.dregs, log.grads, log.stops, log.steps, figIndex=2)
+        crocoddyl.plotConvergence(
+            log.costs, log.pregs, log.dregs, log.grads, log.stops, log.steps, figIndex=2
+        )
     followKinematicJointTrajP(args, robot, reference)
-    reference.pop('dt')
+    reference.pop("dt")
     plotFromDict(reference, args.n_knots + 1, args)
     print("achieved result", robot.getT_w_e())
     display = crocoddyl.MeshcatDisplay(ex_robot)
diff --git a/python/smc/multiprocessing/networking/TODO b/python/smc/multiprocessing/networking/TODO
deleted file mode 100644
index 9e843d64cb62df340ae9ec61d72e2057a7d6bcf7..0000000000000000000000000000000000000000
--- a/python/smc/multiprocessing/networking/TODO
+++ /dev/null
@@ -1 +0,0 @@
-rename receiver and sender to subscriber and publisher
diff --git a/python/smc/multiprocessing/networking/todos.md b/python/smc/multiprocessing/networking/todos.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ea43e4b74ff9c55f7c3873fe477914bd5bc9381
--- /dev/null
+++ b/python/smc/multiprocessing/networking/todos.md
@@ -0,0 +1,40 @@
+1. rename receiver and sender to subscriber and publisher
+2. switch over to UDP - just drop a packet if deserialization fails and ideally log it (see the sketch at the end of this file)
+3. add a checksum to verify that a packet is intact, for example:
+import zlib
+import sensor_pb2  # Generated from your .proto file
+
+def serialize_with_checksum(sensor_data):
+    # Clear checksum before computing
+    sensor_data.checksum = 0
+
+    # Serialize message (excluding checksum)
+    serialized_data = sensor_data.SerializeToString()
+
+    # Compute CRC32 checksum
+    checksum = zlib.crc32(serialized_data) & 0xFFFFFFFF  # Ensure unsigned 32-bit
+
+    # Set checksum field
+    sensor_data.checksum = checksum
+
+    # Serialize again with checksum
+    return sensor_data.SerializeToString()
+
+and similarly for deserialization:
+def deserialize_with_checksum(data):
+    received_data = sensor_pb2.SensorData()
+    received_data.ParseFromString(data)
+
+    # Extract the received checksum
+    received_checksum = received_data.checksum
+
+    # Recompute the checksum (excluding checksum field)
+    received_data.checksum = 0
+    recalculated_checksum = zlib.crc32(received_data.SerializeToString()) & 0xFFFFFFFF
+
+    # Validate checksum
+    if received_checksum != recalculated_checksum:
+        raise ValueError("Checksum mismatch! Corrupted data received.")
+
+    return received_data  # Valid message
+
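+Item 2, sketched: a minimal UDP receive loop, assuming the `deserialize_with_checksum`
+helper above and the same `sensor_pb2.SensorData` message; the port and buffer size
+are placeholder values.
+
+import logging
+import socket
+
+def udp_receive_loop(port=7777, bufsize=4096):
+    # UDP socket: no connection state, packets may arrive corrupted or not at all,
+    # which is acceptable because every message is a full state snapshot anyway
+    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    sock.bind(("0.0.0.0", port))
+    while True:
+        packet, sender = sock.recvfrom(bufsize)
+        try:
+            # a checksum mismatch raises ValueError, a broken protobuf raises DecodeError;
+            # either way we just drop the packet and log it
+            msg = deserialize_with_checksum(packet)
+        except Exception as exc:
+            logging.warning("dropping packet from %s: %s", sender, exc)
+            continue
+        # hand msg over to whoever consumes it (shared memory, queue, ...)
+        print(msg)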
diff --git a/python/smc/path_generation/cartesian_to_joint_space_path_mapper.py b/python/smc/path_generation/cartesian_to_joint_space_path_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..219ba53d96008caccf098f70c265037c31f1451d
--- /dev/null
+++ b/python/smc/path_generation/cartesian_to_joint_space_path_mapper.py
@@ -0,0 +1,74 @@
+from smc.robots.robotmanager_abstract import AbstractRobotManager
+from smc.control.cartesian_space import moveL  # NOTE: assumed post-refactor location
+import pinocchio as pin
+import numpy as np
+import copy
+
+
+def clikCartesianPathIntoJointPath(
+    args, robot, path, clikController, q_init, plane_pose
+):
+    """
+    clikCartesianPathIntoJointPath
+    ------------------------------
+    functionality
+    ------------
+    Follows a provided Cartesian path,
+    creates a joint space trajectory for it.
+    The output format and timing only match what the DMP code expects,
+    because that code is the only consumer,
+    and it never got the lift-up it needs.
+
+    return
+    ------
+    - joint_space_trajectory to follow the given path.
+
+    arguments
+    ----------
+    - path: cartesian path given in task frame
+    """
+    # we don't know how many configurations there will be, so a plain list
+    # is good enough here (append is amortized O(1),
+    # and we aren't pressed for time when turning it into an array later)
+    qs = []
+    # let's use the functions we already have. to do so
+    # we need to create a new AbstractRobotManager with arguments set for simulation,
+    # otherwise weird things will happen.
+    # we keep all the other args intact
+    sim_args = copy.deepcopy(args)
+    sim_args.pinocchio_only = True
+    sim_args.ctrl_freq = -1
+    sim_args.plotter = False
+    sim_args.visualizer = False
+    sim_args.save_log = False  # we're not using sim robot outside of this
+    sim_args.max_iterations = 10000  # more than enough
+    sim_robot = AbstractRobotManager(sim_args)
+    sim_robot.q = q_init.copy()
+    sim_robot._step()
+    for pose in path:
+        moveL(sim_args, sim_robot, pose)
+        # the loop log is a dict; dict keys preserve insertion order
+        new_q = sim_robot.q.copy()
+        if args.viz_test_path:
+            # look into visualize.py for details on what's available
+            T_w_e = sim_robot.getT_w_e()
+            robot.updateViz({"q": new_q, "T_w_e": T_w_e, "point": T_w_e.copy()})
+            # time.sleep(0.005)
+        qs.append(new_q[:6])
+        # plot this on the real robot
+
+    ##############################################
+    #  save the obtained joint-space trajectory  #
+    ##############################################
+    qs = np.array(qs)
+    # we're putting a dmp over this so we already have the timing ready
+    # TODO: make this general, you don't want to depend on other random
+    # arguments (make this one traj_time, then set tau0 = traj_time there)
+    t = np.linspace(0, args.tau0, len(qs)).reshape((len(qs), 1))
+    joint_trajectory = np.hstack((t, qs))
+    # TODO handle saving more consistently/intentionally
+    # (although this definitely works right now and isn't bad, just mid)
+    # os.makedirs a data dir and save there; dumping into the cwd is ugly
+    # TODO: check if we actually need this and if not remove the saving;
+    # whatever code uses this is responsible for logging it if it wants it,
+    # let's not have random files around.
+    np.savetxt("./joint_trajectory.csv", joint_trajectory, delimiter=",", fmt="%.5f")
+    return joint_trajectory
diff --git a/python/smc/path_generation/planner.py b/python/smc/path_generation/planner.py
index 9d63181379b28f43c03be7bf490f0e9d9361937d..02bf2f184f33543e2ddd80a7f839d39c05f984cf 100644
--- a/python/smc/path_generation/planner.py
+++ b/python/smc/path_generation/planner.py
@@ -2,11 +2,14 @@ from typing import List
 from abc import ABC, abstractmethod
 
 import numpy as np
-from ur_simple_control.path_generation.starworlds.obstacles import StarshapedObstacle
-from ur_simple_control.path_generation.starworlds.starshaped_hull import cluster_and_starify, ObstacleCluster
-from ur_simple_control.path_generation.starworlds.utils.misc import tic, toc
-from ur_simple_control.path_generation.star_navigation.robot.unicycle import Unicycle
-from ur_simple_control.path_generation.starworlds.obstacles import StarshapedPolygon
+from smc.path_generation.starworlds.obstacles import StarshapedObstacle
+from smc.path_generation.starworlds.starshaped_hull import (
+    cluster_and_starify,
+    ObstacleCluster,
+)
+from smc.path_generation.starworlds.utils.misc import tic, toc
+from smc.path_generation.star_navigation.robot.unicycle import Unicycle
+from smc.path_generation.starworlds.obstacles import StarshapedPolygon
 import shapely
 import yaml
 import pinocchio as pin
@@ -17,40 +20,68 @@ import matplotlib.pyplot as plt
 import matplotlib.collections as plt_col
 from multiprocessing import Queue, Lock, shared_memory
 
+
 def getPlanningArgs(parser):
-    robot_params_file_path = files('ur_simple_control.path_generation').joinpath('robot_params.yaml')
-    tunnel_mpc_params_file_path = files('ur_simple_control.path_generation').joinpath('tunnel_mpc_params.yaml')
-    parser.add_argument('--planning-robot-params-file', type=str,
-                        default=robot_params_file_path,
-                        #default='/home/gospodar/lund/praxis/projects/ur_simple_control/python/ur_simple_control/path_generation/robot_params.yaml',
-                        #default='/home/gospodar/colcon_venv/ur_simple_control/python/ur_simple_control/path_generation/robot_params.yaml',
-                        help="path to robot params file, required for path planning because it takes kinematic constraints into account")
-    parser.add_argument('--tunnel-mpc-params-file', type=str,
-                        default=tunnel_mpc_params_file_path,
-                        #default='/home/gospodar/lund/praxis/projects/ur_simple_control/python/ur_simple_control/path_generation/tunnel_mpc_params.yaml',
-                        #default='/home/gospodar/colcon_venv/ur_simple_control/python/ur_simple_control/path_generation/tunnel_mpc_params.yaml',
-                        help="path to mpc (in original tunnel) params file, required for path planning because it takes kinematic constraints into account")
-    parser.add_argument('--n-pol', type=int,
-                        default='0',
-                        help="IDK, TODO, rn this is just a preset hack value put into args for easier data transfer")
-    parser.add_argument('--np', type=int,
-                        default='0',
-                        help="IDK, TODO, rn this is just a preset hack value put into args for easier data transfer")
+    robot_params_file_path = files("smc.path_generation").joinpath("robot_params.yaml")
+    tunnel_mpc_params_file_path = files("smc.path_generation").joinpath(
+        "tunnel_mpc_params.yaml"
+    )
+    parser.add_argument(
+        "--planning-robot-params-file",
+        type=str,
+        default=robot_params_file_path,
+        # default='/home/gospodar/lund/praxis/projects/smc/python/smc/path_generation/robot_params.yaml',
+        # default='/home/gospodar/colcon_venv/smc/python/smc/path_generation/robot_params.yaml',
+        help="path to robot params file, required for path planning because it takes kinematic constraints into account",
+    )
+    parser.add_argument(
+        "--tunnel-mpc-params-file",
+        type=str,
+        default=tunnel_mpc_params_file_path,
+        # default='/home/gospodar/lund/praxis/projects/smc/python/smc/path_generation/tunnel_mpc_params.yaml',
+        # default='/home/gospodar/colcon_venv/smc/python/smc/path_generation/tunnel_mpc_params.yaml',
+        help="path to mpc (in original tunnel) params file, required for path planning because it takes kinematic constraints into account",
+    )
+    parser.add_argument(
+        "--n-pol",
+        type=int,
+        default="0",
+        help="IDK, TODO, rn this is just a preset hack value put into args for easier data transfer",
+    )
+    parser.add_argument(
+        "--np",
+        type=int,
+        default="0",
+        help="IDK, TODO, rn this is just a preset hack value put into args for easier data transfer",
+    )
     return parser
 
-class SceneUpdater():
+
+class SceneUpdater:
     def __init__(self, params: dict, verbosity=0):
         self.params = params
         self.verbosity = verbosity
         self.reset()
 
     def reset(self):
-        self.obstacle_clusters : List[ObstacleCluster] = None
+        self.obstacle_clusters: List[ObstacleCluster] = None
         self.free_star = None
-    
+
     # TODO: Check why computational time varies so much for same static scene
     @staticmethod
-    def workspace_modification(obstacles, p, pg, rho0, max_compute_time, hull_epsilon, gamma=0.5, make_convex=0, obstacle_clusters_prev=None, free_star_prev=None, verbosity=0):
+    def workspace_modification(
+        obstacles,
+        p,
+        pg,
+        rho0,
+        max_compute_time,
+        hull_epsilon,
+        gamma=0.5,
+        make_convex=0,
+        obstacle_clusters_prev=None,
+        free_star_prev=None,
+        verbosity=0,
+    ):
 
         # Clearance variable initialization
         rho = rho0 / gamma  # First rho should be rho0
@@ -59,14 +90,18 @@ class SceneUpdater():
         while True:
             if toc(t_init) > max_compute_time:
                 if verbosity > 0:
-                    print("[Workspace modification]: Max compute time in rho iteration.")
+                    print(
+                        "[Workspace modification]: Max compute time in rho iteration."
+                    )
                 break
 
             # Reduce rho
             rho *= gamma
 
             # Pad obstacles with rho
-            obstacles_rho = [o.dilated_obstacle(padding=rho, id="duplicate") for o in obstacles]
+            obstacles_rho = [
+                o.dilated_obstacle(padding=rho, id="duplicate") for o in obstacles
+            ]
 
             # TODO: Fix boundaries
             free_rho = shapely.geometry.box(-20, -20, 20, 20)
@@ -82,26 +117,40 @@ class SceneUpdater():
                 break
 
         # Initial and goal reference position selection
-        r0_sh, _ = shapely.ops.nearest_points(initial_reference_set, shapely.geometry.Point(p))
+        r0_sh, _ = shapely.ops.nearest_points(
+            initial_reference_set, shapely.geometry.Point(p)
+        )
         r0 = np.array(r0_sh.coords[0])
         rg_sh, _ = shapely.ops.nearest_points(free_rho, shapely.geometry.Point(pg))
         rg = np.array(rg_sh.coords[0])
 
-
         # TODO: Check more thoroughly
         if free_star_prev is not None:
             free_star_prev = free_star_prev.buffer(-1e-4)
-        if free_star_prev is not None and free_star_prev.contains(r0_sh) and free_star_prev.contains(rg_sh) and free_rho.contains(free_star_prev):# not any([free_star_prev.covers(o.polygon()) for o in obstacles_rho]):
+        if (
+            free_star_prev is not None
+            and free_star_prev.contains(r0_sh)
+            and free_star_prev.contains(rg_sh)
+            and free_rho.contains(free_star_prev)
+        ):  # not any([free_star_prev.covers(o.polygon()) for o in obstacles_rho]):
             if verbosity > 1:
-                print("[Workspace modification]: Reuse workspace from previous time step.")
+                print(
+                    "[Workspace modification]: Reuse workspace from previous time step."
+                )
             obstacle_clusters = obstacle_clusters_prev
             exit_flag = 10
         else:
             # Apply cluster and starify
-            obstacle_clusters, obstacle_timing, exit_flag, n_iter = \
-                cluster_and_starify(obstacles_rho, r0, rg, hull_epsilon, max_compute_time=max_compute_time-toc(t_init),
-                                    previous_clusters=obstacle_clusters_prev,
-                                    make_convex=make_convex, verbose=verbosity)
+            obstacle_clusters, obstacle_timing, exit_flag, n_iter = cluster_and_starify(
+                obstacles_rho,
+                r0,
+                rg,
+                hull_epsilon,
+                max_compute_time=max_compute_time - toc(t_init),
+                previous_clusters=obstacle_clusters_prev,
+                make_convex=make_convex,
+                verbose=verbosity,
+            )
 
         free_star = shapely.geometry.box(-20, -20, 20, 20)
         for o in obstacle_clusters:
@@ -110,23 +159,37 @@ class SceneUpdater():
         compute_time = toc(t_init)
         return obstacle_clusters, r0, rg, rho, free_star, compute_time, exit_flag
 
-    def update(self, p: np.ndarray, pg: np.ndarray, obstacles) -> tuple[np.ndarray, np.ndarray, float, list[StarshapedObstacle]]:
+    def update(
+        self, p: np.ndarray, pg: np.ndarray, obstacles
+    ) -> tuple[np.ndarray, np.ndarray, float, list[StarshapedObstacle]]:
         # Update obstacles
-        if not self.params['use_prev_workspace']:
+        if not self.params["use_prev_workspace"]:
             self.free_star = None
-        self.obstacle_clusters, r0, rg, rho, self.free_star, _, _ = SceneUpdater.workspace_modification(
-            obstacles, p, pg, self.params['rho0'], self.params['max_obs_compute_time'],
-            self.params['hull_epsilon'], self.params['gamma'],
-            make_convex=self.params['make_convex'], obstacle_clusters_prev=self.obstacle_clusters,
-            free_star_prev=self.free_star, verbosity=self.verbosity)
-        obstacles_star : List[StarshapedObstacle] = [o.cluster_obstacle for o in self.obstacle_clusters]
+        self.obstacle_clusters, r0, rg, rho, self.free_star, _, _ = (
+            SceneUpdater.workspace_modification(
+                obstacles,
+                p,
+                pg,
+                self.params["rho0"],
+                self.params["max_obs_compute_time"],
+                self.params["hull_epsilon"],
+                self.params["gamma"],
+                make_convex=self.params["make_convex"],
+                obstacle_clusters_prev=self.obstacle_clusters,
+                free_star_prev=self.free_star,
+                verbosity=self.verbosity,
+            )
+        )
+        obstacles_star: List[StarshapedObstacle] = [
+            o.cluster_obstacle for o in self.obstacle_clusters
+        ]
         # Make sure all polygon representations are computed
         [o._compute_polygon_representation() for o in obstacles_star]
-        
+
         return r0, rg, rho, obstacles_star
 
 
-class PathGenerator():
+class PathGenerator:
     def __init__(self, params: dict, verbosity=0):
         self.params = params
         self.verbosity = verbosity
@@ -134,12 +197,23 @@ class PathGenerator():
 
     def reset(self):
         self.target_path = []
-    
+
     ### soads.py
 
     # TODO: Check if can make more computationally efficient
     @staticmethod
-    def soads_f(r, rg, obstacles: List[StarshapedObstacle], adapt_obstacle_velocity=False, unit_magnitude=False, crep=1., reactivity=1., tail_effect=False, convergence_tolerance=1e-4, d=False):
+    def soads_f(
+        r,
+        rg,
+        obstacles: List[StarshapedObstacle],
+        adapt_obstacle_velocity=False,
+        unit_magnitude=False,
+        crep=1.0,
+        reactivity=1.0,
+        tail_effect=False,
+        convergence_tolerance=1e-4,
+        d=False,
+    ):
         goal_vector = rg - r
         goal_dist = np.linalg.norm(goal_vector)
         if goal_dist < convergence_tolerance:
@@ -165,8 +239,8 @@ class PathGenerator():
             for i, obs in enumerate(obstacles):
                 xd_o[:, i] = obs.vel_intertial_frame(r)
 
-        kappa = 0.
-        f_mag = 0.
+        kappa = 0.0
+        f_mag = 0.0
         for i in range(No):
             # Compute basis matrix
             E = np.zeros((2, 2))
@@ -174,7 +248,11 @@ class PathGenerator():
             E[:, 1] = [-normal[i][1], normal[i][0]]
             # Compute eigenvalues
             D = np.zeros((2, 2))
-            D[0, 0] = 1 - crep / (gamma[i] ** (1 / reactivity)) if tail_effect or normal[i].dot(fa) < 0. else 1
+            D[0, 0] = (
+                1 - crep / (gamma[i] ** (1 / reactivity))
+                if tail_effect or normal[i].dot(fa) < 0.0
+                else 1
+            )
             D[1, 1] = 1 + 1 / gamma[i] ** (1 / reactivity)
             # Compute modulation
             M = E.dot(D).dot(np.linalg.inv(E))
@@ -189,10 +267,14 @@ class PathGenerator():
             kappa_i = np.arccos(np.clip(nu_i_hat[0], -1, 1)) * np.sign(nu_i_hat[1])
             kappa += w[i] * kappa_i
         kappa_norm = abs(kappa)
-        f_o = Rf.dot([np.cos(kappa_norm), np.sin(kappa_norm) / kappa_norm * kappa]) if kappa_norm > 0. else fa
+        f_o = (
+            Rf.dot([np.cos(kappa_norm), np.sin(kappa_norm) / kappa_norm * kappa])
+            if kappa_norm > 0.0
+            else fa
+        )
 
         if unit_magnitude:
-            f_mag = 1.
+            f_mag = 1.0
         return f_mag * f_o
 
     @staticmethod
@@ -231,12 +313,38 @@ class PathGenerator():
     @staticmethod
     def pol2pos(path_pol, s):
         n_pol = len(path_pol) // 2 - 1
-        return [sum([path_pol[j * (n_pol + 1) + i] * s ** (n_pol - i) for i in range(n_pol + 1)]) for j in range(2)]
+        return [
+            sum(
+                [
+                    path_pol[j * (n_pol + 1) + i] * s ** (n_pol - i)
+                    for i in range(n_pol + 1)
+                ]
+            )
+            for j in range(2)
+        ]
 
     @staticmethod
-    def path_generator(r0, rg, obstacles, dp_max, N, dt, max_compute_time, n_pol, ds_decay_rate=0.5,
-                       ds_increase_rate=2., max_nr_steps=1000, convergence_tolerance=1e-5, P_prev=None, s_prev=None,
-                       reactivity=1., crep=1., tail_effect=False, reference_step_size=0.5, verbosity=0):
+    def path_generator(
+        r0,
+        rg,
+        obstacles,
+        dp_max,
+        N,
+        dt,
+        max_compute_time,
+        n_pol,
+        ds_decay_rate=0.5,
+        ds_increase_rate=2.0,
+        max_nr_steps=1000,
+        convergence_tolerance=1e-5,
+        P_prev=None,
+        s_prev=None,
+        reactivity=1.0,
+        crep=1.0,
+        tail_effect=False,
+        reference_step_size=0.5,
+        verbosity=0,
+    ):
         """
         r0 : np.ndarray                       initial reference position
         rg : np.ndarray                       goal reference position
@@ -258,7 +366,7 @@ class PathGenerator():
         reference_step_size : float           ???
         verbosity : int                       you guessed it...
         """
-        
+
         t0 = tic()
 
         # Initialize
@@ -278,33 +386,52 @@ class PathGenerator():
             # Check exit conditions
             if dist_to_goal < convergence_tolerance:
                 if verbosity > 1:
-                    print(f"[Path Generator]: Path converged. {int(100 * (s[i - 1] / N))}% of path completed.")
+                    print(
+                        f"[Path Generator]: Path converged. {int(100 * (s[i - 1] / N))}% of path completed."
+                    )
                 break
             if s[i - 1] >= N:
                 if verbosity > 1:
-                    print(f"[Path Generator]: Completed path length. {int(100 * (s[i - 1] / N))}% of path completed.")
+                    print(
+                        f"[Path Generator]: Completed path length. {int(100 * (s[i - 1] / N))}% of path completed."
+                    )
                 break
             if toc(t0) > max_compute_time:
                 if verbosity > 1:
-                    print(f"[Path Generator]: Max compute time in path integrator. {int(100 * (s[i - 1] / N))}% of path completed.")
+                    print(
+                        f"[Path Generator]: Max compute time in path integrator. {int(100 * (s[i - 1] / N))}% of path completed."
+                    )
                 break
             if i >= max_nr_steps:
                 if verbosity > 1:
-                    print(f"[Path Generator]: Max steps taken in path integrator. {int(100 * (s[i - 1] / N))}% of path completed.")
+                    print(
+                        f"[Path Generator]: Max steps taken in path integrator. {int(100 * (s[i - 1] / N))}% of path completed."
+                    )
                 break
 
             # Movement using SOADS dynamics
-            dr = min(dp_max, dist_to_goal) * PathGenerator.soads_f(r[i - 1, :], rg, obstacles, adapt_obstacle_velocity=False,
-                                                                   unit_magnitude=True, crep=crep,
-                                                                   reactivity=reactivity, tail_effect=tail_effect,
-                                                                   convergence_tolerance=convergence_tolerance)
+            dr = min(dp_max, dist_to_goal) * PathGenerator.soads_f(
+                r[i - 1, :],
+                rg,
+                obstacles,
+                adapt_obstacle_velocity=False,
+                unit_magnitude=True,
+                crep=crep,
+                reactivity=reactivity,
+                tail_effect=tail_effect,
+                convergence_tolerance=convergence_tolerance,
+            )
 
             r[i, :] = r[i - 1, :] + dr * ds
 
             ri_in_obstacle = False
             while any([o.interior_point(r[i, :]) for o in obstacles]):
                 if verbosity > 1:
-                    print("[Path Generator]: Path inside obstacle. Reducing integration step from {:5f} to {:5f}.".format(ds, ds*ds_decay_rate))
+                    print(
+                        "[Path Generator]: Path inside obstacle. Reducing integration step from {:5f} to {:5f}.".format(
+                            ds, ds * ds_decay_rate
+                        )
+                    )
                 ds *= ds_decay_rate
                 r[i, :] = r[i - 1, :] + dr * ds
                 # Additional compute time check
@@ -328,25 +455,40 @@ class PathGenerator():
         s_vec = np.arange(0, s[-1] + reference_step_size, reference_step_size)
         xs, ys = np.interp(s_vec, s, r[:, 0]), np.interp(s_vec, s, r[:, 1])
         # Append not finished path with fixed final position
-        s_vec = np.append(s_vec, np.arange(s[-1] + reference_step_size, N + reference_step_size, reference_step_size))
-        xs = np.append(xs, xs[-1] * np.ones(len(s_vec)-len(xs)))
-        ys = np.append(ys, ys[-1] * np.ones(len(s_vec)-len(ys)))
+        s_vec = np.append(
+            s_vec,
+            np.arange(
+                s[-1] + reference_step_size,
+                N + reference_step_size,
+                reference_step_size,
+            ),
+        )
+        xs = np.append(xs, xs[-1] * np.ones(len(s_vec) - len(xs)))
+        ys = np.append(ys, ys[-1] * np.ones(len(s_vec) - len(ys)))
 
         reference_path = [el for p in zip(xs, ys) for el in p]  # [x0 y0 x1 y1 ...]
 
         # TODO: Fix when close to goal
         # TODO: Adjust for short arc length, skip higher order terms..
-        path_pol = np.polyfit(s_vec, reference_path[::2], n_pol).tolist() + \
-                np.polyfit(s_vec, reference_path[1::2], n_pol).tolist()  # [px0 px1 ... pxn py0 py1 ... pyn]
+        path_pol = (
+            np.polyfit(s_vec, reference_path[::2], n_pol).tolist()
+            + np.polyfit(s_vec, reference_path[1::2], n_pol).tolist()
+        )  # [px0 px1 ... pxn py0 py1 ... pyn]
         # Force init position to be correct
         path_pol[n_pol] = reference_path[0]
         path_pol[-1] = reference_path[1]
 
         # Compute polyfit approximation error
-        epsilon = [np.linalg.norm(np.array(reference_path[2 * i:2 * (i + 1)]) - np.array(PathGenerator.pol2pos(path_pol, s_vec[i]))) for i in range(N + 1)]
+        epsilon = [
+            np.linalg.norm(
+                np.array(reference_path[2 * i : 2 * (i + 1)])
+                - np.array(PathGenerator.pol2pos(path_pol, s_vec[i]))
+            )
+            for i in range(N + 1)
+        ]
 
         compute_time = toc(t0)
-        
+
         """
         path_pol : np.ndarray   the polynomial approximation of `reference_path`
         epsilon : [float]       approximation error between the polynomial fit and the actual path
@@ -354,48 +496,74 @@ class PathGenerator():
         compute_time : float    overall timing of this function
         """
         return path_pol, epsilon, reference_path, compute_time
-        
-    def prepare_prev(self, p: np.ndarray, rho: float, obstacles_star: List[StarshapedObstacle]):
+
+    def prepare_prev(
+        self, p: np.ndarray, rho: float, obstacles_star: List[StarshapedObstacle]
+    ):
         P_prev = np.array([self.target_path[::2], self.target_path[1::2]]).T
         # Shift path to start at point closest to robot position
-        P_prev = P_prev[np.argmin(np.linalg.norm(p - P_prev, axis=1)):, :]
+        P_prev = P_prev[np.argmin(np.linalg.norm(p - P_prev, axis=1)) :, :]
         # P_prev[0, :] = self.r0
         if np.linalg.norm(p - P_prev[0, :]) > rho:
             if self.verbosity > 0:
-                print("[Path Generator]: No reuse of previous path. Path not within distance rho from robot.")
+                print(
+                    "[Path Generator]: No reuse of previous path. Path not within distance rho from robot."
+                )
             P_prev = None
         else:
             for r in P_prev:
                 if any([o.interior_point(r) for o in obstacles_star]):
                     if self.verbosity > 0:
-                        print("[Path Generator]: No reuse of previous path. Path not collision-free.")
+                        print(
+                            "[Path Generator]: No reuse of previous path. Path not collision-free."
+                        )
                     P_prev = None
 
         if P_prev is not None:
             # Cut off stand still padding in previous path
             P_prev_stepsize = np.linalg.norm(np.diff(P_prev, axis=0), axis=1)
-            s_prev = np.hstack((0, np.cumsum(P_prev_stepsize) / self.params['dp_max']))
+            s_prev = np.hstack((0, np.cumsum(P_prev_stepsize) / self.params["dp_max"]))
             P_prev_mask = [True] + (P_prev_stepsize > 1e-8).tolist()
             P_prev = P_prev[P_prev_mask, :]
             s_prev = s_prev[P_prev_mask]
         else:
             s_prev = None
-        
+
         return P_prev, s_prev
-    
-    def update(self, p: np.ndarray, r0: np.ndarray, rg: np.ndarray, rho: float, obstacles_star: List[StarshapedObstacle]) -> tuple[List[float], float]:
+
+    def update(
+        self,
+        p: np.ndarray,
+        r0: np.ndarray,
+        rg: np.ndarray,
+        rho: float,
+        obstacles_star: List[StarshapedObstacle],
+    ) -> tuple[List[float], float]:
         # Buffer previous target path
-        if self.params['buffer'] and self.target_path:
+        if self.params["buffer"] and self.target_path:
             P_prev, s_prev = self.prepare_prev(p, rho, obstacles_star)
         else:
             P_prev, s_prev = None, None
         # Generate the new path
         path_pol, epsilon, self.target_path, _ = PathGenerator.path_generator(
-            r0, rg, obstacles_star, self.params['dp_max'], self.params['N'],
-            self.params['dt'], self.params['max_compute_time'], self.params['n_pol'],
-            ds_decay_rate=0.5, ds_increase_rate=2., max_nr_steps=1000, P_prev=P_prev, s_prev=s_prev,
-            reactivity=self.params['reactivity'], crep=self.params['crep'],
-            convergence_tolerance=self.params['convergence_tolerance'], verbosity=self.verbosity)
+            r0,
+            rg,
+            obstacles_star,
+            self.params["dp_max"],
+            self.params["N"],
+            self.params["dt"],
+            self.params["max_compute_time"],
+            self.params["n_pol"],
+            ds_decay_rate=0.5,
+            ds_increase_rate=2.0,
+            max_nr_steps=1000,
+            P_prev=P_prev,
+            s_prev=s_prev,
+            reactivity=self.params["reactivity"],
+            crep=self.params["crep"],
+            convergence_tolerance=self.params["convergence_tolerance"],
+            verbosity=self.verbosity,
+        )
         return path_pol, epsilon
 
 
@@ -407,30 +575,37 @@ def createMap():
     """
     # [lower_left, lower_right, top_right, top_left]
     map_as_list = [
-                    [[2, 2], [8, 2], [8, 3], [2, 3]]      ,
-                    [[2, 3], [3, 3], [3, 4.25], [2, 4.25]],
-                    [[2, 5], [8, 5], [8, 6], [2, 6]]      ,
-                    [[2, 8], [8, 8], [8, 9], [2, 9]]      ,
-                  ]
+        [[2, 2], [8, 2], [8, 3], [2, 3]],
+        [[2, 3], [3, 3], [3, 4.25], [2, 4.25]],
+        [[2, 5], [8, 5], [8, 6], [2, 6]],
+        [[2, 8], [8, 8], [8, 9], [2, 9]],
+    ]
 
     obstacles = []
     for map_element in map_as_list:
         obstacles.append(StarshapedPolygon(map_element))
     return obstacles, map_as_list
 
+
 def pathVisualizer(x0, goal, map_as_list, positions):
     # plotting
     fig = plt.figure()
     handle_goal = plt.plot(*pg, c="g")[0]
     handle_init = plt.plot(*x0[:2], c="b")[0]
-    handle_curr = plt.plot(*x0[:2], c="r", marker=(3, 0, np.rad2deg(x0[2]-np.pi/2)), markersize=10)[0]
-    handle_curr_dir = plt.plot(0, 0, marker=(2, 0, np.rad2deg(0)), markersize=5, color='w')[0]
+    handle_curr = plt.plot(
+        *x0[:2], c="r", marker=(3, 0, np.rad2deg(x0[2] - np.pi / 2)), markersize=10
+    )[0]
+    handle_curr_dir = plt.plot(
+        0, 0, marker=(2, 0, np.rad2deg(0)), markersize=5, color="w"
+    )[0]
     handle_path = plt.plot([], [], c="k")[0]
     coll = []
     for map_element in map_as_list:
         coll.append(plt_col.PolyCollection(np.array(map_element)))
     plt.gca().add_collection(coll)
-    handle_title = plt.text(5, 9.5, "", bbox={'facecolor':'w', 'alpha':0.5, 'pad':5}, ha="center")
+    handle_title = plt.text(
+        5, 9.5, "", bbox={"facecolor": "w", "alpha": 0.5, "pad": 5}, ha="center"
+    )
     plt.gca().set_aspect("equal")
     plt.xlim([0, 10])
     plt.ylim([0, 10])
@@ -439,17 +614,18 @@ def pathVisualizer(x0, goal, map_as_list, positions):
     # do the updating plotting
     for x in positions:
         handle_curr.set_data([x[0]], [x[1]])
-        handle_curr.set_marker((3, 0, np.rad2deg(x[2]-np.pi/2)))
+        handle_curr.set_marker((3, 0, np.rad2deg(x[2] - np.pi / 2)))
         handle_curr_dir.set_data([x[0]], [x[1]])
-        handle_curr_dir.set_marker((2, 0, np.rad2deg(x[2]-np.pi/2)))
+        handle_curr_dir.set_marker((2, 0, np.rad2deg(x[2] - np.pi / 2)))
         handle_path.set_data([path_gen.target_path[::2], path_gen.target_path[1::2]])
         handle_title.set_text(f"{t:5.3f}")
         fig.canvas.draw()
         plt.pause(0.005)
     plt.show()
 
+
 # stupid function for stupid data re-assembly
-#def path2D_to_SE2(path2D : list):
+# def path2D_to_SE2(path2D : list):
 #    path2D = np.array(path2D)
 #    # create (N,2) path for list [x0,y0,x1,y1,...]
 #    # of course it shouldn't be like that to begin with but
@@ -468,7 +644,11 @@ def pathVisualizer(x0, goal, map_as_list, positions):
 
 
 def pathPointFromPathParam(n_pol, path_dim, path_pol, s):
-    return [np.polyval(path_pol[j*(n_pol+1):(j+1)*(n_pol+1)], s) for j in range(path_dim)]
+    return [
+        np.polyval(path_pol[j * (n_pol + 1) : (j + 1) * (n_pol + 1)], s)
+        for j in range(path_dim)
+    ]
+
 
 def path2D_timed(args, path2D_untimed, max_base_v):
     """
@@ -504,31 +684,31 @@ def path2D_timed(args, path2D_untimed, max_base_v):
     ####################################################
     # the strategy is somewhat reasonable at least:
     # assume we're moving at 90% max velocity in the base,
-    # and use that. 
+    # and use that.
     perc_of_max_v = 0.9
-    base_v = perc_of_max_v * max_base_v 
-    
-    # 1) the length of the path divided by 0.9 * max_vel 
+    base_v = perc_of_max_v * max_base_v
+
+    # 1) the length of the path divided by 0.9 * max_vel
     #    gives us the total time of the trajectory,
     #    so we first compute that
     # easiest possible way to get approximate path length
     # (yes it should be from the polynomial approximation but that takes time to write)
-    x_i = path2D_untimed[:,0][:-1] # no last element
-    y_i = path2D_untimed[:,1][:-1] # no last element
-    x_i_plus_1 = path2D_untimed[:,0][1:] # no first element
-    y_i_plus_1 = path2D_untimed[:,1][1:] # no first element
+    x_i = path2D_untimed[:, 0][:-1]  # no last element
+    y_i = path2D_untimed[:, 1][:-1]  # no last element
+    x_i_plus_1 = path2D_untimed[:, 0][1:]  # no first element
+    y_i_plus_1 = path2D_untimed[:, 1][1:]  # no first element
     x_diff = x_i_plus_1 - x_i
-    x_diff = x_diff.reshape((-1,1))
+    x_diff = x_diff.reshape((-1, 1))
     y_diff = y_i_plus_1 - y_i
-    y_diff = y_diff.reshape((-1,1))
+    y_diff = y_diff.reshape((-1, 1))
     path_length = np.sum(np.linalg.norm(np.hstack((x_diff, y_diff)), axis=1))
     total_time = path_length / base_v
     # 2) we find the correspondence between s and time
     # TODO: read from where it should be, but seba checked that it's 5 for some reason
     # TODO THIS IS N IN PATH PLANNING, MAKE THIS A SHARED ARGUMENT
-    max_s = 5 
+    max_s = 5
     t_to_s = max_s / total_time
-    # 3) construct the ocp-timed 2D path 
+    # 3) construct the ocp-timed 2D path
     # TODO MAKE REFERENCE_STEP_SIZE A SHARED ARGUMENT
     # TODO: we should know max s a priori
     reference_step_size = 0.5
@@ -538,16 +718,23 @@ def path2D_timed(args, path2D_untimed, max_base_v):
     # time is i * args.ocp_dt
     for i in range(args.n_knots + 1):
         # what it should be but gets stuck if we're not yet on path
-        #s = (i * args.ocp_dt) * t_to_s
+        # s = (i * args.ocp_dt) * t_to_s
         # full path
         # NOTE: this should be wrong and the ocp_dt version correct,
         # but it works better for some reason
         s = i * (max_s / args.n_knots)
-        path2D.append(np.array([np.interp(s, s_vec, path2D_untimed[:,0]), 
-                                np.interp(s, s_vec, path2D_untimed[:,1])]))
+        path2D.append(
+            np.array(
+                [
+                    np.interp(s, s_vec, path2D_untimed[:, 0]),
+                    np.interp(s, s_vec, path2D_untimed[:, 1]),
+                ]
+            )
+        )
     path2D = np.array(path2D)
     return path2D
 
+
 def path2D_to_SE3(path2D, path_height):
     """
     path2D_SE3
@@ -559,12 +746,12 @@ def path2D_to_SE3(path2D, path_height):
     #########################
     #  path2D into pathSE2  #
     #########################
-    # construct theta 
+    # construct theta
     # since it's a pairwise operation it can't be done on the last point
-    x_i = path2D[:,0][:-1] # no last element
-    y_i = path2D[:,1][:-1] # no last element
-    x_i_plus_1 = path2D[:,0][1:] # no first element
-    y_i_plus_1 = path2D[:,1][1:] # no first element
+    x_i = path2D[:, 0][:-1]  # no last element
+    y_i = path2D[:, 1][:-1]  # no last element
+    x_i_plus_1 = path2D[:, 0][1:]  # no first element
+    y_i_plus_1 = path2D[:, 1][1:]  # no first element
     x_diff = x_i_plus_1 - x_i
     y_diff = y_i_plus_1 - y_i
     # elementwise arctan2
@@ -579,21 +766,26 @@ def path2D_to_SE3(path2D, path_height):
     for i in range(len(path2D) - 1):
         # first set the x axis to be in the theta direction
         # TODO: make sure this one makes sense
-        rotation = np.array([
-                    [-np.cos(thetas[i]), np.sin(thetas[i]), 0.0],
-                    [-np.sin(thetas[i]), -np.cos(thetas[i]), 0.0],
-                    [0.0,                0.0,          -1.0]])
-        #rotation = pin.rpy.rpyToMatrix(0.0, 0.0, thetas[i])
-        #rotation = pin.rpy.rpyToMatrix(np.pi/2, np.pi/2,0.0) @ rotation
+        rotation = np.array(
+            [
+                [-np.cos(thetas[i]), np.sin(thetas[i]), 0.0],
+                [-np.sin(thetas[i]), -np.cos(thetas[i]), 0.0],
+                [0.0, 0.0, -1.0],
+            ]
+        )
+        # rotation = pin.rpy.rpyToMatrix(0.0, 0.0, thetas[i])
+        # rotation = pin.rpy.rpyToMatrix(np.pi/2, np.pi/2,0.0) @ rotation
         translation = np.array([path2D[i][0], path2D[i][1], path_height])
         pathSE3.append(pin.SE3(rotation, translation))
     pathSE3.append(pin.SE3(rotation, translation))
     return pathSE3
 
+
 def path2D_to_timed_SE3(todo):
     pass
 
-def starPlanner(goal, args, init_cmd, shm_name, lock : Lock, shm_data):
+
+def starPlanner(goal, args, init_cmd, shm_name, lock: Lock, shm_data):
     """
     starPlanner
     ------------
@@ -612,45 +804,47 @@ def starPlanner(goal, args, init_cmd, shm_name, lock : Lock, shm_data):
     p_shared = np.ndarray((2,), dtype=np.float64, buffer=shm.buf)
     p = np.zeros(2)
     # environment
-    obstacles, _ = createMap()    
+    obstacles, _ = createMap()
 
     robot_type = "Unicycle"
     with open(args.planning_robot_params_file) as f:
         params = yaml.safe_load(f)
     robot_params = params[robot_type]
-    planning_robot = Unicycle(width=robot_params['width'], 
-                     vel_min=[robot_params['lin_vel_min'], -robot_params['ang_vel_max']],
-                     vel_max=[robot_params['lin_vel_max'], robot_params['ang_vel_max']], 
-                     name=robot_type)
+    planning_robot = Unicycle(
+        width=robot_params["width"],
+        vel_min=[robot_params["lin_vel_min"], -robot_params["ang_vel_max"]],
+        vel_max=[robot_params["lin_vel_max"], robot_params["ang_vel_max"]],
+        name=robot_type,
+    )
 
     with open(args.tunnel_mpc_params_file) as f:
         params = yaml.safe_load(f)
     mpc_params = params["tunnel_mpc"]
-    mpc_params['dp_max'] = planning_robot.vmax * mpc_params['dt']
+    mpc_params["dp_max"] = planning_robot.vmax * mpc_params["dt"]
 
     verbosity = 1
     scene_updater = SceneUpdater(mpc_params, verbosity)
-    path_gen = PathGenerator(mpc_params, verbosity) 
+    path_gen = PathGenerator(mpc_params, verbosity)
 
     # TODO: make it an argument
     convergence_threshold = 0.05
     try:
         while True:
             # has to be a blocking call
-            #cmd = cmd_queue.get()
-            #p = cmd['p']
+            # cmd = cmd_queue.get()
+            # p = cmd['p']
             # TODO: make a sendCommand type thing which will
             # handle locking, pickling or numpy-arraying for you
             # if for no other reason than not copy-pasting code
             lock.acquire()
-            p[:] = p_shared[:] 
+            p[:] = p_shared[:]
             lock.release()
 
             if np.linalg.norm(p - goal) < convergence_threshold:
                 data_pickled = pickle.dumps("done")
                 lock.acquire()
-                shm_data.buf[:len(data_pickled)] = data_pickled
-                #shm_data.buf[len(data_pickled):] = bytes(0)
+                shm_data.buf[: len(data_pickled)] = data_pickled
+                # shm_data.buf[len(data_pickled):] = bytes(0)
                 lock.release()
                 break
 
@@ -661,12 +855,12 @@ def starPlanner(goal, args, init_cmd, shm_name, lock : Lock, shm_data):
             # compute the path
             path_pol, epsilon = path_gen.update(p, r0, rg, rho, obstacles_star)
             # TODO: this is stupid, just used shared memory bro
-            #if data_queue.qsize() < 1:
-                #data_queue.put((path_pol, path_gen.target_path))
+            # if data_queue.qsize() < 1:
+            # data_queue.put((path_pol, path_gen.target_path))
             data_pickled = pickle.dumps((path_pol, path_gen.target_path))
             lock.acquire()
-            shm_data.buf[:len(data_pickled)] = data_pickled
-            #shm_data.buf[len(data_pickled):] = bytes(0)
+            shm_data.buf[: len(data_pickled)] = data_pickled
+            # shm_data.buf[len(data_pickled):] = bytes(0)
             lock.release()
 
     except KeyboardInterrupt:
diff --git a/python/smc/robots/__init__.py b/python/smc/robots/__init__.py
index 62f1f0d1d6bdaa181ee981e97b00f4eb3468a0c7..dbe9d2fcebbbcb4240232eebf4bfda57a94166bd 100644
--- a/python/smc/robots/__init__.py
+++ b/python/smc/robots/__init__.py
@@ -1,5 +1,10 @@
 from .implementations.heron import RealHeronRobotManager
 from .implementations.mir import RealMirRobotManager
 from .implementations.mobile_yumi import RealMobileYumiRobotManager
-from .implementations.simulated_robot import SimulatedRobotManager
-from .implementations.ur5e import RealUR5eRobotManager
+from .implementations.ur5e import RealUR5eRobotManager, SimulatedUR5eRobotManager
+
+from .interfaces.single_arm_interface import SingleArmInterface
+from .interfaces.dual_arm_interface import DualArmInterface
+from .interfaces.force_torque_sensor_interface import ForceTorqueOnSingleArmWrist
+
+from . import utils
diff --git a/python/smc/robots/abstract_simulated_robot.py b/python/smc/robots/abstract_simulated_robot.py
new file mode 100644
index 0000000000000000000000000000000000000000..be9392a6b58bc3e5618b4a1e14edbe1d1a64b250
--- /dev/null
+++ b/python/smc/robots/abstract_simulated_robot.py
@@ -0,0 +1,60 @@
+from smc.robots.robotmanager_abstract import AbstractRealRobotManager
+
+import pinocchio as pin
+import numpy as np
+
+
+# TODO: rename this to make it clear that this is specifically a pinocchio-based simulation.
+# the simulation could just as well be run in pybullet, mujoco or any other simulator
+class AbstractSimulatedRobotManager(AbstractRealRobotManager):
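+    """
+    AbstractSimulatedRobotManager
+    -----------------------------
+    kinematics-only simulation: velocity commands are integrated with pinocchio,
+    there is no physics engine behind this
+    """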
+    def __init__(self, args):
+        print("simulated robot init")
+        super().__init__(args)
+
+    # NOTE: can be overridden by any particular robot
+    def setInitialPose(self):
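+        # NOTE: the configuration bounds are hardcoded to [-1, 1] here instead of the model's joint limits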
+        self._q = pin.randomConfiguration(
+            self.model, -1 * np.ones(self.model.nq), np.ones(self.model.nq)
+        )
+
+    def sendVelocityCommandToReal(self, v):
+        """
+        sendVelocityCommandToReal
+        -----
+        in simulation we just integrate the velocity for a dt and that's it
+        """
+        self._v = v
+        # NOTE: we update joint angles here, and _updateQ does nothing (there is no communication)
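+        # pin.integrate works on the configuration manifold, so this also handles non-Euclidean joints (e.g. a floating or planar base)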
+        self._q = pin.integrate(self.model, self._q, v * self._dt)
+
+    def _updateQ(self):
+        pass
+
+    def _updateV(self):
+        pass
+
+    # NOTE: simulation magic - it just stops immediately.
+    # if you want a more realistic simulation, use an actual physics simulator
+    def stopRobot(self):
+        self._v = np.zeros(self.model.nv)
+
+    # NOTE: since we're just integrating, nothing should happen here.
+    # if this were in a physics simulator, you might want to run compliant control here instead
+    def setFreedrive(self):
+        pass
+
+    # NOTE: since we're just integrating, nothing should happen here.
+    # if this were in a physics simulator, you might want to return from compliant control
+    # to whatever else instead
+    def unSetFreedrive(self):
+        pass
+
+    # NOTE: this is pointless here, but in the case of a proper simulation you'd start the simulator,
+    # or verify that it's running or something
+    def connectToRobot(self):
+        pass
+
+    # TODO: create simulated gripper class to support the move, open, close methods - it's a mess now
+    # in simulation we can just set the values directly
+    def connectToGripper(self):
+        pass
diff --git a/python/smc/robots/implementations/__init__.py b/python/smc/robots/implementations/__init__.py
index 5a211f16a31d2b89bb87e2311a8577bf58149632..78ca8f8c7c58e18b09f122a8bc5a6ed952642813 100644
--- a/python/smc/robots/implementations/__init__.py
+++ b/python/smc/robots/implementations/__init__.py
@@ -1,5 +1,4 @@
 from .heron import *
 from .mir import *
 from .mobile_yumi import *
-from .simulated_robot import *
 from .ur5e import *
diff --git a/python/smc/robots/implementations/simulated_robot.py b/python/smc/robots/implementations/simulated_robot.py
deleted file mode 100644
index 45091b32dd9a6c2e0223e585338ebea66d425402..0000000000000000000000000000000000000000
--- a/python/smc/robots/implementations/simulated_robot.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from smc.robots.robotmanager_abstract import AbstractRobotManager
-import pinocchio as pin
-
-
-class SimulatedRobotManager(AbstractRobotManager):
-    # TODO: find a way for this to inherit from the particular robot manager (the non-real part)
-    def __init__(self, args):
-        pass
-
-    def sendVelocityCommandToReal(self, v):
-        """
-        sendVelocityCommand
-        -----
-        in simulation we just integrate the velocity for a dt and that's it
-        """
-        v = super().sendVelocityCommand(v)
-        self._v = v
-        self._q = pin.integrate(self.model, self._q, v * self._dt)
diff --git a/python/smc/robots/implementations/ur5e.py b/python/smc/robots/implementations/ur5e.py
index d1044ca24ea99034a5bd276916c890fedff754fb..5d7f652cc10cb866bc9367b26353664560d19d39 100644
--- a/python/smc/robots/implementations/ur5e.py
+++ b/python/smc/robots/implementations/ur5e.py
@@ -3,6 +3,8 @@ from smc.robots.interfaces.force_torque_sensor_interface import (
 )
 from smc.robots.grippers.robotiq.robotiq_gripper import RobotiqGripper
 from smc.robots.grippers.on_robot.twofg import TwoFG
+from smc.robots.robotmanager_abstract import AbstractRealRobotManager
+from smc.robots.abstract_simulated_robot import AbstractSimulatedRobotManager
 
 import numpy as np
 import pinocchio as pin
@@ -14,8 +16,6 @@ from rtde_control import RTDEControlInterface
 from rtde_receive import RTDEReceiveInterface
 from rtde_io import RTDEIOInterface
 
-from smc.robots.robotmanager_abstract import AbstractRealRobotManager
-
 
 # NOTE: this one assumes a jaw gripper
 class RobotManagerUR5e(ForceTorqueOnSingleArmWrist):
@@ -45,6 +45,28 @@ class RobotManagerUR5e(ForceTorqueOnSingleArmWrist):
         super().__init__(args)
 
 
+class SimulatedUR5eRobotManager(AbstractSimulatedRobotManager, RobotManagerUR5e):
+    def __init__(self, args):
+        if args.debug_prints:
+            print("SimulatedRobotManagerUR5e init")
+        super().__init__(args)
+
+    # NOTE: overriding wrench stuff here
+    # there could be a debate about whether there should be a simulated ForceTorqueSensorInterface,
+    # but it's annoying as hell and there is no immediate benefit in solving this problem
+    def _updateWrench(self):
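+        # NOTE: a random 6-vector stands in for a real sensor reading in simulation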
+        self._wrench_base = np.random.random(6)
+        # NOTE: create a robot_math module, make this mapping a function called
+        # mapSE3ToDifferentFrame or something like that
+        mapping = np.zeros((6, 6))
+        mapping[0:3, 0:3] = self._T_w_e.rotation
+        mapping[3:6, 3:6] = self._T_w_e.rotation
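+        # mapping.T rotates both force and torque from the base frame into the end-effector frame (rotation only, no lever-arm term)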
+        self._wrench = mapping.T @ self._wrench_base
+
+    def zeroFtSensor(self) -> None:
+        self._wrench_bias = np.zeros(6)
+
+
 class RealUR5eRobotManager(RobotManagerUR5e, AbstractRealRobotManager):
     def __init__(self, args: argparse.Namespace):
         if args.debug_prints:
@@ -141,6 +163,10 @@ class RealUR5eRobotManager(RobotManagerUR5e, AbstractRealRobotManager):
         self._rtde_control.speedJ(v, self._acceleration, self._dt)
 
     def stopRobot(self):
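+        # NOTE: the argument to speedStop/stopJ is the deceleration of the leading axis [rad/s^2]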
+        self._rtde_control.speedStop(1)
+        print("sending a stopj as well")
+        self._rtde_control.stopJ(1)
+        print("putting it to freedrive for good measure too")
         print("stopping via freedrive lel")
         self._rtde_control.freedriveMode()
         time.sleep(0.5)
diff --git a/python/smc/robots/interfaces/force_torque_sensor_interface.py b/python/smc/robots/interfaces/force_torque_sensor_interface.py
index 39800d29e20947bf0efc8b78bdcc2645770383d3..1264e077dae4a8c5f87b5aa7744345919a8c2299 100644
--- a/python/smc/robots/interfaces/force_torque_sensor_interface.py
+++ b/python/smc/robots/interfaces/force_torque_sensor_interface.py
@@ -19,8 +19,9 @@ class ForceTorqueSensorInterface(abc.ABC):
     @abc.abstractmethod
     def ft_sensor_frame_id(self) -> int: ...
 
+    # NOTE: ideally the type would say this array has length 6, but numpy's type hints can't express array shapes
     @property
-    def wrench(self):
+    def wrench(self) -> np.ndarray:
         """
         getWrench
         ---------
@@ -30,8 +31,9 @@ class ForceTorqueSensorInterface(abc.ABC):
         """
         return self._wrench.copy()
 
+    # NOTE: ideally the type would say this array has length 6, but numpy's type hints can't express array shapes
     @property
-    def wrench_base(self):
+    def wrench_base(self) -> np.ndarray:
         """
         getWrench
         ---------
@@ -41,7 +43,7 @@ class ForceTorqueSensorInterface(abc.ABC):
         return self._wrench_base.copy()
 
     @abc.abstractmethod
-    def _updateWrench(self):
+    def _updateWrench(self) -> None:
         """
         get wrench reading from whatever interface you have.
         NOTE:
@@ -60,7 +62,7 @@ class ForceTorqueSensorInterface(abc.ABC):
     def zeroFtSensor(self) -> None:
         pass
 
-    def calibrateFT(self, dt):
+    def calibrateFT(self, dt) -> None:
         """
         calibrateFT
         -----------
diff --git a/python/smc/robots/interfaces/single_arm_interface.py b/python/smc/robots/interfaces/single_arm_interface.py
index 4cc7cf386a38118146f111fd1853777f99fb0e47..c486f974f34f286ddfb5ffbcc5522a16bb268351 100644
--- a/python/smc/robots/interfaces/single_arm_interface.py
+++ b/python/smc/robots/interfaces/single_arm_interface.py
@@ -11,6 +11,10 @@ class SingleArmInterface(AbstractRobotManager):
         self._T_w_e: pin.SE3
         super().__init__(args)
 
+    @property
+    def ee_frame_id(self):
+        return self._ee_frame_id
+
     @property
     def T_w_e(self):
         return self._T_w_e.copy()
diff --git a/python/smc/robots/robotmanager_abstract.py b/python/smc/robots/robotmanager_abstract.py
index cb8409f613decf1ddc29df901307cd60198897ea..5f59cde7484002fac9d7e3b0d35fad2b723b779f 100644
--- a/python/smc/robots/robotmanager_abstract.py
+++ b/python/smc/robots/robotmanager_abstract.py
@@ -91,6 +91,7 @@ class AbstractRobotManager(abc.ABC):
         #                    processes and utilities robotmanager owns     #
         ####################################################################
         # TODO: make a default
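+        # annotated here for type checkers - the logger is only instantiated when logging is requested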
+        self._log_manager: Logger
         if args.save_log:
             self._log_manager = Logger(args)
 
diff --git a/python/smc/robots/utils.py b/python/smc/robots/utils.py
index 7a48694d4346e5d56310b24314a65d876a2019c9..dbe9508f7ccdad465f03d8fad107102385744ce2 100644
--- a/python/smc/robots/utils.py
+++ b/python/smc/robots/utils.py
@@ -1,3 +1,5 @@
+from smc.robots.implementations import *
+
 import numpy as np
 import pinocchio as pin
 import argparse
@@ -186,14 +188,13 @@ def defineGoalPointCLI(robot):
     in the case you're visualizing, makes no sense otherwise.
     """
     robot._step()
-    robot._getQ()
-    q = robot.getQ()
+    q = robot.q
     # define goal
     pin.forwardKinematics(robot.model, robot.data, np.array(q))
     pin.updateFramePlacement(robot.model, robot.data, robot.ee_frame_id)
     T_w_e = robot.data.oMf[robot.ee_frame_id]
     print("You can only specify the translation right now.")
-    if not robot.pinocchio_only:
+    if robot.args.real:
         print(
             "In the following, first 3 numbers are x,y,z position, and second 3 are r,p,y angles"
         )
@@ -234,7 +235,24 @@ def defineGoalPointCLI(robot):
     # NOTE i'm not deepcopying this on purpose
     # but that might be the preferred thing, we'll see
     robot.Mgoal = Mgoal
-    if robot.args.visualize_manipulator:
+    if robot.args.visualizer:
         # TODO document this somewhere
         robot.visualizer_manager.sendCommand({"Mgoal": Mgoal})
     return Mgoal
+
+
+# TODO: finish
+def getRobotFromArgs(args: argparse.Namespace) -> AbstractRobotManager:
+    if args.robot == "ur5e":
+        if args.real:
+            return RealUR5eRobotManager(args)
+        else:
+            return SimulatedUR5eRobotManager(args)
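+    # NOTE: robot types that aren't wired up yet fall through and return None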
+
+
+#    if args.robot == "heron":
+#        return RealUR5eRobotManager(args)
+#    if args.robot == "mir":
+#        return RealUR5eRobotManager(args)
+#    if args.robot == "yumi":
+#        return RealUR5eRobotManager(args)