diff --git a/examples/cart_pulling/control_sub_problems/with_planner/base_and_ee_path_following_from_planner.py b/examples/cart_pulling/control_sub_problems/with_planner/base_and_ee_path_following_from_planner.py
index 67da1a3069cfd4d05916edfad0b9d3c04b69da46..e74dbd591649193532cfc1f6709dfe7f63bc7b9f 100644
--- a/examples/cart_pulling/control_sub_problems/with_planner/base_and_ee_path_following_from_planner.py
+++ b/examples/cart_pulling/control_sub_problems/with_planner/base_and_ee_path_following_from_planner.py
@@ -1,7 +1,7 @@
 from smc import getRobotFromArgs
 from smc import getMinimalArgParser
 from smc.path_generation.maps.premade_maps import createSampleStaticMap
-from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3_fixed
+from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3
 from smc.control.optimal_control.util import get_OCP_args
 from smc.control.cartesian_space import getClikArgs
 from smc.path_generation.planner import starPlanner, getPlanningArgs
@@ -74,7 +74,7 @@ if __name__ == "__main__":
 
     _, path2D = data
     path2D = np.array(path2D).reshape((-1, 2))
-    pathSE3 = path2D_to_SE3_fixed(path2D, args.handlebar_height)
+    pathSE3 = path2D_to_SE3(path2D, args.handlebar_height)
     if args.visualizer:
         # TODO: document this somewhere
         robot.visualizer_manager.sendCommand({"Mgoal": pathSE3[0]})
diff --git a/examples/cart_pulling/control_sub_problems/with_planner/ee_only_path_following_from_plannner.py b/examples/cart_pulling/control_sub_problems/with_planner/ee_only_path_following_from_plannner.py
index fcd16af1ed24fb60e5cfdf3e73c230240ef4d209..05efbb44f1dfa211317f6791be278dfbe6ef20cc 100644
--- a/examples/cart_pulling/control_sub_problems/with_planner/ee_only_path_following_from_plannner.py
+++ b/examples/cart_pulling/control_sub_problems/with_planner/ee_only_path_following_from_plannner.py
@@ -1,7 +1,7 @@
 from smc import getRobotFromArgs
 from smc import getMinimalArgParser
 from smc.path_generation.maps.premade_maps import createSampleStaticMap
-from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3_fixed
+from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3
 from smc.control.optimal_control.util import get_OCP_args
 from smc.control.cartesian_space import getClikArgs
 from smc.path_generation.planner import starPlanner, getPlanningArgs
@@ -65,7 +65,7 @@ if __name__ == "__main__":
 
     _, path2D = data
     path2D = np.array(path2D).reshape((-1, 2))
-    pathSE3 = path2D_to_SE3_fixed(path2D, args.handlebar_height)
+    pathSE3 = path2D_to_SE3(path2D, args.handlebar_height)
     if args.visualizer:
         # TODO: document this somewhere
         robot.visualizer_manager.sendCommand({"Mgoal": pathSE3[0]})
diff --git a/examples/cart_pulling/disjoint_control/demo.py b/examples/cart_pulling/disjoint_control/demo.py
index 29ed9f6d1401529067ee5cc4b446c643fc0f9f1e..0531b4cbd96fb4c093b2dc12d3a855b3fb9f90e2 100644
--- a/examples/cart_pulling/disjoint_control/demo.py
+++ b/examples/cart_pulling/disjoint_control/demo.py
@@ -1,7 +1,7 @@
 from smc import getRobotFromArgs
 from smc import getMinimalArgParser
 from smc.path_generation.maps.premade_maps import createSampleStaticMap
-from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3_fixed
+from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3
 from smc.control.optimal_control.util import get_OCP_args
 from smc.control.cartesian_space import getClikArgs
 from smc.path_generation.planner import starPlanner, getPlanningArgs
@@ -72,7 +72,7 @@ if __name__ == "__main__":
 
     _, path2D = data
     path2D = np.array(path2D).reshape((-1, 2))
-    pathSE3 = path2D_to_SE3_fixed(path2D, args.handlebar_height)
+    pathSE3 = path2D_to_SE3(path2D, args.handlebar_height)
     if args.visualizer:
         # TODO: document this somewhere
         robot.visualizer_manager.sendCommand({"Mgoal": pathSE3[0]})
diff --git a/examples/cart_pulling/disjoint_control/mpc_base_clik_arm_control_loop.py b/examples/cart_pulling/disjoint_control/mpc_base_clik_arm_control_loop.py
index 86100d8a434f7e0a7161ef64b52d22249538989d..ff8d908a9add0d39d3fd0139e51abf974437a8e2 100644
--- a/examples/cart_pulling/disjoint_control/mpc_base_clik_arm_control_loop.py
+++ b/examples/cart_pulling/disjoint_control/mpc_base_clik_arm_control_loop.py
@@ -6,12 +6,12 @@ from smc.control.optimal_control.path_following_croco_ocp import (
     BasePathFollowingOCP,
 )
 from smc.path_generation.path_math.path2d_to_6d import (
-    path2D_to_SE3_fixed,
+    path2D_to_SE3,
 )
 from smc.path_generation.path_math.cart_pulling_path_math import (
     construct_EE_path,
 )
-from smc.path_generation.path_math.path_to_trajectory import path2D_timed
+from smc.path_generation.path_math.path_to_trajectory import path2D_to_trajectory2D
 from smc.control.controller_templates.path_following_template import (
     PathFollowingFromPlannerControlLoop,
 )
@@ -39,7 +39,7 @@ def BaseMPCEECLIKPathFollowingFromPlannerMPCControlLoop(
     robot._mode = SingleArmWholeBodyInterface.control_mode.whole_body
     p = robot.T_w_b.translation[:2]
     max_base_v = np.linalg.norm(robot._max_v[:2])
-    path_base = path2D_timed(args, path2D_untimed_base, max_base_v)
+    path_base = path2D_to_trajectory2D(args, path2D_untimed_base, max_base_v)
     path_base = np.hstack((path_base, np.zeros((len(path_base), 1))))
 
     x0 = np.concatenate([robot.q, robot.v])
diff --git a/examples/cart_pulling/map_tests/topics_for_robot_localization.md b/examples/cart_pulling/map_tests/topics_for_robot_localization.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ee4618279a8dccd1ac5392515fbf5a314cddce5
--- /dev/null
+++ b/examples/cart_pulling/map_tests/topics_for_robot_localization.md
@@ -0,0 +1,55 @@
+robot localization package
+==========================
+tf tree requirements
+--------------------
+There are 3 frames of interest
+1. base_link - rigidly fixed to the robot
+2. odom - world frame, typically aligned with robot's start position
+3. map - world frame
+
+The robot localization's output is a pose given in either the odom or the map frame,
+and a velocity given in the base_link frame.
+
+Topics that send the following messages are of interest:
+1. nav_msgs/Odometry
+2. geometry_msgs/PoseWithCovarianceStamped
+3. geometry_msgs/TwistWithCovarianceStamped
+4. sensor_msgs/Imu - frame in which it is given is not standardized - robot localization expects ENU frame.
+    However, hopefully the IMU message has a frame_id field specifying the frame. 
+    In this case robot localization maps it to the map or the odom frame.
+
+
+heron
+------
+1. /amcl_pose
+    Type: geometry_msgs/msg/PoseWithCovarianceStamped
+    0.668 Hz, but the robot needs to move for this to be published
+2. /base_pose_ground_truth
+    Type: nav_msgs/msg/Odometry
+    50 Hz
+3. /imu_data/data
+    Type: sensor_msgs/msg/Imu
+        -> should have frame_id in the header 
+    50 Hz
+4. /initialpose
+    Type: geometry_msgs/msg/PoseWithCovarianceStamped
+    pointless
+5. /odom
+    Type: nav_msgs/msg/Odometry
+    1000 Hz
+
+mobile yumi
+-----------
+1. /maskinn/navigation/amcl_pose
+    Type: geometry_msgs/msg/PoseWithCovarianceStamped
+    5.712 Hz, but the robot needs to move for this to be published
+2. /maskinn/navigation/initialpose
+    Type: geometry_msgs/msg/PoseWithCovarianceStamped
+    pointless
+3. /maskinn/platform/odometry
+    Type: nav_msgs/msg/Odometry
+    10 Hz
+4. /maskinn/sensors/camera/imu
+    Type: sensor_msgs/msg/Imu
+        -> should have frame_id in the header 
+    91.195 Hz
diff --git a/examples/cart_pulling/path_construction_tests/path6d_from_path2d.py b/examples/cart_pulling/path_construction_tests/path6d_from_path2d.py
index 00c0d23cbd060d4f2dacbe529b3bf2ba77618e0b..6bd31a6d02be2fa60908ab1175c027b5fd524caf 100644
--- a/examples/cart_pulling/path_construction_tests/path6d_from_path2d.py
+++ b/examples/cart_pulling/path_construction_tests/path6d_from_path2d.py
@@ -3,8 +3,8 @@ from smc import getMinimalArgParser
 from smc.control.cartesian_space import getClikArgs
 from smc.path_generation.planner import getPlanningArgs
 from smc.control.optimal_control.croco_mpc_path_following import initializePastData
-from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3_fixed
-from smc.path_generation.path_math.path_to_trajectory import path2D_timed
+from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3
+from smc.path_generation.path_math.path_to_trajectory import path2D_to_trajectory2D
 from smc.control.optimal_control.util import get_OCP_args
 
 from smc.path_generation.path_math.cart_pulling_path_math import (
diff --git a/examples/cart_pulling/technical_report/cart_pulling.aux b/examples/cart_pulling/technical_report/cart_pulling.aux
index 932fa4c6bc095b427310eb7b1ab8ed3f133d8324..f8314384612998f2341784573192a73941e88178 100644
--- a/examples/cart_pulling/technical_report/cart_pulling.aux
+++ b/examples/cart_pulling/technical_report/cart_pulling.aux
@@ -2,6 +2,7 @@
 \@writefile{toc}{\contentsline {section}{\numberline {1}Problem description}{1}{}\protected@file@percent }
 \newlabel{sec-problem-description}{{1}{1}{}{}{}}
 \@writefile{toc}{\contentsline {section}{\numberline {2}Solution approach}{2}{}\protected@file@percent }
+\newlabel{sec-solution-approach}{{2}{2}{}{}{}}
 \@writefile{toc}{\contentsline {subsection}{\numberline {2.1}Formal task description}{2}{}\protected@file@percent }
 \@writefile{toc}{\contentsline {subsection}{\numberline {2.2}System architecture}{2}{}\protected@file@percent }
 \@writefile{toc}{\contentsline {section}{\numberline {3}Control design}{2}{}\protected@file@percent }
@@ -16,7 +17,12 @@
 \@writefile{toc}{\contentsline {paragraph}{End-effector pose task}{5}{}\protected@file@percent }
 \@writefile{toc}{\contentsline {paragraph}{End-effector pose and base position}{5}{}\protected@file@percent }
 \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.2.2}Path-following tasks}{5}{}\protected@file@percent }
+\@writefile{toc}{\contentsline {paragraph}{End-effector trajectory following cost function}{6}{}\protected@file@percent }
+\@writefile{toc}{\contentsline {paragraph}{Base path following cost function}{6}{}\protected@file@percent }
+\@writefile{toc}{\contentsline {paragraph}{Base and end-effector trajectory following cost function}{6}{}\protected@file@percent }
 \@writefile{toc}{\contentsline {subsubsection}{\numberline {3.2.3}Solver choice}{6}{}\protected@file@percent }
 \@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Reference trajectory construction}{6}{}\protected@file@percent }
 \newlabel{sub-reference-traj-construction}{{3.3}{6}{}{}{}}
-\gdef \@abspage@last{6}
+\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.1}Base reference trajectory}{6}{}\protected@file@percent }
+\@writefile{toc}{\contentsline {subsubsection}{\numberline {3.3.2}End-effector reference trajectory}{7}{}\protected@file@percent }
+\gdef \@abspage@last{8}
diff --git a/examples/cart_pulling/technical_report/cart_pulling.log b/examples/cart_pulling/technical_report/cart_pulling.log
index c5967794efb1f4ba92483b5a6995f03abad9e627..c94d6154b9f3c5d1684906965a791e55fefc1b52 100644
--- a/examples/cart_pulling/technical_report/cart_pulling.log
+++ b/examples/cart_pulling/technical_report/cart_pulling.log
@@ -1,4 +1,4 @@
-This is pdfTeX, Version 3.141592653-2.6-1.40.26 (TeX Live 2024/Arch Linux) (preloaded format=pdflatex 2025.2.22)  24 FEB 2025 17:23
+This is pdfTeX, Version 3.141592653-2.6-1.40.26 (TeX Live 2024/Arch Linux) (preloaded format=pdflatex 2025.2.22)  26 FEB 2025 10:30
 entering extended mode
  \write18 enabled.
  %&-line parsing enabled.
@@ -251,18 +251,18 @@ File: umsb.fd 2013/01/14 v3.01 AMS symbols B
 [1
 
 {/var/lib/texmf/fonts/map/pdftex/updmap/pdftex.map}{/usr/share/texmf-dist/fonts
-/enc/dvips/cm-super/cm-super-t1.enc}] [2{/usr/share/texmf-dist/fonts/enc/dvips/
-cm-super/cm-super-ts1.enc}] [3] [4] [5] [6] (./cart_pulling.aux)
+/enc/dvips/cm-super/cm-super-t1.enc}] [2] [3{/usr/share/texmf-dist/fonts/enc/dv
+ips/cm-super/cm-super-ts1.enc}] [4] [5] [6] [7] [8] (./cart_pulling.aux)
  ***********
 LaTeX2e <2023-11-01> patch level 1
 L3 programming layer <2024-02-20>
  ***********
  ) 
 Here is how much of TeX's memory you used:
- 3584 strings out of 476076
- 51971 string characters out of 5793775
+ 3585 strings out of 476076
+ 51994 string characters out of 5793775
  1936187 words of memory out of 5000000
- 25617 multiletter control sequences out of 15000+600000
+ 25618 multiletter control sequences out of 15000+600000
  578758 words of font info for 92 fonts, out of 8000000 for 9000
  14 hyphenation exceptions out of 8191
  61i,16n,69p,240b,221s stack positions out of 10000i,1000n,20000p,200000b,200000s
@@ -273,17 +273,19 @@ ts/type1/public/amsfonts/cm/cmmi5.pfb></usr/share/texmf-dist/fonts/type1/public
 r10.pfb></usr/share/texmf-dist/fonts/type1/public/amsfonts/cm/cmr5.pfb></usr/sh
 are/texmf-dist/fonts/type1/public/amsfonts/cm/cmr7.pfb></usr/share/texmf-dist/f
 onts/type1/public/amsfonts/cm/cmsy10.pfb></usr/share/texmf-dist/fonts/type1/pub
-lic/amsfonts/cm/cmsy7.pfb></usr/share/texmf-dist/fonts/type1/public/cm-super/sf
-bx0700.pfb></usr/share/texmf-dist/fonts/type1/public/cm-super/sfbx1000.pfb></us
-r/share/texmf-dist/fonts/type1/public/cm-super/sfbx1200.pfb></usr/share/texmf-d
-ist/fonts/type1/public/cm-super/sfbx1440.pfb></usr/share/texmf-dist/fonts/type1
-/public/cm-super/sfrm0700.pfb></usr/share/texmf-dist/fonts/type1/public/cm-supe
-r/sfrm1000.pfb></usr/share/texmf-dist/fonts/type1/public/cm-super/sfrm1200.pfb>
-</usr/share/texmf-dist/fonts/type1/public/cm-super/sfrm1728.pfb>
-Output written on cart_pulling.pdf (6 pages, 249938 bytes).
+lic/amsfonts/cm/cmsy7.pfb></usr/share/texmf-dist/fonts/type1/public/amsfonts/sy
+mbols/msbm10.pfb></usr/share/texmf-dist/fonts/type1/public/cm-super/sfbx0700.pf
+b></usr/share/texmf-dist/fonts/type1/public/cm-super/sfbx1000.pfb></usr/share/t
+exmf-dist/fonts/type1/public/cm-super/sfbx1200.pfb></usr/share/texmf-dist/fonts
+/type1/public/cm-super/sfbx1440.pfb></usr/share/texmf-dist/fonts/type1/public/c
+m-super/sfrm0500.pfb></usr/share/texmf-dist/fonts/type1/public/cm-super/sfrm070
+0.pfb></usr/share/texmf-dist/fonts/type1/public/cm-super/sfrm1000.pfb></usr/sha
+re/texmf-dist/fonts/type1/public/cm-super/sfrm1200.pfb></usr/share/texmf-dist/f
+onts/type1/public/cm-super/sfrm1728.pfb>
+Output written on cart_pulling.pdf (8 pages, 288786 bytes).
 PDF statistics:
- 113 PDF objects out of 1000 (max. 8388607)
- 69 compressed objects within 1 object stream
+ 131 PDF objects out of 1000 (max. 8388607)
+ 81 compressed objects within 1 object stream
  0 named destinations out of 1000 (max. 500000)
  1 words of extra memory for PDF output out of 10000 (max. 10000000)
 
diff --git a/examples/cart_pulling/technical_report/cart_pulling.pdf b/examples/cart_pulling/technical_report/cart_pulling.pdf
index ef8d310f74a933b2857e3eefe9231e4b92e1be83..de7c9151127cbd4e479a974680060eb7de0f65c5 100644
Binary files a/examples/cart_pulling/technical_report/cart_pulling.pdf and b/examples/cart_pulling/technical_report/cart_pulling.pdf differ
diff --git a/examples/cart_pulling/technical_report/cart_pulling.synctex.gz b/examples/cart_pulling/technical_report/cart_pulling.synctex.gz
index cad3b26b6193f91c6e77cc65ad7363a2175f04ba..fafbe7f80a80ebfacefe75076a23a4d1886c3977 100644
Binary files a/examples/cart_pulling/technical_report/cart_pulling.synctex.gz and b/examples/cart_pulling/technical_report/cart_pulling.synctex.gz differ
diff --git a/examples/cart_pulling/technical_report/cart_pulling.tex b/examples/cart_pulling/technical_report/cart_pulling.tex
index 7658d68ff590c4884aa0d022c6561d78a90e544f..ff981d06574cd81ccec8fad4abb43a00100405bd 100644
--- a/examples/cart_pulling/technical_report/cart_pulling.tex
+++ b/examples/cart_pulling/technical_report/cart_pulling.tex
@@ -63,6 +63,7 @@ to on top of the base and the end-effector positions).
 \end{enumerate}
 
 \section{Solution approach}
+\label{sec-solution-approach}
 
 \subsection{Formal task description}
 \subsection{System architecture}
@@ -217,13 +218,13 @@ about the OCP at this point. This means that an OCP can be constructed
 as a combination of different dynamics models, constraints, 
 and objective functions, all defined just at one knot.
 This is used in path-following, where every knot is assigned
-a each own target (in this case a path point )to reach in the objective function.
+its own target (in this case a path point) to reach in the objective function.
 Another useful feature is setting different cost coefficients,
 which most useful in setting zero terminal velocities at the final knot.
 Note that the last point is not a good idea in MPC!
 
 \subsubsection{Point-to-point tasks}
-In case there is a fixed end-goal to reach, we use the se3 error
+In case there is a fixed end-goal to reach, we use the $se(3)$ error
 norm as the task-specific function to put into the objective function.
 \paragraph{End-effector pose task}
 In the particular case of the goal being defined for the end-effector,
@@ -231,11 +232,7 @@ whose pose is denoted as $   T_{ w }^{ e }$ reaching a goal
 denoted as $   T_{ w }^{ \text{goal} }$:
 \begin{equation}
 l_{ e } (x)= \left\lVert 
-\log \left( 
-		\left( 
-T_{ w }^{ e }
- \right)^{ -1 } 
-\right) T_{ w }^{ \text{goal} }
+\log \left( \left( T_{ w }^{ e } \right)^{ -1 } T_{ w }^{ \text{goal} } \right)
 \right \rVert _{ 2 }
 \end{equation}
 Here we of course use only joint values $ q  $, not the full state $ x  $,
@@ -279,12 +276,35 @@ $ t = \lambda s   $ would be a decision variable in the OCP.
 However, we found it easier to manually construct a trajectory, i.e. a timing law
 on the path. This is because it is very easy to construct a trajectory following OCP.
 Namely, it is done by modifying the point-to-point formulation by replacing
-$ T_{ w }^{ goal }  $ with $ _{ i } T_{ w }^{ traj }  $ where 
-$  _{ i } T_{ w }^{ traj }    $ is the point on the desired trajectory corresponding
+$ T_{ w }^{ goal }  $ with $ T_{ w }^{ traj } [i]  $ where 
+$   T_{ w }^{ traj } [i]    $ is the point on the desired trajectory corresponding
 to the desired timing law on the path. More on path and trajectory construction can
 be found in \ref{sub-reference-traj-construction}.
+The important point here is that the trajectory needs
+to be interpolated so as to correspond to the timing of the knots.
+To be more specific, the time difference $ \Delta t  $
+between two indexes of the array defining the reference trajectory
+has to be the one in the OCP.
+For our purposes linear interpolation is sufficient,
+especially because the time-horizon in the MPC is rather short,
+leading to a short trajectory in space as well.
+
+\paragraph{End-effector trajectory following cost function}
+The task-defining running cost for end-effector trajectory is 
+given by (here with explicit indexing):
+\begin{equation}
+l_{ path } (x [i]) = 
+\left\lVert \log 
+\left( \left( T_{ w }^{ e } [i] \right)^{ -1 } T_{ w }^{ \text{ref} } [i] \right)
+\right \rVert_{ 2 } 
+\end{equation}
+
+\paragraph{Base path following cost function}
+Works in the same way as the described above for the end-effector case,
+just with different translations instead.
 
-TODO write the function
+\paragraph{Base and end-effector trajectory following cost function}
+Is again simply the sum of the two costs.
 
 \subsubsection{Solver choice}
 Box-FDDP from Crocoddy seems to be the fastest, 
@@ -307,7 +327,115 @@ for practical implementation.
 
 \subsection{Reference trajectory construction}
 \label{sub-reference-traj-construction}
-The path planner produces a plan for the base of the robot.
+\subsubsection{Base reference trajectory}
+The path planner produces a path 
+$ \gamma_{ 2 }  \in \mathbb{R}^{2} $ 
+for the base of the robot.
+It is given by an array of points $ \Gamma \in \mathbb{R}^{n \times 2} $, with elements $ \Gamma [i] $,
+where $ i  $ is the index denoting $ i  $th element (point) of the array,
+and indexing starts with 0.
+We begin by introducing a physically dimensionless parametrization 
+$ s \in [0,1]  $, resulting in $ \gamma_{ 2 } (s)  $.
+Importantly, we assume that $ \Gamma  $ is constructed
+from a uniform sampling of $ \gamma  $ (this is true with the
+chosen path planner).
+
+We want to impose a timing law so that the path is traversed at a fixed
+velocity, $ v_{ \text{ref} } = c_{ 3 } \max v_{ \text{base} }  $
+where $ c_{ 3 } \in [0,1]  $.
+A different timing law ensuring the same velocity will be constructed
+for the end-effector reference.
+
+We construct the timing law by calculating the time required
+to traverse the prescribed path at this velocity.
+To do so we first calculate the arclength of the path via
+$ L (\gamma) = \int_{0}^{1} \left\lVert    \dot{\gamma} (s)\right \rVert  ds  $.
+In discrete space we perform the following computation
+$ L (\Gamma) =  \sum_{i=0}^{n-2} \left\lVert 
+\Gamma [i+1] - \Gamma [i]  \right\rVert $.
+From this we get 
+$ t_{ \text{final} } = \frac{  L (\gamma) }{ v_{ \text{ref} }}  $,
+leading to the timing law $ t = t_{ \text{final} } s  $,
+and the reference trajectory $ \gamma (t (s))  $.
+
+\subsubsection{End-effector reference trajectory}
+As discussed in \ref{sec-solution-approach}, we want
+the cart to follow the past path of the base.
+We denote the continuous past path with 
+$ \phi (s) \in \mathbb{R}^{2}, s \in [0,1]  $, and 
+$ \Phi \in \mathbb{R}^{n \times 2}  $ as the discrete collection of points.
+In the software implementation, $ \Phi  $ is a double ended queue
+where at every iteration of the control loop a new
+point $ p_{ \text{base} }  $ is inserted, and the oldest
+entry is removed. We refer to this double ended queue as the rolling past window.
+The size of the rolling past window $ n  $ is set by the user,
+and it should be long enough to construct the preferred trajectory.
+Note that $ \Phi  $ is initialized as a straight line from the end-effector
+to the base of the robot. 
+
+Assuming a stable control frequency (it empirically holds well enough),
+$ \Phi  $ is a uniform sampling of $ \phi  $.
+We want to maintain a fixed distance between the base and the handlebar of
+the cart. Since the handlebar is a rigid body, it also uniquely
+determines the references for the end-effector(s).
+Thus the first task is to find the point on the past path
+which is the preferred distance from the robot on the arclength 
+of the past path. As in the case of calculating the length
+of the path given by the path planner for the base, 
+we use 
+$ L (\Phi) = \sum_{i=0}^{n-2} \left\lVert 
+\Phi [i+1] - \Phi [i]
+\right \rVert   $. 
+By computing this sum in a loop, we find the index $ h  $
+for which
+$ \sum_{i=0}^{h-1} \left\lVert 
+\Phi [i+1] - \Phi [i]
+\right \rVert  = d_{ \text{preferred} }  $,
+where $ d_{ \text{preferred} }  $ denotes the preferred
+distance of the handlebar to the base of the robot.
+Since we do not care about the past path beyond the index $ h  $,
+we cut them off of $ \Phi  $. The resulting
+path from the preferred handlebar distance to the base of the robot
+can then be parametrized with $ s \in [0,1]  $.
+
+The next step is to construct an $ SE (3)  $ reference from the 2-dimensional
+past path in $ (x,y)  $ coordinates of the world frame.
+In the dual arm case this will be the absolute frame
+of the dual arm manipulation task.
+Since the robot is operating on flat ground, and the cart is rigid,
+the height, i.e. the $ z  $ coordinate is known in advance and fixed.
+Since the handlebar is rigidly grasped, and we want the cart
+to follow the base's path, the rotational axes are fixed as well.
+We set them by constructing a Frenet frame on the past path.
+Without loss of generality, let the $ z  $ axis of the end-effector frame
+be perpendicular to the ground, and pointing down.
+Also let the $ x  $ axis of the end-effector frame be parallel
+to the tangent of the path (either direction is fine,
+we choose toward the mobile base). This ensures the same orientation
+of the cart as the base of the robot on the path.
+Finally, the $ y   $ axis simply completes the 
+right-handed frame.
+
+To make this construction, we need to compute the tangent of the past path 
+$ \dot{\phi} (s)  $. Alternatively base velocities could also be logged,
+but this approach is less prone to noise.
+TODO: this is not a 100\% obvious statement due to potential
+localization-induced teleporting.
+For simplicity, we compute the angle of the secant of the path
+in the world frame, and formulate the rotation matrix from it.
+In particular 
+\begin{equation}
+ \theta [i] = \operatorname{atan2} \left(  \phi_{ y } [i+1] - \phi _{ y } [i],\; \phi_{ x } [i+1] - \phi _{ x } [i]  \right)  
+\end{equation}
+ which leads to
+ \begin{equation}
+ R_{ w }^{ \text{ref} } [i] = 
+\begin{bmatrix} 
+		\cos (\theta [i]) &  \sin (\theta [i]) & 0  \\
+		\sin (\theta [i]) & - \cos(\theta [i]) & 0  \\
+		0 & 0 & -1  
+\end{bmatrix} 
+ \end{equation}
 
 
 
diff --git a/examples/cart_pulling/technical_report/cart_pulling.toc b/examples/cart_pulling/technical_report/cart_pulling.toc
index 10fd76c26af60284e20ff24197e847d5eb7b1058..d849ffb28edc48ac5cf1fce4275c2ace3624249c 100644
--- a/examples/cart_pulling/technical_report/cart_pulling.toc
+++ b/examples/cart_pulling/technical_report/cart_pulling.toc
@@ -14,5 +14,10 @@
 \contentsline {paragraph}{End-effector pose task}{5}{}%
 \contentsline {paragraph}{End-effector pose and base position}{5}{}%
 \contentsline {subsubsection}{\numberline {3.2.2}Path-following tasks}{5}{}%
+\contentsline {paragraph}{End-effector trajectory following cost function}{6}{}%
+\contentsline {paragraph}{Base path following cost function}{6}{}%
+\contentsline {paragraph}{Base and end-effector trajectory following cost function}{6}{}%
 \contentsline {subsubsection}{\numberline {3.2.3}Solver choice}{6}{}%
 \contentsline {subsection}{\numberline {3.3}Reference trajectory construction}{6}{}%
+\contentsline {subsubsection}{\numberline {3.3.1}Base reference trajectory}{6}{}%
+\contentsline {subsubsection}{\numberline {3.3.2}End-effector reference trajectory}{7}{}%
diff --git a/examples/cart_pulling/todos.toml b/examples/cart_pulling/todos.toml
new file mode 100644
index 0000000000000000000000000000000000000000..bc1f78998f64063e1f4582f3a78dd813fd89f4a5
--- /dev/null
+++ b/examples/cart_pulling/todos.toml
@@ -0,0 +1,1044 @@
+[project_plan]
+name = "wara challenge paper"
+description = """
+just continuing with the wara challenge task 1 as if it never ended. it's automated cart pulling by a mobile robot. all deliverables should be done on both heron and yumi for the purposes of comparison and all the other benefits. 
+""" 
+priority = 1
+deadline = 2025-02-28
+dependencies = ["..."]
+engineering_purpose = """
+solving a real problem of having a mobile manipulator pushing and pulling a cart from point A to B  with SOTA solutions for every element of the stack 
+"""
+personal_purpose = """demonstrating i can solve a real robotics problem"""
+hours_required = 0
+is_done = false
+
+[[project_plan.action_items]]
+name = "moving robots via their ROS interfaces" 
+description = """
+heron's mobile base uses something/vel_cmd. there has to be a verification that this topic actually works. mobile yumi uses abb_whole_body_driver. this works in simulation, but not on the real robot. there are multiple possible reasons for this. solving this task means:
+1) understanding how whole_body_driver works
+2) enumerating its possible breaking points
+3) lining up alternatives and testing them in sim (most likely just individual components of abb_whole_body_driver)
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = true
+dependencies = []
+purpose = """being able to move heron and yumi with code"""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "verifying vel_cmd and rest of heron interface"
+description = """
+make sure that we can control heron as expected.  this boils down to verifying we can simultaneously move the base, the arm, and the gripper. the arm and the gripper should pose no problems as they should be decoupled from the base. moving the base means using something/vel_cmd to move the base. 
+
+actual TODO: make it go back and forth while rotating the last joint and also open and close the gripper repeatedly. 
+
+UPDATE: pray to god it works, seba says he saw it happen
+"""
+priority = 1
+deadline = ["first thing to do on heron", 2025-01-20]
+is_done = true
+dependencies = []
+purpose = """verify heron interface to be able to rest assuredly"""
+hours_required = 2
+
+[[project_plan.action_items.action_items]]
+name = "exploring & understanding whole-body-driver" 
+description = """
+the abb_myumi_whole_body_driver does not work on the real robot. this needs to be debugged, which in turn requires knowing what's going on. in particular, looking at the ins and outs of the driver, focusing on what needs to be (or musn't be) running for it to do actually move the robot. 
+the outcome is: 
+1) produce a tutorial-type document for the startup process, including a checklist if something breaks during operation
+2) produce a list of possible failures of the driver
+3) run, test and document the components of the whole body driver, ex. egm for one arm etc
+
+UPDATE: turned into priority 3 because there is no time for this and we just have to pray it works
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = []
+purpose = """being able to move mobile yumi with code"""
+hours_required = 12
+
+
+[[project_plan.action_items.action_items]]
+name = "finding alternatives to whole body driver" 
+description = """
+the whole body driver might be doomed, so having a different way to control the robot is a good idea. this means controlling the arms and the base individually. the base is controllable through the nav2 stack as well. the arms are controllable through RAPID, but that's unusable. hopefully one can talk to egm of each arm directly. the point of this action item is to investigate that. 
+
+UPDATE: turned into priority 3 because there is no time for this and we just have to pray it works
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = ["exploring & understanding whole-body-driver"]
+purpose = """being able to move mobile yumi with something other than whole body driver"""
+hours_required = 7
+
+
+[[project_plan.action_items.action_items]]
+name = "verifying operational frequency" 
+description = """
+once both heron and myumi can be controlled, the control frequency needs to be determined. because both things run on ros2, and due to many processes running in parallel, it is difficult to say how often the commands are issued,  and also whether there are consistent delays between them. this quantities should be determined if possible. 
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = ["moving robots via their ROS interfaces"]
+purpose = """making sure we are getting the best control performance in the sense
+of frequency, also ensures the approach isn't fundamentally bad/incorrect (ROS2 qos
+profiles etc)"""
+hours_required = 4
+
+[[project_plan.action_items]]
+name = "fast localization"
+description = """
+amcl from ros2 nav2 stack seems very good, but gives updates at 2Hz, which causes the robot to teleport internally.  for this reason, odometry should be used in-between updates, or even better, a kalman filter balancing odometry and  amcl updates should be used. some amount of teleporting will certainly occur, but  this can't be in meters for hopefully obvious reasons. 
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-21]
+is_done = false
+dependencies = []
+purpose = """localization is required for correctness of any kind of mobile robot movement,
+especially for whole-body control"""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "understanding and testing odometry" 
+description = """
+ros2 nav2 odometry supposedly already incorporates information from position encoders on the wheel, imu data, lidar readings etc., but it of course does not use a map to localize. understanding odometry implies being able to answer the following: 
+1) how precise it is and for how long is it reliable
+2) how does 1) change depending on the available sensors
+3) what's configurable and how much of that is useful
+4) obviously this needs to be tested on hardware as well
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-20]
+is_done = false
+dependencies = []
+purpose = """we need some kind of odometry for fast localization, and it's better to use the provided
+thing over wasting time hacking a sub-par solution from various topics"""
+hours_required = 6
+
+[[project_plan.action_items.action_items]]
+name = "testing whether raw odometry + amcl is enough" 
+description = """
+if we can use odometry until we get an amcl reading, then just put in  the amcl number over whatever odometry was, and then reset odometry and keep using that until the next update, great, we don't need to use a kalman filter. this todo is to actually check. 
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-20]
+is_done = false
+dependencies = ["understanding and testing odometry"]
+purpose = """trying to avoid the work of using a kalman filter"""
+hours_required = 3
+
+
+[[project_plan.action_items.action_items]]
+name = "configuring a kalman filter from robot_localization" 
+description = """
+in case raw odometry + amcl is not enough, a kalman filter has to be used
+to localize. amcl provides slow correct updates, odometry provides quick but soon unreliable updates. exactly such a kf is implemented in the ros2 robot_localization package, and is suggested by the nav2 stack. to actually use it, some config files need to be filled, and some sensor/overall measurement covariances need to be pulled out of a hat or estimated via measurements. 
+
+UPDATE: prior 2 -> 3, no time, can't be priority 2 anymore, other things are more important
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-21]
+is_done = false
+dependencies = ["testing whether raw odometry + amcl is enough"]
+purpose = """configuring a kf if necessary"""
+hours_required = 6
+
+[[project_plan.action_items]]
+name = "ros2 whatever map into path planning map"
+description = """
+we get a bitmap of the map from some ros2 topic. this needs to be starified for our path planning to work. 
+
+the todos go as follows:
+1) figure out how to get a matrix occupancy grid or whatever format from ros2. 
+2) know which format the path planner wants. it's something in shapely, which is then mapped into star shapes.
+   the outcome here is knowing which shapes are already supported and running with that. 
+   just rectangles and ellipses should be good enough
+3) create a fixed transformation for the static map of the lab into star shapes. ideally
+   save that and keep it cached.
+4) (optional, but highly preferable) take what isn't static in the occupancy map, convex hull it, hit a rectangle/ellipse on that
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-19]
+is_done = false
+dependencies = []
+purpose = """localization is required for correctness of any kind of mobile robot movement,
+especially for whole-body control"""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "getting map from ros2"
+description = """
+afaik, the map is built by SLAM from the Nav2 stack. this map is built using all possible sensing shoved into SLAM. it is static and the occupancy is colored black in rviz.
+
+on the same map there is an overlay of the laser scan, projected down onto the map. these points are colored red. these things should be obtainable in the same format as the static map.
+NOTE: try getting the second part here, but if it's something complicated fuck it
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-18]
+is_done = false
+dependencies = []
+purpose = """no map no planning, need to get it from nav2"""
+hours_required = 4
+
+[[project_plan.action_items.action_items]]
+name = "understanding starifiable obstacles"
+description = """
+need to know what the ros2 map can be made into
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-18]
+is_done = false
+dependencies = []
+purpose = """no map no planning, need to get it from nav2"""
+hours_required = 4
+
+[[project_plan.action_items.action_items]]
+name = "starify static map"
+description = """
+need to know what the ros2 map can be made into
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-19]
+is_done = false
+dependencies = []
+purpose = """no map no planning, need to get it from nav2"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "starify dynamic obstacles"
+description = """
+need to know what the ros2 map can be made into
+"""
+priority = 2
+deadline = ["before going to vasteros", 2025-02-19]
+is_done = false
+dependencies = []
+purpose = """no map no planning, need to get it from nav2"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "continuously starify dynamic obstacles"
+description = """
+need to know what the ros2 map can be made into
+"""
+priority = 2
+deadline = ["before going to vasteros", 2025-02-19]
+is_done = false
+dependencies = []
+purpose = """no map no planning, need to get it from nav2"""
+hours_required = 5
+
+[[project_plan.action_items]]
+name = "computer vision stuff"
+description = """
+remaining tasks in the the computer vision stack:
+1) debugging pose estimation from filtered depth pixels
+2) finishing integration with SMC
+3) maybe cross-validating with yolo
+4) simple color filtering for the bar during pushing/pulling?
+
+UPDATE: prior 2 -> 3, no time, can't be priority 2 anymore, other things are more important. we can do this blind and say how to do vision
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = []
+purpose = """required to solve the real problem of a robot pulling a real cart - 
+it has to see it. also easily usable in other projects if done right codewise."""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "generate dataset" 
+description = """
+tape phone to laptop. start dataset generating script. go around the cart and create the dataset.
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = []
+purpose = """needed for yolo cross-validation"""
+hours_required = 2
+
+
+[[project_plan.action_items.action_items]]
+name = "train yolo to detect cart" 
+description = """
+install yolo (v11 ideally) on computer with already install cuda opencv etc. put basic things into yolo config files, run training, validate it works with simplest possible 'show boxes on live camera input' script. 
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = []
+purpose = """required for cross-validation with yolo"""
+hours_required = 4
+
+[[project_plan.action_items.action_items]]
+name = "fix pose estimation from point cloud"
+description = """
+there's already a segmentation, but ransac/whatever needs to be fixed so that pose estimates are acquired. in case of low precision, there should also be some averaging over more than 1 estimate (and not necessarily from the same angle). also, there might be a need to filter or discard spurious results. 
+
+UPDATE: prior 1 -> 3, no time, can't be priority 2 anymore, other things are more important. we can do this blind and say how to do vision
+"""
+priority = 3
+deadline = ["before going to vasteros", 2025-02-10]
+is_done = false
+dependencies = []
+purpose = """last component of core vision stack, required for anything vision"""
+hours_required = 10
+
+[[project_plan.action_items.action_items]]
+name = "finding a local server to run grounding sam on"
+description = """
+see where to run grounding sam locally. ideally this should be a more permanent solution so that it can be used in the future. probably best solved by asking around. 
+"""
+priority = 3
+is_done = false
+deadline = 2025-02-05
+dependencies = ["fix pose estimation from point cloud"]
+purpose = """
+grounding sam is a very good off-the-shelf solution for  computer vision tasks. this should be available literally at any point in time to enable quick experiments 
+"""
+hours_required = 3
+
+[[project_plan.action_items]]
+name = "refactor optimal control code" 
+description = """
+it's literally unreadable and changeable. most of it is straight copy-pasting too. while some refactoring is kinda optional, this step most certainly is not.
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-16
+dependencies = []
+purpose = """
+making optimal control fixes possible
+"""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "abstract croco optimal control class" 
+description = """
+abstract class to store basic things like the state definition, actuation model etc
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = []
+purpose = """
+at least 100 less lines of copy-pasted code
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "refactor p2p ocp"
+description = """
+refactor p2p ocp
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = ["abstract croco optimal control class"]
+purpose = """
+making optimal control fixes possible
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "refactor end-effector path following"
+description = """
+refactor end-effector path following
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = ["refactor p2p ocp"]
+purpose = """
+making optimal control fixes possible
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "rewrite ee construction from past path to be more efficient"
+description = """
+a large past window is needed to store a path long enough to find the point with prefered distance from base. by large i mean large enough that you can't copy-contruct a new np.ndarray from it at every single iteration. 
+
+hence, cache things like path arclength, do math directly on the deque.
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["refactor p2p ocp"]
+purpose = """
+making optimal control fixes possible
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "write end-effector and base path following"
+description = """
+two versions:
+1) follow predetermined sensible path (same way as for just the end-effector) - this makes sure that ocp by itself is fine
+2) do path-following from planner. there the key is how do you initialize the past dict so that it makes sense you also need to put the arms in a good spot relative to the end-effector. this likely requires a new point-to-point function. or solve 2 ocps one after the other - first for base position, then for the arm - doesn't matter
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = ["refactor p2p ocp"]
+purpose = """
+making optimal control fixes possible
+"""
+hours_required = 7
+
+[[project_plan.action_items]]
+name = "debug control"
+description = """
+control stack is broken. could be the control itself, or trajectories' construction from obtained path.
+
+or there is something fundamentally fucked with changing the plan on the fly, idk. could be that you need to put in some smoothing into it. can't know unless you write tools to test this shit.
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["refactor optimal control code"]
+purpose = """
+getting any base and ee path tracking to work
+"""
+hours_required = 0
+
+[[project_plan.action_items.action_items]]
+name = "manage path saving better"
+description = """
+right now we use a deque to store the path. but this breaks the system if the robot stays in place or moves slowly or not at all - that point eventually fills up the whole queue, the end-effector goes there and everything consequently turns to shit. 
+
+point is some micromanagement of the saved path is necessary. having a deque is a good strategy, but at the very least you have to be able to select whether to add in a new point or not.
+
+ideally you have some math to thin out the path points or whatever. you construct the trajectory on top of the plan at every time-step anyway.
+
+one way to do this would be to inherit from ControlLoopManager 
+and then overwrite whatever necessary to make path management overall happen there.
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = []
+purpose = """
+getting any base and ee path tracking to work
+"""
+hours_required = 3
+
+
+[[project_plan.action_items.action_items]]
+name = "debug past path initialization "
+description = """
+it's not what it should be - either my math is wrong/there's a bug, and my logic is right, or my logic isn't right, or both
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = ["dual arm feature", "refactor end-effector path following"]
+purpose = """
+getting any base and ee path tracking to work
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "ensure either reference doesn't change rapidly"
+description = """
+getting a sudden change in the path might lead to instability. especially since we are taking dynamics into account.
+
+first of all, a collective smoothness of the path has to be investigated. by this i mean concatenating all received paths, and then checking for norms of its derivative, or similar.
+
+this can be achieved with:
+1) a low-pass filter in-between received paths, same way as for wrench filtering. 
+2) increasing regularization costs in the mpc. IN FACT, you can even try changing the costs on the fly based on how suddenly the path reference shifted.
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["dual arm feature", "refactor end-effector path following"]
+purpose = """
+getting any base and ee path tracking to work
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "debug base and ee tracking"
+description = """
+the thing itself might be wrong. you have to test each sub-component to be sure.
+get static data from a non-planner. run forward without running ocp (literally fixed velocity).
+do just path reconstruction. it should be a live controlloop.p_base should go around in  a circle or something. just put trajectory reconstruction there. then go in something that's not a circle. point is do it with just move a mobile base and draw the constructed SE3 path as well.
+PLOT EVERY SINGLE STEP! make the plan providing be on a single processor.
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["dual arm feature", "refactor end-effector path following"]
+purpose = """
+solving the same problem but with a single arm
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "alternative if full-body mpc is not the right tool for the job"
+description = """
+control just the base with mpc for base tracking.
+make the handlebar be defined in a pose relative to the body. run clik on arms to get to a point made up by the path (still need to find corresponding path point) with the orientation being constructed with normal/tangent of the past path. then run p2p on that single goal. update the goal at every time step. an even better idea if getting whole trajectory works, just run trajectory following.
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["dual arm feature", "refactor end-effector path following"]
+purpose = """
+half of paper problem
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "dual arm p2p and path following"
+description = """
+refactor end-effector path following
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["dual arm feature", "refactor end-effector path following"]
+purpose = """
+half of paper problem
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "cart-pulling (ee and base path following)"
+description = """
+refactor end-effector path following
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["dual arm feature", "whole-body feature", "dual arm p2p and path following"]
+purpose = """
+half of paper problem
+"""
+hours_required = 3
+
+[[project_plan.action_items.action_items]]
+name = "construct map showcasing my prefered setting (make the labyrinth!!!!!!!!)"
+description = """
+it's the experiment
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = []
+purpose = """
+
+"""
+hours_required = 3
+
+[[project_plan.action_items]]
+name = "tools for trajectory generation in smc" 
+description = """
+turning a path into a trajectory is bugged. making goals for 2 arms from a single goal is annoying and takes too long. designing SE3 trajectories is cumbersome, and it shouldn't be because  it's trivial to do visually or with your hand in the air.  there is no reason it should be more than 5x more difficult to input a trajectory on a computer than it is to do it by hand. the same goes for any kind of SE3 targets.  
+
+the todo is to build a tool to do this. the input does not need to be via GUI (the visualization needs to exist of course), but it needs to be fast and interactive in real-time. the cost of writing this tool is much smaller than all future cost of trajectory or goal design, so it should be done asap. 
+UPDATE: prior 1 -> 3, no time, for project purposes this is resolvable by properly using the dual arm absolute frame and thinking for 5 minutes
+UPDATE2: it is prio 1 because some form of this is required (obviously it's not the nice one, but you'll do some hack to not tap in the dark)
+
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = ["smc refactor"]
+purpose = """
+this tool will solve issues with weird trajectories or wasting 20min to write the correct SE3 transformation, which simply persist otherwise and are super annoying to deal with -> it's much better to do geometry on screen than in your head if  you need specific numbers on that geometry. 
+"""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "just design the trajectories for the problem"
+description = """
+debug existing trajectory generation
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-14
+dependencies = ["smc refactor"]
+purpose = """creating the base of interactive goal/transform/trajectory design"""
+hours_required = 8
+
+[[project_plan.action_items.action_items]]
+name = "extend logging to include saving the whole of save-past-dict"
+description = """
+the easiest possible way to see what the fuck goes wrong with paths is to save the entire state of the system, and then reproduce it via logging. you already have 90% of this done with log comparison and the saving of the logs. you can just log the entire save-past-window, and not plot it (because it's a matrix at every timestep). save the calculated path as well.
+
+modifying other code to be able to do this live will pollute the control code which HAS TO stay as simple and straight-forward as possible. 
+you don't want to modify control frequency, or go backwards or whatever the fuck in ControlLoopManager!!!!!!!!!!!!!!!!!! this HAS TO be FORBIDDEN on the code level in control.
+
+but you can do whatever you want when running logs - those don't touch any control function.
+
+UPDATE: not necessary, just drop into an ipython shell and have a parameter deciding whether you want to be in it or not.
+this has to be done on a case by case basis though, and it should be better documented
+"""
+priority = 1
+is_done = true
+deadline = 2025-02-14
+dependencies = ["smc refactor"]
+purpose = """creating the base of interactive goal/transform/trajectory design"""
+hours_required = 8
+
+[[project_plan.action_items.action_items]]
+name = "use drawing feature from drawing demo to draw a path to be followed"
+description = """
+use drawing feature from drawing demo to draw a path to be followed is very likely the easiest possible way to generate a path that we want to follow. certainly easier than constructing the map. 
+
+you can go the extra mile, and make the drawn line a series of rectangles or something and make a real map out of it. but for this purpose you could also lean 100% into albin's code.
+"""
+priority = 2
+is_done = false
+deadline = 2025-02-14
+dependencies = []
+purpose = """make test design for just path planning take 10 seconds"""
+hours_required = 10
+
+[[project_plan.action_items.action_items]]
+name = "interactive meshcat for setting a transformation"
+description = """
+there is a robot/frame/some object in meshcat.  there is a python program waiting for a user command input (keyboard input is fine), so that you don't need to restart the program every time (slow as shit). the thing on screen moves, executing the user's input, ex. left arrow is pressed and the frame goes 0.01m in positive y direction. or you just type in a new value for y, or both. this is probably best done if the cmd-viz is  integrated with a notebook/ipython shell/kernel. there also needs to be a button to save (print in correct code format) the designed transform, parameters of the trajectory, whatever it is so that it can be immediately integrated. if the robot's or whatever else state can be stopped while running some control loop, and then switched into the interactive session, that would be the best possible solution (again, having this in some kind of ipython kernel is probably the way to go). 
+
+UPDATE: prior 1 -> 3, no time, can't be priority 2 anymore, other things are more important. for project purposes this is resolvable by properly using the dual arm absolute frame
+
+UPDATE2: just work in a jupyter notebook or get dropped into an ipython shell. there is no need for anything else. document this better tho
+"""
+priority = 3
+is_done = true
+deadline = 2025-02-14
+dependencies = ["smc refactor"]
+purpose = """creating the base of interactive goal/transform/trajectory design"""
+hours_required = 10
+
+
+[[project_plan.action_items.action_items]]
+name = "reset function"
+description = """
+reset function is needed to save and reload relevant variables in robotmanager and in visualizer. otherwise you can't go back and try a different transform or parameter. 
+UPDATE: prior 1 -> 3, no time, can't be priority 2 anymore, other things are more important.
+
+UPDATE: you don't want to pollute the control code with this sorta shennanigans. instead, you want to do this on logs. so do it on logs
+"""
+priority = 3
+is_done = false
+deadline = 2025-02-14
+dependencies = ["interactive meshcat for setting a transformation"]
+purpose = """making interaction much more usable"""
+hours_required = 2
+
+
+[[project_plan.action_items.action_items]]
+name = "workspace visualization"
+description = """
+putting workspace objects in makes things easier to verify. this should be goal poses or objects (cart in this case), the table and so on. it should be as minimal as possible. the map visualization should be expanded to render the nav2 bitmap of the map. the abstract things like paths should also be visualized for easier understanding of the control algorithm. 
+
+NOTE: probably a good idea for a better paper picture
+"""
+priority = 2
+is_done = false
+deadline = 2025-02-14
+dependencies = ["interactive meshcat for setting a transformation"]
+purpose = """making visual verification more powerful and easier"""
+hours_required = 4
+
+
+[[project_plan.action_items.action_items]]
+name = "path generation parameters interactive tuning"
+description = """
+a similar type of interface should be developed for path generation parameters, and some of them should be visualized. this primarily concerns the length of the planned path and the safety radius around each point of the path. 
+
+UPDATE: prior 2 -> 3, no time, can't be priority 2 anymore, other things are more important. tuning is a luxury we don't need because we are comparing against a brain-dead method of not moving the arms at all
+"""
+priority = 3
+is_done = false
+deadline = 2025-02-01
+dependencies = ["interactive meshcat for setting a transformation"]
+purpose = """making tuning and understanding of path planning much easier"""
+hours_required = 6
+
+
+[[project_plan.action_items]]
+name = "time scaling on path as decision variable in mpc"
+description = """
+we should not pre-time the path passed to the mpc - it should be a collection of points and that's that. we then of course do need a time scaling on the path. from a math standpoint, it seems the best to have the time scaling of the path  as the decision variable. this way, we don't formulate the objective function as distance from knot point to a single pre-selected point on the path,  but we really just stay on it as much as possible while traversing it as quickly as possible. not to say that the predefined points are completely unreasonable, but this ios clearly better.  
+
+the problem is that then a new objective function and its derivatives need to be defined in crocoddyl. this will take a while to write because it's cpp  and i haven't done it in a while.  the potential upside is that if the derivative is finite-differenced, then getting this done is much easier. it should be investigated whether support for arbitrary functions through finite differencing is a thing in crocoddyl. in any event, this can't be a priority 1 task due to the benefit-time-cost ratio. 
+
+UPDATE: prior 2 -> 3, no time, can't be priority 2 anymore, other things are more important. this takes too much time for what it's worth.
+"""
+priority = 3
+is_done = false
+deadline = 2025-02-01
+dependencies = ["tools for trajectory generation in smc"]
+purpose = """having the correct ocp formulation for the whole-body mpc controller for pushing and pulling"""
+hours_required = 20
+
+
+[[project_plan.action_items]]
+name = "additional possible modifications of the ocp"
+description = """
+there is a number of possibly interesting ocp formulations, collected here in case the final product is not good enough: 
+1) a constraint on the ee poses so that they are in the plane defined by the handlebar 
+"""
+priority = 3
+is_done = false
+deadline = 2025-02-01
+dependencies = []
+purpose = """designing a better ocp, testing against different options"""
+hours_required = 12
+
+
+[[project_plan.action_items]]
+name = "fixing behavior tree"
+description = """
+ideally the behavior tree todo in smc is done before this, but it's not necessary. what is necessary is to debug the logic errors in the state-selecting conditions, primarily the one deciding when do you grasp handlebar vs push/pull. this requires computer vision to be done because the amount of errors that gives is what determines how this should look. 
+possibly more states need to be added, like docking.
+
+the description of the desired behavior tree for the cart pulling system is given below:
+
+make a function that implements the get-cart-to-station behaviour tree.
+   this is a control loop that select what control loop to run based on some 
+   logic flags (smaller number = higher priority). simple select loop of the highest priority.
+   for every flag there is a process (node + topic) which determines whether to set it or unset it.
+	- PRIORITY 1: if obstacle flag, then run stopLoop (which is just make all velocities 0). flag on 
+                  only if there are people around.
+	- PRIORITY 2: if handle_bar flag, run loop to grasp the handle bar. if handle bar is grasped
+                  (vishnu's vision sets this flag) then the flag is unset. if not grasped (gripper
+                   status (maybe) + vision decides) then set the flag.
+	- PRIORITY 3: pull cart along prescribed path - runs MPC path-following loop. flag on until goal is reached
+	- PRIORITY 4: dock cart. flag on until cart is docked. i guess vision should check that but idk honestly.
+                  if this flag is set down, the program is down - shut down.
+	run this on the real thing
+
+UPDATE: prior 1 -> 3, no time, can't be priority 2 anymore, other things are more important. no properly working behaviour tree is needed if we don't use vision.
+"""
+priority = 3
+is_done = false
+deadline = 2025-02-01
+dependencies = []
+purpose = """having a functional robotic system, not just 1 controller hopefully working as intended"""
+hours_required = 8
+
+
+[[project_plan.action_items]]
+name = "creating tests for whole system performance" 
+description = """
+there are different ways to manipulate a cart: pushing or pulling. using a single or a dual armed robot. having a differential drive or an omnidirectional mobile base. going as fast as possible is good, but does it make the system less robust? how does the mass of the cart impact performance? which grasps are more reliable, which are more taxing on the robot's hardware? these and more performance metrics should be thought of, and tests to measure them need to be devised and carried out.
+
+UPDATE: KEY COMPARISONS:
+1) arms locked and rigid vs we move arms --> show locked arms failing - navigating narrow passage should be enough
+
+PREFERABLE, BUT NOT NECESSARY
+- heavy vs not heavy cart
+- errors in estimation of weight of cart
+
+NOT NECESSARY:
+- one vs two arms
+"""
+priority = 1
+is_done = false
+deadline = 2025-02-17
+dependencies = ["moving robots via their ROS interfaces", "localization", 
+                "computer vision stuff", "smc refactor", 
+                "tools for trajectory generation in smc"]
+purpose = """
+scientific performance measurement is required for a scientific publication
+"""
+hours_required = 0
+
+
+[[project_plan.action_items.action_items]]
+name = "devising perfomance metrics" 
+description = """
+we want to measure things like time needed to perform the cart-pulling operation, grasp reliability etc. this todo means creating numerical measurements for these things, AND writing code to calculate them from logs. these metrics are then to be measured on all the tests 
+
+UPDATE: focus on what differentiates locked arms from moving arms
+"""
+priority = 1
+deadline = ["before going to vasteros", 2025-02-17]
+is_done = false
+# NOTE: sub-task dependencies should have their parent's dependencies inherited
+dependencies = []
+purpose = """knowing what to measure and why"""
+hours_required = 12
+
+
+[[project_plan.action_items.action_items]]
+name = "performing experiments"
+description = """
+once everything is ready, we can go to vasteros and actually perform the experiments. this should ideally be done in 3 days. 
+the tests are:
+- 1 arm vs 2 arms
+- differential drive vs omnidirectional mobile base
+- kinematic vs dynamic level
+- heavy vs light load
+- different grasps
+- ...
+
+UPDATE: ONLY RIGIDLY LOCKED VS USABLE ARMS ARE IMPORTANT. SEPARATE THIS AND THE REST, MAKE ELSE PRIOR 3 AKA NOT HAPPENING
+"""
+priority = 1
+deadline = ["in vasteros", 2025-02-24]
+is_done = false
+# NOTE: sub-task dependencies should have their parent's dependencies inherited
+dependencies = []
+purpose = """getting data"""
+hours_required = 18
+
+[[project_plan.action_items]]
+name = "write research diary" 
+description = """
+project todos are good for knowing what needs to be done, replanning based on the current status and so on. but not for capturing the research process, formulating the questions, answering them, and doing so mathematically.
+
+this text is also not the paper text. the paper text is a condensed subsection of the research notes, plus all the flowers around it. 
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """this is the product of the project"""
+hours_required = 6
+assignee = "Marko"
+
+
+[[project_plan.action_items]]
+name = "writing the paper" 
+description = """
+the product of the project is the paper. the non-experiment and discussion part should be written before the experiments are done, as the code is being written. writing is essential to knowing what you are doing and why. then, with all the experiments done, the experiment and discussion sections are to be written. finally, the paper needs to be polished.
+
+ASSIGNED TO SEBA
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """this is the product of the project"""
+hours_required = 0
+assignee = "Seba"
+
+[[project_plan.action_items.action_items]]
+name = "setting colors to authors" 
+description = """
+in text who wrote what is colored.
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """this is the product of the project"""
+hours_required = 1
+assignee = "Zheng"
+
+
+[[project_plan.action_items.action_items]]
+name = "pretty picture for modelling" 
+description = """
+schema of robot and cart with path, image on overleaf
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """nice image explaining modelling"""
+hours_required = 4
+assignee = "Zheng"
+
+
+[[project_plan.action_items.action_items]]
+name = "nice plot showing our method can clear narrow corridors" 
+description = """
+the main thing we can do by moving the arms, and that means we can navigate narrower more cluttered maps.
+so we have a figure showing we can clear a corner, which a rigid arm solution can't.
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """nice image explaining modelling"""
+hours_required = 4
+assignee = "Zheng"
+
+
+[[project_plan.action_items.action_items]]
+name = "pulling is better than pushing" 
+description = """
+you can see the obstacles in front of you. if you're pushing you need more sensors. pulling is better. some sentences on this
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """nice image explaining modelling"""
+hours_required = 4
+assignee = "Zheng"
+
+[[project_plan.action_items.action_items]]
+name = "making skeleton of paper, assigning people what to write" 
+description = """
+you have to have a skeleton of the paper ready. have an idea of which figures to put where. know roughly how much text a particular topic needs. fill stuff with lorem ipsum or whatever to make it easier to see.
+
+NOTE: this has to be done with seba. then we assign to others. 
+NOTE NOTE NOTE: the result of this todo is a lot of smaller todos in this big action item!!!!!!
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 3
+assignee = "all"
+
+
+#[[project_plan.action_items.action_items]]
+#name = "writing introdution, related work, methodology" 
+#description = """
+#we already know what the technology stack is and what we are trying to accomplish. hence it can and should be written down now. this part will focus and guide the code development, and should be  done in parallel to it. 
+#
+#ASSIGNED TO SEBA
+#"""
+#priority = 1
+#deadline = ["iros paper submission deadline", 2025-03-01]
+#is_done = false
+#dependencies = []
+#purpose = """part of paper"""
+#hours_required = 20
+
+[[project_plan.action_items.action_items]]
+name = "writing introdution" 
+description = """
+we already know what the technology stack is and what we are trying to accomplish. hence it can and should be written down now. this part will focus and guide the code development, and should be  done in parallel to it. 
+
+ASSIGNED TO SEBA
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+[[project_plan.action_items.action_items]]
+name = "writing related work" 
+description = """
+we already know what the technology stack is and what we are trying to accomplish. hence it can and should be written down now. this part will focus and guide the code development, and should be  done in parallel to it. 
+
+ASSIGNED TO SEBA
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+[[project_plan.action_items.action_items]]
+name = "writing methodology" 
+description = """
+we already know what the technology stack is and what we are trying to accomplish. hence it can and should be written down now. this part will focus and guide the code development, and should be  done in parallel to it. 
+
+ASSIGNED TO SEBA
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+[[project_plan.action_items.action_items.action_items]]
+name = "writing modelling" 
+description = """
+modelling
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+[[project_plan.action_items.action_items.action_items]]
+name = "writing CV, how to grab a cart, small behavior tree mabe" 
+description = """
+writing CV, how to grab a cart, small behavior tree maybe
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+[[project_plan.action_items.action_items.action_items]]
+name = "writing path planning" 
+description = """
+albin's path planning
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+[[project_plan.action_items.action_items.action_items]]
+name = "writing controller" 
+description = """
+mpc
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = []
+purpose = """part of paper"""
+hours_required = 12
+
+
+[[project_plan.action_items.action_items]]
+name = "writing experiments, discussion" 
+description = """
+once the data is in, it has to be written about. this includes generating pretty figures as well. 
+
+ASSIGNED TO SEBA
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-03-01]
+is_done = false
+dependencies = ["performing experiments"]
+purpose = """part of paper"""
+hours_required = 18
+
+
+
+[[project_plan.action_items.action_items]]
+name = "writing conclusion, abstract; polishing" 
+description = """
+writing conclusion, abstract; polishing
+
+ASSIGNED TO SEBA
+"""
+priority = 1
+deadline = ["iros paper submission deadline", 2025-02-28]
+is_done = false
+dependencies = ["writing introdution, related work, methodology", 
+                "writing experiments, discussion"]
+purpose = """part of paper"""
+hours_required = 24
diff --git a/examples/navigation/mobile_base_navigation.py b/examples/navigation/mobile_base_navigation.py
index 3685bcb8249b41f8bc75747cc9f839328589c527..1dd1127efce60d9e2315e4ed27133cf5c9783d02 100644
--- a/examples/navigation/mobile_base_navigation.py
+++ b/examples/navigation/mobile_base_navigation.py
@@ -2,7 +2,7 @@ from smc import getRobotFromArgs
 from smc.robots.interfaces.whole_body_interface import SingleArmWholeBodyInterface
 from smc import getMinimalArgParser
 from smc.path_generation.maps.premade_maps import createSampleStaticMap
-from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3_fixed
+from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3
 from smc.control.optimal_control.util import get_OCP_args
 from smc.control.cartesian_space import getClikArgs
 from smc.path_generation.planner import starPlanner, getPlanningArgs
diff --git a/examples/path6d_from_path2d.py b/examples/path6d_from_path2d.py
index a2636e2f0dbb84c01ee982a9c7efeff03f339791..8efa02bc57dc3aa9b70960105369ed614f23c61a 100644
--- a/examples/path6d_from_path2d.py
+++ b/examples/path6d_from_path2d.py
@@ -11,8 +11,8 @@ import numpy as np
 from functools import partial
 from smc.robots.implementations.heron import heron_approximation
 from smc.visualization.visualizer import manipulatorVisualizer
-from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3_fixed
-from smc.path_generation.path_math.path_to_trajectory import path2D_timed
+from smc.path_generation.path_math.path2d_to_6d import path2D_to_SE3
+from smc.path_generation.path_math.path_to_trajectory import path2D_to_trajectory2D
 from smc.control.optimal_control.util import get_OCP_args
 
 
@@ -112,7 +112,7 @@ path2D_handlebar_1 = np.hstack(
 )
 
 # TODO: combine with base for maximum correctness
-pathSE3_handlebar = path2D_to_SE3_fixed(path2D_handlebar_1, args.handlebar_height)
+pathSE3_handlebar = path2D_to_SE3(path2D_handlebar_1, args.handlebar_height)
 for p in pathSE3_handlebar:
     print(p)
 
diff --git a/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py b/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py
index 6eaf3697bc17419cb3dd4f34edea8eb8e390a452..2283451ddf8594c4284c89fbddbc5693d6de138e 100644
--- a/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py
+++ b/python/smc/control/cartesian_space/cartesian_space_trajectory_following.py
@@ -5,81 +5,82 @@ from functools import partial
 import pinocchio as pin
 import numpy as np
 
-from smc.path_generation.path_math.path_to_trajectory import path2D_to_timed_SE3
-
-
-def cartesianPathFollowingWithPlannerControlLoop(
-    args, robot: AbstractRobotManager, path_planner: ProcessManager, i, past_data
-):
-    """
-    cartesianPathFollowingWithPlanner
-    -----------------------------
-    end-effector(s) follow their path(s) according to what a 2D path-planner spits out
-    """
-    breakFlag = False
-    log_item = {}
-    save_past_dict = {}
-
-    q = robot.q
-    T_w_e = robot.T_w_e
-    p = T_w_e.translation[:2]
-    path_planner.sendFreshestCommand({"p": p})
-
-    # NOTE: it's pointless to recalculate the path every time
-    # if it's the same 2D path
-    data = path_planner.getData()
-    if data == None:
-        if args.debug_prints:
-            print("got no path so no doing anything")
-        robot.sendVelocityCommand(np.zeros(robot.model.nv))
-        log_item["qs"] = q.reshape((robot.model.nq,))
-        log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-        return breakFlag, save_past_dict, log_item
-    if data == "done":
-        breakFlag = True
-
-    path_pol, path2D_untimed = data
-    path2D_untimed = np.array(path2D_untimed).reshape((-1, 2))
-    # should be precomputed somewhere but this is nowhere near the biggest problem
-    max_base_v = np.linalg.norm(robot.model.robot.model.velocityLimit[:2])
-
-    # 1) make 6D path out of [[x0,y0],...]
-    # that represents the center of the cart
-    pathSE3 = path2D_to_timed_SE3(args, path_pol, path2D_untimed, max_base_v)
-    # print(path2D_untimed)
-    # for pp in pathSE3:
-    #    print(pp.translation)
-    # TODO: EVIL AND HAS TO BE REMOVED FROM HERE
-    if args.visualizer:
-        robot.visualizer_manager.sendCommand({"path": pathSE3})
-
-    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
-    # NOTE: obviously not path following but i want to see things working here
-    SEerror = T_w_e.actInv(pathSE3[-1])
-    err_vector = pin.log6(SEerror).vector
-    lb = -1 * robot.model.robot.model.velocityLimit
-    lb[1] = -0.001
-    ub = robot.model.robot.model.velocityLimit
-    ub[1] = 0.001
-    # vel_cmd = invKinmQP(J, err_vector, lb=lb, ub=ub)
-    vel_cmd = dampedPseudoinverse(0.002, J, err_vector)
-    robot.sendVelocityCommand(vel_cmd)
-
-    log_item["qs"] = q.reshape((robot.model.nq,))
-    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
-    # time.sleep(0.01)
-    return breakFlag, save_past_dict, log_item
-
-
-def cartesianPathFollowingWithPlanner(
-    args, robot: AbstractRobotManager, path_planner: ProcessManager
-):
-    controlLoop = partial(
-        cartesianPathFollowingWithPlannerControlLoop, args, robot, path_planner
-    )
-    log_item = {"qs": np.zeros(robot.model.nq), "dqs": np.zeros(robot.model.nv)}
-    save_past_dict = {}
-    loop_manager = ControlLoopManager(
-        robot, controlLoop, args, save_past_dict, log_item
-    )
-    loop_manager.run()
+# TODO: update this with all the changes
+# from smc.path_generation.path_math.path_to_trajectory import path2D_to_timed_SE3
+#
+#
+# def cartesianPathFollowingWithPlannerControlLoop(
+#    args, robot: AbstractRobotManager, path_planner: ProcessManager, i, past_data
+# ):
+#    """
+#    cartesianPathFollowingWithPlanner
+#    -----------------------------
+#    end-effector(s) follow their path(s) according to what a 2D path-planner spits out
+#    """
+#    breakFlag = False
+#    log_item = {}
+#    save_past_dict = {}
+#
+#    q = robot.q
+#    T_w_e = robot.T_w_e
+#    p = T_w_e.translation[:2]
+#    path_planner.sendFreshestCommand({"p": p})
+#
+#    # NOTE: it's pointless to recalculate the path every time
+#    # if it's the same 2D path
+#    data = path_planner.getData()
+#    if data == None:
+#        if args.debug_prints:
+#            print("got no path so no doing anything")
+#        robot.sendVelocityCommand(np.zeros(robot.model.nv))
+#        log_item["qs"] = q.reshape((robot.model.nq,))
+#        log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+#        return breakFlag, save_past_dict, log_item
+#    if data == "done":
+#        breakFlag = True
+#
+#    path_pol, path2D_untimed = data
+#    path2D_untimed = np.array(path2D_untimed).reshape((-1, 2))
+#    # should be precomputed somewhere but this is nowhere near the biggest problem
+#    max_base_v = np.linalg.norm(robot.model.robot.model.velocityLimit[:2])
+#
+#    # 1) make 6D path out of [[x0,y0],...]
+#    # that represents the center of the cart
+#    pathSE3 = path2D_to_timed_SE3(args, path_pol, path2D_untimed, max_base_v)
+#    # print(path2D_untimed)
+#    # for pp in pathSE3:
+#    #    print(pp.translation)
+#    # TODO: EVIL AND HAS TO BE REMOVED FROM HERE
+#    if args.visualizer:
+#        robot.visualizer_manager.sendCommand({"path": pathSE3})
+#
+#    J = pin.computeFrameJacobian(robot.model, robot.data, q, robot.ee_frame_id)
+#    # NOTE: obviously not path following but i want to see things working here
+#    SEerror = T_w_e.actInv(pathSE3[-1])
+#    err_vector = pin.log6(SEerror).vector
+#    lb = -1 * robot.model.robot.model.velocityLimit
+#    lb[1] = -0.001
+#    ub = robot.model.robot.model.velocityLimit
+#    ub[1] = 0.001
+#    # vel_cmd = invKinmQP(J, err_vector, lb=lb, ub=ub)
+#    vel_cmd = dampedPseudoinverse(0.002, J, err_vector)
+#    robot.sendVelocityCommand(vel_cmd)
+#
+#    log_item["qs"] = q.reshape((robot.model.nq,))
+#    log_item["dqs"] = robot.getQd().reshape((robot.model.nv,))
+#    # time.sleep(0.01)
+#    return breakFlag, save_past_dict, log_item
+#
+#
+# def cartesianPathFollowingWithPlanner(
+#    args, robot: AbstractRobotManager, path_planner: ProcessManager
+# ):
+#    controlLoop = partial(
+#        cartesianPathFollowingWithPlannerControlLoop, args, robot, path_planner
+#    )
+#    log_item = {"qs": np.zeros(robot.model.nq), "dqs": np.zeros(robot.model.nv)}
+#    save_past_dict = {}
+#    loop_manager = ControlLoopManager(
+#        robot, controlLoop, args, save_past_dict, log_item
+#    )
+#    loop_manager.run()
diff --git a/python/smc/control/controller_templates/path_following_template.py b/python/smc/control/controller_templates/path_following_template.py
index b854f5311f5c816c0bb7679501e3afe04664e3d3..746a159ae7f4a6fbfa517edbce4fc29b64f5b61a 100644
--- a/python/smc/control/controller_templates/path_following_template.py
+++ b/python/smc/control/controller_templates/path_following_template.py
@@ -24,7 +24,7 @@ def PathFollowingFromPlannerControlLoop(
             int,
             dict[str, deque[np.ndarray]],
         ],
-        tuple[np.ndarray, dict[str, np.ndarray]],
+        tuple[np.ndarray, dict[str, np.ndarray], dict[str, np.ndarray]],
     ],
     args: Namespace,
     robot: AbstractRobotManager,
@@ -58,16 +58,13 @@ def PathFollowingFromPlannerControlLoop(
     _, path2D_untimed = data
     path2D_untimed = np.array(path2D_untimed).reshape((-1, 2))
 
-    v_cmd, log_item_inner = control_loop(
+    v_cmd, log_item_inner, past_item_inner = control_loop(
         SOLVER, path2D_untimed, args, robot, t, past_data
     )
     robot.sendVelocityCommand(v_cmd)
     log_item.update(log_item_inner)
+    save_past_item.update(past_item_inner)
 
     log_item["qs"] = robot.q
     log_item["dqs"] = robot.v
-    # NOTE: shouldn't be here, but this temporarily makes my life easier
-    # sorry future guy looking at this
-    # TODO: enable if you want to see base and ee path following ocp in action
-    save_past_item["path2D_untimed"] = p
     return breakFlag, save_past_item, log_item
diff --git a/python/smc/control/optimal_control/croco_mpc_path_following.py b/python/smc/control/optimal_control/croco_mpc_path_following.py
index 5afb4b5021852fe7d0dd599aaf2225a0f4f52a0a..dfab194e89fc2a3fb31284e244f22baa23621f5d 100644
--- a/python/smc/control/optimal_control/croco_mpc_path_following.py
+++ b/python/smc/control/optimal_control/croco_mpc_path_following.py
@@ -12,12 +12,12 @@ from smc.control.optimal_control.path_following_croco_ocp import (
     BaseAndDualArmEEPathFollowingOCP,
 )
 from smc.path_generation.path_math.path2d_to_6d import (
-    path2D_to_SE3_fixed,
+    path2D_to_SE3,
 )
 from smc.path_generation.path_math.cart_pulling_path_math import (
     construct_EE_path,
 )
-from smc.path_generation.path_math.path_to_trajectory import path2D_timed
+from smc.path_generation.path_math.path_to_trajectory import path2D_to_trajectory2D
 from smc.control.controller_templates.path_following_template import (
     PathFollowingFromPlannerControlLoop,
 )
@@ -41,7 +41,7 @@ def CrocoBasePathFollowingFromPlannerMPCControlLoop(
 
     p = robot.T_w_b.translation[:2]
     max_base_v = np.linalg.norm(robot._max_v[:2])
-    path_base = path2D_timed(args, path2D_untimed, max_base_v)
+    path_base = path2D_to_trajectory2D(args, path2D_untimed, max_base_v)
     path_base = np.hstack((path_base, np.zeros((len(path_base), 1))))
 
     if args.visualizer:
@@ -58,6 +58,7 @@ def CrocoBasePathFollowingFromPlannerMPCControlLoop(
     log_item["err_norm_base"] = np.linalg.norm(err_vector_base).reshape((1,))
     return v_cmd, log_item
 
+
 def CrocoBasePathFollowingMPC(
     args: Namespace,
     robot: MobileBaseInterface,
@@ -89,7 +90,7 @@ def CrocoBasePathFollowingMPC(
     log_item = {
         "qs": np.zeros(robot.nq),
         "dqs": np.zeros(robot.nv),
-        "err_norm_base" : np.zeros((1,))
+        "err_norm_base": np.zeros((1,)),
     }
     save_past_item = {}
     loop_manager = ControlLoopManager(
@@ -151,7 +152,7 @@ def CrocoEEPathFollowingFromPlannerMPCControlLoop(
     robot: SingleArmInterface,
     t: int,
     _: dict[str, deque[np.ndarray]],
-) -> tuple[np.ndarray, dict[str, np.ndarray]]:
+) -> tuple[np.ndarray, dict[str, np.ndarray], dict[str, np.ndarray]]:
     """
     CrocoPathFollowingMPCControlLoop
     -----------------------------
@@ -162,12 +163,14 @@ def CrocoEEPathFollowingFromPlannerMPCControlLoop(
     by calling ProcessManager.getData()
     """
     max_base_v = np.linalg.norm(robot._max_v[:2])
-    path2D = path2D_timed(args, path2D_untimed, max_base_v)
+    perc_of_max_v = 0.5
+    velocity = perc_of_max_v * max_base_v
+    trajectory2D = path2D_to_trajectory2D(args, path2D_untimed, velocity)
 
     # create a 3D reference out of the path
-    path_EE_SE3 = path2D_to_SE3_fixed(path2D, args.handlebar_height)
-#    for pose in path_EE_SE3:
-#        print(pose.translation)
+    path_EE_SE3 = path2D_to_SE3(trajectory2D, args.handlebar_height)
+    #    for pose in path_EE_SE3:
+    #        print(pose.translation)
 
     # TODO: EVIL AND HAS TO BE REMOVED FROM HERE
     if args.visualizer:
@@ -182,7 +185,7 @@ def CrocoEEPathFollowingFromPlannerMPCControlLoop(
     err_vector_ee = log6(robot.T_w_e.actInv(path_EE_SE3[0]))
     log_item = {"err_vec_ee": err_vector_ee}
 
-    return v_cmd, log_item
+    return v_cmd, log_item, {}
 
 
 def CrocoEEPathFollowingMPC(
@@ -286,12 +289,12 @@ def BaseAndEEPathFollowingFromPlannerMPCControlLoop(
     robot: SingleArmWholeBodyInterface,
     t: int,
     past_data: dict[str, deque[np.ndarray]],
-) -> tuple[np.ndarray, dict[str, np.ndarray]]:
+) -> tuple[np.ndarray, dict[str, np.ndarray], dict[str, np.ndarray]]:
     p = robot.q[:2]
 
     # NOTE: this one is obtained as the future path from path planner
     max_base_v = np.linalg.norm(robot._max_v[:2])
-    path_base = path2D_timed(args, path2D_untimed_base, max_base_v)
+    path_base = path2D_to_trajectory2D(args, path2D_untimed_base, max_base_v)
     path_base = np.hstack((path_base, np.zeros((len(path_base), 1))))
 
     pathSE3_handlebar = construct_EE_path(args, p, past_data["path2D_untimed"])
@@ -307,7 +310,7 @@ def BaseAndEEPathFollowingFromPlannerMPCControlLoop(
     # SETTING ROTATION IS BROKEN ATM
     # BUT SETTING DISTANCES (TRANSLATIONS) IS TOO.
     # WE DEAL WITH DISTANCES FIRST, UNTIL THAT'S DONE THIS STAYS HERE
-    #for ppp in pathSE3_handlebar:
+    # for ppp in pathSE3_handlebar:
     #    ppp.rotation = robot.T_w_e.rotation
     ###########################################
 
@@ -327,7 +330,8 @@ def BaseAndEEPathFollowingFromPlannerMPCControlLoop(
     log_item["err_vec_ee"] = err_vector_ee
     log_item["err_norm_ee"] = np.linalg.norm(err_vector_ee).reshape((1,))
     log_item["err_norm_base"] = np.linalg.norm(err_vector_base).reshape((1,))
-    return v_cmd, log_item
+    save_past_item = {"path2D_untimed": p}
+    return v_cmd, log_item, save_past_item
 
 
 def initializePastData(
@@ -460,7 +464,7 @@ def BaseAndDualArmEEPathFollowingMPCControlLoop(
     # ideally should be precomputed somewhere
     max_base_v = np.linalg.norm(robot._max_v[:2])
     # base just needs timing on the path
-    path_base = path2D_timed(args, path2D_untimed_base, max_base_v)
+    path_base = path2D_to_trajectory2D(args, path2D_untimed_base, max_base_v)
     # and it's of height 0 (i.e. the height of base's planar joint)
     path_base = np.hstack((path_base, np.zeros((len(path_base), 1))))
 
@@ -512,7 +516,7 @@ def BaseAndDualArmEEPathFollowingMPCControlLoop(
         )
     )
 
-    pathSE3_handlebar = path2D_to_SE3_fixed(path2D_handlebar_1, args.handlebar_height)
+    pathSE3_handlebar = path2D_to_SE3(path2D_handlebar_1, args.handlebar_height)
     pathSE3_handlebar_left = []
     pathSE3_handlebar_right = []
     for pathSE3 in pathSE3_handlebar:
diff --git a/python/smc/path_generation/path_math/cart_pulling_path_math.py b/python/smc/path_generation/path_math/cart_pulling_path_math.py
index c3c1f12443b98898435f96608a19554c81f7a4e2..1a1e54da2a446bc1abb2ac77fe8b294304087859 100644
--- a/python/smc/path_generation/path_math/cart_pulling_path_math.py
+++ b/python/smc/path_generation/path_math/cart_pulling_path_math.py
@@ -1,6 +1,6 @@
-from smc.path_generation.path_math.path_to_trajectory import path2D_timed
+from smc.path_generation.path_math.path_to_trajectory import path2D_to_trajectory2D
 from smc.path_generation.path_math.path2d_to_6d import (
-    path2D_to_SE3_fixed,
+    path2D_to_SE3,
 )
 
 import numpy as np
@@ -123,5 +123,5 @@ def construct_EE_path(
     )
 
     # step (4)
-    ee_SE3trajectory = path2D_to_SE3_fixed(ee_2Dtrajectory, args.handlebar_height)
+    ee_SE3trajectory = path2D_to_SE3(ee_2Dtrajectory, args.handlebar_height)
     return ee_SE3trajectory
diff --git a/python/smc/path_generation/path_math/path2d_to_6d.py b/python/smc/path_generation/path_math/path2d_to_6d.py
index 6b19bb23560b60978170785ec86c55901b50d350..752429325a86aa80631ac172e0d25daf550b2f6b 100644
--- a/python/smc/path_generation/path_math/path2d_to_6d.py
+++ b/python/smc/path_generation/path_math/path2d_to_6d.py
@@ -2,7 +2,7 @@ import pinocchio as pin
 import numpy as np
 
 
-def path2D_to_SE3_fixed(path2D: np.ndarray, path_height: float) -> list[pin.SE3]:
+def path2D_to_SE3(path2D: np.ndarray, path_height: float) -> list[pin.SE3]:
     """
     path2D_SE3
     ----------
@@ -50,70 +50,6 @@ def path2D_to_SE3_fixed(path2D: np.ndarray, path_height: float) -> list[pin.SE3]
     return pathSE3
 
 
-# NOTE: this is a complete hack, but it does not need to be
-def path2D_to_SE3_with_transition_from_current_pose(
-    path2D: np.ndarray, path_height: float, T_w_current: pin.SE3
-) -> list[pin.SE3]:
-    """
-    path2D_SE3
-    ----------
-    we have a 2D path of (N,2) shape as reference
-    the OCP accepts SE3 (it could just rotations or translation too),
-    so this function constructs it from the 2D path.
-    """
-    #########################
-    #  path2D into pathSE2  #
-    #########################
-    # construct theta
-    # since it's a pairwise operation it can't be done on the last point
-    x_i = path2D[:, 0][:-1]  # no last element
-    y_i = path2D[:, 1][:-1]  # no last element
-    x_i_plus_1 = path2D[:, 0][1:]  # no first element
-    y_i_plus_1 = path2D[:, 1][1:]  # no first element
-    x_diff = x_i_plus_1 - x_i
-    y_diff = y_i_plus_1 - y_i
-    # elementwise arctan2
-    thetas = np.arctan2(x_diff, y_diff)
-
-    # there is a height difference, we can't teleport in height
-    height_current = T_w_current.translation[2]
-    height_difference = path_height - height_current
-    height_difference_step = height_difference / len(path2D)
-
-    ######################################
-    #  construct SE3 from SE2 reference  #
-    ######################################
-    # the plan is parallel to the ground because it's a mobile
-    # manipulation task
-    pathSE3 = []
-    for i in range(len(path2D) - 1):
-        # first set the x axis to be in the theta direction
-        # TODO: make sure this one makes sense
-        # should be: z pointing down (our arbitrary choice)
-        # x follows the path tangent
-        # y just completes the frame (in this case orthogonal to path tangent parallel to the floor).
-        # this is good for cart pulling, but could be anything else for anything else:
-        # you can slap any orientation you want on a 2D path.
-
-        # NOTE: for mpc to follow this reference, the path needs to be followable.
-        # this means we need to interpolate between the current thing and the reference
-        rotation = np.array(
-            [
-                [-np.cos(thetas[i]), np.sin(thetas[i]), 0.0],
-                [-np.sin(thetas[i]), -np.cos(thetas[i]), 0.0],
-                [0.0, 0.0, -1.0],
-            ]
-        )
-        # rotation = pin.rpy.rpyToMatrix(0.0, 0.0, thetas[i])
-        # rotation = pin.rpy.rpyToMatrix(np.pi/2, np.pi/2,0.0) @ rotation
-        translation = np.array(
-            [path2D[i][0], path2D[i][1], height_current + i * height_difference_step]
-        )
-        pathSE3.append(pin.SE3(rotation, translation))
-    pathSE3.append(pin.SE3(rotation, translation))
-    return pathSE3
-
-
 # stupid function for stupid data re-assembly
 # def path2D_to_SE2(path2D : list):
 #    path2D = np.array(path2D)
diff --git a/python/smc/path_generation/path_math/path_to_trajectory.py b/python/smc/path_generation/path_math/path_to_trajectory.py
index 7c0686e8e3a73a617da03685a79c75eab66d8547..d92d3065c9d37a87c47a3081cefe3925450f3803 100644
--- a/python/smc/path_generation/path_math/path_to_trajectory.py
+++ b/python/smc/path_generation/path_math/path_to_trajectory.py
@@ -1,98 +1,43 @@
 import numpy as np
+from argparse import Namespace
 
 
-def path2D_timed(args, path2D_untimed, max_base_v):
-    """
-    path2D_timed
-    ---------------------
-    we have a 2D path of (N,2) shape as reference.
-    it times it as this is what the current ocp formulation needs:
-        there should be a timing associated with the path,
-        because defining the cost function as the fit of the rollout to the path
-        is complicated to do software-wise.
-        as it is now, we're pre-selecting points of the path, and associating those
-        with rollout states at a set time step between the states in the rollout.
-        this isn't a problem if we have an idea of how fast the robot can go,
-        which gives us a heuristic of how to select the points (if you went at maximum
-        speed, this is how far you'd go in this time, so this is your point).
-        thankfully this does not need to be exact because we just care about the distance
-        between the current point and the point on the path, so if it's further out
-        that just means the error is larger, and that doesn't necessarily correspond
-        to a different action.
-    NOTE: we are constructing a possibly bullshit
-    trajectory here, it's a man-made heuristic,
-    and we should leave that to the MPC,
-    but that requires too much coding and there is no time rn.
-    the idea is to use compute the tangent of the path,
-    and use that to make a 2D frenet frame.
-    this is the put to some height, making it SE3.
-    i.e. roll and pitch are fixed to be 0,
-    but you could make them some other constant
-    """
-
-    ####################################################
-    #  getting a timed 2D trajectory from a heuristic  #
-    ####################################################
-    # the strategy is somewhat reasonable at least:
-    # assume we're moving at 90% max velocity in the base,
-    # and use that.
-    perc_of_max_v = 0.5
-    base_v = perc_of_max_v * max_base_v
-
-    # 1) the length of the path divided by 0.9 * max_vel
-    #    gives us the total time of the trajectory,
-    #    so we first compute that
-    # easiest possible way to get approximate path length
-    # (yes it should be from the polynomial approximation but that takes time to write)
-    x_i = path2D_untimed[:, 0][:-1]  # no last element
-    y_i = path2D_untimed[:, 1][:-1]  # no last element
-    x_i_plus_1 = path2D_untimed[:, 0][1:]  # no first element
-    y_i_plus_1 = path2D_untimed[:, 1][1:]  # no first element
+def computePath2DLength(path2D):
+    x_i = path2D[:, 0][:-1]  # x of points 0..N-2 (all but the last)
+    y_i = path2D[:, 1][:-1]  # y of points 0..N-2 (all but the last)
+    x_i_plus_1 = path2D[:, 0][1:]  # x of points 1..N-1 (all but the first)
+    y_i_plus_1 = path2D[:, 1][1:]  # y of points 1..N-1 (all but the first)
     x_diff = x_i_plus_1 - x_i
     x_diff = x_diff.reshape((-1, 1))
     y_diff = y_i_plus_1 - y_i
     y_diff = y_diff.reshape((-1, 1))
     path_length = np.sum(np.linalg.norm(np.hstack((x_diff, y_diff)), axis=1))
+    return path_length
+
+
+def path2D_to_trajectory2D(
+    args: Namespace, path2D: np.ndarray, velocity: float
+) -> np.ndarray:
+    """
+    path2D_to_trajectory2D
+    ----------------------
+    resample an (N,2) path at the OCP knot times, assuming constant velocity
+    """
+    path_length = computePath2DLength(path2D)
     # NOTE: sometimes the path planner gives me the same god damn points
     # and that's it. can't do much about, expect set those point then.
     # and i need to return now so that the math doesn't break down the line
     if path_length < 1e-3:
-        return np.ones((args.n_knots + 1, 2)) * path2D_untimed[0]
-    total_time = path_length / base_v
-    # 2) we find the correspondence between s and time
-    # TODO: read from where it should be, but seba checked that it's 5 for some reason
-    # TODO THIS IS N IN PATH PLANNING, MAKE THIS A SHARED ARGUMENT
-    max_s = 5
-    t_to_s = max_s / total_time
-    TODO TODO trajectory for base and the end-effector are timed differently!!! this kill the MPC, and whatever else !!!
-    NOTE that the either this or the ee timing is set up wrong, but in any event they need to be the same
-    # 3) construct the ocp-timed 2D path
-    # TODO MAKE REFERENCE_STEP_SIZE A SHARED ARGUMENT
-    # TODO: we should know max s a priori
-    reference_step_size = 0.5
-    s_vec = np.arange(0, len(path2D_untimed)) / reference_step_size
-
-    path2D = []
-    # time is i * args.ocp_dt
-    for i in range(args.n_knots + 1):
-        # what it should be but gets stuck if we're not yet on path
-        s = (i * args.ocp_dt) * t_to_s
-        # full path
-        # NOTE: this should be wrong, and ocp_dt correct,
-        # but it works better for some reason xd
-        # s = i * (max_s / args.n_knots)
-        path2D.append(
-            np.array(
-                [
-                    np.interp(s, s_vec, path2D_untimed[:, 0]),
-                    np.interp(s, s_vec, path2D_untimed[:, 1]),
-                ]
-            )
-        )
-    path2D = np.array(path2D)
-    return path2D
-
+        return np.ones((args.n_knots + 1, 2)) * path2D[0]
+    total_time = path_length / velocity
+    # NOTE: assumes the path points are uniformly spaced along the path
+    t_path = np.linspace(0.0, total_time, len(path2D))
+    t_ocp = np.linspace(0.0, args.ocp_dt * args.n_knots, args.n_knots + 1)  # knot i at time i*ocp_dt
 
-# TODO: todo
-def path2D_to_timed_SE3():
-    pass
+    trajectory2D = np.array(
+        [
+            np.interp(t_ocp, t_path, path2D[:, 0]),
+            np.interp(t_ocp, t_path, path2D[:, 1]),
+        ]
+    ).T
+    return trajectory2D