diff --git a/python/ur_simple_control/basics/basics.py b/python/ur_simple_control/basics/basics.py
index 5323bfb2551b0a63866758a3f4bd50a8c36c8967..444ea98d9c02d295ecbeafa3279dd2f119ea304b 100644
--- a/python/ur_simple_control/basics/basics.py
+++ b/python/ur_simple_control/basics/basics.py
@@ -63,12 +63,55 @@ def moveJP(args, robot, q_desired):
         print("MoveJP done: convergence achieved, reached destionation!")
 
 
-def jointTrajFollowingPIDControlLoop():
-    pass
+def jointTrajFollowingPDControlLoop(robot: RobotManager, reference, i, past_data):
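+    """
+    One tick of joint-space PD tracking along a precomputed reference.
+    'reference' is a dict with keys 'qs', 'vs' and 'dt'; ControlLoopManager
+    calls this once per robot.dt and passes in the iteration index i.
+    """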
+    breakFlag = False
+    save_past_dict = {}
+    log_item = {}
+    q = robot.getQ()
+    v = robot.getQd()
+    # NOTE: assuming we haven't missed a timestep,
+    # which is pretty much true
+    t = i * robot.dt
+    # take the future (next) one
+    t_index = int(np.ceil(t / reference['dt']))
+
+    # stop once we've reached (or run past) the last reference sample,
+    # and clamp the index so we never read past the end of the trajectory
+    if t_index >= len(reference['qs']) - 1:
+        t_index = len(reference['qs']) - 1
+        breakFlag = True
+
+    error_q = reference['qs'][t_index] - q
+    error_v = reference['vs'][t_index] - v
+    Kp = 1.0
+    Kd = 0.1
+
+    #          feedforward                      feedback 
+    v_cmd = reference['vs'][t_index] + Kp * error_q + Kd * error_v
+    robot.sendQd(v_cmd)
+
+    log_item['error_q'] = error_q
+    log_item['error_v'] = error_v
+    log_item['reference_q'] = reference['qs'][t_index]
+    log_item['reference_v'] = reference['vs'][t_index]
 
+    return breakFlag, save_past_dict, log_item
 
-def jointTrajFollowingPID():
-    pass
+def jointTrajFollowingPD(args, robot, reference):
+    # we're not saving any past data (hence the empty dict), but we do log the
+    # tracking errors and the reference so they can be inspected afterwards
+    controlLoop = partial(jointTrajFollowingPDControlLoop, robot, reference)
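+    # ControlLoopManager supplies (i, past_data) on every iteration,
+    # so robot and reference are bound here via partial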
+    log_item = {
+        'error_q' : np.zeros(robot.model.nq),
+        'error_v' : np.zeros(robot.model.nv),
+        'reference_q' : np.zeros(robot.model.nq),
+        'reference_v' : np.zeros(robot.model.nv)
+    }
+    loop_manager = ControlLoopManager(robot, controlLoop, args, {}, log_item)
+    loop_manager.run()
+    if args.debug_prints:
+        print("jointTrajFollowingPD done: reached the end of the reference trajectory!")
 
 
 def moveJPIControlLoop(q_desired, robot : RobotManager, i, past_data):
diff --git a/python/ur_simple_control/optimal_control/optimal_control.py b/python/ur_simple_control/optimal_control/optimal_control.py
index 1f5cce5285a495c08a46d5c7ceb649b021c4fb15..c8daeb8d424934a5f3602a2d0557460245301e49 100644
--- a/python/ur_simple_control/optimal_control/optimal_control.py
+++ b/python/ur_simple_control/optimal_control/optimal_control.py
@@ -1,15 +1,34 @@
 import numpy as np
 import pinocchio as pin
 import crocoddyl
-from ur_simple_control.managers import ControlLoopManager, RobotManager
+from ur_simple_control.managers import getMinimalArgParser, ControlLoopManager, RobotManager
+import argparse
+from ur_simple_control.basics.basics import jointTrajFollowingPD
 
+
+def get_OCP_args(parser : argparse.ArgumentParser):
+    parser.add_argument("--ocp-dt", type=float, default=1e-2, \
+                        help="time length between state-control-pair decision variables")
+    parser.add_argument("--n-knots", type=int, default=100, \
+                        help="number of state-control-pair decision variables")
+    return parser
+
+
+# TODO: incorporate torque and velocity constraints
+#   --> those will correspond to the max_qd and acceleration attributes in RobotManager
 def IKOCP(args, robot : RobotManager, goal : pin.SE3):
+    # starting state
+    x0 = np.concatenate([robot.getQ(), robot.getQd()])
     state = crocoddyl.StateMultibody(robot.model)
     # command input IS torque 
     actuation = crocoddyl.ActuationModelFull(state)
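+    # NOTE: the OCP is solved with torque inputs, but we only use the resulting
+    # state trajectory (q, v); it gets tracked with velocity commands afterwards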
     # we will be summing 3 different costs
     runningCostModel = crocoddyl.CostModelSum(state)
     terminalCostModel = crocoddyl.CostModelSum(state)
     # cost 1) distance to goal -> SE(3) error
     framePlacementResidual = crocoddyl.ResidualModelFramePlacement(
             state,
@@ -18,15 +37,17 @@ def IKOCP(args, robot : RobotManager, goal : pin.SE3):
             robot.model.getFrameId("tool0"),
             goal.copy(),
             state.nv)
+    goalTrackingCost = crocoddyl.CostModelResidual(state, framePlacementResidual)
     # cost 2) u residual (actuator cost)
-    uResidual = crocoddyl.ResidualModelControl(state, nu)
+    uResidual = crocoddyl.ResidualModelControl(state, state.nv)
+    uRegCost = crocoddyl.CostModelResidual(state, uResidual)
     # cost 3) x residual (overall amount of movement)
-    xResidual = crocoddyl.ResidualModelState(state, x0, nu)
-    # now we put in the costs to our functions
-    # these seem fine 
+    xResidual = crocoddyl.ResidualModelState(state, x0, state.nv)
+    xRegCost = crocoddyl.CostModelResidual(state, xResidual)
+    # put these costs into the running costs
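+    # xReg and uReg keep the solution close to x0 and penalize large commands,
+    # so the resulting trajectory doesn't jump around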
     runningCostModel.addCost("gripperPose", goalTrackingCost, 1)
     runningCostModel.addCost("xReg", xRegCost, 1e-1)
     runningCostModel.addCost("uReg", uRegCost, 1e-1)
+    # and add the terminal cost, which is the distance to the goal
+    # NOTE: shouldn't there be a final velocity = 0 here?
     terminalCostModel.addCost("gripperPose", goalTrackingCost, 1e3)
 
     # Next, we need to create an action model for running and terminal knots. The
@@ -44,3 +65,29 @@ def IKOCP(args, robot : RobotManager, goal : pin.SE3):
         ),
         0.0,
     )
+
+    # now we define the problem
+    problem = crocoddyl.ShootingProblem(x0, [runningModel] * args.n_knots, terminalModel)
+    # and the solver
+    solver = crocoddyl.SolverFDDP(problem)
+    # run solve
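+    # (no warm start or iteration limit is given, so crocoddyl's defaults are used)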
+    solver.solve()
+    # get reference (state trajectory)
+    # we aren't using controls because we only have velocity inputs
+    xs = np.array(solver.xs)
+    qs = xs[:, :robot.model.nq]
+    vs = xs[:, robot.model.nq:]
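+    # pack the trajectory in the format jointTrajFollowingPD expects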
+    reference = {'qs' : qs, 'vs' : vs, 'dt' : args.ocp_dt}
+    return reference
+
+if __name__ == "__main__":
+    parser = getMinimalArgParser()
+    parser = get_OCP_args(parser)
+    args = parser.parse_args()
+    robot = RobotManager(args)
+    goal = pin.SE3.Random()
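+    # keep the random goal translation small so it's likely within the workspace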
+    goal.translation = np.random.random(3) * 0.4
+    print("set goal:", goal)
+    reference = IKOCP(args, robot, goal)
+    jointTrajFollowingPD(args, robot, reference)
+    print("achieved result", robot.getT_w_e())