diff --git a/Dockerfile b/Dockerfile
index 863126d837adfa2d34b271b082b8304980baa71d..d86042d07d4390221317d6099df115e6b20a01fa 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -42,7 +42,8 @@ WORKDIR /home/student/
 RUN passwd -d student
 USER student
 # copy repo to workdir
-COPY --chown=student . .
+RUN mkdir SimpleManipulatorControl
+COPY --chown=student . ./SimpleManipulatorControl
 RUN mkdir -p .cache/zsh/
 COPY --chown=student /dot_files_for_docker/.vimrc /home/student/
 COPY --chown=student /dot_files_for_docker/.zshrc /home/student/
@@ -50,9 +51,29 @@ COPY --chown=student /dot_files_for_docker/global_extra_conf.py /home/student/
 
 RUN vam install python-jedi && vam install youcompleteme
 
+# sh does not support sourcing
+# and some packages (conda) want shell environment variables 
+# (which i can say a lot about, but can't do anything about)
+# ((the only reason to even use conda is to not have to compile pinocchio))
+SHELL ["/bin/bash", "--login", "-c"]
+#SHELL ["/bin/bash"]
+
 # this is enough to run clik
 WORKDIR /home/student/
-RUN pip install -e ./python/
-RUN pip install pin matplotlib meshcat ur_rtde \
-                qpsolvers ecos
-
+USER student
+RUN pip install -e ./SimpleManipulatorControl/python/
+# TODO: install casadi and pinocchio 3.0+
+# TODO: verify this stuff below works
+# --> this can be done with conda
+RUN mkdir -p ~/miniconda3
+RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O /home/student/miniconda3/miniconda.sh
+RUN bash /home/student/miniconda3/miniconda.sh -b -u -p ~/miniconda3
+RUN rm /home/student/miniconda3/miniconda.sh
+ENV PATH=/home/student/miniconda3/bin:$PATH
+RUN source /home/student/miniconda3/bin/activate
+RUN conda config --add channels conda-forge 
+#RUN conda install --solver=classic conda-forge::conda-libmamba-solver conda-forge::libmamba conda-forge::libmambapy conda-forge::libarchive
+RUN conda install -y casadi 
+RUN conda install -y pinocchio -c conda-forge
+RUN pip install matplotlib meshcat ur_rtde \
+                qpsolvers ecos casadi example_robot_data
diff --git a/README.md b/README.md
index 3773ad268d5b396f0f7a478abeedb83d79894937..ce4a929c3ee78efd00d40bd9cdfbaed1dcd690b1 100644
--- a/README.md
+++ b/README.md
@@ -106,3 +106,4 @@ In the examples directory.
 - write all code in c++, make python bindings with pybind. have common documentation and examples
 - do the documentation with doxygen so that you get webpages from markdown as well
 - create a DMP from multiple trajectories (actually learn a DMP from multiple examples)
+
diff --git a/TODOS_2024_09_19 b/TODOS_2024_09_19
index 669d1df520a9f59104bee11c06487f128aee9d00..6adcdc6b3f42573b9b145752daa77d236fa652a6 100644
--- a/TODOS_2024_09_19
+++ b/TODOS_2024_09_19
@@ -1,55 +1,94 @@
-goal 1: usability, verifiability
+goal 1: starting points for student projects
+---------------------------------------------
+1. PUSHING GROUP: some multiprocessing to get camera outputs 
+   available in the control loop
+   (process puts its result on some queue managed by robot_manager,
+    and this is popped in the control loop or something. 
+    think how to do this and then make a skeleton for how to use this).
+2. JENGA GROUP: create a starting point and example with pinocchio.casadi
+   optimal control (copy something from tutorial, and use the PD controller
+   to track the reference)
+2.1. FIGURE OUT ACTUAL UR5 TORQUE LIMITS TO HAVE FOLLOWABLE TRAJECTORIES FROM OCP
+
+goal 2: challenge
+--------------------------------
+mpc for path following
+------------------------
+1. just solve the ocp to move to a point
+	1.1. start ROS1 in docker, make it print out heron's urdf into a file, 
+         pass that file to pinocchio to construct the robot
+         (does not have to be from the same program, idgaf, just get it to work asap)
+	1.2. formulate the OCP to just go to a point
+DEADLINE: 2024-11-04
+	1.3. prepare wrappers for HAL which will make it runnable on the real thing
+2. include the cart in the model, formulate OCP with that to move to a point
+3. formulate the path following ocp with robot-cart model, put it in a while loop --> that's your mpc.
+   do this on a testing fixed path 
+4. integrate 3. with actual received path from node running albin's planner 
+	DEADLINE: 2024-11-08
+5. run that on real heron, tune until it's not broken and ONLY up to that point.
+6. make a function that implements the get-cart-to-station behaviour tree.
+   this is a control loop that selects what control loop to run based on some 
+   logic flags (smaller number = higher priority). simple select loop of the highest priority.
+   for every flag there is a process (node + topic) which determines whether to set it or unset it.
+	- PRIORITY 1: if obstacle flag, then run stopLoop (which is just make all velocities 0). flag on 
+                  only if there are people around.
+	- PRIORITY 2: if handle_bar flag, run loop to grasp the handle bar. if handle bar is grasped
+                  (vishnu's vision sets this flag) then the flag is unset. if not grasped (gripper
+                   status (maybe) + vision decides) then set the flag.
+	- PRIORITY 3: pull cart along prescribed path - runs MPC path-following loop. flag on until goal is reached
+	- PRIORITY 4: dock cart. flag on until cart is docked. i guess vision should check that but idk honestly.
+                  if this flag is set down, the program is down - shut down.
+	run this on the real thing
+	DEADLINE: 2024-11-15
+7. do all this but on mobile yumi in sim 
+	DEADLINE: 2024-11-23
+8. tune yumi solution
+	DEADLINE: 2024-11-30
+
+goal 3: usability, verifiability
 ----------------------------------
-1. have default arguments, you're adding/adapting new essential ones often, and copy-pasting
-   them around examples is idiotic and has to end immediatelly -> MOSTLY DONE
-2. fix the logging (as in save logs in an automated way + a parameter to check whether you want a new one or name it or sth)
-3. make logs importable to manipulator visualizer or wherever
-   to compare real and simulated runs specifically.
-   currently we genuinely don't know what the difference is, 
-   and we have no way of judging how well references are tracked.
-   this is obviously essential - we're not eyeballing stuff, 
-   we're verifying.
-4. BONUS to 3.: think of some basic metrics to calculate along 
-   trajectories (print out some info on the performance of point-to-point and traj-following
-   runs, including comparing to same simulate runs etc. plots should be 
-   fine to start, but having running rms or something sounds like a good idea).
-   also make sure x-axis are labelled correctly (wall-clock time)
-5. write some tests just too see that:
+1. write some tests just to see that:
 	a) various parameter combinations work
     b) controllers converge in situations they should converge in
     c) most basic unit tests on functions
     d) preferably some of this is runnable on the real robot,
        or at least sim with --start-from-current pose flag
-
-goal 2: clean up the code
----------------------------
-1. write basic tests to see everything works (if they require eyeballs so be it)
-2. fix logging a bit (add a function which combines logs of different runs 
-                      of a 'session' or at least names all logs in a run)
-3. use python's logging instead of printing
-
-goal 3:
-----------
-enforce simulation first, or at least make it easy to simulate a thing 
-STARTING FROM THE CURRENT ROBOT POSITION while you're connected.
-basically "--simulate-from-current-pose" 
-to the make the "simulate every time before you run maxim" not annoying.
+	e) a speedrun of files in examples on the real robot,
+       again just to verify everything works 
+2. it would be nice to have a function to conveniently define points, namely,
+   some way to programmatically (or on keyboard input) save the joint and end-effector
+   positions while manually guiding the robot in freedrive or with compliance control.
+   this obviously won't generate the best possible trajectories according to literally any
+   metric other than convenience of use.
+3. implement a way to simulate interaction with the environment.
+   it will boil down to programmatically putting in wrench readings.
+   it can be just a step to put the wrench at 10N or whatever,
+   which is of course non-physical, but it will be good enough just to
+   verify that the code works. this is easier than putting the robot
+   in a simulator.
+4. use python's logging instead of printing
+5. put the robot in a simulator and use the simulator.
+6. think of some basic metrics to calculate along 
+   trajectories (print out some info on the performance of point-to-point and traj-following
+   runs, including comparing to same simulate runs etc. plots should be 
+   fine to start, but having running rms or something sounds like a good idea).
+   also make sure x-axis are labelled correctly (wall-clock time)
 
 goal 4: more controllers
 ------------------------
-1. finish adding all ik algorithms
-2. crocoddyl optimal control with obstacle avoidance (manipulator itself + table/floor is enough)
-3. [hard] adjusting the dmp to get back on the path despite external forces 
+1. object regrasping with crocoddyl
+2. finish adding all ik algorithms
+3. casadi or crocoddyl optimal control with obstacle avoidance (manipulator itself + table/floor is enough)
+4. [hard] adjusting the dmp to get back on the path despite external forces 
    (fix the problem of writing on a non-flat surface/whatever) --> publishable side-project
 
-goal 5:
--------
-include the workspace somehow
 
-finally, do what you promised and put this on another robot,
-thereby rendering this something publishable in joss
 
 goal 5: panda/yumi
 ----------------
+finally, do what you promised and put this on another robot,
+thereby rendering this something publishable in joss
+
 1. transfer the library to panda or yumi or both
 
diff --git a/dot_files_for_docker/.zshrc b/dot_files_for_docker/.zshrc
index 01c34ba5ea5c31eeaacf1662b3d453539c9fa25a..6ba1e66f79e984363bc76b687502b811b8c99ea2 100644
--- a/dot_files_for_docker/.zshrc
+++ b/dot_files_for_docker/.zshrc
@@ -114,6 +114,19 @@ export SUDO_EDITOR="$VISUAL"
 [ -f "$HOME/.config/shortcutrc" ] && source "$HOME/.config/shortcutrc"
 [ -f "$HOME/.config/aliasrc" ] && source "$HOME/.config/aliasrc"
 
+# >>> conda initialize >>>
+# !! Contents within this block are managed by 'conda init' !!
+__conda_setup="$('/home/student/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)"
+if [ $? -eq 0 ]; then
+    eval "$__conda_setup"
+else
+    if [ -f "/home/student/miniconda3/etc/profile.d/conda.sh" ]; then
+        . "/home/student/miniconda3/etc/profile.d/conda.sh"
+    else
+        export PATH="/home/student/anaconda3/bin:$PATH"
+    fi
+fi
+unset __conda_setup
 
 #export PYTHONPATH=/usr/local/lib/python3.12/site-packages:$PYTHONPATH # Adapt your desired python version here