diff --git a/arduino/loadcell/loadcell.ino b/arduino/loadcell/loadcell.ino index ec5d642b7fab59bc06205ed2ce047b2b4b464f19..befc9701aedbfd778d6f09a0a63389b9dedaac01 100644 --- a/arduino/loadcell/loadcell.ino +++ b/arduino/loadcell/loadcell.ino @@ -6,9 +6,11 @@ const int LOADCELL_DOUT_PIN = 2; const int LOADCELL_SCK_PIN = 3; -const float calibration_weight = 500.0; +const float calibration_weight = 4900.0; float zero_reading, load_reading; +const float calibration_factor = -99.52; + HX711 scale; @@ -23,57 +25,31 @@ void setup() { // default "128" (Channel A) is used here. scale.begin(LOADCELL_DOUT_PIN, LOADCELL_SCK_PIN); - //Serial.print("offset: "); - //Serial.print(scale.get_offset(), DEC); - //Serial.print("\t scale: "); - //Serial.println(scale.get_scale(), DEC); - - Serial.println("tare the scale. in 3 sec. Make sure it is unloaded"); - delay(3); - // determine the offset - setup: no load - scale.tare(); // sets the offset value within scale to current value of reading - zero_reading = scale.read_average(10); //average over 10 vales - //scale.set_scale(); - //scale.get_units(10); - - //Serial.print("offset: "); - //Serial.print(scale.get_offset(), DEC); - //Serial.print("\t scale: "); - //Serial.println(scale.get_scale(), DEC); + scale.tare(10); // sets the offset value within scale to current value of reading + zero_reading = scale. //average over 10 vales //delay(2); - Serial.println("mount the known weight to the load cell and press ENTER to proceed with calibration"); - int incomingByte = 0; + //Serial.println("mount the known weight to the load cell and press ENTER to proceed with calibration"); + //int incomingByte = 0; // Calibration of the load cell - while(Serial.available() == 0) { - } - - load_reading = scale.read_average(10); - Serial.print("zero and load reading: "); - Serial.print(zero_reading, DEC); - Serial.print(" "); - Serial.println(load_reading, DEC); - scale.set_scale((load_reading - zero_reading)/calibration_weight); // this value is obtained by calibrating the scale with known weights; see the README for details - incomingByte = Serial.read(); // clear the receive buffer by assigning value to var - - Serial.println("set scale...."); - Serial.print("offset: "); - Serial.print(scale.get_offset(), DEC); - Serial.print("\t scale: "); - Serial.println(scale.get_scale(), DEC); - - Serial.print("get_value: "); - Serial.print(scale.get_value()); - Serial.print("\t get_units: "); - Serial.println(scale.get_units()); + //while(Serial.available() == 0) { + //} + //load_reading = scale.read_average(10); + //scale.set_scale((load_reading - zero_reading)/calibration_weight); // this value is obtained by calibrating the scale with known weights; see the README for details + //incomingByte = Serial.read(); // clear the receive buffer by assigning value to var + + //Serial.print("the calibration factor is: "); + //Serial.println(load_reading - zero_reading/calibration_weight); + + scale.set_scale(calibration_factor); - Serial.println("load cell is setup. Start reading now..."); + //Serial.println("load cell is setup. 
Start reading now..."); delay(1); // delay that python code sees the correct starting byte } @@ -82,6 +58,6 @@ void loop() { Serial.println(scale.get_units()); //scale.power_down(); // put the ADC in sleep mode - //delay(5); + delay(1/80); //scale.power_up(); } diff --git a/c++/CMakeLists.txt b/c++/CMakeLists.txt index 60869f8afefee7e8529c311e3b3db423af4e4b8c..37cd050b08162f4bb4eb5495e7c62d60cdb7350e 100644 --- a/c++/CMakeLists.txt +++ b/c++/CMakeLists.txt @@ -14,9 +14,9 @@ find_package (broccoli 3.0.0 COMPONENTS eigen REQUIRED) find_package(RL COMPONENTS MDL REQUIRED) # dependencies for gpm function -add_library(gpm SHARED src/gpm.cpp) +add_library(my_funcs SHARED src/yumi.cpp) target_link_libraries ( - gpm + my_funcs ${RL_LIBRARIES} eat::broccoli Eigen3::Eigen @@ -27,12 +27,12 @@ add_subdirectory(deps/pybind11) # include pybind11 as a submodule pybind11_add_module(invKin src/py2gpmbind.cpp) target_link_libraries( invKin PUBLIC - gpm + my_funcs ${RL_LIBRARIES} eat::broccoli ) -add_executable(myMain src/main.cpp src/gpm.cpp) +add_executable(myMain src/main.cpp src/yumi.cpp) target_link_libraries( myMain eat::broccoli diff --git a/c++/include/gpm.hpp b/c++/include/gpm.hpp deleted file mode 100644 index cbe1e7a0fb1013a5677bac9632ba4c1a1550cede..0000000000000000000000000000000000000000 --- a/c++/include/gpm.hpp +++ /dev/null @@ -1,5 +0,0 @@ -#include <Eigen/Eigen> - - -std::pair<Eigen::Matrix<double, 7, 1>, Eigen::Matrix<double, 6, 1>> gpm(Eigen::Matrix<double, 6, 1> &desPose, Eigen::Matrix<double, 6, 1> &desVelocity, -Eigen::Matrix<double, 7, 1> &jointAngles, Eigen::Matrix<double, 7, 1> &jointVelocity, const bool left_arm); \ No newline at end of file diff --git a/c++/include/yumi.hpp b/c++/include/yumi.hpp new file mode 100644 index 0000000000000000000000000000000000000000..7a48c70fe01e307e68e81e612aa9458ebe1129cb --- /dev/null +++ b/c++/include/yumi.hpp @@ -0,0 +1,102 @@ +#include <iostream> + +#include <Eigen/Eigen> + +#include <broccoli/control/kinematics/ComfortPoseGradient.hpp> +#include <broccoli/core/math.hpp> + +#include <rl/math/Transform.h> +#include <rl/mdl/Kinematic.h> +#include <rl/mdl/Model.h> +#include <rl/mdl/UrdfFactory.h> + + + +class Yumi { + public: + + //functions + Yumi(std::string path); + + void set_jointValues(Eigen::Matrix<double, 7, 1> &jointAngles, Eigen::Matrix<double, 7, 1> &jointVelocity); + void set_desPoseVel(Eigen::Matrix<double, 6, 1> &desPose, Eigen::Matrix<double, 6, 1> &desVelocity); + void set_driftCompGain(double gain); + void set_sampleTime(double sampleTime); + void set_force(double force); + void set_kp(double kp); + void set_operationPoint(double op); + void set_hybridControl(bool hybridControl); + void set_transitionTime(double transitionTime); + + void process(); + + Eigen::Matrix<double, 7, 1> get_newJointValues(); + Eigen::Matrix<double, 6, 1> get_pose(); + double get_manipulabilityMeasure(); + + // for debugging + void print_pose(); + + private: + // vars for rl library + std::shared_ptr<rl::mdl::Model> m_model; + + // flag for hybrid control + bool m_hybridControl = false; + + // vars for tuning + double m_driftCompGain = 1.0; + double m_sampleTime = 0.0125; + double m_activationFactorTaskSpace = 1.0; + + // var saving force + double m_force = 0; + double m_forceOP = 0; // operation around operating point of 2 newton + + // gain for control + double m_kp = 1.0; + + // time for transition between position control to force control in the wires axis + double m_transitionTime = 0.0; + double m_deltaTime = 0.0; + + // manipulability meassure + 
double m_manipulabilty; + + // vars to store configuration + rl::math::Vector3 m_desPosition = rl::math::Vector3::Zero(); + rl::math::Vector3 m_desPositionDot = rl::math::Vector3::Zero(); + rl::math::Vector3 m_desOrientation = rl::math::Vector3::Zero(); + rl::math::Vector3 m_desOrientationDot = rl::math::Vector3::Zero(); + Eigen::Matrix<double, 3, 3> m_rotationMatrix = Eigen::Matrix<double, 3, 3>::Zero(); + + Eigen::Matrix<double, 7, 1> m_jointAngles = Eigen::Matrix<double, 7, 1>::Zero(); + Eigen::Matrix<double, 7, 1> m_jointVelocity = Eigen::Matrix<double, 7, 1>::Zero(); + + rl::math::Vector3 m_position = rl::math::Vector3::Zero(); + rl::math::Vector3 m_orientation = rl::math::Vector3::Zero(); //Euler ZYX convention + + Eigen::Matrix<double, 6, 7> m_jacobian = Eigen::Matrix<double, 6, 7>::Zero(); + + Eigen::Matrix<double, 6, 1> m_effectiveTaskSpaceInput = Eigen::Matrix<double, 6, 1>::Zero(); + Eigen::Matrix<double, 3, 1> m_forceTaskSpaceInput = Eigen::Matrix<double, 3, 1>::Zero(); // only apply translational changes + + Eigen::Matrix<double, 7, 7> m_inverseWeighing = Eigen::Matrix<double, 7, 7>::Identity(); + Eigen::Matrix<double, 7, 1> m_nullSpaceGradient = Eigen::Matrix<double, 7, 1>::Zero(); + + Eigen::Matrix<double, 7, 1> m_jointAnglesDelta; + + Eigen::Matrix3d m_selectVelMatrix = Eigen::Matrix3d::Identity(); + Eigen::Matrix3d m_modSelectVelMatrix = Eigen::Matrix3d::Identity(); + + // private functions + void doForwardKinematics(); + Eigen::Matrix3d euler2rotMatrix(rl::math::Vector3 orientation); + void compTaskSpaceInput(); + void compForce2VelocityController(); + void compIK(); + void modifySelectionMatrix(); + + + + }; \ No newline at end of file diff --git a/c++/src/gpm.cpp b/c++/src/gpm.cpp deleted file mode 100644 index 514920e97314256fe235ccd891fc32edeece71d7..0000000000000000000000000000000000000000 --- a/c++/src/gpm.cpp +++ /dev/null @@ -1,187 +0,0 @@ -#include <iostream> -#include <string> - -#include <broccoli/control/kinematics/ComfortPoseGradient.hpp> -#include <broccoli/core/math.hpp> - -#include <rl/math/Transform.h> -#include <rl/mdl/Kinematic.h> -#include <rl/mdl/Model.h> -#include <rl/mdl/UrdfFactory.h> - - -std::pair<Eigen::Matrix<double, 7, 1>, Eigen::Matrix<double, 6, 1>> gpm(Eigen::Matrix<double, 6, 1> &desPose, Eigen::Matrix<double, 6, 1> &desVelocity, -Eigen::Matrix<double, 7, 1> &jointAngles, Eigen::Matrix<double, 7, 1> &jointVelocity, const bool left_arm) { - - // FORWARD KINEMATICS - rl::mdl::UrdfFactory factory; - std::string path2urdf; - - if(left_arm){ - path2urdf = "/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf"; - } else{ - path2urdf = "/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf"; - } - - std::shared_ptr<rl::mdl::Model> model(factory.create(path2urdf)); - - rl::mdl::Kinematic* kinematic = dynamic_cast<rl::mdl::Kinematic*>(model.get()); - - // forward kinematics for the right arm - kinematic->setPosition(jointAngles); - kinematic->forwardPosition(); - kinematic->calculateJacobian(); - - Eigen::Matrix<double, 6, 7> J; - // copy entries from RL jacobian to Eigen::jacobian in order to use it in brocolli function - for (int i = 0; i < 7; i++){ - for (int j = 0; j < 6; j++){ - J(j, i) = kinematic->getJacobian()(j, i); - } - } - - // check if matrices are the same - //std::cout << "RLJacobian \n" << kinematic->getJacobian() << std::endl; - //std::cout << "myJacobian \n" << J << std::endl; - //std::cout << "Manipulability meassure \n" << kinematic->calculateManipulabilityMeasure() << 
std::endl; - - // extract orientation and position for the right arm - rl::math::Transform t = kinematic->getOperationalPosition(0); - rl::math::Vector3 position = t.translation(); - rl::math::Vector3 orientation = t.rotation().eulerAngles(2, 1, 0).reverse(); - std::cout << "Joint configuration in degrees: " << jointAngles.transpose() * rl::math::RAD2DEG << std::endl; - std::cout << "FK end-effector position: [m] " << position.transpose() << " orientation [deg] " << orientation.transpose() * rl::math::RAD2DEG << std::endl; - - - // INVERSE KINEMATICS - // compute translation and orientation error - Eigen::Matrix3d desOrientation; - // go from Euler ZYX to rotation matrix - desOrientation = Eigen::AngleAxisd(desPose(5), Eigen::Vector3d::UnitZ()) - *Eigen::AngleAxisd(desPose(4), Eigen::Vector3d::UnitY()) - *Eigen::AngleAxisd(desPose(3), Eigen::Vector3d::UnitX()); - - //std::cout << "reverse euler angles" << desOrientation.eulerAngles(2, 1, 0).reverse(); - - // check if these two rotation matrices match! - //std::cout << "desOrientation \n" << desOrientation << std::endl; - //std::cout << "RL Orientation \n" << t.rotation() << std::endl; - - //Eigen::Vector3d einheitsvektor; - //einheitsvektor << 1.0, 0.0, 0.0; - //std::cout << "desOrientation x einheitsvektor \n" << desOrientation* einheitsvektor << std::endl; - //std::cout << "RL Orientation x einheitsvektor\n" << t.rotation()*einheitsvektor << std::endl; - - - // define Quaternion with coefficients in the order [x, y, z, w] - Eigen::Vector3d desiredTranslation = desPose.head(3); - //std::cout << "desiredTranslation" << desiredTranslation << std::endl; - Eigen::Quaterniond desiredOrientation(desOrientation); - - // set the current positions - Eigen::Matrix<double, 3, 1> currentTranslation; - currentTranslation << position.x(), position.y(), position.z(); - Eigen::Quaterniond currentOrientation(t.rotation()); - - // chose the correct quaternion such that the distance between desired and current - // quaternion is the shortest - if (desiredOrientation.dot(currentOrientation) < 0.0) { - currentOrientation.coeffs() *= -1.0; - } - // calculate delta between quaternions - Eigen::Quaterniond errorQuaternion = currentOrientation.inverse() * desiredOrientation; - Eigen::Vector3d errorRotationInWorldFrame = currentOrientation * errorQuaternion.vec(); - - // compute task space velocity with drift compensation - const double gainDriftCompensation = 0.1; - const double dt = 0.0125; // refers to 80 Hz - Eigen::Matrix<double, 6, 1> effectiveTaskSpaceInput = Eigen::Matrix<double, 6, 1>::Zero(); - effectiveTaskSpaceInput.head(3) = gainDriftCompensation/dt * (desiredTranslation - currentTranslation) - + desVelocity.head(3); - effectiveTaskSpaceInput.tail(3) = gainDriftCompensation/dt * errorRotationInWorldFrame + desVelocity.tail(3); - std::cout << "effectiveTaskSpaceInput: " << effectiveTaskSpaceInput << std::endl; - - - // COMPUTE CPG GRADIENT - // define min and max values for the joints of Yumi - Eigen::Matrix< double, 7, 1> q_min; - Eigen::Matrix< double, 7, 1> q_max; - q_min << -168.5, -143.5, -168.5, -123.5, -290, -88, -229; - q_min *= rl::math::DEG2RAD; - q_max << 168.5, 43.5, 168.5, 80, 290, 138, 229; - q_max *= rl::math::DEG2RAD; - // create CompfortPoseGradient object - broccoli::control::ComfortPoseGradient<7> cpg; - // compute CPG gradient - cpg.useRangeBasedScaling(q_min, q_max); - //cpg.setWeight() by default it is 1 - - Eigen::Matrix<double, 7, 1> cpgGradient = Eigen::Matrix<double, 7, 1>::Zero(); - cpgGradient = cpg.process(jointAngles, 
jointVelocity); // if gradient is zero then the ASC is just a resolved motion ik method - - // COMPUTE MANIPULABILTY GRADIENT - // compute Jacobian derivative - code snippet from Jonas Wittmann - std::array<Eigen::Matrix<double, 6, 7>, 7> dJ; // NOLINT - for (auto& matrix : dJ) { - matrix = Eigen::Matrix<double, 6, 7>::Zero(); - } - Eigen::Matrix<double, 3, 7> transJ = Eigen::Matrix<double, 3, 7>::Zero(); - transJ = J.block<3, 7>(0, 0); - Eigen::Matrix<double, 3, 7> rotJ = Eigen::Matrix<double, 3, 7>::Zero(); - rotJ = J.block<3, 7>(3, 0); - const int numOfJoints = 7; - for (int jj = 0; jj < numOfJoints; ++jj) { - for (int ii = jj; ii < numOfJoints; ++ii) { - dJ.at(jj).block<3, 1>(0, ii) = rotJ.col(jj).cross(transJ.col(ii)); - dJ.at(jj).block<3, 1>(3, ii) = rotJ.col(jj).cross(rotJ.col(ii)); - if (ii != jj) { - dJ.at(ii).block<3, 1>(0, jj) = dJ.at(jj).block<3, 1>(0, ii); - } - } - } - - Eigen::Matrix<double, 7, 1> manipGradient = Eigen::Matrix<double, 7, 1>::Zero(); - double cost = sqrt((J * J.transpose()).determinant()); - // Compute the manipulability gradient. - for (int jj = 0; jj < 7; ++jj) { - manipGradient[jj] = cost * ((J * J.transpose()).inverse() * dJ.at(jj) * J.transpose()).trace(); - } - // add both gradients - Eigen::Matrix<double, 7, 1> nullSpaceGradient = Eigen::Matrix<double, 7, 1>::Zero(); - nullSpaceGradient = 0*manipGradient + 0*cpgGradient; - //std::cout << "gradient \n" << nullSpaceGradient << std::endl; - - - // ASC desired effective velocity does not work -> implement myself - Eigen::Matrix<double, 7, 7> m_inverseWeighing = Eigen::Matrix<double, 7, 7> ::Identity(); - double m_activationFactorTaskSpace = 1.0; - Eigen::Matrix<double, 7, 1> nullSpaceVelocity = -m_inverseWeighing * nullSpaceGradient; - Eigen::Matrix<double, 7, 1> jointVelocities; - jointVelocities = broccoli::core::math::solvePseudoInverseEquation(J, m_inverseWeighing, effectiveTaskSpaceInput, nullSpaceVelocity, m_activationFactorTaskSpace); - std::cout << "resulting jointVelocities: \n" << jointVelocities << std::endl; - - // perform integration over one timestep to obtain positions that can be send to robot - Eigen::Matrix<double, 7, 1> jointAnglesDelta; - jointAnglesDelta << jointVelocities * dt; - - //std::cout << "current qs in DEG \n" << jointAngles* rl::math::RAD2DEG << std::endl; - //std::cout << "delta qs in DEG \n" << jointAnglesDelta * rl::math::RAD2DEG << std::endl; - //std::cout << "next qs in DEG \n" << (jointAngles+jointAnglesDelta)* rl::math::RAD2DEG << std::endl; - - // forward kinematics with the new joints values from IK - kinematic->setPosition(jointAngles+jointAnglesDelta); - kinematic->forwardPosition(); - - rl::math::Transform dest = kinematic->getOperationalPosition(0); - rl::math::Vector3 dposition = dest.translation(); - rl::math::Vector3 dorientation = dest.rotation().eulerAngles(2, 1, 0).reverse(); - std::cout << "IK joint configuration in degrees: " << (jointAngles+jointAnglesDelta).transpose() * rl::math::RAD2DEG << std::endl; - std::cout << "IK end-effector position: [m] " << dposition.transpose() << " orientation [deg] " << dorientation.transpose() * rl::math::RAD2DEG << std::endl; - - Eigen::Matrix<double, 6, 1> resPose; - resPose << dposition.transpose()(0), dposition.transpose()(1), dposition.transpose()(2), dorientation.transpose()(0), dorientation.transpose()(1), dorientation.transpose()(2); - - // return desired joint angles for the next step and pose for computing the IK accuracy - return std::make_pair((jointAngles+jointAnglesDelta), resPose); -} - diff --git 
a/c++/src/main.cpp b/c++/src/main.cpp index 2d933614437c37e571a509d248b28331e75860e7..0587fe14c331c93897b71411ff9bafbda062165c 100644 --- a/c++/src/main.cpp +++ b/c++/src/main.cpp @@ -1,13 +1,53 @@ #include <iostream> -#include "gpm.hpp" +#include "yumi.hpp" #include <Eigen/Eigen> #include <rl/math/Unit.h> +#include <rl/mdl/Kinematic.h> +#include <rl/mdl/Model.h> +#include <rl/mdl/UrdfFactory.h> int main(int, char**) { - enum yumi_arm{YUMI_RIGHT, YUMI_LEFT}; + std::string path2yumi_l = "/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf"; + Yumi yumi_l(path2yumi_l); + + Eigen::Matrix<double, 7, 1> jointAngles; + jointAngles << 90.48, 17.87, -25.0, 48.0, -137.0, 122.0, -74.21; + jointAngles *= rl::math::DEG2RAD; + + Eigen::Matrix<double, 7, 1> jointVelocities; + jointVelocities << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0; + + yumi_l.set_jointValues(jointAngles, jointVelocities); + + + // Desired Values + Eigen::Matrix<double, 6, 1> desPose; + desPose << 0.300, 0.200, 0.200, 0.0*rl::math::DEG2RAD, 0.0*rl::math::DEG2RAD, 0.0*rl::math::DEG2RAD; // obtained from RS with stated joint values + Eigen::Matrix<double, 6, 1> desVelocity; + desVelocity << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0; + + yumi_l.set_desPoseVel(desPose, desVelocity); + + yumi_l.process(); + + + Eigen::Matrix<double, 7, 1> newJointValues; + newJointValues = yumi_l.get_newJointValues(); + + std::cout << "old joint values: " << jointAngles << std::endl; + std::cout << "new joint values: " << newJointValues << std::endl; + + + + //enum yumi_arm{YUMI_RIGHT, YUMI_LEFT}; + + +/* rl::mdl::UrdfFactory factory; + std::shared_ptr<rl::mdl::Model> model(factory.create("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf")); + rl::mdl::Kinematic* kinematic = dynamic_cast<rl::mdl::Kinematic*>(model.get()); // Is Values Eigen::Matrix<double, 6, 1> actualPosition; @@ -27,10 +67,10 @@ int main(int, char**) { desVelocity << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0; std::pair<Eigen::Matrix<double, 7, 1>, Eigen::Matrix<double, 6, 1>> result; - result = gpm(desPose, desVelocity, jointAngles, jointVelocity, YUMI_RIGHT); + result = gpm(desPose, desVelocity, jointAngles, jointVelocity, kinematic); std::cout << "desired joint values: \n" << result.first << std::endl; - std::cout << "current pose: \n" << result.second << std::endl; + std::cout << "current pose: \n" << result.second << std::endl; */ return 0; -} \ No newline at end of file +} diff --git a/c++/src/py2gpmbind.cpp b/c++/src/py2gpmbind.cpp index 0cee6f9f5f3399fe6bd4a5e0cd5267efca2e6fd2..08efd535b5a5ec071ebe0d488acb7b8ee5221fe9 100644 --- a/c++/src/py2gpmbind.cpp +++ b/c++/src/py2gpmbind.cpp @@ -1,17 +1,27 @@ #include <pybind11/pybind11.h> #include <pybind11/eigen.h> -#include "gpm.hpp" +#include "yumi.hpp" +#include <rl/mdl/UrdfFactory.h> namespace py = pybind11; -int add(int i, int j) { - return i + j; -} PYBIND11_MODULE(invKin, m) { m.doc() = "pybind11 binding to C++ function that computes IK based on GPM"; // optional module docstring - m.def("gpm", &gpm, "A function to compute the invserse kinematics for a 7 DOF serial manipulator on vecocity level with comfort pose and manipulabilty gradient", - py::arg("desired pose"),py::arg("desired Velocities"), py::arg("joint angles"), py::arg("joint velocities"), py::arg("left arm -> 1, right arm -> 0"), py::return_value_policy::copy); - + + py::class_<Yumi>(m, "Yumi") + .def(py::init<std::string>()) + .def("set_jointValues", &Yumi::set_jointValues, py::arg("joint angles"), py::arg("joint velocities")) + 
.def("set_desPoseVel", &Yumi::set_desPoseVel, py::arg("desired pose"), py::arg("desired velocities")) + .def("process", &Yumi::process) + .def("get_newJointValues", &Yumi::get_newJointValues, py::return_value_policy::copy) + .def("get_pose", &Yumi::get_pose, py::return_value_policy::copy) + .def("printPose", &Yumi::print_pose) + .def("set_kp", &Yumi::set_kp) + .def("set_operationPoint", &Yumi::set_operationPoint) + .def("set_hybridControl", &Yumi::set_hybridControl) + .def("set_transitionTime", &Yumi::set_transitionTime) + .def("get_manipulabilityMeasure", &Yumi::get_manipulabilityMeasure) + .def("set_force", &Yumi::set_force); } \ No newline at end of file diff --git a/c++/src/yumi.cpp b/c++/src/yumi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cf7399c7019dd3b5c29303d15b1b67529971fe9d --- /dev/null +++ b/c++/src/yumi.cpp @@ -0,0 +1,183 @@ +#include <iostream> + +#include <broccoli/control/kinematics/ComfortPoseGradient.hpp> +#include <broccoli/core/math.hpp> + +#include <rl/math/Transform.h> +#include <rl/mdl/Kinematic.h> +#include <rl/mdl/Model.h> +#include <rl/mdl/UrdfFactory.h> + +#include <yumi.hpp> + +// constructor +Yumi::Yumi(std::string path){ + rl::mdl::UrdfFactory factory; + m_model = (std::shared_ptr<rl::mdl::Model>)(factory.create(path)); + //m_modelObj= *m_model; +} + +void Yumi::set_jointValues(Eigen::Matrix<double, 7, 1> &jointAngles, Eigen::Matrix<double, 7, 1> &jointVelocity){ + m_jointAngles = jointAngles; + m_jointVelocity = jointVelocity; +} + +void Yumi::doForwardKinematics(){ + rl::mdl::Kinematic* kinematic = dynamic_cast<rl::mdl::Kinematic*>(m_model.get()); + kinematic->setPosition(m_jointAngles); + kinematic->forwardPosition(); + kinematic->calculateJacobian(); + rl::math::Transform t = kinematic->getOperationalPosition(0); + m_position = t.translation(); + m_orientation = t.rotation().eulerAngles(2, 1, 0).reverse(); + m_rotationMatrix = t.rotation(); // rotation from world frame to ee frame + m_jacobian = kinematic->getJacobian(); + m_manipulabilty = kinematic->calculateManipulabilityMeasure(); +} + +void Yumi::print_pose(){ + doForwardKinematics(); + std::cout << "pose " << m_position << "\n" << m_orientation << std::endl; +} + +void Yumi::set_driftCompGain(double gain){ + m_driftCompGain = gain; +} + +void Yumi::set_sampleTime(double sampleTime){ + m_sampleTime = sampleTime; +} + +void Yumi::set_desPoseVel(Eigen::Matrix<double, 6, 1> &desPose, Eigen::Matrix<double, 6, 1> &desVelocity){ + + m_desPosition = desPose.head(3); + m_desOrientation = desPose.tail(3); + m_desPositionDot = desVelocity.head(3); + m_desOrientationDot = desVelocity.tail(3); +} + +Eigen::Matrix3d Yumi::euler2rotMatrix(rl::math::Vector3 eulerAngles){ + Eigen::Matrix3d desOrientation; + // go from Euler ZYX to rotation matrix + desOrientation = Eigen::AngleAxisd(eulerAngles(2), Eigen::Vector3d::UnitZ()) + *Eigen::AngleAxisd(eulerAngles(1), Eigen::Vector3d::UnitY()) + *Eigen::AngleAxisd(eulerAngles(0), Eigen::Vector3d::UnitX()); + return desOrientation; +} + +void Yumi::compTaskSpaceInput(){ + Eigen::Quaterniond desiredOrientation(euler2rotMatrix(m_desOrientation)); + Eigen::Quaterniond currentOrientation(euler2rotMatrix(m_orientation)); + + // chose the correct quaternion such that the distance between desired and current + // quaternion is the shortest + if (desiredOrientation.dot(currentOrientation) < 0.0) { + currentOrientation.coeffs() *= -1.0; + } + // calculate delta between quaternions + Eigen::Quaterniond errorQuaternion = currentOrientation.inverse() * 
desiredOrientation; + Eigen::Vector3d errorRotationInWorldFrame = currentOrientation * errorQuaternion.vec(); + + m_effectiveTaskSpaceInput.head(3) = m_modSelectVelMatrix * (m_driftCompGain/m_sampleTime * (m_desPosition - m_position) + m_desPositionDot) + + m_forceTaskSpaceInput; + m_effectiveTaskSpaceInput.tail(3) = m_driftCompGain/m_sampleTime * errorRotationInWorldFrame + m_desOrientationDot; + +} + +void Yumi::process(){ + + doForwardKinematics(); + modifySelectionMatrix(); + compForce2VelocityController(); + compTaskSpaceInput(); + compIK(); +} + +Eigen::Matrix<double, 7, 1> Yumi::get_newJointValues(){ + return m_jointAngles; +} + +Eigen::Matrix<double, 6, 1> Yumi::get_pose(){ + doForwardKinematics(); + Eigen::Matrix<double, 6, 1> pose; + pose << m_position, m_orientation; + return pose; +} + +void Yumi::set_force(double force){ + if (m_hybridControl){ + m_force = force; + } +} + +void Yumi::compForce2VelocityController(){ + Eigen::Vector3d velocityEE; + // for a postive force outcome the ee of the right arm should move in postive y direction, + // have a look at chosen ee frame in RS + velocityEE << 0, m_force-m_forceOP, 0; + velocityEE *= m_kp; + velocityEE = (Eigen::Matrix3d::Identity() - m_modSelectVelMatrix) * velocityEE; // perform blending - transition from position control to force control + std::cout << "selectionmatrix force: " << (Eigen::Matrix3d::Identity() - m_modSelectVelMatrix) << std::endl; + std::cout << "selectionmatrix velocity: " << m_modSelectVelMatrix << std::endl; + std::cout << "velocityEE force: " << velocityEE << std::endl; + // transform the velocities computed in the ee frame to the world frame + m_forceTaskSpaceInput = m_rotationMatrix.transpose() * velocityEE; +} + +void Yumi::compIK(){ + Eigen::Matrix<double, 7, 1> jointVelocities; + Eigen::Matrix<double, 7, 1> nullSpaceVelocity = -m_inverseWeighing * m_nullSpaceGradient; + + jointVelocities = broccoli::core::math::solvePseudoInverseEquation(m_jacobian, m_inverseWeighing, m_effectiveTaskSpaceInput, + nullSpaceVelocity, m_activationFactorTaskSpace); + + m_jointAnglesDelta << jointVelocities * m_sampleTime; + + m_jointAngles += m_jointAnglesDelta; +} + +void Yumi::set_kp(double kp){ + m_kp = kp; +} + +void Yumi::set_operationPoint(double op){ + m_forceOP = op; +} + +void Yumi::set_hybridControl(bool hybridControl){ + if (hybridControl != m_hybridControl){ + m_hybridControl = hybridControl; + if (hybridControl){ + m_selectVelMatrix << 1, 0, 0, // y direction is force controlled in this case + 0, 0, 0, + 0, 0, 1; + + } else { // position control + m_selectVelMatrix = Eigen::Matrix3d::Identity(); + m_deltaTime = 0.0; // reset variable to apply blending when changing to hybrid control again + } + } + m_hybridControl = hybridControl; +} + +void Yumi::set_transitionTime(double transitionTime){ + m_transitionTime = transitionTime; +} + +void Yumi::modifySelectionMatrix(){ + Eigen::Matrix3d blendingMatrix; + if(m_deltaTime < m_transitionTime){ + m_deltaTime += m_sampleTime; + blendingMatrix << 0, 0, 0, + 0, (1 - m_deltaTime/m_transitionTime), 0, + 0, 0, 0; + } + else { + blendingMatrix = Eigen::Matrix3d::Zero(); + } + m_modSelectVelMatrix = m_selectVelMatrix + blendingMatrix; +} + +double Yumi::get_manipulabilityMeasure(){ + return m_manipulabilty; +} \ No newline at end of file diff --git a/cad/adapter_right_v2.3mf b/cad/adapter_right_v2.3mf new file mode 100644 index 0000000000000000000000000000000000000000..7b58fb0819d045ced74dcfd3fcf78d6d7c961d80 Binary files /dev/null and b/cad/adapter_right_v2.3mf differ 
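For reference, a minimal numpy sketch of the hybrid force/position blending that Yumi::modifySelectionMatrix(), Yumi::compForce2VelocityController() and Yumi::compTaskSpaceInput() implement in yumi.cpp above: while the elapsed time is below the transition time, the velocity-control weight on the force-controlled y axis decays linearly, and the force-to-velocity term kp * (force - forceOP) is weighted by the complementary matrix. The function and variable names below are illustrative only; the numeric values mirror the defaults in yumi.hpp and the gains set in the Python scripts further down. This is a sketch, not the definitive implementation.

import numpy as np

# illustrative parameters (defaults from yumi.hpp / gains set in the control scripts)
kp = 0.05              # force-to-velocity gain, set_kp()
force_op = 5.0         # force operating point [N], set_operationPoint()
dt = 0.0125            # sample time for 80 Hz, m_sampleTime
transition_time = 3.0  # blending duration [s], set_transitionTime()

# hybrid mode: the y axis of the end-effector frame is force controlled
S_vel = np.diag([1.0, 0.0, 1.0])           # m_selectVelMatrix

def blended_matrices(t_elapsed):
    # as in modifySelectionMatrix(): linear fade-out of position control on the force axis
    if t_elapsed < transition_time:
        blend = np.diag([0.0, 1.0 - t_elapsed / transition_time, 0.0])
    else:
        blend = np.zeros((3, 3))
    S_mod = S_vel + blend                  # m_modSelectVelMatrix
    return S_mod, np.eye(3) - S_mod        # weights for position control and force control

def translational_task_space_input(t_elapsed, force, pos_err, des_vel, drift_gain=1.0):
    # as in compForce2VelocityController() plus the head(3) part of compTaskSpaceInput();
    # the C++ code additionally rotates the force term from the ee frame into the world frame
    S_mod, S_force = blended_matrices(t_elapsed)
    v_force_ee = S_force @ np.array([0.0, kp * (force - force_op), 0.0])
    return S_mod @ (drift_gain / dt * pos_err + des_vel) + v_force_ee

# y axis fully position controlled at the start of the transition, fully force controlled at the end
print(blended_matrices(0.0)[0])               # diag(1, 1, 1)
print(blended_matrices(transition_time)[0])   # diag(1, 0, 1)

The point of the fade is to avoid a velocity jump on the wire axis at the moment set_hybridControl(True) switches that axis from position control to force control; m_deltaTime is reset when hybrid control is disabled so the blend is applied again on the next activation.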
diff --git a/cad/adapter_right_v2.stl b/cad/adapter_right_v2.stl new file mode 100644 index 0000000000000000000000000000000000000000..5fd21cd92e3a1725f8fcc0ed92edec6ddef1a14c Binary files /dev/null and b/cad/adapter_right_v2.stl differ diff --git a/cad/adpater_right_v2.FCStd b/cad/adpater_right_v2.FCStd new file mode 100644 index 0000000000000000000000000000000000000000..055d3063ca735077f293d2c85ed125c84ca1efb1 Binary files /dev/null and b/cad/adpater_right_v2.FCStd differ diff --git a/cad/adpater_right_v2.FCStd1 b/cad/adpater_right_v2.FCStd1 new file mode 100644 index 0000000000000000000000000000000000000000..af3fb67a73464cccd5b0f91929b786feef0b01f2 Binary files /dev/null and b/cad/adpater_right_v2.FCStd1 differ diff --git a/python/__init__.py b/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_final.py b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_final.py new file mode 100644 index 0000000000000000000000000000000000000000..027c0725c17491f6daf52e15495830f35e1ba53c --- /dev/null +++ b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_final.py @@ -0,0 +1,341 @@ +# TODO: check if all devices and neccessary files are online/in the right place +# show if thats the case and ask user if he wants to start control + +#!/usr/bin/env python +import keyboard +from typing import Sequence +import time +import copy +import numpy as np +from abb_egm_pyclient import EGMClient +import libs.invKin as invKin +from data.get_data import get_trajectory, transform2yumi_workspace, place_trajectory, logic2abb +from matplotlib import pyplot as plt +import serial.tools.list_ports +from tqdm import tqdm +from stateMachine import FoamCuttingMachine + + +foamCutting = FoamCuttingMachine() + +def get_realJointAngles(egm_client): + robot_msg = egm_client.receive_msg() + conf: Sequence[float] = robot_msg.feedBack.joints.joints + joint7 = robot_msg.feedBack.externalJoints.joints[0] + conf.insert(2, joint7) + jointAngles = np.radians(np.array(conf)) + return jointAngles + +# READ IN THE TRAJECTORY +# get the data in the yumi workspace +p1, v1, p2, v2, phi_delta, dphi = get_trajectory() +p1, v1, p2, v2, phi_delta, dphi = transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi) + +# define staring postition in workspace for left arm - found by try and error in RS +p1_start_des = np.array([0.32, 0.3, 0.1]) # synced fine pose was np.array([0.3, 0.2, 0.2]) +p1, p2 = place_trajectory(p1_start_des, p1, p2) + +# setup for UDP connection +UDP_PORT_LEFT = 6510 +UDP_PORT_RIGHT = 6511 +rate = 80 + +# take the first desired taskSpaceInput for the right arm and simulate force sensor data +#translation_R_start = copy.copy(p2[0, :]) +syncTranslation_R = np.array([0.3, -0.2, 0.2]) # starting pose synched with robot +#rotation_R_start = copy.copy(phi_delta[0, :]) +syncRotation_R = np.array([0, 0, 0]) +syncPose_R = np.concatenate((syncTranslation_R, syncRotation_R), axis=0) +desPose_R_start = np.concatenate((p2[0, :], phi_delta[0, :]), axis=0) + +#translation_L_start = copy.copy(p1[0, :]) +syncTranslation_L = np.array([0.3, 0.2, 0.2]) # starting pose synched with robot +syncRotation_L = np.array([0, 0, 0]) +syncPose_L = np.concatenate((syncTranslation_L, syncRotation_L), axis=0) +desPose_L_start = np.concatenate((p1[0, :], phi_delta[0, :]), axis=0) + +egm_client_R = EGMClient(port=UDP_PORT_RIGHT) +egm_client_L = EGMClient(port=UDP_PORT_LEFT) + +yumi_right = 
invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf") +yumi_right.set_operationPoint(5.0) +yumi_right.set_kp(0.05) +yumi_right.set_hybridControl(False) +yumi_right.set_transitionTime(3.0) + +yumi_left = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf") + +desVelocities_R_start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) +jointVelocities_R = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + +desVelocities_L_start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) +jointVelocities_L = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + +# SETUP SERIAL INTERFACE +# find active ports +ports = serial.tools.list_ports.comports() +arduino = serial.Serial() + +myPort = '/dev/ttyACM0' +myBaudRate = 38400 +portList = [] + +print("AVAILABLE PORTS \n") +for onePort in ports: + portList.append(str(onePort)) + print(str(onePort)) + +# setup configuration for serial interface +arduino.baudrate = myBaudRate +arduino.port = myPort +arduino.open() + +time.sleep(2) +arduino.flushInput() + +# wait until first serial data from arduino is available +while not arduino.in_waiting: + pass + +i = 0 +force = 0.0 # 0.5 [N] seems to be a good value for the control +timestamp = time.time() +traj_samples = len(p1[:, 0]) +moveToStartVel = 0.005 + +# delta is for both arms the same +poseDelta = desPose_R_start - syncPose_R + +numIterations = round(np.fabs(poseDelta).max()/moveToStartVel * rate) +toStartPose_R = np.zeros((numIterations, 6)) +toStartPose_L = np.zeros((numIterations, 6)) + +for i in range(numIterations): + toStartPose_L[i, :] = syncPose_L + poseDelta/numIterations * i + toStartPose_R[i, :] = syncPose_R + poseDelta/numIterations * i + +toStartVel_R = (toStartPose_R[1, :] - toStartPose_R[0, :]) * rate +toStartVel_L = (toStartPose_L[1, :] - toStartPose_L[0, :]) * rate + +# arrays to store the results for later plotting +log_realPose_R = np.zeros((traj_samples, 6)) +log_compPose_R = np.zeros((traj_samples, 6)) +log_realJoints_R = np.zeros((traj_samples, 7)) +log_compJoints_R = np.zeros((traj_samples, 7)) + +log_realPose_L = np.zeros((traj_samples, 6)) +log_compPose_L = np.zeros((traj_samples, 6)) +log_realJoints_L = np.zeros((traj_samples, 7)) +log_compJoints_L = np.zeros((traj_samples, 7)) + +log_force = np.zeros((traj_samples, 1)) + +print("\n Force control only to tension the wire...") + +foamCutting.start_syncronizing() + +i = 0 +while True and arduino.isOpen(): + # check for new force data + if arduino.in_waiting: # get the number of bytes in the input buffer + packet = arduino.readline() # type: bytes + str_receive = packet.decode('utf-8').rstrip('\n') + force = float(str_receive)/1000.0 # mN to Newton + + if foamCutting.is_moveToStartPose: + if (time.time() - timestamp) >= (1.0/rate): + timestamp = time.time() + + # get the current joints angles for the right arm + jointAngles_R = get_realJointAngles(egm_client_R) + jointAngles_L = get_realJointAngles(egm_client_L) + + # compute the resulting jointVelocities + if i > 0: + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + jointVelocities_L = (jointAngles_L - jointAngles_L_old)/rate + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + yumi_right.set_desPoseVel(toStartPose_R[i, :], toStartVel_R) + yumi_right.process() + pose_R = yumi_right.get_pose() + poseError_R = pose_R - syncPose_R + + yumi_left.set_jointValues(jointAngles_L, jointVelocities_L) + yumi_left.set_desPoseVel(toStartPose_L[i, :], toStartVel_L) + yumi_left.process() + pose_L = 
yumi_left.get_pose() + poseError_L = pose_L - syncPose_L + + ik_jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + ik_jointAngles_L = yumi_left.get_newJointValues() + + # transform to degrees as egm wants it + des_conf_R = np.degrees(ik_jointAngles_R) + des_conf_L = np.degrees(ik_jointAngles_L) + + egm_client_R.send_planned_configuration(logic2abb(des_conf_R)) + egm_client_L.send_planned_configuration(logic2abb(des_conf_L)) + + # save joint values to compute joint velocities in the next iteration + jointAngles_R_old = copy.copy(jointAngles_R) + jointAngles_L_old = copy.copy(jointAngles_L) + + i = i+1 + if i == numIterations: + print('end effectors are at starting poses, ready to mount wire') + i = 0 # reset counter + foamCutting.start_mounting() + + elif (foamCutting.is_mountWire): + if keyboard.is_pressed('enter'): + yumi_right.set_hybridControl(True) + foamCutting.start_tensioning() + + + # force control only until wire is tensioned + elif foamCutting.is_tensionWire: + if (time.time() - timestamp) >= (1.0/rate): + timestamp = time.time() + + # get the current joints angles for the right arm + jointAngles_R = get_realJointAngles(egm_client_R) + jointAngles_L = get_realJointAngles(egm_client_L) + + # compute the resulting jointVelocities + if i > 0: + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + jointVelocities_L = (jointAngles_L - jointAngles_L_old)/rate + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + yumi_right.set_desPoseVel(desPose_R_start, desVelocities_R_start) + yumi_right.set_force(force) + yumi_right.process() + + yumi_left.set_jointValues(jointAngles_L, jointVelocities_L) + yumi_left.set_desPoseVel(desPose_L_start, desVelocities_L_start) + yumi_left.process() + + print("force in [N] is: %5.2f" % force) + + ik_jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + ik_jointAngles_L = yumi_left.get_newJointValues() + + # transform to degrees as egm wants it + des_conf_R = np.degrees(ik_jointAngles_R) + des_conf_L = np.degrees(ik_jointAngles_L) + + egm_client_R.send_planned_configuration(logic2abb(des_conf_R)) + egm_client_L.send_planned_configuration(logic2abb(des_conf_L)) + + # save joint values to compute joint velocities in the next iteration + jointAngles_R_old = copy.copy(jointAngles_R) + jointAngles_L_old = copy.copy(jointAngles_L) + + i = i+1 + if (force > 0.65) and keyboard.is_pressed('enter'): + i = 0 # reset counter + print("Changing to hybrid control now...") + foamCutting.start_cutting() + pbar = tqdm(total=traj_samples) + + # hybrid control + elif foamCutting.is_cutting: + if (time.time() - timestamp) >= (1.0/rate): + timestamp = time.time() + # copy array entries to local variables + pos1 = p1[i, :] + vel1 = v1[i, :] + pos2 = p2[i, :] + vel2 = v2[i, :] + phi = phi_delta[i, :] + phi_dot = dphi[i, :] + + # get the current joints angles for the left arm + jointAngles_L = get_realJointAngles(egm_client_L) + log_realJoints_L[i, :] = jointAngles_L + + # get the current joints angles for the right arm + jointAngles_R = get_realJointAngles(egm_client_R) + log_realJoints_R[i, :] = jointAngles_R + + # compute the resulting jointVelocities + if i > 0: + jointVelocities_L = (jointAngles_L - jointAngles_L_old)/rate + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + + desPose_L = np.concatenate((pos1, phi), axis=0) + desVelocities_L = np.concatenate((vel1, phi_dot), axis=0) + + desPose_R = np.concatenate((pos2, phi), axis=0) + desVelocities_R = np.concatenate((vel2, phi_dot), 
axis=0) + + yumi_left.set_jointValues(jointAngles_L, jointVelocities_L) + log_realPose_L[i, :] = yumi_left.get_pose() + yumi_left.set_desPoseVel(desPose_L, desVelocities_L) + yumi_left.process() + log_compPose_L[i, :] = yumi_left.get_pose() + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + log_realPose_R[i, :] = yumi_right.get_pose() + yumi_right.set_desPoseVel(desPose_R, desVelocities_R) + yumi_right.set_force(force) + yumi_right.process() + log_compPose_R[i, :] = yumi_right.get_pose() + log_force[i, :] = force + + ik_jointAngles_L = yumi_left.get_newJointValues() # computed joint values from IK + log_compJoints_L[i, :] = ik_jointAngles_L + ik_jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + log_compJoints_R[i, :] = ik_jointAngles_R + + # transform to degrees as egm wants it + des_conf_L = np.degrees(ik_jointAngles_L) + des_conf_R = np.degrees(ik_jointAngles_R) + + egm_client_L.send_planned_configuration(logic2abb(des_conf_L)) + egm_client_R.send_planned_configuration(logic2abb(des_conf_R)) + + # save joint values to compute joint velocities in the next iteration + jointAngles_L_old = copy.copy(jointAngles_L) + jointAngles_R_old = copy.copy(jointAngles_R) + i = i + 1 + pbar.update(1) + + if (i >= (traj_samples-1)): + pbar.close() + foamCutting.end_cutting() + break # break out of while loop + + +# check if end pose for right arm has been reached +n = 0 +while n < 10: + robot_msg_R = egm_client_R.receive_msg() + if robot_msg_R.mciConvergenceMet: + print("Joint positions for right arm converged.") + break + else: + print("Waiting for convergence.") + n += 1 + time.sleep(0.1) +else: + raise TimeoutError(f"Joint positions for the right arm did not converge.") + +# check if poses have been reached for the left arm +n = 0 +while n < 10: + robot_msg_L = egm_client_L.receive_msg() + if robot_msg_L.mciConvergenceMet: + print("Joint positions for left arm converged.") + break + else: + print("Waiting for convergence.") + n += 1 + time.sleep(0.1) +else: + raise TimeoutError(f"Joint positions for the left arm did not converge.") + + +experimentLogs = np.hstack((p2, phi_delta, log_compPose_R, log_realPose_R, log_compJoints_R, log_realJoints_R, log_force, p1, log_compJoints_R, log_realPose_L, log_compJoints_L, log_realJoints_L)) +#np.save('./data/experimentLogs300-0,05-1-x', experimentLogs) \ No newline at end of file diff --git a/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_realSensor.py b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_realSensor.py new file mode 100644 index 0000000000000000000000000000000000000000..ae75d49fc4a1b382618ec6af22439a736c9ad56b --- /dev/null +++ b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_realSensor.py @@ -0,0 +1,136 @@ + +#!/usr/bin/env python +from typing import Sequence +import time +import copy +import numpy as np +from abb_egm_pyclient import EGMClient +import libs.invKin as invKin +from data.get_data import get_trajectory, transform2yumi_workspace, place_trajectory, logic2abb +from matplotlib import pyplot as plt +import serial.tools.list_ports + +''' +Before running this script make sure that the starting pose of the robot (either real one or in RS) match the +starting pose of the computed joint angles. This script sends desired joint position to the robot. 
It takes to real +joint positions at the robot into account +''' + +# READ IN THE TRAJECTORY +# get the data in the yumi workspace +p1, v1, p2, v2, phi_delta, dphi = get_trajectory() +p1, v1, p2, v2, phi_delta, dphi = transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi) + +# define staring postition in workspace for left arm - found by try and error in RS +p1_start_des = np.array([0.3, 0.2, 0.2]) +p1, p2 = place_trajectory(p1_start_des, p1, p2) + +# setup for UDP connection +UDP_PORT_RIGHT = 6511 +rate = 80 + +# take the first desired taskSpaceInput for the right arm and simulate force sensor data +trans_const = copy.copy(p2[0, :]) +#rot_const = copy.copy(phi_delta[0, :]) +rot_const = np.array([0, 0, 0]) +desPose_R_const = np.concatenate((trans_const, rot_const), axis=0) + +egm_client_R = EGMClient(port=UDP_PORT_RIGHT) + +yumi_right = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf") +yumi_right.set_operationPoint(0.7) +yumi_right.set_kp(0.2) +yumi_right.set_hybridControl(True) + +desVelocities_R_const = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) +jointVelocities_R = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + +# SETUP SERIAL INTERFACE +# find active ports +ports = serial.tools.list_ports.comports() +arduino = serial.Serial() + +myPort = '/dev/ttyACM0' +myBaudRate = 38400 +portList = [] + +print("AVAILABLE PORTS \n") +for onePort in ports: + portList.append(str(onePort)) + print(str(onePort)) + + +# setup configuration for serial interface +arduino.baudrate = myBaudRate +arduino.port = myPort +arduino.open() + +time.sleep(2) +arduino.flushInput() + +# wait until first serial data from arduino is available +while not arduino.in_waiting: + pass + +i = 0 +force = 0.0 # 0.5 [N] seems to be a good value for the control +timestamp = time.time() + +while True and arduino.isOpen(): + + # check for new force data + if arduino.in_waiting: # get the number of bytes in the input buffer + packet = arduino.readline() # type: bytes + str_receive = packet.decode('utf-8').rstrip('\n') + force = float(str_receive)/1000.0 # mN to Newtion + + if (time.time() - timestamp) >= (1.0/rate): + # get the current joints angles for the right arm + robot_msg_R = egm_client_R.receive_msg() + conf_R: Sequence[float] = robot_msg_R.feedBack.joints.joints + joint7 = robot_msg_R.feedBack.externalJoints.joints[0] + conf_R.insert(2, joint7) + jointAngles_R = np.radians(np.array(conf_R)) + + # compute the resulting jointVelocities + if i > 0: + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + yumi_right.set_desPoseVel(desPose_R_const, desVelocities_R_const) + yumi_right.set_force(force) + yumi_right.process() + + print(force) + + ik_jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + + #print(f"Current configuration of left arm {conf_yumi_L}") + #print(f"Current configuration of right arm {conf_yumi_R}") + + # transform to degrees as egm wants it + des_conf_R = np.degrees(ik_jointAngles_R) + + egm_client_R.send_planned_configuration(logic2abb(des_conf_R)) + + + # save joint values to compute joint velocities in the next iteration + jointAngles_R_old = copy.copy(jointAngles_R) + + i = i+1 + timestamp = time.time() + + +# check if poses have been reached +n = 0 +while n < 10: + robot_msg = egm_client_R.receive_msg() + if robot_msg.mciConvergenceMet: + print("Joint positions converged.") + break + else: + print("Waiting for convergence.") + n += 1 + time.sleep(1) +else: + 
raise TimeoutError(f"Joint positions did not converge after {n} s timeout.") diff --git a/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_virtualSensor.py b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_virtualSensor.py new file mode 100644 index 0000000000000000000000000000000000000000..6083b4d41ea596cd1a81d0fa3f0f3fcefb05ee04 --- /dev/null +++ b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/control_virtualSensor.py @@ -0,0 +1,116 @@ +# TODO: check if all devices and neccessary files are online/in the right place +# show if thats the case and ask user if he wants to start control + +#!/usr/bin/env python +from typing import Sequence +import time +import copy +import numpy as np +from abb_egm_pyclient import EGMClient +import libs.invKin as invKin +from data.get_data import get_trajectory, transform2yumi_workspace, place_trajectory, logic2abb +from matplotlib import pyplot as plt + +''' +Before running this script make sure that the starting pose of the robot (either real one or in RS) match the +starting pose of the computed joint angles. This script sends desired joint position to the robot. It takes to real +joint positions at the robot into account +''' + +# READ IN THE TRAJECTORY +# get the data in the yumi workspace +p1, v1, p2, v2, phi_delta, dphi = get_trajectory() +p1, v1, p2, v2, phi_delta, dphi = transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi) + +# define staring postition in workspace for left arm - found by try and error in RS +p1_start_des = np.array([0.3, 0.2, 0.2]) +p1, p2 = place_trajectory(p1_start_des, p1, p2) + +# setup for UDP connection +UDP_PORT_RIGHT = 6511 +rate = 80 + +# take the first desired taskSpaceInput for the right arm and simulate force sensor data +trans_const = copy.copy(p2[0, :]) +#rot_const = copy.copy(phi_delta[0, :]) +rot_const = np.array([np.pi/2, 0, 0]) +desPose_R_const = np.concatenate((trans_const, rot_const), axis=0) + + +# generate sinusoidal test data for the virtual force sensor +# sin(t/pi *4) + 2, test function, test for 5 seconds +numDataPoints = round(5/(1/rate)) +forceInput = np.zeros((numDataPoints)) +for i in range(numDataPoints): + forceInput[i] = np.sin(i/np.pi * 1/rate * 4)+2 + +m_time = np.linspace(0, 5, num=numDataPoints) + +""" fig = plt.figure() +plt.plot(m_time, forceInput, label='desired test signal') # +fig.get_axes()[0].set_xlabel('time') +fig.get_axes()[0].set_ylabel('virtual force signal') +plt.legend() +plt.title('test signal') +plt.show() """ + +egm_client_R = EGMClient(port=UDP_PORT_RIGHT) + +yumi_right = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf") + +desVelocities_R_const = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) +jointVelocities_R = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + +for index, force in enumerate(forceInput): + + # get the current joints angles for the right arm + robot_msg_R = egm_client_R.receive_msg() + conf_R: Sequence[float] = robot_msg_R.feedBack.joints.joints + joint7 = robot_msg_R.feedBack.externalJoints.joints[0] + conf_R.insert(2, joint7) + jointAngles_R = np.radians(np.array(conf_R)) + + # compute the resulting jointVelocities + if index > 0: + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + yumi_right.set_desPoseVel(desPose_R_const, desVelocities_R_const) + yumi_right.set_force(force) + yumi_right.process() + + + ik_jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + + + #print(f"Current 
configuration of left arm {conf_yumi_L}") + #print(f"Current configuration of right arm {conf_yumi_R}") + + # transform to degrees as egm wants it + des_conf_R = np.degrees(ik_jointAngles_R) + + egm_client_R.send_planned_configuration(logic2abb(des_conf_R)) + + + # save joint values to compute joint velocities in the next iteration + jointAngles_R_old = copy.copy(jointAngles_R) + + + time.sleep(1/rate) + + + +# check if poses have been reached +n = 0 +while n < 10: + robot_msg = egm_client_R.receive_msg() + if robot_msg.mciConvergenceMet: + print("Joint positions converged.") + break + else: + print("Waiting for convergence.") + n += 1 + time.sleep(1) +else: + raise TimeoutError(f"Joint positions did not converge after {n} s timeout.") diff --git a/python/abb_egm_pyclient/abb_egm_pyclient/feedback/stateMachine.py b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/stateMachine.py new file mode 100644 index 0000000000000000000000000000000000000000..4bd8b825f594b4aac519cebfae9b6183fcf9de06 --- /dev/null +++ b/python/abb_egm_pyclient/abb_egm_pyclient/feedback/stateMachine.py @@ -0,0 +1,46 @@ +from statemachine import StateMachine, State + +class FoamCuttingMachine(StateMachine): + # green = State('Green', initial=True) + # yellow = State('Yellow') + # red = State('Red') + + # define states + standby = State('Standby', initial=True) + moveToStartPose = State('MoveToStartPose') + mountWire = State('MountWire') + tensionWire = State('TensionWire') + cutting = State('Cutting') + + # define transitions + start_syncronizing = standby.to(moveToStartPose) + start_mounting = moveToStartPose.to(mountWire) + start_tensioning = mountWire.to(tensionWire) + start_cutting = tensionWire.to(cutting) + end_cutting = cutting.to(standby) + + def on_enter_moveToStartPose(self): + print('State changed to moveToStartPose') + + def on_enter_mountWire(self): + print('State changed to mountWire') + + def on_enter_tensionWire(self): + print('State changed to tensionWire') + + def on_enter_cutting(self): + print('State changed to cutting') + + def on_enter_standby(self): + print('State changed to standby') + + + # slowdown = green.to(yellow) + # stop = yellow.to(red) + # go = red.to(green) + + # states + # standby + # moveToStartPose + # tensionWire + # cutting \ No newline at end of file diff --git a/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_offline/send2robot.py b/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_offline/send2robot.py index 84fcc46d90f94d6aa2323254c05ee3ace51474bc..aac704d54762b531750bc9ad0271342fb33bb528 100644 --- a/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_offline/send2robot.py +++ b/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_offline/send2robot.py @@ -1,11 +1,10 @@ #!/usr/bin/env python from typing import Sequence import time - import numpy as np - - from abb_egm_pyclient import EGMClient +from data.get_data import get_desJoints_L, get_desJoints_R, logic2abb + ''' Before running this script make sure that the starting pose of the robot (either real one or in RS) match the @@ -16,18 +15,14 @@ the IK does not take the real joint angles and velocities into account but suppo UDP_PORT_LEFT = 6510 UDP_PORT_RIGHT = 6511 -comp_conf_left = np.load('./abb_egm_pyclient/abb_egm_pyclient/feedforward_offline/desJointAngles_left.npy') -comp_conf_right = np.load('./abb_egm_pyclient/abb_egm_pyclient/feedforward_offline/desJointAngles_right.npy') +comp_conf_left = get_desJoints_L() +comp_conf_right = get_desJoints_R() rate = 80 -# rearrange the logic joint values to the strange 
convention abb has -def logic2abb(joint_values): - return joint_values[[0, 1, 3, 4, 5, 6, 2]] - - egm_client_left = EGMClient(port=UDP_PORT_LEFT) egm_client_right = EGMClient(port=UDP_PORT_RIGHT) + for n in range(len(comp_conf_left[:,0])): # do stuff for the left arm diff --git a/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_online/send2robot.py b/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_online/send2robot.py new file mode 100644 index 0000000000000000000000000000000000000000..f01c1b108ac4f772d9530e033a36e467b53e9e3c --- /dev/null +++ b/python/abb_egm_pyclient/abb_egm_pyclient/feedforward_online/send2robot.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +from typing import Sequence +import time +import copy +import numpy as np +from abb_egm_pyclient import EGMClient +import libs.invKin as invKin +from data.get_data import get_trajectory, transform2yumi_workspace, place_trajectory, logic2abb + +''' +Before running this script make sure that the starting pose of the robot (either real one or in RS) match the +starting pose of the computed joint angles. This script sends desired joint position to the robot. It takes to real +joint positions at the robot into account +''' + +# READ IN THE TRAJECTORY +# get the data in the yumi workspace +p1, v1, p2, v2, phi_delta, dphi = get_trajectory() +p1, v1, p2, v2, phi_delta, dphi = transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi) + +# define staring postition in workspace for left arm - found by try and error in RS +p1_start_des = np.array([0.3, 0.2, 0.2]) +p1, p2 = place_trajectory(p1_start_des, p1, p2) + +# setup for UDP connection +UDP_PORT_LEFT = 6510 +UDP_PORT_RIGHT = 6511 +rate = 80 + +egm_client_L = EGMClient(port=UDP_PORT_LEFT) +egm_client_R = EGMClient(port=UDP_PORT_RIGHT) + +yumi_right = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf") +yumi_left = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf") + + +jointVelocities_L = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) +jointVelocities_R = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + + + +for index, (pos1, vel1, pos2, vel2, phi, phi_dot) in enumerate(zip(p1, v1, p2, v2, phi_delta, dphi)): + tstart = time.time() + t0 = time.time() + # get the current joints angles for the left arm + + robot_msg_L = egm_client_L.receive_msg() + conf_L: Sequence[float] = robot_msg_L.feedBack.joints.joints + joint7 = robot_msg_L.feedBack.externalJoints.joints[0] + conf_L.insert(2, joint7) + jointAngles_L = np.radians(np.array(conf_L)) + + # get the current joints angles for the right arm + robot_msg_R = egm_client_R.receive_msg() + conf_R: Sequence[float] = robot_msg_R.feedBack.joints.joints + joint7 = robot_msg_R.feedBack.externalJoints.joints[0] + conf_R.insert(2, joint7) + jointAngles_R = np.radians(np.array(conf_R)) + + + + t1 = time.time() + + # compute the resulting jointVelocities + if index > 0: + jointVelocities_L = (jointAngles_L - jointAngles_L_old)/rate + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + + desPose_L = np.concatenate((pos1, phi), axis=0) + desVelocities_L = np.concatenate((vel1, phi_dot), axis=0) + + desPose_R = np.concatenate((pos2, phi), axis=0) + desVelocities_R = np.concatenate((vel2, phi_dot), axis=0) + + t2 = time.time() + + yumi_left.set_jointValues(jointAngles_L, jointVelocities_L) + yumi_left.set_desPoseVel(desPose_L, desVelocities_L) + yumi_left.process() + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + 
yumi_right.set_desPoseVel(desPose_R, desVelocities_R) + yumi_right.process() + + ik_jointAngles_L = yumi_left.get_newJointValues() # computed joint values from IK + + ik_jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + + t3 = time.time() + + #print(f"Current configuration of left arm {conf_yumi_L}") + #print(f"Current configuration of right arm {conf_yumi_R}") + + # transform to degrees as egm wants it + des_conf_L = np.degrees(ik_jointAngles_L) + des_conf_R = np.degrees(ik_jointAngles_R) + + egm_client_L.send_planned_configuration(logic2abb(des_conf_L)) + egm_client_R.send_planned_configuration(logic2abb(des_conf_R)) + + t4 = time.time() + + # save joint values to compute joint velocities in the next iteration + jointAngles_L_old = copy.copy(jointAngles_L) + jointAngles_R_old = copy.copy(jointAngles_R) + + tend = time.time() + + time.sleep(1/rate) + + +print("time per iteration: ", (tend-tstart)/len(p1[:,0])) +print("time to receive msg from RS: ", t1-t0) +print("time to build desPose & desVel: ", t2-t1) +print("time to perform ik: ", t3-t2) +print("time to send config to RS: ", t4-t3) + +# check if poses have been reached +n = 0 +while n < 10: + robot_msg = egm_client_L.receive_msg() + if robot_msg.mciConvergenceMet: + print("Joint positions converged.") + break + else: + print("Waiting for convergence.") + n += 1 + time.sleep(1) +else: + raise TimeoutError(f"Joint positions did not converge after {n} s timeout.") diff --git a/python/data/__init__.py b/python/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/python/data/get_data.py b/python/data/get_data.py new file mode 100644 index 0000000000000000000000000000000000000000..8f9bf004e9866a1309a902b7b9523a1273ecff41 --- /dev/null +++ b/python/data/get_data.py @@ -0,0 +1,53 @@ +import numpy as np +import copy +from enum import Enum + +def get_trajectory(): + try: + data = np.load('data/traj_data.npy') + except FileNotFoundError: + print('It seems like you need to run the script first that generate the requested data') + + p1 = data[:, 0:3] + v1 = data[:, 3:6] + p2 = data[:, 6:9] + v2 = data[:, 9:12] + phi_delta = data[:, 12:15] + dphi = data[:, 15:18] + return p1, v1, p2, v2, phi_delta, dphi + +def get_desJoints_L(): + try: + comp_conf_left = np.load('data/compJointAngles_left.npy') + except: + print('It seems like you need to run the script first that generate the requested data') + return comp_conf_left + +def get_desJoints_R(): + try: + comp_conf_right = np.load('data/compJointAngles_right.npy') + except: + print('It seems like you need to run the script first that generate the requested data') + return comp_conf_right + +def transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi): + for m in [p1, v1, p2, v2, phi_delta, dphi]: + copy_col = copy.copy(m[:, 2]) # copy z + m[:, 2] = m[:, 1] # shift y to z + m[:, 1] = -copy_col # copy z to y + return p1, v1, p2, v2, phi_delta, dphi + +def place_trajectory(start_des, p1, p2): + p_start = p1[0, :] + offset = start_des - p_start + + # apply offset to all position coordinates + for i in range(len(p1[:,0])): + p1[i,:] = p1[i,:] + offset + p2[i,:] = p2[i,:] + offset + + return p1, p2 + +# rearrange the logic joint values to the strange convention abb has +def logic2abb(joint_values): + return joint_values[[0, 1, 3, 4, 5, 6, 2]] diff --git a/python/experiment/plots.py b/python/experiment/plots.py new file mode 100644 index 
0000000000000000000000000000000000000000..d102870fce8dead8d8cd136e6a1245f9b86549ec --- /dev/null +++ b/python/experiment/plots.py @@ -0,0 +1,82 @@ +from cProfile import label +import numpy as np +import matplotlib.pyplot as plt + + +try: + data = np.load('data/experimentLogs.npy') + data2 = np.load('data/forceSignal-500.npy') +except FileNotFoundError: + print('It seems like you need to run the script first that generate the requested data') + +# p2, phi_delta, log_compPose_R, log_realPose_R, log_compJoints_R, log_realJoints_R, force +desPose = data[:, 0:6] +compPose = data[:, 6:12] +realPose = data[:, 12:18] +compJoints = data[:, 8:25] +realJoints = data[2:, 5:32] +force = data[:, 32:33] +noSamples = len(force) + +time = np.linspace(0, round(1.0/80.0 * noSamples), num=noSamples) +time2 = np.linspace(0, round(1.0/80.0 * len(data2)), num=len(data2)) +#time.shape = (time.size//1, 1) + + + +fig = plt.figure() + +ax1 = fig.add_subplot(211) +ax1.plot(time, force[:, 0]*5) +ax1.set_title('force trajectory') +ax1.set_ylabel('N') +ax1.set_xlabel('s') + +ax2 = fig.add_subplot(212) +ax2.plot(time2, data2) +ax2.set_title('force constant weight') +ax2.set_ylabel('N') +ax2.set_xlabel('s') + + + + +""" ax2 = fig.add_subplot(122) +ax2.plot(time, compJoints[:, 0], label="J1") +ax2.plot(time, compJoints[:, 1], label="J2") +ax2.plot(time, compJoints[:, 2], label="J3") +ax2.plot(time, compJoints[:, 3], label="J4") + +ax2.plot(time[0:-2], realJoints[:, 0], label="J1", linestyle="dashed") +ax2.plot(time[0:-2], realJoints[:, 1], label="J2", linestyle="dashed") +ax2.plot(time[0:-2], realJoints[:, 2], label="J3", linestyle="dashed") +ax2.plot(time[0:-2], realJoints[:, 3], label="J4", linestyle="dashed") +ax2.set_title('joint angles') +ax2.set_ylabel('rad') +ax2.set_xlabel('s') +ax2.legend() """ + + +fig = plt.figure() +ax = fig.add_subplot(111, projection='3d') +ax.plot(desPose[1:-1, 0], desPose[1:-1, 1], desPose[1:-1, 2], label="desPose") +ax.plot(compPose[1:-1, 0], compPose[1:-1, 1], compPose[1:-1, 2], label="compPose") +ax.plot(realPose[1:-1, 0], realPose[1:-1, 1], realPose[1:-1, 2], label="realPose") +ax.set_ylabel('y') +ax.set_xlabel('x') +ax.set_zlabel('z') +ax.set_title('trajectory right arm') +ax.legend() + + + + + + + + + +""" axs[1, 0].set_title('Axis [1, 0]') +axs[1, 1].plot(x, -y, 'tab:red') +axs[1, 1].set_title('Axis [1, 1]') """ +plt.show() \ No newline at end of file diff --git a/python/optimizeTaskSpacePlacement/checkManipulabiltyMeasure.py b/python/optimizeTaskSpacePlacement/checkManipulabiltyMeasure.py new file mode 100644 index 0000000000000000000000000000000000000000..3801b7974d0af3944d57c0c10d535f8f71355988 --- /dev/null +++ b/python/optimizeTaskSpacePlacement/checkManipulabiltyMeasure.py @@ -0,0 +1,149 @@ +from data.get_data import get_trajectory, transform2yumi_workspace, place_trajectory +import numpy as np +import libs.invKin as invKin +import copy + +import matplotlib.cm as cmx +from mpl_toolkits.mplot3d import Axes3D +from matplotlib import pyplot as plt +import matplotlib + +# READ IN THE TRAJECTORY +# get the data in the yumi workspace +p1, v1, p2, v2, phi_delta, dphi = get_trajectory() +p1, v1, p2, v2, phi_delta, dphi = transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi) + +# define staring postition in workspace for left arm - found by try and error in RS +p1_start_des = np.array([0.3, 0.2, 0.2]) +p1, p2 = place_trajectory(p1_start_des, p1, p2) + +rate = 1.0/80.0 + +yumi_right = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf") 
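+# one kinematic model per arm, each loaded from its own URDF (the absolute paths are machine-specific)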
+yumi_left = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf") + +manipulability_R = np.zeros((len(p1))) +manipulability_L = np.zeros((len(p1))) +manipulabilityMin_R = np.zeros((3, 3, 3)) +manipulabilityMin_L = np.zeros((3, 3, 3)) + +jointAngles_R = np.array([-110.0, 29.85, 35.92, 49.91, 117.0, 123.0, -117.0]) * np.pi/180.0 +jointAngles_L = np.array([90.48, 17.87, -25.09, 48.0, -137.0, 122.0, -74.21]) * np.pi/180.0 + +jointVelocities_L = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) +jointVelocities_R = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) + +myX_R = [] +myY_R = [] +myZ_R = [] +myMan_R = [] + +myX_L = [] +myY_L = [] +myZ_L = [] +myMan_L = [] + +oldPos = np.zeros((3)) +newPos = np.zeros((3)) + +for x in range(-10, 11, 10): + # add 5 cm to every point of trajectory in x direction + dx = np.array([x*0.01, 0.0, 0.0]) + dX = np.ones((len(p1), 3)) * dx # elementwise multiplication + + for y in range(-10, 11, 10): + dy = np.array([0.0, y*0.01, 0.0]) + dY = np.ones((len(p1), 3)) * dy # elementwise multiplication + + for z in range(-10, 11, 10): + dz = np.array([0.0, 0.0, z*0.01]) + dZ = np.ones((len(p1), 3)) * dz # elementwise multiplication + p1_ = copy.copy(p1) + dX + dY + dZ + p2_ = copy.copy(p2) + dX + dY + dZ + + # synchronize joints angles to new start position + newPos = [dx[0], dy[1], dz[2]] + vel2NewStartPoint = (newPos - oldPos) * rate + desPose_L = np.concatenate((p1_[0,:], phi_delta[0,:]), axis=0) + desVelocities_L = np.concatenate((vel2NewStartPoint, np.array([0, 0, 0])), axis=0) # rotation does not change + + desPose_R = np.concatenate((p2_[0,:], phi_delta[0,:]), axis=0) + desVelocities_R = np.concatenate((vel2NewStartPoint, np.array([0, 0, 0])), axis=0) + + yumi_left.set_jointValues(jointAngles_L, jointVelocities_L) + yumi_left.set_desPoseVel(desPose_L, desVelocities_L) + yumi_left.process() + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + yumi_right.set_desPoseVel(desPose_R, desVelocities_R) + yumi_right.process() + + jointAngles_L = yumi_left.get_newJointValues() # computed joint values from IK + jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + + + for index, (pos1, vel1, pos2, vel2, phi, phi_dot) in enumerate(zip(p1_, v1, p2_, v2, phi_delta, dphi)): + + # compute the resulting jointVelocities + if index > 0: + jointVelocities_L = (jointAngles_L - jointAngles_L_old)/rate + jointVelocities_R = (jointAngles_R - jointAngles_R_old)/rate + + desPose_L = np.concatenate((pos1, phi), axis=0) + desVelocities_L = np.concatenate((vel1, phi_dot), axis=0) + + desPose_R = np.concatenate((pos2, phi), axis=0) + desVelocities_R = np.concatenate((vel2, phi_dot), axis=0) + + + yumi_left.set_jointValues(jointAngles_L, jointVelocities_L) + yumi_left.set_desPoseVel(desPose_L, desVelocities_L) + yumi_left.process() + manipulability_L[index] = yumi_left.get_manipulabilityMeasure() + + yumi_right.set_jointValues(jointAngles_R, jointVelocities_R) + yumi_right.set_desPoseVel(desPose_R, desVelocities_R) + yumi_right.process() + manipulability_R[index] = yumi_right.get_manipulabilityMeasure() + + jointAngles_L = yumi_left.get_newJointValues() # computed joint values from IK + jointAngles_R = yumi_right.get_newJointValues() # computed joint values from IK + + # save joint values to compute joint velocities in the next iteration + jointAngles_L_old = copy.copy(jointAngles_L) + jointAngles_R_old = copy.copy(jointAngles_R) + + #manipulabilityMin_L[x, y, z] = np.min(manipulability_L) + #manipulabilityMin_R[x, 
y, z] = np.min(manipulability_R) + myX_R.append(p2_[0, 0]) + myY_R.append(p2_[0, 1]) + myZ_R.append(p2_[0, 2]) + myMan_R.append(np.min(manipulability_R)) + + myX_L.append(p1_[0, 0]) + myY_L.append(p1_[0, 1]) + myZ_L.append(p1_[0, 2]) + myMan_L.append(np.min(manipulability_L)) + +print("manipulability measure min right: ", manipulabilityMin_R) +print("manipulability measure min left: ", manipulabilityMin_L) + + + + +def scatter3d(x,y,z, cs, colorsMap='jet'): + cm = plt.get_cmap(colorsMap) + cNorm = matplotlib.colors.Normalize(vmin=min(cs), vmax=max(cs)) + scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm) + fig = plt.figure() + ax = Axes3D(fig) + ax.scatter(x, y, z, c=scalarMap.to_rgba(cs)) + scalarMap.set_array(cs) + fig.colorbar(scalarMap) + plt.show() + + +scatter3d(np.array(myX_R), np.array(myY_R), np.array(myZ_R), np.array(myMan_R)) +scatter3d(np.array(myX_L), np.array(myY_L), np.array(myZ_L), np.array(myMan_L)) +scatter3d(np.array(myX_L), np.array(myY_L), np.array(myZ_L), np.array(myMan_L)+np.array(myMan_R)) +# do not add, but search iterate through and choose index where both array entries are higher diff --git a/python/plots/postprocessing/plotting.py b/python/plots/postprocessing/plotting.py new file mode 100644 index 0000000000000000000000000000000000000000..a70106273df6f76e07babc090ef9077244e450c0 --- /dev/null +++ b/python/plots/postprocessing/plotting.py @@ -0,0 +1,60 @@ +import numpy as np +import matplotlib.pyplot as plt + + +R_01 = np.load('/home/joschua/Coding/forceControl/master-project/python/plots/postprocessing/R_01.npy') + +#experimentLogs = np.hstack((p2, phi_delta, log_compPose_R, log_realPose_R, log_compJoints_R, log_realJoints_R, log_force, p1, log_compJoints_R, log_realPose_L, log_compJoints_L, log_realJoints_L)) + +data = np.load('./data/experimentLogs300-0,2-1.npy') +p2_des = data[:, 0:3] +p2_is = data[:, 12:15] +p2_comp = data[:, 6:9] +force = data[:, 32:33]*5-5 + +# error in workspace +e = (p2_des - p2_is) * 1000 # m 2 mm +e_ik = (p2_des - p2_comp) * 1000 # m 2 mm + +noSamples = len(e) + +# transform error into ee frame +for i in range(noSamples): + e[i, :] = np.transpose(R_01[i] @ e[i, :].T) + e_ik[i, :] = np.transpose(R_01[i] @ e_ik[i, :].T) + + + + +time = np.linspace(0, round(1.0/80.0 * noSamples), num=noSamples) + + + +fig = plt.figure() + +ax1 = fig.add_subplot(311) +ax2 = fig.add_subplot(312) +ax3 = fig.add_subplot(313) + + +ax1.plot(time[0:-10], e[0:-10, 0], label='e_x') +#ax.plot(time[0:-10], e_ik[0:-10, 0], label='eik_x_ee') +ax2.plot(time[0:-10], e[0:-10, 1]*(-1), label='e_y') +ax3.plot(time[0:-10], e[0:-10, 2], label='e_z') + +ax4 = ax2.twinx() +ax4.plot(time[0:-10], force[0:-10], label='force', color='r') +ax4.set_ylim(-1, 1) +ax4.set_ylabel('N') + +ax1.set_title('position error in EE frame') +ax1.set_ylabel('mm') +ax1.set_xlabel('s') +ax2.set_ylabel('mm') +ax2.set_xlabel('s') +ax3.set_ylabel('mm') +ax3.set_xlabel('s') +ax1.legend() +ax2.legend() +ax3.legend() +plt.show() \ No newline at end of file diff --git a/python/plots/preprocessing/plotting.py b/python/plots/preprocessing/plotting.py new file mode 100644 index 0000000000000000000000000000000000000000..aa92766abbefd1c3ce53a9e638e4bccc71a5d03b --- /dev/null +++ b/python/plots/preprocessing/plotting.py @@ -0,0 +1,119 @@ +import numpy as np +import matplotlib.pyplot as plt +from mpl_toolkits.mplot3d import Axes3D +from matplotlib.patches import Rectangle +from numpy.linalg import norm + +import_path = '/home/joschua/Coding/forceControl/master-project/python/plots/preprocessing/' +p1 = 
np.load(import_path+'p1.npy') +p2 = np.load(import_path+'p2.npy') +p1m = np.load(import_path+'p1m.npy') +p2m = np.load(import_path+'p2m.npy') +pos1 = np.load(import_path+'pos1.npy') +pos2 = np.load(import_path+'pos2.npy') +p2m_ref = np.load(import_path+'p2m_ref.npy') +v1 = np.load(import_path+'v1.npy') +v2 = np.load(import_path+'v2.npy') + +# use self-defined tum-cycler for TUM-blue colors +plt.style.use('mylatex') + +# plotting for thesis +plt.rcParams.update({ + "font.family": "serif", # use serif/main font for text elements + "text.usetex": True, # use inline math for ticks + "pgf.rcfonts": False, # don't setup fonts from rc parameters + #"legend.loc": 'upper right', + "savefig.directory": '/home/joschua/Documents/Studium/TUM/Thesis/documentation/thesis/myWorkFiles/AMStudentThesis/figures/plots/' + }) + +""" # do plots for tcp1 +#fig = plt.figure() +fig = plt.figure(figsize=(6.25, 2)) +ax = fig.add_subplot(111) +ax.scatter(p1[:,0]*1000, p1[:,1]*1000-53.7, label='$p_{1*}$', color='#005293') +ax.scatter(pos1[:,0]*1000, pos1[:,1]*1000-53.7, marker="x", label='$p_1$', color='#80a9c9') +# part view +ax.set_xlim(10.8, 14.8) +ax.set_ylim(-4.4, -1.95) +# overview +#ax.set_xlim(-10, 175) +#ax.set_ylim(-10, 25) +ax.set_xlabel('$x_1$') +ax.set_ylabel('$y_1$') +ax.legend() +#ax.add_patch(Rectangle((10.8, -4.4), 4, 2.45, +# edgecolor = 'black', +# #facecolor = 'blue', +# fill=False, +# lw=1)) +plt.show() + +fig.tight_layout() +#fig.savefig('/home/joschua/Documents/Studium/TUM/Thesis/documentation/thesis/myWorkFiles/AMStudentThesis/figures/plots/lefttraj_part.pgf', format='pgf') + """ + +fig = plt.figure(figsize=(6.25, 2)) +ax = fig.add_subplot(111) +v1_abs = np.array(list(map(lambda x: norm(x, 2), v1))).reshape(len(v1[:,0]),1) +v2_abs = np.array(list(map(lambda x: norm(x, 2), v2))).reshape(len(v2[:,0]),1) +timeline = np.linspace(0, (len(v1[:,0])-1)*1.0/80.0, len(v1[:,0])).reshape(len(v1[:,0]),1) +v_des = np.ones((len(timeline), 1))* 1000 +ax.plot( timeline, v1_abs*1000*60, label='$v_1$') +ax.plot( timeline, v2_abs*1000*60, label='$v_2$') +ax.plot( timeline, v_des, linestyle='dashed', label='$v_f$') +ax.set_xlabel('s') +ax.set_ylabel('mm/min') +ax.legend() +#plt.ylim(bottom=0) +fig.tight_layout() +plt.show() + + +""" +# make a 3D visualization +fig = plt.figure(figsize=(10, 5)) +ax = fig.add_subplot(111, projection='3d') +ax.plot(p1[:,0]*1000, p1[:,1]*1000, np.zeros(len(p1[:,0]))) +#ax.scatter(pos1[:,0], pos1[:,1], np.zeros(len(pos1[:,0])), marker="x") +ax.plot(p1[:,0]*1000, p1[:,1]*1000, np.squeeze(v1_abs)* 1000) +ax.set_xlim(54, 66) +ax.set_ylim(0, 100) +ax.view_init(elev=25., azim=-115.) 
+plt.show() + +# do plot for tcp2 +fig = plt.figure() +plt.scatter(p2[:,0], p2[:,1]) +plt.scatter(pos2[:,0], pos2[:,1], marker="x") +plt.show() + + +fig = plt.figure() +plt.scatter(p1m[200:300, 0], p1m[200:300, 2], label="p1") +plt.scatter(p2m_ref[200:300, 0], p2m_ref[200:300,2], label="p2_ref") +plt.scatter(p2m[200:300, 0], p2m[200:300,2], label="p2") +plt.legend() +plt.show() + + + +# plot the data for visualisation +fig = plt.figure(figsize=(3, 3)) +ax = fig.add_subplot(111, projection='3d') +#ax.plot(p1m[:,0]*1000, p1m[:,2]*1000, p1m[:,1]*1000-53.7, color='#005293', label='$p_{1*}$') +ax.plot(p2m_ref[:,0]*1000,p2m_ref[:,2]*1000, p2m_ref[:,1]*1000-53.7, label='$p_2$') +ax.plot(p2m[:,0]*1000,p2m[:,2]*1000, p2m[:,1]*1000-53.7, label='$p_{2*}$') +#ax.set_xlim(-10,170) +#ax.set_ylim(-5, 5)#axes were switched to better rotate plot is z +ax.set_ylim(405, 395)#axes were switched to better rotate plot is z +ax.set_zlim(-50,15)#axes were switched to better rotate plot is y +#ax.set_zlim(-15,25) +ax.view_init(elev=13., azim=36.) +ax.set_xlabel('$x_2$') +ax.set_ylabel('$z_2$') +ax.set_zlabel('$y_2$') +ax.legend() +plt.show() + +fig.tight_layout() """ \ No newline at end of file diff --git a/python/preprocessing/gcode.txt b/python/preprocessing/gcode.txt index de7bbdd3a88cf6bd05f11a7638f748b88f523ad0..0f12dc382ea66f8ac1c809dee611eea0ba2090fe 100644 --- a/python/preprocessing/gcode.txt +++ b/python/preprocessing/gcode.txt @@ -1,3 +1,4 @@ +G1 X00.00 Y53.70 U00.00 V53.70 G1 X10.00 Y53.70 U10.00 V53.70 G1 X10.00 Y53.70 U10.00 V53.70 G1 X10.14 Y54.86 U10.48 V54.56 @@ -73,3 +74,4 @@ G1 X11.50 Y51.00 U10.61 V51.07 G1 X10.68 Y51.79 U10.07 V51.97 G1 X10.18 Y52.68 U9.87 V52.84 G1 X10.00 Y53.70 U10.00 V53.70 +G1 X00.00 Y53.70 U00.00 V53.70 diff --git a/python/preprocessing/genPaths.py b/python/preprocessing/genPaths.py index 910dbf32c8ca4e949a9e7d5f2003c21641347fbb..40819a58727d0daa9dab1d730d493e5fa50bbf5b 100644 --- a/python/preprocessing/genPaths.py +++ b/python/preprocessing/genPaths.py @@ -3,6 +3,8 @@ from numpy.linalg import norm from math import sqrt, cos, sin import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D +from matplotlib.patches import Rectangle + # read g-code data from txt-file with open('preprocessing/gcode.txt') as f: @@ -31,7 +33,7 @@ pos1, pos2 = pos_split[0]*0.001, pos_split[1]*0.001 # transform to SI-units - [m # TODO: compute trajectory based on path of g-code # set a cutting speed, sample rate and compute necessary # position at every time step -c_speed = 3000 * 0.001 * 1/60 # define cutting speed as 300 mm/min [m/s] +c_speed = 300 * 0.001 * 1/60 # define cutting speed as 300 mm/min [m/s] st = 1.0/80.0 # highest possible sample time - 250 Hz # modificatin to delete in order to check angles in RS @@ -55,7 +57,7 @@ for i in range(1, len(pos1[:,0])): dp1_len = norm(dp1, 2) dp2_len = norm(dp2, 2) # number of section in between original points - nr_sect = int(round(dp1_len/(c_speed*st))) + nr_sect = round(dp1_len/(c_speed*st)) #print(len(p1x), "+ ", nr_sect) # append everthing to lists @@ -81,33 +83,6 @@ v1.append(v1[-1]) v1 = np.array(v1) v1 = np.hstack((v1, np.zeros((len(v1[:,0]), 1)))) - -# do plots for tcp1 -fig = plt.figure() -plt.scatter(p1[:,0], p1[:,1]) -plt.scatter(pos1[:,0], pos1[:,1], marker="x") -plt.show() - -fig = plt.figure() -v1_abs = np.array(list(map(lambda x: norm(x, 2), v1))).reshape(len(v1[:,0]),1) -plt.scatter(np.linspace(0, len(v1[:,0])-1, len(v1[:,0])).reshape(len(v1[:,0]),1), v1_abs, marker='x') -plt.show() - -# make a 3D visualization -fig = 
plt.figure() -ax = fig.add_subplot(111, projection='3d') -ax.scatter(p1[:,0], p1[:,1], np.zeros(len(p1[:,0]))) -ax.scatter(pos1[:,0], pos1[:,1], np.zeros(len(pos1[:,0])), marker="x") -ax.plot(p1[:,0], p1[:,1], np.squeeze(v1_abs)) -plt.show() - -# do plot for tcp2 -fig = plt.figure() -plt.scatter(p2[:,0], p2[:,1]) -plt.scatter(pos2[:,0], pos2[:,1], marker="x") -plt.show() - - # number of points in paths pNum = len(p1[:,0]) # length of wire, defined in 4-axis setup @@ -122,6 +97,7 @@ p2m_ref = np.hstack((p2,z2)) # original coordinates effLen = np.zeros((pNum,1)) # using the zyx euler convention to store orientation - same in robot studio ang = np.zeros((pNum,3)) +R_01 = [] for i in range(pNum): x1, x2 = p1[i,0], p2[i,0] @@ -156,7 +132,7 @@ for i in range(pNum): # rotation from first frame to initial frame R10 = np.transpose(R01) - # build transformation matrix (rotation and translation) - r02 = T01 x r12 + # build transformation matrix (rotation and translation) - r02 = T01 x r12 # vector 1 in inital frame r0_01 = np.array([[x1], [y1], [0]]) # vector in wires direction in first frame @@ -166,6 +142,9 @@ for i in range(pNum): # store values, cut last entry being 1 that was added for the transformation p2m[i,:] = np.transpose(r0_02) + + # saving rotation matrices for easier transformation in postprocessing for plotting + R_01.append(R10) # compute the respective vecolity of p2 - after doing the interpolation it takes the sample time st to the next point # slice the first and the last sample @@ -201,24 +180,23 @@ for i, (r12,r22_) in enumerate(zip(dist_12, dist_22_)): assert (effLen[i, 0] - wLen - r22_) < 0.00001, "path coordinate of p2 violates against to be compensated length difference" -fig = plt.figure() -plt.scatter(p1m[200:300, 0], p1m[200:300, 2], label="p1") -plt.scatter(p2m_ref[200:300, 0], p2m_ref[200:300,2], label="p2_ref") -plt.scatter(p2m[200:300, 0], p2m[200:300,2], label="p2") -plt.legend() -plt.show() - -# plot the data for visualisation -fig = plt.figure() -ax = fig.add_subplot(111, projection='3d') -ax.scatter(p1m[:,0],p1m[:,1],p1m[:,2]) -ax.scatter(p2m[:,0],p2m[:,1],p2m[:,2]) -ax.scatter(p2m_ref[:,0],p2m_ref[:,1],p2m_ref[:,2]) -ax.set_xlim(0,0.170) -ax.set_ylim(0,0.100) -ax.set_zlim(0.395,0.405) -plt.show() - # make data ready for export traj_data = np.hstack((p1m, v1, p2m, v2, ang, odot)) -np.save('traj_data', traj_data) \ No newline at end of file +# save data for application +#np.save('./data/traj_data', traj_data) + + +# save data to decouple preprocessing and plotting +# p1, pos1, p2, pos2, p2m_ref, p1m , p2m, v1 +plot_path = '/home/joschua/Coding/forceControl/master-project/python/plots/preprocessing/' +np.save(plot_path +'p1', p1) +np.save(plot_path+'p2', p2) +np.save(plot_path+'pos1', pos1) +np.save(plot_path+'pos2', pos2) +np.save(plot_path+'p1m', p1m) +np.save(plot_path+'p2m', p2m) +np.save(plot_path+'p2m_ref', p2m_ref) +np.save(plot_path+'v1', v1) +np.save(plot_path+'v2', v2) + +np.save('/home/joschua/Coding/forceControl/master-project/python/plots/postprocessing/R_01', R_01) \ No newline at end of file diff --git a/python/serial_interface/readSerial.py b/python/serial_interface/readSerial.py index 466fb88194191b80d561f06aa561a2a249810a34..b8e816fd93524fe340effab26925b0eeaacd6d34 100644 --- a/python/serial_interface/readSerial.py +++ b/python/serial_interface/readSerial.py @@ -1,5 +1,6 @@ import serial.tools.list_ports from time import sleep, time +import numpy as np ''' Instructions how to use this: @@ -9,10 +10,9 @@ Instructions how to use this: 4. 
readings that are sent from arduino are read and ready for processing ''' - # find active ports ports = serial.tools.list_ports.comports() -serialInst = serial.Serial() +arduino = serial.Serial() myPort = '/dev/ttyACM0' myBaudRate = 38400 @@ -25,32 +25,30 @@ for onePort in ports: # setup configuration for serial interface -serialInst.baudrate = myBaudRate -serialInst.port = myPort -serialInst.open() +arduino.baudrate = myBaudRate +arduino.port = myPort +arduino.open() sleep(1) -serialInst.flushInput() +arduino.flushInput() # wait until first serial data from arduino is available -while not serialInst.in_waiting: +while not arduino.in_waiting: pass -#print("Mount the calibration weight to the load cell and then hit ENTER") -print("message from arduino: " + serialInst.readline().decode('utf-8').rstrip('\n')) -print("message from arduino: " + serialInst.readline().decode('utf-8').rstrip('\n')) -input() -serialInst.write(bytes('0', 'utf-8')) - -#sleep(1) -time_prev = time() +force_log = [] +t0 = time() while True: - if serialInst.in_waiting: # get the number of bytes in the input buffer - packet = serialInst.readline() # type: bytes - time_diff = time() - time_prev + if arduino.in_waiting: # get the number of bytes in the input buffer + packet = arduino.readline() # type: bytes str_receive = packet.decode('utf-8').rstrip('\n') - print(str_receive) - #print("serial freq: " + str(1/time_diff)) - time_prev = time() - + force = float(str_receive)/1000.0 + print(force) + force_log.append(force) + if time()-t0 > 180.0: + break + + +force_save = np.array(force_log) +np.save('./data/forceSignal-500', force_save) \ No newline at end of file diff --git a/python/setup.cfg b/python/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..08b28ab900c0f9fd44a12715755370cbbffde0c1 --- /dev/null +++ b/python/setup.cfg @@ -0,0 +1,19 @@ +[metadata] +name = data +version = 0.1.0 + +[options] +packages = data +python_requires = >= 3.7 + + +[options.extras_require] +dev = + black + flake8 + isort >= 5.0.0 + mypy + +[flake8] +max-line-length = 88 +extend-ignore = E203 diff --git a/python/setup.py b/python/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..b908cbe55cb344569d32de1dfc10ca7323828dc5 --- /dev/null +++ b/python/setup.py @@ -0,0 +1,3 @@ +import setuptools + +setuptools.setup() diff --git a/python/taskspace_placement/computeJoints.py b/python/taskspace_placement/computeJoints.py index f4ecc913480a82da468fd3157f698ad5ace82795..c7bce378ba3d197f3de6c462618dbc22c3823fee 100644 --- a/python/taskspace_placement/computeJoints.py +++ b/python/taskspace_placement/computeJoints.py @@ -1,46 +1,22 @@ -from enum import Enum -from matplotlib import pyplot as plt, transforms + + +from matplotlib import pyplot as plt import numpy as np -from scipy.spatial.transform import Rotation as R import copy -import invKin +import libs.invKin as invKin + +from data.get_data import get_trajectory, transform2yumi_workspace, place_trajectory, Yumi -class Yumi(Enum): - RIGHT = False - LEFT = True # READ IN THE TRAJECTORY -# define staring postition in workspace for left arm - found by try and error in RS -desp_start = np.array([0.3, 0.2, 0.2]) - -# import the preprocessing data -data = np.load('./taskspace_placement/traj_data.npy') -# for each var x | y | z -p1 = data[:, 0:3] -v1 = data[:, 3:6] -p2 = data[:, 6:9] -v2 = data[:, 9:12] -phi_delta = data[:, 12:15] -dphi = data[:, 15:18] - -# coordinates system differ and need to be synchronized - y -> z, x -> x, z -> -y -for m in [p1, v1, p2, 
v2, phi_delta, dphi]: - copy_col = copy.copy(m[:, 2]) # copy z - m[:, 2] = m[:, 1] # shift y to z - m[:, 1] = -copy_col # copy z to y - -# place the trajectories within the workspace of the robot -# read the coordinates of p1 (that refers to the left arm) and modify it to match to desired -# starting postion -p_start = p1[0, :] -offset = desp_start - p_start - -# apply offset to all position coordinates -for i in range(len(p1[:,0])): - p1[i,:] = p1[i,:] + offset - p2[i,:] = p2[i,:] + offset +# get the data in the yumi workspace +p1, v1, p2, v2, phi_delta, dphi = get_trajectory() +p1, v1, p2, v2, phi_delta, dphi = transform2yumi_workspace(p1, v1, p2, v2, phi_delta, dphi) +# define staring postition in workspace for left arm - found by try and error in RS +p1_start_des = np.array([0.3, 0.2, 0.2]) +p1, p2 = place_trajectory(p1_start_des, p1, p2) # START CONFIGURATION FOR THE LEFT ARM # set the joint angles that map to the desired start position - read from RobotStudio @@ -52,55 +28,64 @@ jointVelocities = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) dt = 0.0125 # create arrays to store values of loop -desJointAngles_left = np.zeros((len(p1[:,0]),7)) +compJointAngles_left = np.zeros((len(p1[:,0]),7)) computedPose_left = np.zeros((len(p1[:,0]),6)) error_left = np.zeros((len(p1[:,0]),6)) +yumi_right = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_right.urdf") +yumi_left = invKin.Yumi("/home/joschua/Coding/forceControl/master-project/c++/models/urdf/yumi_left.urdf") + # loop for the left arm for index, (pos, vel, phi, phi_dot) in enumerate(zip(p1, v1, phi_delta, dphi)): # loop through all the desired position of left arm desPose = np.concatenate((pos, phi), axis=0) desVelocities = np.concatenate((vel, phi_dot), axis=0) # call the c++ egm function, return joint values and resulting pose - result = invKin.gpm(desPose, desVelocities, jointAngles, jointVelocities, Yumi.LEFT.value) - desJointAngles_left[index,:] = result[0] # computed joint values from IK - computedPose_left[index, :] = result[1] # resulting pose with joint values from IK + yumi_left.set_jointValues(jointAngles, jointVelocities) + yumi_left.set_desPoseVel(desPose, desVelocities) + yumi_left.process() + + compJointAngles_left[index,:] = yumi_left.get_newJointValues() # computed joint values from IK + computedPose_left[index, :] = yumi_left.get_newPose() # resulting pose with joint values from IK if index > 0: - jointVelocities = (desJointAngles_left[index, :] - desJointAngles_left[index-1, :])/dt # only true in the ideal case where result of ik matches the desired pose - #print('IK joints:', result[0]) - #print('IK resulting pose', result[1]) - print('\n error', desPose - result[1]) - error_left[index, :] = desPose - result[1] - jointAngles = result[0] + jointVelocities = (compJointAngles_left[index, :] - compJointAngles_left[index-1, :])/dt # only true in the ideal case where result of ik matches the desired pose + error_left[index, :] = desPose - computedPose_left[index, :] + jointAngles = compJointAngles_left[index, :] + -desJointAngles_right = np.zeros((len(p1[:,0]),7)) + +compJointAngles_right = np.zeros((len(p1[:,0]),7)) computedPose_right = np.zeros((len(p1[:,0]),6)) error_right = np.zeros((len(p1[:,0]),6)) jointAngles = np.array([-110.0, 29.85, 35.92, 49.91, 117.0, 123.0, -117.0]) * np.pi/180.0 +jointVelocities = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]) # loop for the right arm for index, (pos, vel, phi, phi_dot) in enumerate(zip(p2, v2, phi_delta, dphi)): # loop through all the 
desired position of left arm desPose = np.concatenate((pos, phi), axis=0) desVelocities = np.concatenate((vel, phi_dot), axis=0) - result = invKin.gpm(desPose, desVelocities, jointAngles, jointVelocities, Yumi.RIGHT.value) - desJointAngles_right[index,:] = result[0] - computedPose_right[index, :] = result[1] + yumi_right.set_jointValues(jointAngles, jointVelocities) + yumi_right.set_desPoseVel(desPose, desVelocities) + yumi_right.process() + + compJointAngles_right[index, :] = yumi_right.get_newJointValues() # computed joint values from IK + computedPose_right[index, :] = yumi_right.get_newPose() # resulting pose with joint values from IK + if index > 0: - jointVelocities = (desJointAngles_left[index, :] - desJointAngles_left[index-1, :])/dt - print('\n error', desPose - result[1]) - error_right[index, :] = desPose - result[1] - jointAngles = result[0] + jointVelocities = (compJointAngles_left[index, :] - compJointAngles_left[index-1, :])/dt + error_right[index, :] = desPose - computedPose_right[index, :] + jointAngles = compJointAngles_right[index, :] # see development of joint values fig = plt.figure() -plt.plot(desJointAngles_left[:,0], label='joint1') -plt.plot(desJointAngles_left[:,1], label='joint2') -plt.plot(desJointAngles_left[:,2], label='joint3') -plt.plot(desJointAngles_left[:,3], label='joint4') -plt.plot(desJointAngles_left[:,4], label='joint5') -plt.plot(desJointAngles_left[:,5], label='joint6') -plt.plot(desJointAngles_left[:,6], label='joint7') +plt.plot(compJointAngles_left[:,0], label='joint1') +plt.plot(compJointAngles_left[:,1], label='joint2') +plt.plot(compJointAngles_left[:,2], label='joint3') +plt.plot(compJointAngles_left[:,3], label='joint4') +plt.plot(compJointAngles_left[:,4], label='joint5') +plt.plot(compJointAngles_left[:,5], label='joint6') +plt.plot(compJointAngles_left[:,6], label='joint7') plt.title('joint values over samples, left arm') plt.legend() plt.show() @@ -124,25 +109,25 @@ plt.scatter(computedPose_left[:,0], computedPose_left[:,2], label='resulting pos fig.get_axes()[0].set_xlabel('x axis of yumi') fig.get_axes()[0].set_ylabel('z axis of yumi') plt.legend() -plt.title('view on the x-z plane from the right arm side of yumi') +plt.title('poses left') plt.show() -fig = plt.figure() +""" fig = plt.figure() plt.plot(computedPose_left[:,3], label='rx') plt.plot(computedPose_left[:,4], label='ry') plt.plot(computedPose_left[:,5], label='rz') plt.legend() plt.title('euler angles over trajectories') -plt.show() +plt.show() """ fig = plt.figure() -plt.plot(desJointAngles_right[:,0], label='joint1') -plt.plot(desJointAngles_right[:,1], label='joint2') -plt.plot(desJointAngles_right[:,2], label='joint3') -plt.plot(desJointAngles_right[:,3], label='joint4') -plt.plot(desJointAngles_right[:,4], label='joint5') -plt.plot(desJointAngles_right[:,5], label='joint6') -plt.plot(desJointAngles_right[:,6], label='joint7') +plt.plot(compJointAngles_right[:,0], label='joint1') +plt.plot(compJointAngles_right[:,1], label='joint2') +plt.plot(compJointAngles_right[:,2], label='joint3') +plt.plot(compJointAngles_right[:,3], label='joint4') +plt.plot(compJointAngles_right[:,4], label='joint5') +plt.plot(compJointAngles_right[:,5], label='joint6') +plt.plot(compJointAngles_right[:,6], label='joint7') plt.title('joint values over samples, right arm') plt.legend() plt.show() @@ -158,6 +143,16 @@ plt.legend() plt.title('errors right') plt.show() -np.save('desJointAngles_left', desJointAngles_left) -np.save('desJointAngles_right', desJointAngles_right) +# show trajectory in 
workspace of yumi +fig = plt.figure() +plt.plot(p2[:,0], p2[:,2], label='desired profile') # plot z over x +plt.scatter(computedPose_right[:,0], computedPose_right[:,2], label='resulting pose from IK') +fig.get_axes()[0].set_xlabel('x axis of yumi') +fig.get_axes()[0].set_ylabel('z axis of yumi') +plt.legend() +plt.title('poses right') +plt.show() + +np.save('data/compJointAngles_left', compJointAngles_left) +np.save('data/compJointAngles_right', compJointAngles_right)
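
For reference, a minimal sketch (not part of the commit above) of how the joint angles saved by computeJoints.py can be replayed through EGM, assuming the data/get_data.py helpers and the abb_egm_pyclient usage introduced in this diff; unlike send2robot.py it streams open loop, without reading the robot's actual joint feedback:

import time
import numpy as np
from abb_egm_pyclient import EGMClient
from data.get_data import get_desJoints_L, logic2abb

UDP_PORT_LEFT = 6510
rate = 80  # EGM update rate in Hz, as in send2robot.py

comp_conf_left = get_desJoints_L()               # precomputed joint angles in radians, shape (N, 7)
egm_client_left = EGMClient(port=UDP_PORT_LEFT)

for conf in comp_conf_left:
    # EGM expects degrees in ABB's joint ordering (see logic2abb in data/get_data.py)
    egm_client_left.send_planned_configuration(logic2abb(np.degrees(conf)))
    time.sleep(1.0 / rate)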