diff --git a/README.md b/README.md
index ea516de..52e23d7 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,32 @@
 # ERVII_Image_Processing
+
+## Introduction
+This document presents the analysis, design, problems encountered, and implementation of a C++ program that controls the ERVII (ER7) arm in order to detect several different objects and manipulate them so that they can be stacked into a pyramid ordered by size.
+
+## Goal
+
+This project's main goal is stated as: *Given a set of randomly picked objects, locate these objects through a given camera, then pick and place them as a stack.* With this idea in mind, the work was broken down into the following steps:
+1. Obtain images from the camera.
+2. Work on camera calibration.
+3. Work on the inverse kinematics (when solvable).
+4. Design and implement a C++ program that communicates with the ER7 arm via ACL.
+
+## Camera software and calibration
+
+To analyse the objects captured by the camera we use the *OpenCV* library, which already provides the image-processing routines we need (blurring, thresholding, morphology, edge detection and contour extraction); see `obtainObjectsAndNumbers()` in `main/main.cpp`.
+
+## Program structure
+
+The entry point of our C++ implementation is the following *main()* function:
+```c++
+int main(){
+    hCom = rs232_open("COM1");
+    if(hCom == 0)
+        exit(-1);
+    //Initialize arm
+    initializeArm();
+    //Make camera calibration and obtain objects in obtainObjectsAndNumbers()
+    er7Actions(obtainObjectsAndNumbers());
+    return 0;
+}
+```
diff --git a/main/main.cpp b/main/main.cpp
new file mode 100644
index 0000000..d02fb94
--- /dev/null
+++ b/main/main.cpp
@@ -0,0 +1,417 @@
+#include "RS232.h"
+#include "ER7.h"
+#include <windows.h>  // needed for HANDLE and Sleep()
+#include "cv.h"
+#include "highgui.h"
+#include "math.h"
+#include <iostream>   // needed for cout/endl
+
+using namespace std;
+using namespace cv;
+
+int speedArm = 50;
+HANDLE hCom;
+#define pi acos(-1.0)
+
+struct object{
+    //properties
+    double area;
+    double phi;
+    double gyrumAngle;
+    double principalAngle;
+    Point2f centroid;
+};
+
+struct objectsAndNumbers{
+    //properties
+    vector<object> vectorObject;
+    int numberOfObjects;
+    //Constructor
+    objectsAndNumbers(vector<object> vectorObjectInput, int numberOfObjectsInput): vectorObject(vectorObjectInput), numberOfObjects(numberOfObjectsInput){ }
+};
+
+//Header
+void initializeArm();
+Point2f matrixTransformation(Point2f inputPoint);
+objectsAndNumbers obtainObjectsAndNumbers();
+void sendInstructionToER7(char* s1);
+void sendInstructionToER7(char* s1, char* s2);
+void sendInstructionToER7(char* s1, int i1);//NOTE: s1 must have %d
+void sendReceiveInstructionToER7(int i1, char* s1);
+void sendReceiveInstructionToER7(char* s1, int i1, char* reply);//NOTE: s1 DOES NOT NEED %d
+void sendInstructionToER7(char* s1, int i1, char* s2, int i2); //NOTE: s2 needs a space before and after, e.g. " z ", " r "
+double phiCalculation(Point2f virtualPosition);
+bool tryToReach(int x, int y, int z, int p, int r, int objectNumber);
+void er7Actions(objectsAndNumbers objectsAndNumbersInput);
+void gotoPointNew(int x, int y, int z, int p, int r, int objectNumber);
+//Ends header
+
+//int main(int argc, char ** argv){
+int main(){
+    //TODO Look for general form of main
+    hCom = rs232_open("COM1");
+    if(hCom == 0)
+        exit(-1);
+    //Initialize arm
+    initializeArm();
+    //Make camera calibration and obtain objects in obtainObjectsAndNumbers()
+    er7Actions(obtainObjectsAndNumbers());
+    return 0;
+}
+
+void initializeArm(){
+    //Open hand
+    sendInstructionToER7("OPEN");
+    //Sends to H0
+    Sleep(1000);
+    sendInstructionToER7("MOVE H0");
+    Sleep(1000);
+    //Sets speed
+    sendInstructionToER7("SPEED %d", speedArm);
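+    //The rest of this routine teaches three ACL points over the serial link opened on COM1:
+    //  C0   - an absolute parking position outside the camera's field of view,
+    //  UP   - a relative offset of +2000 along Z,
+    //  DOWN - a relative offset of -2000 along Z.
+    //Each point is first deleted (DELP, confirmed with YES) and then redefined (DEFP), so no
+    //stale coordinates from a previous run survive. TEACH/TEACHR then expect the five
+    //coordinate values (X, Y, Z, pitch, roll) to be sent one per line; TEACHR presumably
+    //records them relative to the current position, which is how UP/DOWN become pure Z offsets.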
+    //Move to position out of camera
+    //Deletes point
+    sendInstructionToER7("DELP C0");
+    sendInstructionToER7("YES");
+    //Recreates point
+    sendInstructionToER7("DEFP C0");
+    sendInstructionToER7("TEACH C0");
+    //TODO: Set values of coordinates for point out of camera
+    sendInstructionToER7("0");//Value X //10,000
+    sendInstructionToER7("0");//Value Y //1000
+    sendInstructionToER7("0");//Value Z //1000
+    sendInstructionToER7("0");//Value P //1000
+    sendInstructionToER7("0");//Value R //7000
+    //Moves to point
+    sendInstructionToER7("MOVE C0");
+
+    //Teaches UP
+    //Deletes point
+    sendInstructionToER7("DELP UP");
+    sendInstructionToER7("YES");
+    //Defines point
+    sendInstructionToER7("DEFP UP");
+    //Teaches point
+    sendInstructionToER7("TEACHR UP");
+    sendInstructionToER7("0");//x
+    sendInstructionToER7("0");//y
+    sendInstructionToER7("2000");//z //TODO: Check correct z value
+    sendInstructionToER7("0");//p
+    sendInstructionToER7("0");//r
+
+    //Teaches DOWN
+    //Deletes point
+    sendInstructionToER7("DELP DOWN");
+    sendInstructionToER7("YES");
+    //Defines point
+    sendInstructionToER7("DEFP DOWN");
+    //Teaches point
+    sendInstructionToER7("TEACHR DOWN");
+    sendInstructionToER7("0");//x
+    sendInstructionToER7("0");//y
+    sendInstructionToER7("-2000");//z //TODO: Check correct z value
+    sendInstructionToER7("0");//p
+    sendInstructionToER7("0");//r
+}
+
+double phiCalculation(Point2f virtualPosition){
+    //Calculation of hypotenuse
+    double hypotenuse = sqrt(virtualPosition.x*virtualPosition.x + virtualPosition.y*virtualPosition.y);
+    return asin(353/hypotenuse) + asin(virtualPosition.y/hypotenuse);
+}
+
+objectsAndNumbers obtainObjectsAndNumbers(){
+    //Open camera
+    VideoCapture videoCapture(0);
+    if(!videoCapture.isOpened()){
+        exit(-1);//TODO: Find return
+    }
+    Mat realWorldImage;
+    namedWindow("Real world", 1);
+    cout << "Press a key in order to start analysis" << endl;
+    //Waits for the user to press a key and start analysis
+    while(1){
+        Mat frameRealWorldImage;
+        videoCapture >> frameRealWorldImage;
+        cvtColor(frameRealWorldImage, realWorldImage, CV_BGR2GRAY);
+        imshow("Real world", realWorldImage);
+        if(waitKey(30) >= 0) break;
+    }
+    videoCapture.release();
+    //Modification of image in copy
+    Mat resultRealWorldImage;
+    resultRealWorldImage = realWorldImage.clone();
+    //Apply blur, binarization, erosion and dilation
+    blur(resultRealWorldImage, resultRealWorldImage, Size(6,6));
+    threshold(resultRealWorldImage, resultRealWorldImage, 80, 255, CV_THRESH_BINARY);
+    erode(resultRealWorldImage, resultRealWorldImage, getStructuringElement(MORPH_RECT, Size(6,6)));
+    dilate(resultRealWorldImage, resultRealWorldImage, getStructuringElement(MORPH_RECT, Size(6,6)));
+    //Finding contours process begins
+    //Objects for managing obtained information of contours
+    vector<vector<Point> > foundContours;
+    vector<Vec4i> outputArrayHierarchy;
+    //Finding contour methods
+    Canny(resultRealWorldImage, resultRealWorldImage, 100, 255, 3); //Applies Canny edge detector and produces the edge map.
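+    //The Canny edge map is handed to findContours() below. CV_RETR_TREE retrieves the full
+    //hierarchy of nested contours, so a single solid object typically yields two contours
+    //(the outer and the inner side of its edge), which is presumably why the vector of
+    //detected objects further down is sized foundContours.size()/2.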
+    findContours(resultRealWorldImage, foundContours, outputArrayHierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0,0));//INPUTS: InputOutputArray image, OutputArrayOfArrays contours, OutputArray hierarchy, int mode, int method, Point offset=Point()
+    //Finding contours process ends
+    //Finding of moments
+    //Related structure creation
+    //Creation of foundMoments vector
+    vector<Moments> foundMoments(foundContours.size());
+    //Creation of structure to get centroid information using moments
+    vector<Point2f> momentCentroid(foundContours.size());
+    //Creation of structure for principal angle
+    vector<double> principalAngleVector(foundContours.size());
+    //Creation of offset value
+    double offset = 2.5/180.0*pi;
+    //Creation of objects to be returned
+    vector<object> objectVectorToReturn(foundContours.size()/2);
+    //Start cycles:
+    //Cycle 1: Calculation of the moments
+    for(int i0 = 0; i0 < foundContours.size(); i0++){
+        foundMoments[i0] = moments(foundContours[i0], false);
+    }
[...]
+void er7Actions(objectsAndNumbers objectsAndNumbersInput){
[...]
+        maximumObjectArea = 0;
+        for(int i0 = 0; i0 < objectsAndNumbersInput.numberOfObjects; i0++){
+            if(objectsAndNumbersInput.vectorObject[i0].area > maximumObjectArea){
+                biggestObject = i0;
+                maximumObjectArea = objectsAndNumbersInput.vectorObject[i0].area;
+            }
+        }
+        //Override biggest area object area parameter
+        objectsAndNumbersInput.vectorObject[biggestObject].area = 0.0;
+        if(heightOfObject == 0){//GET INSIDE FOR BIGGEST OBJECT
+            rollOfObject = -201 + ((-1)*objectsAndNumbersInput.vectorObject[biggestObject].phi - objectsAndNumbersInput.vectorObject[biggestObject].gyrumAngle)/pi*(180*10);
+            //Check whether the object is inside the working area; if it is, teach it in order to grab it, if not, report an error
+            if(!tryToReach(objectsAndNumbersInput.vectorObject[biggestObject].centroid.x, objectsAndNumbersInput.vectorObject[biggestObject].centroid.y, z, -900, rollOfObject, numerOfObjects)){
+                cout << "[OBJECT IS OUT OF WORKING AREA]" << endl;
+            }else{
+                //Change coordinates for object
+                heightOfObject++;
+                baseObjectIndex = biggestObject;
+                cout << "[Object inside working area; base selected:" << biggestObject << "]" << endl;