Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
servoPioneerPoint2DDepthWithoutVpServo.cpp
1/*
2 * ViSP, open source Visual Servoing Platform software.
3 * Copyright (C) 2005 - 2025 by Inria. All rights reserved.
4 *
5 * This software is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 * See the file LICENSE.txt at the root directory of this source
10 * distribution for additional information about the GNU GPL.
11 *
12 * For using ViSP with software that can not be combined with the GNU
13 * GPL, please contact Inria about acquiring a ViSP Professional
14 * Edition License.
15 *
16 * See https://visp.inria.fr for more information.
17 *
18 * This software was developed at:
19 * Inria Rennes - Bretagne Atlantique
20 * Campus Universitaire de Beaulieu
21 * 35042 Rennes Cedex
22 * France
23 *
24 * If you have questions regarding the use of this file, please contact
25 * Inria at visp@inria.fr
26 *
27 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29 *
30 * Description:
31 * IBVS on Pioneer P3DX mobile platform
32 */
33#include <iostream>
34
35#include <visp3/core/vpConfig.h>
36
38// Comment / uncomment following lines to use the specific 3rd party compatible with your camera
39// #undef VISP_HAVE_V4L2
40// #undef VISP_HAVE_DC1394
41// #undef VISP_HAVE_CMU1394
42// #undef HAVE_OPENCV_HIGHGUI
43// #undef HAVE_OPENCV_VIDEOIO
45
46#include <visp3/blob/vpDot2.h>
47#include <visp3/core/vpCameraParameters.h>
48#include <visp3/core/vpHomogeneousMatrix.h>
49#include <visp3/core/vpImage.h>
50#include <visp3/core/vpImageConvert.h>
51#include <visp3/core/vpVelocityTwistMatrix.h>
52#include <visp3/robot/vpRobotPioneer.h> // Include first to avoid build issues with Status, None, isfinite
53#include <visp3/gui/vpDisplayFactory.h>
54#include <visp3/sensor/vp1394CMUGrabber.h>
55#include <visp3/sensor/vp1394TwoGrabber.h>
56#include <visp3/sensor/vpV4l2Grabber.h>
57#include <visp3/visual_features/vpFeatureBuilder.h>
58#include <visp3/visual_features/vpFeatureDepth.h>
59#include <visp3/visual_features/vpFeaturePoint.h>
60
61#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)
62#include <opencv2/highgui/highgui.hpp> // for cv::VideoCapture
63#elif defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)
64#include <opencv2/videoio/videoio.hpp> // for cv::VideoCapture
65#endif
66
67#if defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_OPENCV) && \
68 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
69 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
70#if defined(VISP_HAVE_DISPLAY)
71#if defined(VISP_HAVE_PIONEER)
72#define TEST_COULD_BE_ACHIEVED
73#endif
74#endif
75#endif
76
99#ifdef TEST_COULD_BE_ACHIEVED
100int main(int argc, char **argv)
101{
102#ifdef ENABLE_VISP_NAMESPACE
103 using namespace VISP_NAMESPACE_NAME;
104#endif
105#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
106 std::shared_ptr<vpDisplay> display;
107#else
108 vpDisplay *display = nullptr;
109#endif
110
111 try {
112 vpImage<unsigned char> I; // Create a gray level image container
113 double depth = 1.;
114 double lambda = 0.6;
115 double coef = 1. / 6.77; // Scale parameter used to estimate the depth Z
116 // of the blob from its surface
117
118 vpRobotPioneer robot;
119 ArArgumentParser parser(&argc, argv);
120 parser.loadDefaultArguments();
121
122 // ArRobotConnector connects to the robot, get some initial data from it
123 // such as type and name, and then loads parameter files for this robot.
124 ArRobotConnector robotConnector(&parser, &robot);
125 if (!robotConnector.connectRobot()) {
126 ArLog::log(ArLog::Terse, "Could not connect to the robot.");
127 if (parser.checkHelpAndWarnUnparsed()) {
128 Aria::logOptions();
129 Aria::exit(1);
130 }
131 }
132 if (!Aria::parseArgs()) {
133 Aria::logOptions();
134 Aria::shutdown();
135 return false;
136 }
137
138 // Wait 3 sec to be sure that the low level Aria thread used to control
139 // the robot is started. Without this delay we experienced a delay
140 // (around 2.2 sec) between the velocity send to the robot and the
141 // velocity that is really applied to the wheels.
142 vpTime::sleepMs(3000);
143
144 std::cout << "Robot connected" << std::endl;
145
146 // Camera parameters. In this experiment we don't need a precise
147 // calibration of the camera
149
150 // Create the camera framegrabber
151#if defined(VISP_HAVE_OPENCV) && \
152 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
153 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
154 int device = 1;
155 std::cout << "Use device: " << device << std::endl;
156 cv::VideoCapture g(device); // open the default camera
157 g.set(CV_CAP_PROP_FRAME_WIDTH, 640);
158 g.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
159 if (!g.isOpened()) // check if we succeeded
160 return EXIT_FAILURE;
161 cv::Mat frame;
162 g >> frame; // get a new frame from camera
163 vpImageConvert::convert(frame, I);
164
165 // Logitec sphere parameters
166 cam.initPersProjWithoutDistortion(558, 555, 312, 210);
167#elif defined(VISP_HAVE_V4L2)
168 // Create a grabber based on v4l2 third party lib (for usb cameras under
169 // Linux)
171 g.setScale(1);
172 g.setInput(0);
173 g.setDevice("/dev/video1");
174 g.open(I);
175 // Logitec sphere parameters
176 cam.initPersProjWithoutDistortion(558, 555, 312, 210);
177#elif defined(VISP_HAVE_DC1394)
178 // Create a grabber based on libdc1394-2.x third party lib (for firewire
179 // cameras under Linux)
180 vp1394TwoGrabber g(false);
183 // AVT Pike 032C parameters
184 cam.initPersProjWithoutDistortion(800, 795, 320, 216);
185#elif defined(VISP_HAVE_CMU1394)
186 // Create a grabber based on CMU 1394 third party lib (for firewire
187 // cameras under windows)
189 g.setVideoMode(0, 5); // 640x480 MONO8
190 g.setFramerate(4); // 30 Hz
191 g.open(I);
192 // AVT Pike 032C parameters
193 cam.initPersProjWithoutDistortion(800, 795, 320, 216);
194#endif
195
196 // Acquire an image from the grabber
197#if defined(VISP_HAVE_OPENCV) && \
198 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
199 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
200 g >> frame; // get a new frame from camera
201 vpImageConvert::convert(frame, I);
202#else
203 g.acquire(I);
204#endif
205
206 // Create an image viewer
207#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
208 display = vpDisplayFactory::createDisplay(I, 10, 10, "Current frame");
209#else
210 display = vpDisplayFactory::allocateDisplay(I, 10, 10, "Current frame");
211#endif
214
215 // Create a blob tracker
216 vpDot2 dot;
217 dot.setGraphics(true);
218 dot.setComputeMoments(true);
219 dot.setEllipsoidShapePrecision(0.); // to track a blob without any constraint on the shape
220 dot.setGrayLevelPrecision(0.9); // to set the blob gray level bounds for binarisation
221 dot.setEllipsoidBadPointsPercentage(0.5); // to be accept 50% of bad inner
222 // and outside points with bad
223 // gray level
224 dot.initTracking(I);
226
227 // Current and desired visual feature associated to the x coordinate of
228 // the point
229 vpFeaturePoint s_x, s_xd;
230
231 // Create the current x visual feature
232 vpFeatureBuilder::create(s_x, cam, dot);
233
234 // Create the desired x* visual feature
235 s_xd.buildFrom(0, 0, depth);
237
238 // Create the current log(Z/Z*) visual feature
239 vpFeatureDepth s_Z;
240 // Surface of the blob estimated from the image moment m00 and converted
241 // in meters
242 double surface = 1. / sqrt(dot.m00 / (cam.get_px() * cam.get_py()));
243 double Z, Zd;
244 // Initial depth of the blob in from of the camera
245 Z = coef * surface;
246 // Desired depth Z* of the blob. This depth is learned and equal to the
247 // initial depth
248 Zd = Z;
249 s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z,
250 0); // log(Z/Z*) = 0 that's why the last parameter is 0
251 vpMatrix L_Z = s_Z.interaction();
252
253 vpVelocityTwistMatrix cVe = robot.get_cVe();
254 vpMatrix eJe; // pioneer jacobian
255 robot.get_eJe(eJe);
256
257 vpMatrix L; // Interaction matrix
258 L.stack(L_x); // constant since build with the desired feature
259 L.stack(L_Z); // not constant since it corresponds to log(Z/Z*) that
260 // evolves at each iteration
261
262 vpColVector v; // vz, wx
263
264 vpFeatureDepth s_Zd;
265 s_Zd.buildFrom(0, 0, 1, 0); // The value of s* is 0 with Z=1 meter.
266
267 while (1) {
268 // Acquire a new image
269#if defined(VISP_HAVE_OPENCV) && \
270 (((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
271 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)))
272 g >> frame; // get a new frame from camera
273 vpImageConvert::convert(frame, I);
274#else
275 g.acquire(I);
276#endif
277 // Set the image as background of the viewer
279
280 // Does the blob tracking
281 dot.track(I);
282 // Update the current x feature
283 vpFeatureBuilder::create(s_x, cam, dot);
284
285 // Update log(Z/Z*) feature. Since the depth Z change, we need to update
286 // the intection matrix
287 surface = 1. / sqrt(dot.m00 / (cam.get_px() * cam.get_py()));
288 Z = coef * surface;
289 s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, log(Z / Zd));
290 L_Z = s_Z.interaction();
291
292 // Update the global interaction matrix
293 vpMatrix L;
294 L.stack(L_x); // constant since build with the desired feature
295 L.stack(L_Z); // not constant since it corresponds to log(Z/Z*) that
296 // evolves at each iteration
297
298 // Update the global error s-s*
300 error.stack(s_x.error(s_xd, vpFeaturePoint::selectX()));
301 error.stack(s_Z.error(s_Zd));
302
303 // Compute the control law. Velocities are computed in the mobile robot
304 // reference frame
305 v = -lambda * (L * cVe * eJe).pseudoInverse() * error;
306
307 std::cout << "Send velocity to the pionner: " << v[0] << " m/s " << vpMath::deg(v[1]) << " deg/s" << std::endl;
308
309 // Send the velocity to the robot
310 robot.setVelocity(vpRobot::REFERENCE_FRAME, v);
311
312 // Draw a vertical line which corresponds to the desired x coordinate of
313 // the dot cog
314 vpDisplay::displayLine(I, 0, 320, 479, 320, vpColor::red);
316
317 // A click in the viewer to exit
318 if (vpDisplay::getClick(I, false))
319 break;
320 }
321
322 std::cout << "Ending robot thread..." << std::endl;
323 robot.stopRunning();
324
325 // wait for the thread to stop
326 robot.waitForRunExit();
327#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
328 if (display != nullptr) {
329 delete display;
330 }
331#endif
332 return EXIT_SUCCESS;
333 }
334 catch (const vpException &e) {
335 std::cout << "Catch an exception: " << e << std::endl;
336#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
337 if (display != nullptr) {
338 delete display;
339 }
340#endif
341 return EXIT_FAILURE;
342 }
343}
344#else
int main()
{
  // Fallback build: none of the required grabber/display/Aria 3rd party
  // libraries were detected, so the servoing demo cannot run.
  const char *reason = "You don't have the right 3rd party libraries to run this example...";
  std::cout << reason << std::endl;
  return EXIT_SUCCESS;
}
350#endif
Firewire cameras video capture based on CMU 1394 Digital Camera SDK.
void setVideoMode(unsigned long format, unsigned long mode)
void acquire(vpImage< unsigned char > &I)
void setFramerate(unsigned long fps)
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
static const vpColor red
Definition vpColor.h:198
Class that defines generic functionalities for display.
Definition vpDisplay.h:171
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void flush(const vpImage< unsigned char > &I)
This tracker is meant to track a blob (connected pixels sharing the same gray level) on a vpImage.
Definition vpDot2.h:127
void track(const vpImage< unsigned char > &I, bool canMakeTheWindowGrow=true)
Definition vpDot2.cpp:441
void setGraphics(bool activate)
Definition vpDot2.h:320
double m00
Definition vpDot2.h:394
void setGrayLevelPrecision(const double &grayLevelPrecision)
Definition vpDot2.cpp:715
void setEllipsoidBadPointsPercentage(const double &percentage=0.0)
Definition vpDot2.h:292
void setEllipsoidShapePrecision(const double &ellipsoidShapePrecision)
Definition vpDot2.cpp:790
void setComputeMoments(bool activate)
Definition vpDot2.h:278
void initTracking(const vpImage< unsigned char > &I, unsigned int size=0)
Definition vpDot2.cpp:263
error that can be emitted by ViSP classes.
Definition vpException.h:60
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
Class that defines a 3D point visual feature composed of one parameter, log(Z/Z*), the current depth relative to the desired depth.
vpMatrix interaction(unsigned int select=FEATURE_ALL) VP_OVERRIDE
vpFeatureDepth & buildFrom(const double &x, const double &y, const double &Z, const double &LogZoverZstar)
vpColVector error(const vpBasicFeature &s_star, unsigned int select=FEATURE_ALL) VP_OVERRIDE
Class that defines a 2D point visual feature composed of two parameters: the Cartesian coordinates x and y of the point.
vpFeaturePoint & buildFrom(const double &x, const double &y, const double &Z)
static unsigned int selectX()
vpColVector error(const vpBasicFeature &s_star, unsigned int select=FEATURE_ALL) VP_OVERRIDE
vpMatrix interaction(unsigned int select=FEATURE_ALL) VP_OVERRIDE
double get_y() const
double get_x() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Definition of the vpImage class member functions.
Definition vpImage.h:131
static double deg(double rad)
Definition vpMath.h:119
Implementation of a matrix and operations on matrices.
Definition vpMatrix.h:175
void stack(const vpMatrix &A)
Interface for Pioneer mobile robots based on Aria 3rd party library.
@ REFERENCE_FRAME
Definition vpRobot.h:75
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void setFramerate(vpV4l2FramerateType framerate)
void setInput(unsigned input=vpV4l2Grabber::DEFAULT_INPUT)
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT void sleepMs(double t)