/usr/share/ompl/demos/OptimalPlanning.cpp is in ompl-demos 1.0.0+ds2-1build1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Rice University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the Rice University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* Author: Luis G. Torres */
#include <ompl/base/SpaceInformation.h>
#include <ompl/base/objectives/PathLengthOptimizationObjective.h>
#include <ompl/base/objectives/StateCostIntegralObjective.h>
#include <ompl/base/objectives/MaximizeMinClearanceObjective.h>
#include <ompl/base/spaces/RealVectorStateSpace.h>
#include <ompl/geometric/planners/rrt/RRTstar.h>

#include <cmath>
#include <fstream>
#include <limits>
namespace ob = ompl::base;
namespace og = ompl::geometric;
/// @cond IGNORE
// Our "collision checker". For this demo, our robot's state space
// lies in [0,1]x[0,1], with a circular obstacle of radius 0.25
// centered at (0.5,0.5). Any states lying in this circular region are
// considered "in collision".
class ValidityChecker : public ob::StateValidityChecker
{
public:
    ValidityChecker(const ob::SpaceInformationPtr& si) :
        ob::StateValidityChecker(si) {}

    // Returns whether the given state's position lies outside the
    // circular obstacle.
    bool isValid(const ob::State* state) const
    {
        // A state is valid iff it is strictly outside the obstacle.
        return this->clearance(state) > 0.0;
    }

    // Returns the signed distance from the given state's position to
    // the boundary of the circular obstacle (negative when the state
    // is inside it).
    double clearance(const ob::State* state) const
    {
        // We know we're working with a RealVectorStateSpace in this
        // example, so we downcast state into the specific type.
        const ob::RealVectorStateSpace::StateType* state2D =
            state->as<ob::RealVectorStateSpace::StateType>();
        // Extract the robot's (x,y) position from its state
        double x = state2D->values[0];
        double y = state2D->values[1];
        // Euclidean distance from the circle's center (0.5,0.5),
        // offset by its radius 0.25. std::sqrt is fully qualified so
        // the code does not rely on <cmath> being pulled in
        // transitively by the OMPL headers.
        return std::sqrt((x-0.5)*(x-0.5) + (y-0.5)*(y-0.5)) - 0.25;
    }
};
ob::OptimizationObjectivePtr getPathLengthObjective(const ob::SpaceInformationPtr& si);
ob::OptimizationObjectivePtr getThresholdPathLengthObj(const ob::SpaceInformationPtr& si);
ob::OptimizationObjectivePtr getClearanceObjective(const ob::SpaceInformationPtr& si);
ob::OptimizationObjectivePtr getBalancedObjective1(const ob::SpaceInformationPtr& si);
ob::OptimizationObjectivePtr getBalancedObjective2(const ob::SpaceInformationPtr& si);
ob::OptimizationObjectivePtr getPathLengthObjWithCostToGo(const ob::SpaceInformationPtr& si);
void plan(int argc, char** argv)
{
// Construct the robot state space in which we're planning. We're
// planning in [0,1]x[0,1], a subset of R^2.
ob::StateSpacePtr space(new ob::RealVectorStateSpace(2));
// Set the bounds of space to be in [0,1].
space->as<ob::RealVectorStateSpace>()->setBounds(0.0, 1.0);
// Construct a space information instance for this state space
ob::SpaceInformationPtr si(new ob::SpaceInformation(space));
// Set the object used to check which states in the space are valid
si->setStateValidityChecker(ob::StateValidityCheckerPtr(new ValidityChecker(si)));
si->setup();
// Set our robot's starting state to be the bottom-left corner of
// the environment, or (0,0).
ob::ScopedState<> start(space);
start->as<ob::RealVectorStateSpace::StateType>()->values[0] = 0.0;
start->as<ob::RealVectorStateSpace::StateType>()->values[1] = 0.0;
// Set our robot's goal state to be the top-right corner of the
// environment, or (1,1).
ob::ScopedState<> goal(space);
goal->as<ob::RealVectorStateSpace::StateType>()->values[0] = 1.0;
goal->as<ob::RealVectorStateSpace::StateType>()->values[1] = 1.0;
// Create a problem instance
ob::ProblemDefinitionPtr pdef(new ob::ProblemDefinition(si));
// Set the start and goal states
pdef->setStartAndGoalStates(start, goal);
// Since we want to find an optimal plan, we need to define what
// is optimal with an OptimizationObjective structure. Un-comment
// exactly one of the following 6 lines to see some examples of
// optimization objectives.
pdef->setOptimizationObjective(getPathLengthObjective(si));
// pdef->setOptimizationObjective(getThresholdPathLengthObj(si));
// pdef->setOptimizationObjective(getClearanceObjective(si));
// pdef->setOptimizationObjective(getBalancedObjective1(si));
// pdef->setOptimizationObjective(getBalancedObjective2(si));
// pdef->setOptimizationObjective(getPathLengthObjWithCostToGo(si));
// Construct our optimal planner using the RRTstar algorithm.
ob::PlannerPtr optimizingPlanner(new og::RRTstar(si));
// Set the problem instance for our planner to solve
optimizingPlanner->setProblemDefinition(pdef);
optimizingPlanner->setup();
// attempt to solve the planning problem within one second of
// planning time
ob::PlannerStatus solved = optimizingPlanner->solve(1.0);
if (solved)
{
// Output the length of the path found
std::cout
<< "Found solution of path length "
<< pdef->getSolutionPath()->length() << std::endl;
// If a filename was specified, output the path as a matrix to
// that file for visualization
if (argc > 1)
{
std::ofstream outFile(argv[1]);
boost::static_pointer_cast<og::PathGeometric>(pdef->getSolutionPath())->
printAsMatrix(outFile);
outFile.close();
}
}
else
std::cout << "No solution found." << std::endl;
}
// Entry point: runs the demo planner, forwarding the command line so
// that argv[1] (if present) names the output file for the solution path.
int main(int argc, char** argv)
{
    plan(argc, argv);
    return 0;
}
/** Returns a structure representing the optimization objective to use
    for optimal motion planning. This objective simply minimizes the
    length, in configuration space, of computed paths. */
ob::OptimizationObjectivePtr getPathLengthObjective(const ob::SpaceInformationPtr& si)
{
    ob::OptimizationObjectivePtr lengthObj(
        new ob::PathLengthOptimizationObjective(si));
    return lengthObj;
}
/** Returns an optimization objective which attempts to minimize path
    length but is already satisfied once any path shorter than 1.51
    has been found. */
ob::OptimizationObjectivePtr getThresholdPathLengthObj(const ob::SpaceInformationPtr& si)
{
    ob::PathLengthOptimizationObjective* lengthObj =
        new ob::PathLengthOptimizationObjective(si);
    ob::OptimizationObjectivePtr result(lengthObj);
    // Any path whose cost falls below this threshold counts as "good
    // enough", letting the planner terminate early.
    lengthObj->setCostThreshold(ob::Cost(1.51));
    return result;
}
/** Defines an optimization objective which attempts to steer the
    robot away from obstacles. To formulate this objective as a
    minimization of path cost, we can define the cost of a path as a
    summation of the costs of each of the states along the path, where
    each state cost is a function of that state's clearance from
    obstacles.

    The class StateCostIntegralObjective represents objectives as
    summations of state costs, just like we require. All we need to do
    then is inherit from that base class and define our specific state
    cost function by overriding the stateCost() method.
*/
class ClearanceObjective : public ob::StateCostIntegralObjective
{
public:
    ClearanceObjective(const ob::SpaceInformationPtr& si) :
        ob::StateCostIntegralObjective(si, true) // true: enable motion-cost interpolation
    {
    }

    // Our requirement is to maximize path clearance from obstacles,
    // but we want to represent the objective as a path cost
    // minimization. Therefore, we set each state's cost to be the
    // reciprocal of its clearance, so that as state clearance
    // increases, the state cost decreases. The smallest positive
    // double is added to the denominator so that a state lying
    // exactly on the obstacle boundary (clearance == 0) does not
    // trigger a division by zero.
    ob::Cost stateCost(const ob::State* s) const
    {
        return ob::Cost(1 / (si_->getStateValidityChecker()->clearance(s) +
            std::numeric_limits<double>::min()));
    }
};
/** Return an optimization objective which attempts to steer the robot
    away from obstacles. */
ob::OptimizationObjectivePtr getClearanceObjective(const ob::SpaceInformationPtr& si)
{
    ClearanceObjective* clearObj = new ClearanceObjective(si);
    return ob::OptimizationObjectivePtr(clearObj);
}
/** Create an optimization objective which attempts to optimize both
    path length and clearance. We do this by defining our individual
    objectives, then adding them to a MultiOptimizationObjective
    object. This results in an optimization objective where path cost
    is equivalent to adding up each of the individual objectives' path
    costs.

    When adding objectives, we can also optionally specify each
    objective's weighting factor to signify how important it is in
    optimal planning. If no weight is specified, the weight defaults to
    1.0.
*/
ob::OptimizationObjectivePtr getBalancedObjective1(const ob::SpaceInformationPtr& si)
{
    // Wrap the combined objective in a smart pointer up front so it is
    // owned before any parts are added to it.
    ob::MultiOptimizationObjective* combined =
        new ob::MultiOptimizationObjective(si);
    ob::OptimizationObjectivePtr result(combined);

    ob::OptimizationObjectivePtr lengthPart(
        new ob::PathLengthOptimizationObjective(si));
    ob::OptimizationObjectivePtr clearancePart(new ClearanceObjective(si));

    // Path length is weighted ten times as heavily as clearance.
    combined->addObjective(lengthPart, 10.0);
    combined->addObjective(clearancePart, 1.0);
    return result;
}
/** Create an optimization objective equivalent to the one returned by
    getBalancedObjective1(), but use an alternate syntax.
*/
ob::OptimizationObjectivePtr getBalancedObjective2(const ob::SpaceInformationPtr& si)
{
    ob::OptimizationObjectivePtr lengthPart(
        new ob::PathLengthOptimizationObjective(si));
    ob::OptimizationObjectivePtr clearancePart(new ClearanceObjective(si));
    // operator* applies a weight to an objective and operator+ joins
    // objectives into a MultiOptimizationObjective, so this expression
    // builds the same weighted sum as getBalancedObjective1().
    return (10.0 * lengthPart) + clearancePart;
}
/** Create an optimization objective for minimizing path length, and
    specify a cost-to-go heuristic suitable for this optimal planning
    problem. */
ob::OptimizationObjectivePtr getPathLengthObjWithCostToGo(const ob::SpaceInformationPtr& si)
{
    ob::PathLengthOptimizationObjective* lengthObj =
        new ob::PathLengthOptimizationObjective(si);
    // The heuristic supplies an admissible estimate of the remaining
    // cost from a state to the goal region, which can focus the search.
    lengthObj->setCostToGoHeuristic(&ob::goalRegionCostToGo);
    return ob::OptimizationObjectivePtr(lengthObj);
}
/// @endcond
|