/usr/include/llvm-4.0/llvm/Passes/PassBuilder.h is in llvm-4.0-dev 1:4.0.1-10.
//===- Parsing, selection, and construction of pass pipelines --*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Interfaces for registering analysis passes, producing common pass manager
/// configurations, and parsing of pass pipelines.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_PASSES_PASSBUILDER_H
#define LLVM_PASSES_PASSBUILDER_H
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>
namespace llvm {
class StringRef;
class AAManager;
class TargetMachine;
/// \brief This class provides access to building LLVM's passes.
///
/// Its members provide the baseline state available to passes during their
/// construction. The \c PassRegistry.def file specifies how to construct all
/// of the built-in passes, and those may reference these members during
/// construction.
class PassBuilder {
TargetMachine *TM;
public:
/// \brief LLVM-provided high-level optimization levels.
///
/// This enumerates the LLVM-provided high-level optimization levels. Each
/// level has a specific goal and rationale.
enum OptimizationLevel {
/// Disable as many optimizations as possible. This doesn't completely
/// disable the optimizer in all cases, for example always_inline functions
/// can be required to be inlined for correctness.
O0,
/// Optimize quickly without destroying debuggability.
///
/// FIXME: The current and historical behavior of this level does *not*
/// agree with this goal, but we would like to move toward this goal in the
/// future.
///
/// This level is tuned to produce a result from the optimizer as quickly
/// as possible and to avoid destroying debuggability. This tends to result
/// in a very good development mode where the compiled code will be
/// immediately executed as part of testing. As a consequence, where
/// possible, we would like to produce efficient-to-execute code, but not
/// if it significantly slows down compilation or would prevent even basic
/// debugging of the resulting binary.
///
/// As an example, complex loop transformations such as versioning,
/// vectorization, or fusion might not make sense here due to the degree to
/// which the executed code would differ from the source code, and the
/// potential compile time cost.
O1,
/// Optimize for fast execution as much as possible without triggering
/// significant incremental compile time or code size growth.
///
/// The key idea is that optimizations at this level should "pay for
/// themselves". So if an optimization increases compile time by 5% or
/// increases code size by 5% for a particular benchmark, that benchmark
/// should also be one which sees a 5% runtime improvement. If the compile
/// time or code size penalties happen on average across a diverse range of
/// LLVM users' benchmarks, then the improvements should as well.
///
/// And no matter what, the compile time needs to not grow superlinearly
/// with the size of input to LLVM so that users can control the runtime of
/// the optimizer in this mode.
///
/// This is expected to be a good default optimization level for the vast
/// majority of users.
O2,
/// Optimize for fast execution as much as possible.
///
/// This mode is significantly more aggressive in trading off compile time
/// and code size to get execution time improvements. The core idea is that
/// this mode should include any optimization that helps execution time on
/// balance across a diverse collection of benchmarks, even if it increases
/// code size or compile time for some benchmarks without corresponding
/// improvements to execution time.
///
/// Despite being willing to trade more compile time off to get improved
/// execution time, this mode still tries to avoid superlinear growth in
/// order to make even significantly slower compile times at least scale
/// reasonably. This does not preclude very substantial constant factor
/// costs though.
O3,
/// Similar to \c O2 but tries to optimize for small code size instead of
/// fast execution without triggering significant incremental execution
/// time slowdowns.
///
/// The logic here is exactly the same as \c O2, but with code size and
/// execution time metrics swapped.
///
/// A consequence of the different core goal is that this should in general
/// produce substantially smaller executables that still run in
/// a reasonable amount of time.
Os,
/// A very specialized mode that will optimize for code size at any and all
/// costs.
///
/// This is useful primarily when there are absolute size limitations and
/// any effort taken to reduce the size is worth it regardless of the
/// execution time impact. You should expect this level to produce rather
/// slow, but very small, code.
Oz
};
explicit PassBuilder(TargetMachine *TM = nullptr) : TM(TM) {}
/// \brief Cross register the analysis managers through their proxies.
///
/// This is an interface that can be used to cross register each
/// AnalysisManager with all the other analysis managers.
void crossRegisterProxies(LoopAnalysisManager &LAM,
                          FunctionAnalysisManager &FAM,
                          CGSCCAnalysisManager &CGAM,
                          ModuleAnalysisManager &MAM);
/// \brief Registers all available module analysis passes.
///
/// This is an interface that can be used to populate a \c
/// ModuleAnalysisManager with all registered module analyses. Callers can
/// still manually register any additional analyses. Callers can also
/// pre-register analyses and this will not override those.
void registerModuleAnalyses(ModuleAnalysisManager &MAM);
/// \brief Registers all available CGSCC analysis passes.
///
/// This is an interface that can be used to populate a \c CGSCCAnalysisManager
/// with all registered CGSCC analyses. Callers can still manually register any
/// additional analyses. Callers can also pre-register analyses and this will
/// not override those.
void registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM);
/// \brief Registers all available function analysis passes.
///
/// This is an interface that can be used to populate a \c
/// FunctionAnalysisManager with all registered function analyses. Callers can
/// still manually register any additional analyses. Callers can also
/// pre-register analyses and this will not override those.
void registerFunctionAnalyses(FunctionAnalysisManager &FAM);
/// \brief Registers all available loop analysis passes.
///
/// This is an interface that can be used to populate a \c LoopAnalysisManager
/// with all registered loop analyses. Callers can still manually register any
/// additional analyses.
void registerLoopAnalyses(LoopAnalysisManager &LAM);
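//
// The registration methods above are typically used together when setting up
// the new pass manager. An illustrative sketch (not part of this header),
// using only the analysis manager types included above:
//
//   PassBuilder PB;
//   LoopAnalysisManager LAM;
//   FunctionAnalysisManager FAM;
//   CGSCCAnalysisManager CGAM;
//   ModuleAnalysisManager MAM;
//   PB.registerModuleAnalyses(MAM);
//   PB.registerCGSCCAnalyses(CGAM);
//   PB.registerFunctionAnalyses(FAM);
//   PB.registerLoopAnalyses(LAM);
//   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);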
/// Construct the core LLVM function canonicalization and simplification
/// pipeline.
///
/// This is a long pipeline and uses most of the per-function optimization
/// passes in LLVM to canonicalize and simplify the IR. It is suitable to run
/// repeatedly over the IR and is not expected to destroy important
/// information about the semantics of the IR.
///
/// Note that \p Level cannot be `O0` here. The pipelines produced are
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
FunctionPassManager
buildFunctionSimplificationPipeline(OptimizationLevel Level,
                                    bool DebugLogging = false);
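//
// Illustrative sketch of running this pipeline over a single function F,
// assuming PB and FAM have been set up as shown earlier:
//
//   FunctionPassManager FPM =
//       PB.buildFunctionSimplificationPipeline(PassBuilder::O2);
//   PreservedAnalyses PA = FPM.run(F, FAM);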
/// Build a per-module default optimization pipeline.
///
/// This provides a good default optimization pipeline for per-module
/// optimization and code generation without any link-time optimization. It
/// typically corresponds to the frontend "-O[123]" options for optimization
/// levels \c O1, \c O2, and \c O3, respectively.
///
/// Note that \p Level cannot be `O0` here. The pipelines produced are
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
ModulePassManager buildPerModuleDefaultPipeline(OptimizationLevel Level,
                                                bool DebugLogging = false);
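//
// Illustrative sketch (not part of this header) of building and running the
// default O2 pipeline over a module M, with PB and MAM set up as in the
// earlier sketch:
//
//   ModulePassManager MPM = PB.buildPerModuleDefaultPipeline(PassBuilder::O2);
//   MPM.run(M, MAM);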
/// Build a pre-link, LTO-targeting default optimization pipeline to a pass
/// manager.
///
/// This adds the pre-link optimizations tuned to work well with a later LTO
/// run. It works to minimize the IR which needs to be analyzed without
/// making irreversible decisions which could be made better during the LTO
/// run.
///
/// Note that \p Level cannot be `O0` here. The pipelines produced are
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
ModulePassManager buildLTOPreLinkDefaultPipeline(OptimizationLevel Level,
                                                 bool DebugLogging = false);
/// Build an LTO default optimization pipeline to a pass manager.
///
/// This provides a good default optimization pipeline for link-time
/// optimization and code generation. It is particularly tuned to fit well
/// when IR coming into the LTO phase was first run through \c
/// addPreLinkLTODefaultPipeline, and the two coordinate closely.
///
/// Note that \p Level cannot be `O0` here. The pipelines produced are
/// only intended for use when attempting to optimize code. If frontends
/// require some transformations for semantic reasons, they should explicitly
/// build them.
ModulePassManager buildLTODefaultPipeline(OptimizationLevel Level,
                                          bool DebugLogging = false);
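//
// Illustrative sketch of how the two LTO entry points above are intended to
// be paired across the compile and link steps (the names MergedM and the
// linking step itself are assumptions of this sketch, not part of the API):
//
//   // Compile step, per input module:
//   ModulePassManager PreLinkMPM =
//       PB.buildLTOPreLinkDefaultPipeline(PassBuilder::O2);
//   PreLinkMPM.run(M, MAM);
//   // ... modules are merged by the LTO framework ...
//   // Link step, on the merged module:
//   ModulePassManager LTOMPM = PB.buildLTODefaultPipeline(PassBuilder::O2);
//   LTOMPM.run(MergedM, MAM);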
/// Build the default `AAManager` with the default alias analysis pipeline
/// registered.
AAManager buildDefaultAAPipeline();
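//
// Illustrative sketch of registering the default AA pipeline with a
// FunctionAnalysisManager, done before cross-registering proxies:
//
//   FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });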
/// \brief Parse a textual pass pipeline description into a \c ModulePassManager.
///
/// The format of the textual pass pipeline description looks something like:
///
/// module(function(instcombine,sroa),dce,cgscc(inliner,function(...)),...)
///
/// Pass managers have ()s describing the nesting structure of passes. All
/// passes are comma separated. As a special shortcut, if the very first pass
/// is not a module pass (as a module pass manager is), this will automatically
/// form the shortest stack of pass managers that allows inserting that first
/// pass. So, assuming function passes 'fpassN', CGSCC passes 'cgpassN', and
/// loop passes 'lpassN', all of these are valid:
///
/// fpass1,fpass2,fpass3
/// cgpass1,cgpass2,cgpass3
/// lpass1,lpass2,lpass3
///
/// And they are equivalent to the following (resp.):
///
/// module(function(fpass1,fpass2,fpass3))
/// module(cgscc(cgpass1,cgpass2,cgpass3))
/// module(function(loop(lpass1,lpass2,lpass3)))
///
/// This shortcut is especially useful for debugging and testing small pass
/// combinations. Note that these shortcuts don't introduce any other magic. If
/// the passes in the sequence aren't all of the exact same kind, it is an
/// error. You cannot mix different levels implicitly; you must explicitly
/// form a pass manager in which to nest passes.
bool parsePassPipeline(ModulePassManager &MPM, StringRef PipelineText,
                       bool VerifyEachPass = true, bool DebugLogging = false);
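//
// Illustrative sketch of parsing a textual pipeline (using the shortcut form
// described above) into a module pass manager, checking the boolean result
// (by analogy with parseAAPipeline below, false is assumed to indicate a
// parse failure):
//
//   ModulePassManager MPM;
//   if (!PB.parsePassPipeline(MPM, "instcombine,sroa"))
//     /* report the malformed pipeline description */;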
/// Parse a textual alias analysis pipeline into the provided AA manager.
///
/// The format of the textual AA pipeline is a comma separated list of AA
/// pass names:
///
/// basic-aa,globals-aa,...
///
/// The AA manager is set up such that the provided alias analyses are tried
/// in the order specified. See the \c AAManager documentation for details
/// about the logic used. This routine just provides the textual mapping
/// between AA names and the analyses to register with the manager.
///
/// Returns false if the text cannot be parsed cleanly. The specific state of
/// the \p AA manager is unspecified if such an error is encountered and this
/// returns false.
bool parseAAPipeline(AAManager &AA, StringRef PipelineText);
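//
// Illustrative sketch of parsing an AA pipeline string and handing the
// resulting AAManager to a FunctionAnalysisManager (a common usage pattern,
// not mandated by this header):
//
//   AAManager AA;
//   if (PB.parseAAPipeline(AA, "basic-aa,globals-aa"))
//     FAM.registerPass([&] { return std::move(AA); });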
private:
/// A struct to capture parsed pass pipeline names.
struct PipelineElement {
StringRef Name;
std::vector<PipelineElement> InnerPipeline;
};
static Optional<std::vector<PipelineElement>>
parsePipelineText(StringRef Text);
bool parseModulePass(ModulePassManager &MPM, const PipelineElement &E,
                     bool VerifyEachPass, bool DebugLogging);
bool parseCGSCCPass(CGSCCPassManager &CGPM, const PipelineElement &E,
                    bool VerifyEachPass, bool DebugLogging);
bool parseFunctionPass(FunctionPassManager &FPM, const PipelineElement &E,
                       bool VerifyEachPass, bool DebugLogging);
bool parseLoopPass(LoopPassManager &LPM, const PipelineElement &E,
                   bool VerifyEachPass, bool DebugLogging);
bool parseAAPassName(AAManager &AA, StringRef Name);
bool parseLoopPassPipeline(LoopPassManager &LPM,
                           ArrayRef<PipelineElement> Pipeline,
                           bool VerifyEachPass, bool DebugLogging);
bool parseFunctionPassPipeline(FunctionPassManager &FPM,
                               ArrayRef<PipelineElement> Pipeline,
                               bool VerifyEachPass, bool DebugLogging);
bool parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
                            ArrayRef<PipelineElement> Pipeline,
                            bool VerifyEachPass, bool DebugLogging);
bool parseModulePassPipeline(ModulePassManager &MPM,
                             ArrayRef<PipelineElement> Pipeline,
                             bool VerifyEachPass, bool DebugLogging);
};
}
#endif