003 File Manager
Current Path:
/usr/src/contrib/llvm-project/llvm/lib/Transforms/Scalar
usr
/
src
/
contrib
/
llvm-project
/
llvm
/
lib
/
Transforms
/
Scalar
/
📁
..
📄
ADCE.cpp
(24.71 KB)
📄
AlignmentFromAssumptions.cpp
(15.9 KB)
📄
BDCE.cpp
(7.42 KB)
📄
CallSiteSplitting.cpp
(21.43 KB)
📄
ConstantHoisting.cpp
(38.95 KB)
📄
ConstantProp.cpp
(4.13 KB)
📄
CorrelatedValuePropagation.cpp
(32.36 KB)
📄
DCE.cpp
(6.84 KB)
📄
DeadStoreElimination.cpp
(91.51 KB)
📄
DivRemPairs.cpp
(15.01 KB)
📄
EarlyCSE.cpp
(57.05 KB)
📄
FlattenCFGPass.cpp
(2.74 KB)
📄
Float2Int.cpp
(18.19 KB)
📄
GVN.cpp
(100.11 KB)
📄
GVNHoist.cpp
(44.85 KB)
📄
GVNSink.cpp
(29.97 KB)
📄
GuardWidening.cpp
(32.36 KB)
📄
IVUsersPrinter.cpp
(839 B)
📄
IndVarSimplify.cpp
(113.49 KB)
📄
InductiveRangeCheckElimination.cpp
(72.96 KB)
📄
InferAddressSpaces.cpp
(44.22 KB)
📄
InstSimplifyPass.cpp
(5.39 KB)
📄
JumpThreading.cpp
(115.06 KB)
📄
LICM.cpp
(92.71 KB)
📄
LoopAccessAnalysisPrinter.cpp
(977 B)
📄
LoopDataPrefetch.cpp
(14.54 KB)
📄
LoopDeletion.cpp
(11.37 KB)
📄
LoopDistribute.cpp
(40.25 KB)
📄
LoopFuse.cpp
(68.6 KB)
📄
LoopIdiomRecognize.cpp
(69.09 KB)
📄
LoopInstSimplify.cpp
(9.38 KB)
📄
LoopInterchange.cpp
(60.65 KB)
📄
LoopLoadElimination.cpp
(26.71 KB)
📄
LoopPassManager.cpp
(4.02 KB)
📄
LoopPredication.cpp
(49.21 KB)
📄
LoopRerollPass.cpp
(58.54 KB)
📄
LoopRotation.cpp
(4.79 KB)
📄
LoopSimplifyCFG.cpp
(28.59 KB)
📄
LoopSink.cpp
(14.93 KB)
📄
LoopStrengthReduce.cpp
(216.88 KB)
📄
LoopUnrollAndJamPass.cpp
(21.07 KB)
📄
LoopUnrollPass.cpp
(60.73 KB)
📄
LoopUnswitch.cpp
(64.17 KB)
📄
LoopVersioningLICM.cpp
(23.65 KB)
📄
LowerAtomic.cpp
(5.12 KB)
📄
LowerConstantIntrinsics.cpp
(5.8 KB)
📄
LowerExpectIntrinsic.cpp
(15 KB)
📄
LowerGuardIntrinsic.cpp
(2.82 KB)
📄
LowerMatrixIntrinsics.cpp
(73.36 KB)
📄
LowerWidenableCondition.cpp
(2.75 KB)
📄
MakeGuardsExplicit.cpp
(3.83 KB)
📄
MemCpyOptimizer.cpp
(52.16 KB)
📄
MergeICmps.cpp
(35.61 KB)
📄
MergedLoadStoreMotion.cpp
(15.18 KB)
📄
NaryReassociate.cpp
(19.86 KB)
📄
NewGVN.cpp
(170.95 KB)
📄
PartiallyInlineLibCalls.cpp
(6.22 KB)
📄
PlaceSafepoints.cpp
(27.5 KB)
📄
Reassociate.cpp
(95.61 KB)
📄
Reg2Mem.cpp
(4.34 KB)
📄
RewriteStatepointsForGC.cpp
(116.35 KB)
📄
SCCP.cpp
(76.12 KB)
📄
SROA.cpp
(183.83 KB)
📄
Scalar.cpp
(10.08 KB)
📄
Scalarizer.cpp
(32.93 KB)
📄
SeparateConstOffsetFromGEP.cpp
(52.99 KB)
📄
SimpleLoopUnswitch.cpp
(124.57 KB)
📄
SimplifyCFGPass.cpp
(12.31 KB)
📄
Sink.cpp
(10.81 KB)
📄
SpeculateAroundPHIs.cpp
(35.69 KB)
📄
SpeculativeExecution.cpp
(11.54 KB)
📄
StraightLineStrengthReduce.cpp
(28.92 KB)
📄
StructurizeCFG.cpp
(32.49 KB)
📄
TailRecursionElimination.cpp
(34.9 KB)
📄
WarnMissedTransforms.cpp
(6.14 KB)
Editing: AlignmentFromAssumptions.cpp
//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
// Set Load/Store Alignments From Assumptions
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of load, stores and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
//
//===----------------------------------------------------------------------===//

#include "llvm/InitializePasses.h"
// AA_NAME is reused both as the DEBUG_TYPE and as the command-line pass name
// given to the INITIALIZE_PASS_* macros below; it must be defined before the
// headers that expand DEBUG_TYPE-dependent macros.
#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

// Counters reported under -stats; bumped in processAssumption whenever an
// instruction's alignment is actually increased.
STATISTIC(NumLoadAlignChanged,
          "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
          "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
          "Number of memory intrinsics changed by alignment assumptions");

namespace {
// Legacy pass-manager wrapper. All of the real work lives in the new-style
// AlignmentFromAssumptionsPass (the Impl member); this shim only gathers the
// analyses and forwards to Impl.runImpl.
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Required inputs: the assumption cache (to enumerate llvm.assume calls),
    // ScalarEvolution (to reason about pointer offsets), and the dominator
    // tree (to check assume-context validity).
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();

    // Only instruction alignments are mutated, so the CFG and all of the
    // analyses below survive this pass untouched.
    AU.setPreservesCFG();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
  }

  AlignmentFromAssumptionsPass Impl;
};
}

char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

// Factory used by the legacy pass-manager pipeline builders.
FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}

// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be reduced
// to a constant. Using SCEV to compute alignment handles the case where
// DiffSCEV is a recurrence with constant start such that the aligned offset
// is constant. e.g. {16,+,32} % 32 -> 16.
// Returns the alignment implied by DiffSCEV relative to the (constant)
// AlignSCEV, or None when the remainder is neither zero nor a power of two.
static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
                                      const SCEV *AlignSCEV,
                                      ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);

  LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
                    << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
      dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return Align(DiffUnitsAbs);
  }

  // Non-constant remainder: no alignment fact can be derived here.
  return None;
}

// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr. Returns Align(1) (i.e. no improvement) when nothing
// better can be proven.
static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                             const SCEV *OffSCEV, Value *Ptr,
                             ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
  // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
  // may disagree. Trunc/extend so they agree.
  PtrSCEV = SE->getTruncateOrZeroExtend(
      PtrSCEV, SE->getEffectiveSCEVType(AASCEV->getType()));
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // sign-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV);

  LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
                    << *AlignSCEV << " and offset " << *OffSCEV
                    << " using diff " << *DiffSCEV << "\n");

  // First, try the simple case: the whole displacement reduces to a constant.
  if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
    LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
    return *NewAlignment;
  }

  if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a constant,
    // but we should try harder: if we assume that a is 32-byte aligned, then in
    // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are
    // 32-byte aligned, but instead alternate between 32 and 16-byte alignment.
    // As a result, the new alignment will not be a constant, but can still
    // be improved over the default (of 4) to 16.

    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
                      << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in the
    // first iteration, and also the alignment using the per-iteration delta.
    // If these are the same, then use that answer. Otherwise, use the smaller
    // one, but only if it divides the larger one.
    MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    MaybeAlign NewIncAlignment =
        getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
                      << "\n");
    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
                      << "\n");

    if (!NewAlignment || !NewIncAlignment)
      return Align(1);

    const Align NewAlign = *NewAlignment;
    const Align NewIncAlign = *NewIncAlignment;
    // Both are powers of two, so the smaller always divides the larger;
    // returning the minimum is therefore always sound.
    if (NewAlign > NewIncAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
                        << DebugStr(NewIncAlign) << "\n");
      return NewIncAlign;
    }
    if (NewIncAlign > NewAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                        << "\n");
      return NewAlign;
    }
    assert(NewIncAlign == NewAlign);
    LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                      << "\n");
    return NewAlign;
  }

  return Align(1);
}

// Pattern-matches an llvm.assume condition of the form
//   (ptrtoint(Ptr) + Off) & Mask == 0
// and, on success, fills in AAPtr (the stripped pointer), AlignSCEV (the
// alignment as an i64 SCEV constant), and OffSCEV (the i64 byte offset).
// Returns false when the condition does not have this shape.
bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
                                 Value *&AAPtr, const SCEV *&AlignSCEV,
                                 const SCEV *&OffSCEV) {
  // An alignment assume must be a statement about the least-significant
  // bits of the pointer being zero, possibly with some offset.
  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
  if (!ICI)
    return false;

  // This must be an expression of the form: x & m == 0.
  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
    return false;

  // Swap things around so that the RHS is 0.
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
  if (CmpLHSSCEV->isZero())
    std::swap(CmpLHS, CmpRHS);
  else if (!CmpRHSSCEV->isZero())
    return false;

  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
    return false;

  // Swap things around so that the right operand of the and is a constant
  // (the mask); we cannot deal with variable masks.
  Value *AndLHS = CmpBO->getOperand(0);
  Value *AndRHS = CmpBO->getOperand(1);
  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
  if (isa<SCEVConstant>(AndLHSSCEV)) {
    std::swap(AndLHS, AndRHS);
    std::swap(AndLHSSCEV, AndRHSSCEV);
  }

  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
  if (!MaskSCEV)
    return false;

  // The mask must have some trailing ones (otherwise the condition is
  // trivial and tells us nothing about the alignment of the left operand).
  unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();
  if (!TrailingOnes)
    return false;

  // Cap the alignment at the maximum with which LLVM can deal (and make sure
  // we don't overflow the shift).
  uint64_t Alignment;
  TrailingOnes = std::min(TrailingOnes,
    unsigned(sizeof(unsigned) * CHAR_BIT - 1));
  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);

  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
  AlignSCEV = SE->getConstant(Int64Ty, Alignment);

  // The LHS might be a ptrtoint instruction, or it might be the pointer
  // with an offset.
  AAPtr = nullptr;
  OffSCEV = nullptr;
  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
    AAPtr = PToI->getPointerOperand();
    OffSCEV = SE->getZero(Int64Ty);
  } else if (const SCEVAddExpr* AndLHSAddSCEV =
             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
    // Try to find the ptrtoint; subtract it and the rest is the offset.
    // NOTE(review): only the first SCEVUnknown wrapping a ptrtoint is used;
    // if several appear in the sum, the remaining ones end up in OffSCEV.
    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
          AAPtr = PToI->getPointerOperand();
          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
          break;
        }
  }

  if (!AAPtr)
    return false;

  // Sign extend the offset to 64 bits (so that it is like all of the other
  // expressions).
  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
  if (OffSCEVBits < 64)
    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
  else if (OffSCEVBits > 64)
    return false;

  AAPtr = AAPtr->stripPointerCasts();
  return true;
}

// Processes one llvm.assume call: extracts the alignment fact it states and
// propagates it to every load, store, and memory intrinsic (transitively)
// using the assumed pointer, raising their alignment where provable.
// Returns true iff the assumption had the recognized alignment form.
bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))
    return false;

  // Skip ConstantPointerNull and UndefValue.  Assumptions on these shouldn't
  // affect other users.
  if (isa<ConstantData>(AAPtr))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction*, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    // Only instructions for which the assume provably holds at their program
    // point may be updated.
    if (Instruction *K = dyn_cast<Instruction>(J))
      if (isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();
    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           LI->getPointerOperand(), SE);
      if (NewAlignment > LI->getAlign()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           SI->getPointerOperand(), SE);
      if (NewAlignment > SI->getAlign()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      Align NewDestAlignment =
          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);

      LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                        << "\n";);
      if (NewDestAlignment > *MI->getDestAlign()) {
        MI->setDestAlignment(NewDestAlignment);
        ++NumMemIntAlignChanged;
      }

      // For memory transfers, there is also a source alignment that
      // can be set.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        Align NewSrcAlignment =
            getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);

        LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
                          << "\n";);

        if (NewSrcAlignment > *MTI->getSourceAlign()) {
          MTI->setSourceAlignment(NewSrcAlignment);
          ++NumMemIntAlignChanged;
        }
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))
        WorkList.push_back(K);
    }
  }

  return true;
}

// Legacy pass-manager entry point: collect the required analyses and defer
// to the new-style implementation.
bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return Impl.runImpl(F, AC, SE, DT);
}

// Shared driver for both pass managers: caches SE/DT in members (they are
// read by processAssumption/extractAlignmentInfo) and visits every cached
// assumption in F. Returns true iff any instruction was modified.
bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,
                                           ScalarEvolution *SE_,
                                           DominatorTree *DT_) {
  SE = SE_;
  DT = DT_;

  bool Changed = false;
  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH)
      Changed |= processAssumption(cast<CallInst>(AssumeVH));

  return Changed;
}

// New pass-manager entry point.
PreservedAnalyses
AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) {
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, &SE, &DT))
    return PreservedAnalyses::all();

  // Only alignments changed: the CFG and the analyses below remain valid.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}
Upload File
Create Folder