003 File Manager
Current Path:
/usr/src/contrib/llvm-project/llvm/lib/Target/X86
usr
/
src
/
contrib
/
llvm-project
/
llvm
/
lib
/
Target
/
X86
/
📁
..
📁
AsmParser
📁
Disassembler
📄
ImmutableGraph.h
(15.15 KB)
📁
MCTargetDesc
📁
TargetInfo
📄
X86.h
(7.41 KB)
📄
X86.td
(68.44 KB)
📄
X86AsmPrinter.cpp
(27.18 KB)
📄
X86AsmPrinter.h
(5.96 KB)
📄
X86AvoidStoreForwardingBlocks.cpp
(27.94 KB)
📄
X86AvoidTrailingCall.cpp
(4.91 KB)
📄
X86CallFrameOptimization.cpp
(23.07 KB)
📄
X86CallLowering.cpp
(17.62 KB)
📄
X86CallLowering.h
(1.74 KB)
📄
X86CallingConv.cpp
(13.34 KB)
📄
X86CallingConv.h
(1.09 KB)
📄
X86CallingConv.td
(46.15 KB)
📄
X86CmovConversion.cpp
(34.07 KB)
📄
X86CondBrFolding.cpp
(18.4 KB)
📄
X86DiscriminateMemOps.cpp
(7.11 KB)
📄
X86DomainReassignment.cpp
(25.87 KB)
📄
X86EvexToVex.cpp
(8.8 KB)
📄
X86ExpandPseudo.cpp
(16.95 KB)
📄
X86FastISel.cpp
(139.28 KB)
📄
X86FixupBWInsts.cpp
(18.09 KB)
📄
X86FixupLEAs.cpp
(24.44 KB)
📄
X86FixupSetCC.cpp
(4.44 KB)
📄
X86FlagsCopyLowering.cpp
(40.36 KB)
📄
X86FloatingPoint.cpp
(62.66 KB)
📄
X86FrameLowering.cpp
(138.71 KB)
📄
X86FrameLowering.h
(11.64 KB)
📄
X86GenRegisterBankInfo.def
(3.32 KB)
📄
X86ISelDAGToDAG.cpp
(208.37 KB)
📄
X86ISelLowering.cpp
(1.94 MB)
📄
X86ISelLowering.h
(60.88 KB)
📄
X86IndirectBranchTracking.cpp
(6.17 KB)
📄
X86IndirectThunks.cpp
(9.78 KB)
📄
X86InsertPrefetch.cpp
(9.64 KB)
📄
X86InsertWait.cpp
(4.47 KB)
📄
X86Instr3DNow.td
(5.24 KB)
📄
X86InstrAMX.td
(5.6 KB)
📄
X86InstrAVX512.td
(653.76 KB)
📄
X86InstrArithmetic.td
(75.61 KB)
📄
X86InstrBuilder.h
(8.45 KB)
📄
X86InstrCMovSetCC.td
(5.76 KB)
📄
X86InstrCompiler.td
(95.78 KB)
📄
X86InstrControl.td
(20.53 KB)
📄
X86InstrExtension.td
(11.64 KB)
📄
X86InstrFMA.td
(33.23 KB)
📄
X86InstrFMA3Info.cpp
(6.21 KB)
📄
X86InstrFMA3Info.h
(3.25 KB)
📄
X86InstrFPStack.td
(39.52 KB)
📄
X86InstrFoldTables.cpp
(393.01 KB)
📄
X86InstrFoldTables.h
(3.03 KB)
📄
X86InstrFormats.td
(41.05 KB)
📄
X86InstrFragmentsSIMD.td
(61.14 KB)
📄
X86InstrInfo.cpp
(322.72 KB)
📄
X86InstrInfo.h
(29.34 KB)
📄
X86InstrInfo.td
(169.76 KB)
📄
X86InstrMMX.td
(29.55 KB)
📄
X86InstrMPX.td
(3.63 KB)
📄
X86InstrSGX.td
(1.12 KB)
📄
X86InstrSSE.td
(385.01 KB)
📄
X86InstrSVM.td
(2.16 KB)
📄
X86InstrShiftRotate.td
(49.56 KB)
📄
X86InstrSystem.td
(34.03 KB)
📄
X86InstrTSX.td
(2.1 KB)
📄
X86InstrVMX.td
(3.53 KB)
📄
X86InstrVecCompiler.td
(21.09 KB)
📄
X86InstrXOP.td
(23.81 KB)
📄
X86InstructionSelector.cpp
(61.11 KB)
📄
X86InterleavedAccess.cpp
(32.7 KB)
📄
X86IntrinsicsInfo.h
(73.96 KB)
📄
X86LegalizerInfo.cpp
(15.6 KB)
📄
X86LegalizerInfo.h
(1.65 KB)
📄
X86LoadValueInjectionLoadHardening.cpp
(32.4 KB)
📄
X86LoadValueInjectionRetHardening.cpp
(4.93 KB)
📄
X86MCInstLower.cpp
(96.53 KB)
📄
X86MachineFunctionInfo.cpp
(1.1 KB)
📄
X86MachineFunctionInfo.h
(8.87 KB)
📄
X86MacroFusion.cpp
(2.62 KB)
📄
X86MacroFusion.h
(992 B)
📄
X86OptimizeLEAs.cpp
(27.47 KB)
📄
X86PadShortFunction.cpp
(7.33 KB)
📄
X86PartialReduction.cpp
(15.46 KB)
📄
X86PfmCounters.td
(10.18 KB)
📄
X86RegisterBankInfo.cpp
(10.55 KB)
📄
X86RegisterBankInfo.h
(2.87 KB)
📄
X86RegisterBanks.td
(629 B)
📄
X86RegisterInfo.cpp
(29 KB)
📄
X86RegisterInfo.h
(5.61 KB)
📄
X86RegisterInfo.td
(26.07 KB)
📄
X86SchedBroadwell.td
(69.45 KB)
📄
X86SchedHaswell.td
(73.96 KB)
📄
X86SchedPredicates.td
(4.23 KB)
📄
X86SchedSandyBridge.td
(50 KB)
📄
X86SchedSkylakeClient.td
(74.65 KB)
📄
X86SchedSkylakeServer.td
(113.85 KB)
📄
X86Schedule.td
(36.9 KB)
📄
X86ScheduleAtom.td
(38.26 KB)
📄
X86ScheduleBdVer2.td
(56.78 KB)
📄
X86ScheduleBtVer2.td
(46.98 KB)
📄
X86ScheduleSLM.td
(22.91 KB)
📄
X86ScheduleZnver1.td
(48.97 KB)
📄
X86ScheduleZnver2.td
(48.12 KB)
📄
X86SelectionDAGInfo.cpp
(12.02 KB)
📄
X86SelectionDAGInfo.h
(1.8 KB)
📄
X86ShuffleDecodeConstantPool.cpp
(11.22 KB)
📄
X86ShuffleDecodeConstantPool.h
(2.13 KB)
📄
X86SpeculativeExecutionSideEffectSuppression.cpp
(6.97 KB)
📄
X86SpeculativeLoadHardening.cpp
(93.16 KB)
📄
X86Subtarget.cpp
(13.25 KB)
📄
X86Subtarget.h
(32.08 KB)
📄
X86TargetMachine.cpp
(18.88 KB)
📄
X86TargetMachine.h
(2.04 KB)
📄
X86TargetObjectFile.cpp
(2.61 KB)
📄
X86TargetObjectFile.h
(2.13 KB)
📄
X86TargetTransformInfo.cpp
(189.14 KB)
📄
X86TargetTransformInfo.h
(9.63 KB)
📄
X86VZeroUpper.cpp
(12.59 KB)
📄
X86WinAllocaExpander.cpp
(9.54 KB)
📄
X86WinEHState.cpp
(28.97 KB)
Editing: X86PartialReduction.cpp
//===-- X86PartialReduction.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass looks for add instructions used by a horizontal reduction to see
// if we might be able to use pmaddwd or psadbw. Some cases of this require
// cross basic block knowledge and can't be done in SelectionDAG.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "X86TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "x86-partial-reduction"

namespace {

// IR-level FunctionPass that rewrites the leaves of horizontal add-reduction
// trees into shuffle/add patterns that SelectionDAG will later match to
// pmaddwd (tryMAddReplacement) or into direct psadbw intrinsic calls
// (trySADReplacement).
class X86PartialReduction : public FunctionPass {
  const DataLayout *DL; // Set per-function in runOnFunction.
  const X86Subtarget *ST; // Set per-function in runOnFunction.

public:
  static char ID; // Pass identification, replacement for typeid.

  X86PartialReduction() : FunctionPass(ID) { }

  bool runOnFunction(Function &Fn) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // The pass only replaces/erases instructions inside blocks; it never
    // touches the control-flow graph.
    AU.setPreservesCFG();
  }

  StringRef getPassName() const override {
    return "X86 Partial Reduction";
  }

private:
  bool tryMAddReplacement(Instruction *Op);
  bool trySADReplacement(Instruction *Op);
};
}

FunctionPass *llvm::createX86PartialReductionPass() {
  return new X86PartialReduction();
}

char X86PartialReduction::ID = 0;

INITIALIZE_PASS(X86PartialReduction, DEBUG_TYPE,
                "X86 Partial Reduction", false, false)

// Try to replace a vXi32 multiply (a reduction-tree leaf) with an
// even/odd-element shuffle + add pattern, zero-extended back to the original
// width, which SelectionDAG pattern-matches to pmaddwd. Returns true and
// erases the original mul on success.
bool X86PartialReduction::tryMAddReplacement(Instruction *Op) {
  if (!ST->hasSSE2())
    return false;

  // Need at least 8 elements.
  if (cast<FixedVectorType>(Op->getType())->getNumElements() < 8)
    return false;

  // Element type should be i32.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  auto *Mul = dyn_cast<BinaryOperator>(Op);
  if (!Mul || Mul->getOpcode() != Instruction::Mul)
    return false;

  Value *LHS = Mul->getOperand(0);
  Value *RHS = Mul->getOperand(1);

  // LHS and RHS should be only used once or if they are the same then only
  // used twice. Only check this when SSE4.1 is enabled and we have zext/sext
  // instructions, otherwise we use punpck to emulate zero extend in stages. The
  // trunc/ we need to do likely won't introduce new instructions in that case.
  if (ST->hasSSE41()) {
    if (LHS == RHS) {
      if (!isa<Constant>(LHS) && !LHS->hasNUses(2))
        return false;
    } else {
      if (!isa<Constant>(LHS) && !LHS->hasOneUse())
        return false;
      if (!isa<Constant>(RHS) && !RHS->hasOneUse())
        return false;
    }
  }

  // Returns true if Op can be narrowed to 16 bits: it must be freely
  // truncatable (a same-block sext/zext from <=16 bits, or a constant) and
  // have more than 16 known sign bits, possibly looking through one
  // same-block add/sub whose inputs are themselves freely truncatable.
  auto CanShrinkOp = [&](Value *Op) {
    auto IsFreeTruncation = [&](Value *Op) {
      if (auto *Cast = dyn_cast<CastInst>(Op)) {
        if (Cast->getParent() == Mul->getParent() &&
            (Cast->getOpcode() == Instruction::SExt ||
             Cast->getOpcode() == Instruction::ZExt) &&
            Cast->getOperand(0)->getType()->getScalarSizeInBits() <= 16)
          return true;
      }

      return isa<Constant>(Op);
    };

    // If the operation can be freely truncated and has enough sign bits we
    // can shrink.
    if (IsFreeTruncation(Op) &&
        ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
      return true;

    // SelectionDAG has limited support for truncating through an add or sub if
    // the inputs are freely truncatable.
    if (auto *BO = dyn_cast<BinaryOperator>(Op)) {
      if (BO->getParent() == Mul->getParent() &&
          IsFreeTruncation(BO->getOperand(0)) &&
          IsFreeTruncation(BO->getOperand(1)) &&
          ComputeNumSignBits(Op, *DL, 0, nullptr, Mul) > 16)
        return true;
    }

    return false;
  };

  // Both Ops need to be shrinkable.
  // NOTE(review): the comment says "both", but the condition only bails when
  // NEITHER operand is shrinkable (i.e. one shrinkable operand suffices) —
  // confirm against upstream whether this mismatch is intentional.
  if (!CanShrinkOp(LHS) && !CanShrinkOp(RHS))
    return false;

  IRBuilder<> Builder(Mul);

  auto *MulTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = MulTy->getNumElements();

  // Extract even elements and odd elements and add them together. This will
  // be pattern matched by SelectionDAG to pmaddwd. This instruction will be
  // half the original width.
  SmallVector<int, 16> EvenMask(NumElts / 2);
  SmallVector<int, 16> OddMask(NumElts / 2);
  for (int i = 0, e = NumElts / 2; i != e; ++i) {
    EvenMask[i] = i * 2;
    OddMask[i] = i * 2 + 1;
  }

  // Creating a new mul so the replaceAllUsesWith below doesn't replace the
  // uses in the shuffles we're creating.
  Value *NewMul = Builder.CreateMul(Mul->getOperand(0), Mul->getOperand(1));
  Value *EvenElts = Builder.CreateShuffleVector(NewMul, NewMul, EvenMask);
  Value *OddElts = Builder.CreateShuffleVector(NewMul, NewMul, OddMask);
  Value *MAdd = Builder.CreateAdd(EvenElts, OddElts);

  // Concatenate zeroes to extend back to the original type.
  SmallVector<int, 32> ConcatMask(NumElts);
  std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
  Value *Zero = Constant::getNullValue(MAdd->getType());
  Value *Concat = Builder.CreateShuffleVector(MAdd, Zero, ConcatMask);

  Mul->replaceAllUsesWith(Concat);
  Mul->eraseFromParent();

  return true;
}

// Try to replace a vXi32 absolute-difference pattern
// (select implementing abs of a sub of two zexts from vXi8) with calls to the
// widest available psadbw intrinsic, splitting and re-concatenating as needed
// to match the original vector width. Returns true and erases the select on
// success.
bool X86PartialReduction::trySADReplacement(Instruction *Op) {
  if (!ST->hasSSE2())
    return false;

  // TODO: There's nothing special about i32, any integer type above i16 should
  // work just as well.
  if (!cast<VectorType>(Op->getType())->getElementType()->isIntegerTy(32))
    return false;

  // Operand should be a select.
  auto *SI = dyn_cast<SelectInst>(Op);
  if (!SI)
    return false;

  // Select needs to implement absolute value.
  Value *LHS, *RHS;
  auto SPR = matchSelectPattern(SI, LHS, RHS);
  if (SPR.Flavor != SPF_ABS)
    return false;

  // Need a subtract of two values.
  auto *Sub = dyn_cast<BinaryOperator>(LHS);
  if (!Sub || Sub->getOpcode() != Instruction::Sub)
    return false;

  // Look for zero extend from i8.
  auto getZeroExtendedVal = [](Value *Op) -> Value * {
    if (auto *ZExt = dyn_cast<ZExtInst>(Op))
      if (cast<VectorType>(ZExt->getOperand(0)->getType())
              ->getElementType()
              ->isIntegerTy(8))
        return ZExt->getOperand(0);

    return nullptr;
  };

  // Both operands of the subtract should be extends from vXi8.
  Value *Op0 = getZeroExtendedVal(Sub->getOperand(0));
  Value *Op1 = getZeroExtendedVal(Sub->getOperand(1));
  if (!Op0 || !Op1)
    return false;

  IRBuilder<> Builder(SI);

  auto *OpTy = cast<FixedVectorType>(Op->getType());
  unsigned NumElts = OpTy->getNumElements();

  // Pick the widest psadbw flavor the subtarget supports that the input can
  // fill; smaller inputs fall through to the 128-bit SSE2 form and are padded
  // below.
  unsigned IntrinsicNumElts;
  Intrinsic::ID IID;
  if (ST->hasBWI() && NumElts >= 64) {
    IID = Intrinsic::x86_avx512_psad_bw_512;
    IntrinsicNumElts = 64;
  } else if (ST->hasAVX2() && NumElts >= 32) {
    IID = Intrinsic::x86_avx2_psad_bw;
    IntrinsicNumElts = 32;
  } else {
    IID = Intrinsic::x86_sse2_psad_bw;
    IntrinsicNumElts = 16;
  }

  Function *PSADBWFn = Intrinsic::getDeclaration(SI->getModule(), IID);

  if (NumElts < 16) {
    // Pad input with zeroes.
    SmallVector<int, 32> ConcatMask(16);
    for (unsigned i = 0; i != NumElts; ++i)
      ConcatMask[i] = i;
    for (unsigned i = NumElts; i != 16; ++i)
      ConcatMask[i] = (i % NumElts) + NumElts;

    Value *Zero = Constant::getNullValue(Op0->getType());
    Op0 = Builder.CreateShuffleVector(Op0, Zero, ConcatMask);
    Op1 = Builder.CreateShuffleVector(Op1, Zero, ConcatMask);
    NumElts = 16;
  }

  // Intrinsics produce vXi64 and need to be casted to vXi32.
  auto *I32Ty =
      FixedVectorType::get(Builder.getInt32Ty(), IntrinsicNumElts / 4);

  assert(NumElts % IntrinsicNumElts == 0 && "Unexpected number of elements!");
  unsigned NumSplits = NumElts / IntrinsicNumElts;

  // First collect the pieces we need.
  SmallVector<Value *, 4> Ops(NumSplits);
  for (unsigned i = 0; i != NumSplits; ++i) {
    SmallVector<int, 64> ExtractMask(IntrinsicNumElts);
    std::iota(ExtractMask.begin(), ExtractMask.end(), i * IntrinsicNumElts);
    Value *ExtractOp0 = Builder.CreateShuffleVector(Op0, Op0, ExtractMask);
    // NOTE(review): the second shuffle input here is Op0, not Op1. This is
    // harmless because ExtractMask only indexes the first input, but
    // presumably Op1 was intended — confirm against upstream.
    Value *ExtractOp1 = Builder.CreateShuffleVector(Op1, Op0, ExtractMask);
    Ops[i] = Builder.CreateCall(PSADBWFn, {ExtractOp0, ExtractOp1});
    Ops[i] = Builder.CreateBitCast(Ops[i], I32Ty);
  }

  // Pairwise concatenate the psadbw results back up to a single vector.
  assert(isPowerOf2_32(NumSplits) && "Expected power of 2 splits");
  unsigned Stages = Log2_32(NumSplits);
  for (unsigned s = Stages; s > 0; --s) {
    unsigned NumConcatElts =
        cast<FixedVectorType>(Ops[0]->getType())->getNumElements() * 2;
    for (unsigned i = 0; i != 1U << (s - 1); ++i) {
      SmallVector<int, 64> ConcatMask(NumConcatElts);
      std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
      Ops[i] = Builder.CreateShuffleVector(Ops[i*2], Ops[i*2+1], ConcatMask);
    }
  }

  // At this point the final value should be in Ops[0]. Now we need to adjust
  // it to the final original type.
  NumElts = cast<FixedVectorType>(OpTy)->getNumElements();
  if (NumElts == 2) {
    // Extract down to 2 elements.
    Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{0, 1});
  } else if (NumElts >= 8) {
    // Widen back up by concatenating zeroes.
    SmallVector<int, 32> ConcatMask(NumElts);
    unsigned SubElts =
        cast<FixedVectorType>(Ops[0]->getType())->getNumElements();
    for (unsigned i = 0; i != SubElts; ++i)
      ConcatMask[i] = i;
    for (unsigned i = SubElts; i != NumElts; ++i)
      ConcatMask[i] = (i % SubElts) + SubElts;

    Value *Zero = Constant::getNullValue(Ops[0]->getType());
    Ops[0] = Builder.CreateShuffleVector(Ops[0], Zero, ConcatMask);
  }

  SI->replaceAllUsesWith(Ops[0]);
  SI->eraseFromParent();

  return true;
}

// Walk backwards from the ExtractElementInst and determine if it is the end of
// a horizontal reduction. Return the input to the reduction if we find one.
static Value *matchAddReduction(const ExtractElementInst &EE) {
  // Make sure we're extracting index 0.
  auto *Index = dyn_cast<ConstantInt>(EE.getIndexOperand());
  if (!Index || !Index->isNullValue())
    return nullptr;

  const auto *BO = dyn_cast<BinaryOperator>(EE.getVectorOperand());
  if (!BO || BO->getOpcode() != Instruction::Add || !BO->hasOneUse())
    return nullptr;

  unsigned NumElems = cast<FixedVectorType>(BO->getType())->getNumElements();
  // Ensure the reduction size is a power of 2.
  if (!isPowerOf2_32(NumElems))
    return nullptr;

  // Walk down the shuffle+add "pyramid": at stage i we expect an add of a
  // value and a shuffle of that same value whose mask pulls the upper half
  // (elements [2^i, 2^(i+1)) at the narrowest stage) down for pairwise adds.
  const Value *Op = BO;
  unsigned Stages = Log2_32(NumElems);
  for (unsigned i = 0; i != Stages; ++i) {
    const auto *BO = dyn_cast<BinaryOperator>(Op);
    if (!BO || BO->getOpcode() != Instruction::Add)
      return nullptr;

    // If this isn't the first add, then it should only have 2 users, the
    // shuffle and another add which we checked in the previous iteration.
    if (i != 0 && !BO->hasNUses(2))
      return nullptr;

    Value *LHS = BO->getOperand(0);
    Value *RHS = BO->getOperand(1);

    // The shuffle may be on either side of the add.
    auto *Shuffle = dyn_cast<ShuffleVectorInst>(LHS);
    if (Shuffle) {
      Op = RHS;
    } else {
      Shuffle = dyn_cast<ShuffleVectorInst>(RHS);
      Op = LHS;
    }

    // The first operand of the shuffle should be the same as the other operand
    // of the bin op.
    if (!Shuffle || Shuffle->getOperand(0) != Op)
      return nullptr;

    // Verify the shuffle has the expected (at this stage of the pyramid) mask.
    unsigned MaskEnd = 1 << i;
    for (unsigned Index = 0; Index < MaskEnd; ++Index)
      if (Shuffle->getMaskValue(Index) != (int)(MaskEnd + Index))
        return nullptr;
  }

  return const_cast<Value *>(Op);
}

// See if this BO is reachable from this Phi by walking forward through single
// use BinaryOperators with the same opcode. If we get back then we know we've
// found a loop and it is safe to step through this Add to find more leaves.
static bool isReachableFromPHI(PHINode *Phi, BinaryOperator *BO) {
  // The PHI itself should only have one use.
  if (!Phi->hasOneUse())
    return false;

  Instruction *U = cast<Instruction>(*Phi->user_begin());
  if (U == BO)
    return true;

  // Follow the single-use chain of same-opcode binops until it diverges or
  // terminates.
  while (U->hasOneUse() && U->getOpcode() == BO->getOpcode())
    U = cast<Instruction>(*U->user_begin());

  return U == BO;
}

// Collect all the leaves of the tree of adds that feeds into the horizontal
// reduction. Root is the Value that is used by the horizontal reduction.
// We look through single use phis, single use adds, or adds that are used by
// a phi that forms a loop with the add.
static void collectLeaves(Value *Root, SmallVectorImpl<Instruction *> &Leaves) {
  SmallPtrSet<Value *, 8> Visited;
  SmallVector<Value *, 8> Worklist;
  Worklist.push_back(Root);

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    if (auto *PN = dyn_cast<PHINode>(V)) {
      // PHI node should have single use unless it is the root node, then it
      // has 2 uses.
      if (!PN->hasNUses(PN == Root ? 2 : 1))
        break;

      // Push incoming values to the worklist.
      for (Value *InV : PN->incoming_values())
        Worklist.push_back(InV);

      continue;
    }

    if (auto *BO = dyn_cast<BinaryOperator>(V)) {
      if (BO->getOpcode() == Instruction::Add) {
        // Simple case. Single use, just push its operands to the worklist.
        if (BO->hasNUses(BO == Root ? 2 : 1)) {
          for (Value *Op : BO->operands())
            Worklist.push_back(Op);
          continue;
        }

        // If there is additional use, make sure it is an unvisited phi that
        // gets us back to this node.
        if (BO->hasNUses(BO == Root ? 3 : 2)) {
          PHINode *PN = nullptr;
          // NOTE(review): this scans Root's users, not BO's, even though the
          // comment above talks about "this node" (BO) — confirm against
          // upstream whether Root is intended here.
          for (auto *U : Root->users())
            if (auto *P = dyn_cast<PHINode>(U))
              if (!Visited.count(P))
                PN = P;

          // If we didn't find a 2-input PHI then this isn't a case we can
          // handle.
          if (!PN || PN->getNumIncomingValues() != 2)
            continue;

          // Walk forward from this phi to see if it reaches back to this add.
          if (!isReachableFromPHI(PN, BO))
            continue;

          // The phi forms a loop with this Add, push its operands.
          // Note: control falls through to the leaf check below, but the
          // hasNUses test there rejects this multi-use add, so it is not
          // also recorded as a leaf.
          for (Value *Op : BO->operands())
            Worklist.push_back(Op);
        }
      }
    }

    // Not an add or phi, make it a leaf.
    if (auto *I = dyn_cast<Instruction>(V)) {
      if (!V->hasNUses(I == Root ? 2 : 1))
        continue;

      // Add this as a leaf.
      Leaves.push_back(I);
    }
  }
}

// Scan every instruction in F for an extractelement that terminates a
// horizontal add reduction, collect the reduction tree's leaves, and attempt
// the pmaddwd then psadbw rewrites on each leaf. Returns true if any IR was
// changed.
bool X86PartialReduction::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // This pass needs the target machine to query subtarget features; bail if
  // it is unavailable (e.g. running outside a codegen pipeline).
  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  auto &TM = TPC->getTM<X86TargetMachine>();
  ST = TM.getSubtargetImpl(F);

  DL = &F.getParent()->getDataLayout();

  bool MadeChange = false;
  for (auto &BB : F) {
    for (auto &I : BB) {
      auto *EE = dyn_cast<ExtractElementInst>(&I);
      if (!EE)
        continue;

      // First find a reduction tree.
      // FIXME: Do we need to handle other opcodes than Add?
      Value *Root = matchAddReduction(*EE);
      if (!Root)
        continue;

      SmallVector<Instruction *, 8> Leaves;
      collectLeaves(Root, Leaves);

      for (Instruction *I : Leaves) {
        if (tryMAddReplacement(I)) {
          MadeChange = true;
          continue;
        }

        // Don't do SAD matching on the root node. SelectionDAG already
        // has support for that and currently generates better code.
        if (I != Root && trySADReplacement(I))
          MadeChange = true;
      }
    }
  }

  return MadeChange;
}
Upload File
Create Folder