003 File Manager
Current Path:
/usr/src/contrib/llvm-project/llvm/lib/Target/X86
usr
/
src
/
contrib
/
llvm-project
/
llvm
/
lib
/
Target
/
X86
/
📁
..
📁
AsmParser
📁
Disassembler
📄
ImmutableGraph.h
(15.15 KB)
📁
MCTargetDesc
📁
TargetInfo
📄
X86.h
(7.41 KB)
📄
X86.td
(68.44 KB)
📄
X86AsmPrinter.cpp
(27.18 KB)
📄
X86AsmPrinter.h
(5.96 KB)
📄
X86AvoidStoreForwardingBlocks.cpp
(27.94 KB)
📄
X86AvoidTrailingCall.cpp
(4.91 KB)
📄
X86CallFrameOptimization.cpp
(23.07 KB)
📄
X86CallLowering.cpp
(17.62 KB)
📄
X86CallLowering.h
(1.74 KB)
📄
X86CallingConv.cpp
(13.34 KB)
📄
X86CallingConv.h
(1.09 KB)
📄
X86CallingConv.td
(46.15 KB)
📄
X86CmovConversion.cpp
(34.07 KB)
📄
X86CondBrFolding.cpp
(18.4 KB)
📄
X86DiscriminateMemOps.cpp
(7.11 KB)
📄
X86DomainReassignment.cpp
(25.87 KB)
📄
X86EvexToVex.cpp
(8.8 KB)
📄
X86ExpandPseudo.cpp
(16.95 KB)
📄
X86FastISel.cpp
(139.28 KB)
📄
X86FixupBWInsts.cpp
(18.09 KB)
📄
X86FixupLEAs.cpp
(24.44 KB)
📄
X86FixupSetCC.cpp
(4.44 KB)
📄
X86FlagsCopyLowering.cpp
(40.36 KB)
📄
X86FloatingPoint.cpp
(62.66 KB)
📄
X86FrameLowering.cpp
(138.71 KB)
📄
X86FrameLowering.h
(11.64 KB)
📄
X86GenRegisterBankInfo.def
(3.32 KB)
📄
X86ISelDAGToDAG.cpp
(208.37 KB)
📄
X86ISelLowering.cpp
(1.94 MB)
📄
X86ISelLowering.h
(60.88 KB)
📄
X86IndirectBranchTracking.cpp
(6.17 KB)
📄
X86IndirectThunks.cpp
(9.78 KB)
📄
X86InsertPrefetch.cpp
(9.64 KB)
📄
X86InsertWait.cpp
(4.47 KB)
📄
X86Instr3DNow.td
(5.24 KB)
📄
X86InstrAMX.td
(5.6 KB)
📄
X86InstrAVX512.td
(653.76 KB)
📄
X86InstrArithmetic.td
(75.61 KB)
📄
X86InstrBuilder.h
(8.45 KB)
📄
X86InstrCMovSetCC.td
(5.76 KB)
📄
X86InstrCompiler.td
(95.78 KB)
📄
X86InstrControl.td
(20.53 KB)
📄
X86InstrExtension.td
(11.64 KB)
📄
X86InstrFMA.td
(33.23 KB)
📄
X86InstrFMA3Info.cpp
(6.21 KB)
📄
X86InstrFMA3Info.h
(3.25 KB)
📄
X86InstrFPStack.td
(39.52 KB)
📄
X86InstrFoldTables.cpp
(393.01 KB)
📄
X86InstrFoldTables.h
(3.03 KB)
📄
X86InstrFormats.td
(41.05 KB)
📄
X86InstrFragmentsSIMD.td
(61.14 KB)
📄
X86InstrInfo.cpp
(322.72 KB)
📄
X86InstrInfo.h
(29.34 KB)
📄
X86InstrInfo.td
(169.76 KB)
📄
X86InstrMMX.td
(29.55 KB)
📄
X86InstrMPX.td
(3.63 KB)
📄
X86InstrSGX.td
(1.12 KB)
📄
X86InstrSSE.td
(385.01 KB)
📄
X86InstrSVM.td
(2.16 KB)
📄
X86InstrShiftRotate.td
(49.56 KB)
📄
X86InstrSystem.td
(34.03 KB)
📄
X86InstrTSX.td
(2.1 KB)
📄
X86InstrVMX.td
(3.53 KB)
📄
X86InstrVecCompiler.td
(21.09 KB)
📄
X86InstrXOP.td
(23.81 KB)
📄
X86InstructionSelector.cpp
(61.11 KB)
📄
X86InterleavedAccess.cpp
(32.7 KB)
📄
X86IntrinsicsInfo.h
(73.96 KB)
📄
X86LegalizerInfo.cpp
(15.6 KB)
📄
X86LegalizerInfo.h
(1.65 KB)
📄
X86LoadValueInjectionLoadHardening.cpp
(32.4 KB)
📄
X86LoadValueInjectionRetHardening.cpp
(4.93 KB)
📄
X86MCInstLower.cpp
(96.53 KB)
📄
X86MachineFunctionInfo.cpp
(1.1 KB)
📄
X86MachineFunctionInfo.h
(8.87 KB)
📄
X86MacroFusion.cpp
(2.62 KB)
📄
X86MacroFusion.h
(992 B)
📄
X86OptimizeLEAs.cpp
(27.47 KB)
📄
X86PadShortFunction.cpp
(7.33 KB)
📄
X86PartialReduction.cpp
(15.46 KB)
📄
X86PfmCounters.td
(10.18 KB)
📄
X86RegisterBankInfo.cpp
(10.55 KB)
📄
X86RegisterBankInfo.h
(2.87 KB)
📄
X86RegisterBanks.td
(629 B)
📄
X86RegisterInfo.cpp
(29 KB)
📄
X86RegisterInfo.h
(5.61 KB)
📄
X86RegisterInfo.td
(26.07 KB)
📄
X86SchedBroadwell.td
(69.45 KB)
📄
X86SchedHaswell.td
(73.96 KB)
📄
X86SchedPredicates.td
(4.23 KB)
📄
X86SchedSandyBridge.td
(50 KB)
📄
X86SchedSkylakeClient.td
(74.65 KB)
📄
X86SchedSkylakeServer.td
(113.85 KB)
📄
X86Schedule.td
(36.9 KB)
📄
X86ScheduleAtom.td
(38.26 KB)
📄
X86ScheduleBdVer2.td
(56.78 KB)
📄
X86ScheduleBtVer2.td
(46.98 KB)
📄
X86ScheduleSLM.td
(22.91 KB)
📄
X86ScheduleZnver1.td
(48.97 KB)
📄
X86ScheduleZnver2.td
(48.12 KB)
📄
X86SelectionDAGInfo.cpp
(12.02 KB)
📄
X86SelectionDAGInfo.h
(1.8 KB)
📄
X86ShuffleDecodeConstantPool.cpp
(11.22 KB)
📄
X86ShuffleDecodeConstantPool.h
(2.13 KB)
📄
X86SpeculativeExecutionSideEffectSuppression.cpp
(6.97 KB)
📄
X86SpeculativeLoadHardening.cpp
(93.16 KB)
📄
X86Subtarget.cpp
(13.25 KB)
📄
X86Subtarget.h
(32.08 KB)
📄
X86TargetMachine.cpp
(18.88 KB)
📄
X86TargetMachine.h
(2.04 KB)
📄
X86TargetObjectFile.cpp
(2.61 KB)
📄
X86TargetObjectFile.h
(2.13 KB)
📄
X86TargetTransformInfo.cpp
(189.14 KB)
📄
X86TargetTransformInfo.h
(9.63 KB)
📄
X86VZeroUpper.cpp
(12.59 KB)
📄
X86WinAllocaExpander.cpp
(9.54 KB)
📄
X86WinEHState.cpp
(28.97 KB)
Editing: X86LegalizerInfo.cpp
//===- X86LegalizerInfo.cpp --------------------------------------*- C++ -*-==// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file implements the targeting of the Machinelegalizer class for X86. /// \todo This should be generated by TableGen. //===----------------------------------------------------------------------===// #include "X86LegalizerInfo.h" #include "X86Subtarget.h" #include "X86TargetMachine.h" #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Type.h" using namespace llvm; using namespace TargetOpcode; using namespace LegalizeActions; /// FIXME: The following static functions are SizeChangeStrategy functions /// that are meant to temporarily mimic the behaviour of the old legalization /// based on doubling/halving non-legal types as closely as possible. This is /// not entirly possible as only legalizing the types that are exactly a power /// of 2 times the size of the legal types would require specifying all those /// sizes explicitly. /// In practice, not specifying those isn't a problem, and the below functions /// should disappear quickly as we add support for legalizing non-power-of-2 /// sized types further. 
// Helper for the legacy SizeChangeStrategy machinery: copies the
// (size, action) pairs of `v` into `result`, inserting an explicit
// {size + 1, Unsupported} entry after any pair whose successor in `v` is not
// the very next bit-width. This keeps the strategy's size->action map dense
// where the old legalizer expected it to be.
static void addAndInterleaveWithUnsupported(
    LegalizerInfo::SizeAndActionsVec &result,
    const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}

// SizeChangeStrategy that widens 1-bit scalars to the next size listed in
// `v`, marks 2 bits as Unsupported, splices in `v` (densified via the helper
// above), and caps everything above the largest listed size as Unsupported.
// Precondition (asserted): `v` is non-empty and its smallest size is > 1.
static LegalizerInfo::SizeAndActionsVec
widen_1(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 1);

  LegalizerInfo::SizeAndActionsVec result = {{1, WidenScalar},
                                             {2, Unsupported}};

  addAndInterleaveWithUnsupported(result, v);

  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}

// Builds the complete legalization rule set for the given subtarget: the
// baseline 32-bit rules first, then each feature tier (64-bit, SSE1/2/4.1,
// AVX, AVX2, AVX-512 and its DQ/BW extensions) layered on top, followed by
// the legacy size-change strategies for scalar ops. Finishes with
// computeTables() and a verification pass against the instruction info.
X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
                                   const X86TargetMachine &TM)
    : Subtarget(STI), TM(TM) {
  setLegalizerInfo32bit();
  setLegalizerInfo64bit();
  setLegalizerInfoSSE1();
  setLegalizerInfoSSE2();
  setLegalizerInfoSSE41();
  setLegalizerInfoAVX();
  setLegalizerInfoAVX2();
  setLegalizerInfoAVX512();
  setLegalizerInfoAVX512DQ();
  setLegalizerInfoAVX512BW();

  // Legacy size-change strategies for the ops still using the old API.
  setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
  for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
  for (unsigned MemOp : {G_LOAD, G_STORE})
    setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
                                             narrowToSmallerAndWidenToSmallest);
  setLegalizeScalarToDifferentSizeStrategy(
      G_PTR_ADD, 1, widenToLargerTypesUnsupportedOtherwise);
  setLegalizeScalarToDifferentSizeStrategy(
      G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);

  computeTables();
  verify(*STI.getInstrInfo());
}

// Custom-legalizes intrinsic calls: mem intrinsics (memcpy/memset/memmove)
// are lowered to libcalls via createMemLibcall and the original instruction
// is erased on success. Returns false only when the libcall lowering fails;
// all other intrinsics are reported as handled (true) unchanged.
bool X86LegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                         MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  switch (MI.getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memset:
  case Intrinsic::memmove:
    if (createMemLibcall(MIRBuilder, *MIRBuilder.getMRI(), MI) ==
        LegalizerHelper::UnableToLegalize)
      return false;
    MI.eraseFromParent();
    return true;
  default:
    break;
  }
  return true;
}

// Baseline rules available on every x86 subtarget (scalar integer and
// pointer ops up to 32 bits); the 32-bit-only pointer/shift/division rules
// are additionally guarded by !is64Bit() since the 64-bit variants are
// installed by setLegalizerInfo64bit().
void X86LegalizerInfo::setLegalizerInfo32bit() {
  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  for (auto Ty : {p0, s1, s8, s16, s32})
    setAction({G_IMPLICIT_DEF, Ty}, Legal);

  for (auto Ty : {s8, s16, s32, p0})
    setAction({G_PHI, Ty}, Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    for (auto Ty : {s8, s16, s32})
      setAction({BinOp, Ty}, Legal);

  // Add-with-carry: 32-bit result, 1-bit carry in/out.
  for (unsigned Op : {G_UADDE}) {
    setAction({Op, s32}, Legal);
    setAction({Op, 1, s1}, Legal);
  }

  for (unsigned MemOp : {G_LOAD, G_STORE}) {
    for (auto Ty : {s8, s16, s32, p0})
      setAction({MemOp, Ty}, Legal);

    // And everything's fine in addrspace 0.
    setAction({MemOp, 1, p0}, Legal);
  }

  // Pointer-handling
  setAction({G_FRAME_INDEX, p0}, Legal);
  setAction({G_GLOBAL_VALUE, p0}, Legal);

  setAction({G_PTR_ADD, p0}, Legal);
  setAction({G_PTR_ADD, 1, s32}, Legal);

  if (!Subtarget.is64Bit()) {
    getActionDefinitionsBuilder(G_PTRTOINT)
        .legalForCartesianProduct({s1, s8, s16, s32}, {p0})
        .maxScalar(0, s32)
        .widenScalarToNextPow2(0, /*Min*/ 8);
    getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});

    // Shifts and SDIV
    getActionDefinitionsBuilder(
        {G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalFor({s8, s16, s32})
      .clampScalar(0, s8, s32);

    // Shift amount (type index 1) is always clamped to an 8-bit scalar,
    // matching the x86 CL-register shift encoding.
    getActionDefinitionsBuilder(
        {G_SHL, G_LSHR, G_ASHR})
      .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
      .clampScalar(0, s8, s32)
      .clampScalar(1, s8, s8);
  }

  // Control-flow
  setAction({G_BRCOND, s1}, Legal);

  // Constants
  for (auto Ty : {s8, s16, s32, p0})
    setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);

  // Extensions
  for (auto Ty : {s8, s16, s32}) {
    setAction({G_ZEXT, Ty}, Legal);
    setAction({G_SEXT, Ty}, Legal);
    setAction({G_ANYEXT, Ty}, Legal);
  }
  setAction({G_ANYEXT, s128}, Legal);
  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  // Comparison
  setAction({G_ICMP, s1}, Legal);

  for (auto Ty : {s8, s16, s32, p0})
    setAction({G_ICMP, 1, Ty}, Legal);

  // Merge/Unmerge
  for (const auto &Ty : {s16, s32, s64}) {
    setAction({G_MERGE_VALUES, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
  }
  for (const auto &Ty : {s8, s16, s32}) {
    setAction({G_MERGE_VALUES, 1, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, Ty}, Legal);
  }
}

// Rules only valid in 64-bit mode: s64 scalar ops, 64-bit pointer
// arithmetic/conversions, FP<->int conversions and s128 merge/unmerge.
// No-op unless the subtarget is 64-bit.
void X86LegalizerInfo::setLegalizerInfo64bit() {
  if (!Subtarget.is64Bit())
    return;

  const LLT p0 = LLT::pointer(0, TM.getPointerSizeInBits(0));
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);

  setAction({G_IMPLICIT_DEF, s64}, Legal);
  // Need to have that, as tryFoldImplicitDef will create this pattern:
  // s128 = EXTEND (G_IMPLICIT_DEF s32/s64) -> s128 = G_IMPLICIT_DEF
  setAction({G_IMPLICIT_DEF, s128}, Legal);

  setAction({G_PHI, s64}, Legal);

  for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
    setAction({BinOp, s64}, Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    setAction({MemOp, s64}, Legal);

  // Pointer-handling
  setAction({G_PTR_ADD, 1, s64}, Legal);
  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
      .maxScalar(0, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);
  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s64}});

  // Constants
  setAction({TargetOpcode::G_CONSTANT, s64}, Legal);

  // Extensions
  for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
    setAction({extOp, s64}, Legal);
  }

  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  // NOTE(review): the widenScalarToNextPow2 type indices here are swapped
  // relative to G_SITOFP above (0 before 1) — presumably intentional given
  // the differing source/result roles; confirm against upstream history.
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32, s64})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(1);

  // Comparison
  setAction({G_ICMP, 1, s64}, Legal);

  getActionDefinitionsBuilder(G_FCMP)
      .legalForCartesianProduct({s8}, {s32, s64})
      .clampScalar(0, s8, s8)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  // Divisions
  getActionDefinitionsBuilder(
      {G_SDIV, G_SREM, G_UDIV, G_UREM})
    .legalFor({s8, s16, s32, s64})
    .clampScalar(0, s8, s64);

  // Shifts
  getActionDefinitionsBuilder(
      {G_SHL, G_LSHR, G_ASHR})
    .legalFor({{s8, s8}, {s16, s8}, {s32, s8}, {s64, s8}})
    .clampScalar(0, s8, s64)
    .clampScalar(1, s8, s8);

  // Merge/Unmerge
  setAction({G_MERGE_VALUES, s128}, Legal);
  setAction({G_UNMERGE_VALUES, 1, s128}, Legal);
  setAction({G_MERGE_VALUES, 1, s128}, Legal);
  setAction({G_UNMERGE_VALUES, s128}, Legal);
}

// SSE1 tier: scalar/vector f32 arithmetic, 128-bit vector load/store,
// f32 constants, and 128-bit vector concat/unmerge. No-op without SSE1.
void X86LegalizerInfo::setLegalizerInfoSSE1() {
  if (!Subtarget.hasSSE1())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s32, v4s32})
      setAction({BinOp, Ty}, Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v4s32, v2s64})
      setAction({MemOp, Ty}, Legal);

  // Constants
  setAction({TargetOpcode::G_FCONSTANT, s32}, Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v4s32, v2s64}) {
    setAction({G_CONCAT_VECTORS, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
  }
  setAction({G_MERGE_VALUES, 1, s64}, Legal);
  setAction({G_UNMERGE_VALUES, s64}, Legal);
}

// SSE2 tier: f64 arithmetic, 128-bit integer vector add/sub, v8s16 multiply,
// f32<->f64 conversions, f64 constants, and the 128/256-bit vector
// merge/unmerge combinations. No-op without SSE2.
void X86LegalizerInfo::setLegalizerInfoSSE2() {
  if (!Subtarget.hasSSE2())
    return;

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);

  const LLT v32s8 = LLT::vector(32, 8);
  const LLT v16s16 = LLT::vector(16, 16);
  const LLT v8s32 = LLT::vector(8, 32);
  const LLT v4s64 = LLT::vector(4, 64);

  for (unsigned BinOp : {G_FADD, G_FSUB, G_FMUL, G_FDIV})
    for (auto Ty : {s64, v2s64})
      setAction({BinOp, Ty}, Legal);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s8, v8s16, v4s32, v2s64})
      setAction({BinOp, Ty}, Legal);

  setAction({G_MUL, v8s16}, Legal);

  setAction({G_FPEXT, s64}, Legal);
  setAction({G_FPEXT, 1, s32}, Legal);

  setAction({G_FPTRUNC, s32}, Legal);
  setAction({G_FPTRUNC, 1, s64}, Legal);

  // Constants
  setAction({TargetOpcode::G_FCONSTANT, s64}, Legal);

  // Merge/Unmerge
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    setAction({G_CONCAT_VECTORS, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
  }
  for (const auto &Ty : {v16s8, v8s16, v4s32, v2s64}) {
    setAction({G_CONCAT_VECTORS, 1, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, Ty}, Legal);
  }
}

// SSE4.1 tier: adds only the v4s32 multiply (PMULLD). No-op without SSE4.1.
void X86LegalizerInfo::setLegalizerInfoSSE41() {
  if (!Subtarget.hasSSE41())
    return;

  const LLT v4s32 = LLT::vector(4, 32);

  setAction({G_MUL, v4s32}, Legal);
}

// AVX tier: 256-bit vector load/store, 128<->256-bit insert/extract, and the
// wider merge/unmerge combinations. No-op without AVX.
void X86LegalizerInfo::setLegalizerInfoAVX() {
  if (!Subtarget.hasAVX())
    return;

  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);

  const LLT v32s8 = LLT::vector(32, 8);
  const LLT v64s8 = LLT::vector(64, 8);
  const LLT v16s16 = LLT::vector(16, 16);
  const LLT v32s16 = LLT::vector(32, 16);
  const LLT v8s32 = LLT::vector(8, 32);
  const LLT v16s32 = LLT::vector(16, 32);
  const LLT v4s64 = LLT::vector(4, 64);
  const LLT v8s64 = LLT::vector(8, 64);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v8s32, v4s64})
      setAction({MemOp, Ty}, Legal);

  // 256-bit destinations take 128-bit insert sources, and vice versa for
  // extract (type index 1 is the "other" operand in each pair).
  for (auto Ty : {v32s8, v16s16, v8s32, v4s64}) {
    setAction({G_INSERT, Ty}, Legal);
    setAction({G_EXTRACT, 1, Ty}, Legal);
  }
  for (auto Ty : {v16s8, v8s16, v4s32, v2s64}) {
    setAction({G_INSERT, 1, Ty}, Legal);
    setAction({G_EXTRACT, Ty}, Legal);
  }
  // Merge/Unmerge
  for (const auto &Ty :
       {v32s8, v64s8, v16s16, v32s16, v8s32, v16s32, v4s64, v8s64}) {
    setAction({G_CONCAT_VECTORS, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
  }
  for (const auto &Ty :
       {v16s8, v32s8, v8s16, v16s16, v4s32, v8s32, v2s64, v4s64}) {
    setAction({G_CONCAT_VECTORS, 1, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, Ty}, Legal);
  }
}

// AVX2 tier: 256-bit integer vector add/sub, v16s16/v8s32 multiplies, and
// 256<->512-bit merge/unmerge combinations. No-op without AVX2.
void X86LegalizerInfo::setLegalizerInfoAVX2() {
  if (!Subtarget.hasAVX2())
    return;

  const LLT v32s8 = LLT::vector(32, 8);
  const LLT v16s16 = LLT::vector(16, 16);
  const LLT v8s32 = LLT::vector(8, 32);
  const LLT v4s64 = LLT::vector(4, 64);

  const LLT v64s8 = LLT::vector(64, 8);
  const LLT v32s16 = LLT::vector(32, 16);
  const LLT v16s32 = LLT::vector(16, 32);
  const LLT v8s64 = LLT::vector(8, 64);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v32s8, v16s16, v8s32, v4s64})
      setAction({BinOp, Ty}, Legal);

  for (auto Ty : {v16s16, v8s32})
    setAction({G_MUL, Ty}, Legal);

  // Merge/Unmerge
  for (const auto &Ty : {v64s8, v32s16, v16s32, v8s64}) {
    setAction({G_CONCAT_VECTORS, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, 1, Ty}, Legal);
  }
  for (const auto &Ty : {v32s8, v16s16, v8s32, v4s64}) {
    setAction({G_CONCAT_VECTORS, 1, Ty}, Legal);
    setAction({G_UNMERGE_VALUES, Ty}, Legal);
  }
}

// AVX-512 tier: 512-bit integer vector add/sub, v16s32 multiply, 512-bit
// load/store, and insert/extract between 512-bit and smaller vectors. The
// trailing VLX section additionally legalizes the narrower G_MULs that need
// EVEX-encoded 128/256-bit instructions. No-op without AVX-512.
void X86LegalizerInfo::setLegalizerInfoAVX512() {
  if (!Subtarget.hasAVX512())
    return;

  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);

  const LLT v32s8 = LLT::vector(32, 8);
  const LLT v16s16 = LLT::vector(16, 16);
  const LLT v8s32 = LLT::vector(8, 32);
  const LLT v4s64 = LLT::vector(4, 64);

  const LLT v64s8 = LLT::vector(64, 8);
  const LLT v32s16 = LLT::vector(32, 16);
  const LLT v16s32 = LLT::vector(16, 32);
  const LLT v8s64 = LLT::vector(8, 64);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v16s32, v8s64})
      setAction({BinOp, Ty}, Legal);

  setAction({G_MUL, v16s32}, Legal);

  for (unsigned MemOp : {G_LOAD, G_STORE})
    for (auto Ty : {v16s32, v8s64})
      setAction({MemOp, Ty}, Legal);

  for (auto Ty : {v64s8, v32s16, v16s32, v8s64}) {
    setAction({G_INSERT, Ty}, Legal);
    setAction({G_EXTRACT, 1, Ty}, Legal);
  }
  for (auto Ty : {v32s8, v16s16, v8s32, v4s64, v16s8, v8s16, v4s32, v2s64}) {
    setAction({G_INSERT, 1, Ty}, Legal);
    setAction({G_EXTRACT, Ty}, Legal);
  }

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  for (auto Ty : {v4s32, v8s32})
    setAction({G_MUL, Ty}, Legal);
}

// AVX-512DQ tier: 64-bit element vector multiply (VPMULLQ) at 512 bits,
// plus the 128/256-bit forms when VLX is also available. Requires both
// AVX-512 and DQI.
void X86LegalizerInfo::setLegalizerInfoAVX512DQ() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasDQI()))
    return;

  const LLT v8s64 = LLT::vector(8, 64);

  setAction({G_MUL, v8s64}, Legal);

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  const LLT v2s64 = LLT::vector(2, 64);
  const LLT v4s64 = LLT::vector(4, 64);

  for (auto Ty : {v2s64, v4s64})
    setAction({G_MUL, Ty}, Legal);
}

// AVX-512BW tier: 512-bit byte/word vector add/sub and v32s16 multiply,
// plus the narrower word multiplies when VLX is also available. Requires
// both AVX-512 and BWI.
void X86LegalizerInfo::setLegalizerInfoAVX512BW() {
  if (!(Subtarget.hasAVX512() && Subtarget.hasBWI()))
    return;

  const LLT v64s8 = LLT::vector(64, 8);
  const LLT v32s16 = LLT::vector(32, 16);

  for (unsigned BinOp : {G_ADD, G_SUB})
    for (auto Ty : {v64s8, v32s16})
      setAction({BinOp, Ty}, Legal);

  setAction({G_MUL, v32s16}, Legal);

  /************ VLX *******************/
  if (!Subtarget.hasVLX())
    return;

  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v16s16 = LLT::vector(16, 16);

  for (auto Ty : {v8s16, v16s16})
    setAction({G_MUL, Ty}, Legal);
}
Upload File
Create Folder