003 File Manager
Current Path: /usr/src/contrib/llvm-project/llvm/lib/Target/X86
📁 ..
📁 AsmParser
📁 Disassembler
📄 ImmutableGraph.h (15.15 KB)
📁 MCTargetDesc
📁 TargetInfo
📄 X86.h (7.41 KB)
📄 X86.td (68.44 KB)
📄 X86AsmPrinter.cpp (27.18 KB)
📄 X86AsmPrinter.h (5.96 KB)
📄 X86AvoidStoreForwardingBlocks.cpp (27.94 KB)
📄 X86AvoidTrailingCall.cpp (4.91 KB)
📄 X86CallFrameOptimization.cpp (23.07 KB)
📄 X86CallLowering.cpp (17.62 KB)
📄 X86CallLowering.h (1.74 KB)
📄 X86CallingConv.cpp (13.34 KB)
📄 X86CallingConv.h (1.09 KB)
📄 X86CallingConv.td (46.15 KB)
📄 X86CmovConversion.cpp (34.07 KB)
📄 X86CondBrFolding.cpp (18.4 KB)
📄 X86DiscriminateMemOps.cpp (7.11 KB)
📄 X86DomainReassignment.cpp (25.87 KB)
📄 X86EvexToVex.cpp (8.8 KB)
📄 X86ExpandPseudo.cpp (16.95 KB)
📄 X86FastISel.cpp (139.28 KB)
📄 X86FixupBWInsts.cpp (18.09 KB)
📄 X86FixupLEAs.cpp (24.44 KB)
📄 X86FixupSetCC.cpp (4.44 KB)
📄 X86FlagsCopyLowering.cpp (40.36 KB)
📄 X86FloatingPoint.cpp (62.66 KB)
📄 X86FrameLowering.cpp (138.71 KB)
📄 X86FrameLowering.h (11.64 KB)
📄 X86GenRegisterBankInfo.def (3.32 KB)
📄 X86ISelDAGToDAG.cpp (208.37 KB)
📄 X86ISelLowering.cpp (1.94 MB)
📄 X86ISelLowering.h (60.88 KB)
📄 X86IndirectBranchTracking.cpp (6.17 KB)
📄 X86IndirectThunks.cpp (9.78 KB)
📄 X86InsertPrefetch.cpp (9.64 KB)
📄 X86InsertWait.cpp (4.47 KB)
📄 X86Instr3DNow.td (5.24 KB)
📄 X86InstrAMX.td (5.6 KB)
📄 X86InstrAVX512.td (653.76 KB)
📄 X86InstrArithmetic.td (75.61 KB)
📄 X86InstrBuilder.h (8.45 KB)
📄 X86InstrCMovSetCC.td (5.76 KB)
📄 X86InstrCompiler.td (95.78 KB)
📄 X86InstrControl.td (20.53 KB)
📄 X86InstrExtension.td (11.64 KB)
📄 X86InstrFMA.td (33.23 KB)
📄 X86InstrFMA3Info.cpp (6.21 KB)
📄 X86InstrFMA3Info.h (3.25 KB)
📄 X86InstrFPStack.td (39.52 KB)
📄 X86InstrFoldTables.cpp (393.01 KB)
📄 X86InstrFoldTables.h (3.03 KB)
📄 X86InstrFormats.td (41.05 KB)
📄 X86InstrFragmentsSIMD.td (61.14 KB)
📄 X86InstrInfo.cpp (322.72 KB)
📄 X86InstrInfo.h (29.34 KB)
📄 X86InstrInfo.td (169.76 KB)
📄 X86InstrMMX.td (29.55 KB)
📄 X86InstrMPX.td (3.63 KB)
📄 X86InstrSGX.td (1.12 KB)
📄 X86InstrSSE.td (385.01 KB)
📄 X86InstrSVM.td (2.16 KB)
📄 X86InstrShiftRotate.td (49.56 KB)
📄 X86InstrSystem.td (34.03 KB)
📄 X86InstrTSX.td (2.1 KB)
📄 X86InstrVMX.td (3.53 KB)
📄 X86InstrVecCompiler.td (21.09 KB)
📄 X86InstrXOP.td (23.81 KB)
📄 X86InstructionSelector.cpp (61.11 KB)
📄 X86InterleavedAccess.cpp (32.7 KB)
📄 X86IntrinsicsInfo.h (73.96 KB)
📄 X86LegalizerInfo.cpp (15.6 KB)
📄 X86LegalizerInfo.h (1.65 KB)
📄 X86LoadValueInjectionLoadHardening.cpp (32.4 KB)
📄 X86LoadValueInjectionRetHardening.cpp (4.93 KB)
📄 X86MCInstLower.cpp (96.53 KB)
📄 X86MachineFunctionInfo.cpp (1.1 KB)
📄 X86MachineFunctionInfo.h (8.87 KB)
📄 X86MacroFusion.cpp (2.62 KB)
📄 X86MacroFusion.h (992 B)
📄 X86OptimizeLEAs.cpp (27.47 KB)
📄 X86PadShortFunction.cpp (7.33 KB)
📄 X86PartialReduction.cpp (15.46 KB)
📄 X86PfmCounters.td (10.18 KB)
📄 X86RegisterBankInfo.cpp (10.55 KB)
📄 X86RegisterBankInfo.h (2.87 KB)
📄 X86RegisterBanks.td (629 B)
📄 X86RegisterInfo.cpp (29 KB)
📄 X86RegisterInfo.h (5.61 KB)
📄 X86RegisterInfo.td (26.07 KB)
📄 X86SchedBroadwell.td (69.45 KB)
📄 X86SchedHaswell.td (73.96 KB)
📄 X86SchedPredicates.td (4.23 KB)
📄 X86SchedSandyBridge.td (50 KB)
📄 X86SchedSkylakeClient.td (74.65 KB)
📄 X86SchedSkylakeServer.td (113.85 KB)
📄 X86Schedule.td (36.9 KB)
📄 X86ScheduleAtom.td (38.26 KB)
📄 X86ScheduleBdVer2.td (56.78 KB)
📄 X86ScheduleBtVer2.td (46.98 KB)
📄 X86ScheduleSLM.td (22.91 KB)
📄 X86ScheduleZnver1.td (48.97 KB)
📄 X86ScheduleZnver2.td (48.12 KB)
📄 X86SelectionDAGInfo.cpp (12.02 KB)
📄 X86SelectionDAGInfo.h (1.8 KB)
📄 X86ShuffleDecodeConstantPool.cpp (11.22 KB)
📄 X86ShuffleDecodeConstantPool.h (2.13 KB)
📄 X86SpeculativeExecutionSideEffectSuppression.cpp (6.97 KB)
📄 X86SpeculativeLoadHardening.cpp (93.16 KB)
📄 X86Subtarget.cpp (13.25 KB)
📄 X86Subtarget.h (32.08 KB)
📄 X86TargetMachine.cpp (18.88 KB)
📄 X86TargetMachine.h (2.04 KB)
📄 X86TargetObjectFile.cpp (2.61 KB)
📄 X86TargetObjectFile.h (2.13 KB)
📄 X86TargetTransformInfo.cpp (189.14 KB)
📄 X86TargetTransformInfo.h (9.63 KB)
📄 X86VZeroUpper.cpp (12.59 KB)
📄 X86WinAllocaExpander.cpp (9.54 KB)
📄 X86WinEHState.cpp (28.97 KB)
Editing: X86CallingConv.cpp
//=== X86CallingConv.cpp - X86 Custom Calling Convention Impl -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of custom routines for the X86
// Calling Convention that aren't done by tablegen.
//
//===----------------------------------------------------------------------===//

#include "X86CallingConv.h"
#include "X86Subtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;

/// When the regcall calling convention is compiled for a 32 bit arch, special
/// treatment is required for 64 bit masks: the value should be assigned to
/// two GPRs.
/// \return true if registers were allocated and false otherwise.
static bool CC_X86_32_RegCall_Assign2Regs(unsigned &ValNo, MVT &ValVT,
                                          MVT &LocVT,
                                          CCValAssign::LocInfo &LocInfo,
                                          ISD::ArgFlagsTy &ArgFlags,
                                          CCState &State) {
  // List of GPR registers that are available to store values in the regcall
  // calling convention.
  static const MCPhysReg RegList[] = {X86::EAX, X86::ECX, X86::EDX, X86::EDI,
                                      X86::ESI};

  // The vector will save all the available registers for allocation.
  SmallVector<unsigned, 5> AvailableRegs;

  // Search for the available registers.
  for (auto Reg : RegList) {
    if (!State.isAllocated(Reg))
      AvailableRegs.push_back(Reg);
  }

  const size_t RequiredGprsUponSplit = 2;
  if (AvailableRegs.size() < RequiredGprsUponSplit)
    return false; // Not enough free registers - continue the search.

  // Allocate the available registers.
  for (unsigned I = 0; I < RequiredGprsUponSplit; I++) {

    // Mark the register as allocated.
    unsigned Reg = State.AllocateReg(AvailableRegs[I]);

    // Since we previously made sure that 2 registers are available
    // we expect that a real register number will be returned.
    assert(Reg && "Expecting a register will be available");

    // Assign the value to the allocated register.
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  }

  // Successfully allocated the registers - stop scanning further rules.
  return true;
}
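// Editor's note - an illustrative sketch, not part of the upstream file.
// Assuming Clang's regcall support with AVX-512 mask types, the case handled
// above arises from a declaration like:
//
//   __attribute__((regcall)) void use_mask(__mmask64 M);  // hypothetical
//
// On a 32 bit target the 64 bit mask cannot live in a single GPR, so it is
// split and both halves are bound here to two free registers from
// {EAX, ECX, EDX, EDI, ESI}; if fewer than two are free, the routine returns
// false so a later rule can place the value on the stack instead.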
static ArrayRef<MCPhysReg> CC_X86_VectorCallGetSSEs(const MVT &ValVT) {
  if (ValVT.is512BitVector()) {
    static const MCPhysReg RegListZMM[] = {X86::ZMM0, X86::ZMM1, X86::ZMM2,
                                           X86::ZMM3, X86::ZMM4, X86::ZMM5};
    return makeArrayRef(std::begin(RegListZMM), std::end(RegListZMM));
  }

  if (ValVT.is256BitVector()) {
    static const MCPhysReg RegListYMM[] = {X86::YMM0, X86::YMM1, X86::YMM2,
                                           X86::YMM3, X86::YMM4, X86::YMM5};
    return makeArrayRef(std::begin(RegListYMM), std::end(RegListYMM));
  }

  static const MCPhysReg RegListXMM[] = {X86::XMM0, X86::XMM1, X86::XMM2,
                                         X86::XMM3, X86::XMM4, X86::XMM5};
  return makeArrayRef(std::begin(RegListXMM), std::end(RegListXMM));
}

static ArrayRef<MCPhysReg> CC_X86_64_VectorCallGetGPRs() {
  static const MCPhysReg RegListGPR[] = {X86::RCX, X86::RDX, X86::R8, X86::R9};
  return makeArrayRef(std::begin(RegListGPR), std::end(RegListGPR));
}

static bool CC_X86_VectorCallAssignRegister(unsigned &ValNo, MVT &ValVT,
                                            MVT &LocVT,
                                            CCValAssign::LocInfo &LocInfo,
                                            ISD::ArgFlagsTy &ArgFlags,
                                            CCState &State) {
  ArrayRef<MCPhysReg> RegList = CC_X86_VectorCallGetSSEs(ValVT);
  bool Is64bit = static_cast<const X86Subtarget &>(
                     State.getMachineFunction().getSubtarget())
                     .is64Bit();

  for (auto Reg : RegList) {
    // If the register is not marked as allocated - assign to it.
    if (!State.isAllocated(Reg)) {
      unsigned AssignedReg = State.AllocateReg(Reg);
      assert(AssignedReg == Reg && "Expecting a valid register allocation");
      State.addLoc(
          CCValAssign::getReg(ValNo, ValVT, AssignedReg, LocVT, LocInfo));
      return true;
    }
    // If the register is marked as shadow allocated - assign to it.
    if (Is64bit && State.IsShadowAllocatedReg(Reg)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return true;
    }
  }

  llvm_unreachable("Clang should ensure that hva marked vectors will have "
                   "an available register.");
  return false;
}

/// The vectorcall calling convention has special handling for vector types
/// and HVAs on 64 bit arch.
/// For HVAs, shadow registers might be allocated on the first pass
/// and actual XMM registers are allocated on the second pass.
/// For vector types, actual XMM registers are allocated on the first pass.
/// \return true if registers were allocated and false otherwise.
static bool CC_X86_64_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                 CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    return true;
  }

  // Process only vector types as defined by the vectorcall spec:
  // "A vector type is either a floating-point type, for example,
  // a float or double, or a SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    // If R9 was already assigned it means that we are after the fourth element
    // and because this is not an HVA / vector type, we need to allocate a
    // shadow XMM register.
    if (State.isAllocated(X86::R9)) {
      // Assign a shadow XMM register.
      (void)State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT));
    }

    return false;
  }

  if (!ArgFlags.isHva() || ArgFlags.isHvaStart()) {
    // Assign a shadow GPR register.
    (void)State.AllocateReg(CC_X86_64_VectorCallGetGPRs());

    // Assign an XMM register (shadow for HVA, non-shadow for non-HVA).
    if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
      // In the vectorcall calling convention, an additional shadow stack can
      // be created on top of the basic 32 bytes of win64. This can happen if
      // the fifth or sixth argument is a vector type or HVA. In that case a
      // shadow stack slot of 8 bytes is allocated for each such argument.
      const TargetRegisterInfo *TRI =
          State.getMachineFunction().getSubtarget().getRegisterInfo();
      if (TRI->regsOverlap(Reg, X86::XMM4) ||
          TRI->regsOverlap(Reg, X86::XMM5))
        State.AllocateStack(8, Align(8));

      if (!ArgFlags.isHva()) {
        State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
        return true; // Allocated a register - stop the search.
      }
    }
  }

  // If this is an HVA - stop the search,
  // otherwise continue the search.
  return ArgFlags.isHva();
}
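// Editor's note - a hedged illustration, not part of the upstream file.
// Under x64 vectorcall a call such as the following (names hypothetical):
//
//   typedef struct { __m128 V[3]; } Hva3;          // an HVA
//   void __vectorcall F(int A, __m128 B, Hva3 C);
//
// is processed in two passes: on the first pass 'B' takes the first free XMM
// register (with a GPR shadow-allocated alongside it) and an XMM register is
// shadow-reserved when the HVA 'C' is first seen; on the second pass,
// CC_X86_VectorCallAssignRegister above binds the HVA members to free or
// shadow-allocated XMM registers.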
/// The vectorcall calling convention has special handling for vector types
/// and HVAs on 32 bit arch.
/// For HVAs, actual XMM registers are allocated on the second pass.
/// For vector types, actual XMM registers are allocated on the first pass.
/// \return true if registers were allocated and false otherwise.
static bool CC_X86_32_VectorCall(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                 CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // On the second pass, go through the HVAs only.
  if (ArgFlags.isSecArgPass()) {
    if (ArgFlags.isHva())
      return CC_X86_VectorCallAssignRegister(ValNo, ValVT, LocVT, LocInfo,
                                             ArgFlags, State);
    return true;
  }

  // Process only vector types as defined by the vectorcall spec:
  // "A vector type is either a floating-point type, for example,
  // a float or double, or a SIMD vector type, for example, __m128 or __m256".
  if (!(ValVT.isFloatingPoint() ||
        (ValVT.isVector() && ValVT.getSizeInBits() >= 128))) {
    return false;
  }

  if (ArgFlags.isHva())
    return true; // If this is an HVA - stop the search.

  // Assign an XMM register.
  if (unsigned Reg = State.AllocateReg(CC_X86_VectorCallGetSSEs(ValVT))) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // In case we did not find an available XMM register for a vector -
  // pass it indirectly.
  // This is similar to CCPassIndirect, with the addition of inreg.
  if (!ValVT.isFloatingPoint()) {
    LocVT = MVT::i32;
    LocInfo = CCValAssign::Indirect;
    ArgFlags.setInReg();
  }

  return false; // No register was assigned - continue the search.
}

static bool CC_X86_AnyReg_Error(unsigned &, MVT &, MVT &,
                                CCValAssign::LocInfo &, ISD::ArgFlagsTy &,
                                CCState &) {
  llvm_unreachable("The AnyReg calling convention is only supported by the "
                   "stackmap and patchpoint intrinsics.");
  // Gracefully fall back to the X86 C calling convention on Release builds.
  return false;
}

static bool CC_X86_32_MCUInReg(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                               CCValAssign::LocInfo &LocInfo,
                               ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  // This is similar to CCAssignToReg<[EAX, EDX, ECX]>, but makes sure
  // not to split i64 and double between a register and stack.
  static const MCPhysReg RegList[] = {X86::EAX, X86::EDX, X86::ECX};
  static const unsigned NumRegs = sizeof(RegList) / sizeof(RegList[0]);

  SmallVectorImpl<CCValAssign> &PendingMembers = State.getPendingLocs();

  // If this is the first part of a double/i64/i128, or if we're already
  // in the middle of a split, add to the pending list. If this is not
  // the end of the split, return, otherwise go on to process the pending
  // list.
  if (ArgFlags.isSplit() || !PendingMembers.empty()) {
    PendingMembers.push_back(
        CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
    if (!ArgFlags.isSplitEnd())
      return true;
  }

  // If there are no pending members, we are not in the middle of a split,
  // so do the usual inreg stuff.
  if (PendingMembers.empty()) {
    if (unsigned Reg = State.AllocateReg(RegList)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return true;
    }
    return false;
  }

  assert(ArgFlags.isSplitEnd());

  // We now have the entire original argument in PendingMembers, so decide
  // whether to use registers or the stack.
  // Per the MCU ABI:
  // a) To use registers, we need to have enough of them free to contain
  //    the entire argument.
  // b) We never want to use more than 2 registers for a single argument.

  unsigned FirstFree = State.getFirstUnallocated(RegList);
  bool UseRegs = PendingMembers.size() <= std::min(2U, NumRegs - FirstFree);

  for (auto &It : PendingMembers) {
    if (UseRegs)
      It.convertToReg(State.AllocateReg(RegList[FirstFree++]));
    else
      It.convertToMem(State.AllocateStack(4, Align(4)));
    State.addLoc(It);
  }

  PendingMembers.clear();

  return true;
}
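// Editor's note - a hedged illustration, not part of the upstream file.
// For the IAMCU ABI the routine above keeps split values whole; given a
// hypothetical prototype such as:
//
//   long long G(long long X, int Y);
//
// the two i32 halves of 'X' are gathered on the pending list and then either
// both placed in registers (EAX and EDX here) or both spilled to the stack -
// never half in a register and half in memory; 'Y' then takes the next free
// register (ECX) or a 4 byte stack slot.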
/// X86 interrupt handlers can only take one or two stack arguments, but if
/// there are two arguments, they are in the opposite order from the standard
/// convention. Therefore, we have to look at the argument count up front
/// before allocating stack for each argument.
static bool CC_X86_Intr(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                        CCValAssign::LocInfo &LocInfo,
                        ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  const MachineFunction &MF = State.getMachineFunction();
  size_t ArgCount = State.getMachineFunction().getFunction().arg_size();
  bool Is64Bit = static_cast<const X86Subtarget &>(MF.getSubtarget()).is64Bit();
  unsigned SlotSize = Is64Bit ? 8 : 4;
  unsigned Offset;
  if (ArgCount == 1 && ValNo == 0) {
    // If we have one argument, the argument is five stack slots big, at fixed
    // offset zero.
    Offset = State.AllocateStack(5 * SlotSize, Align(4));
  } else if (ArgCount == 2 && ValNo == 0) {
    // If we have two arguments, the stack slot is *after* the error code
    // argument. Pretend it doesn't consume stack space, and account for it
    // when we assign the second argument.
    Offset = SlotSize;
  } else if (ArgCount == 2 && ValNo == 1) {
    // If this is the second of two arguments, it must be the error code. It
    // appears first on the stack, and is then followed by the five slot
    // interrupt struct.
    Offset = 0;
    (void)State.AllocateStack(6 * SlotSize, Align(4));
  } else {
    report_fatal_error("unsupported x86 interrupt prototype");
  }

  // FIXME: This should be accounted for in
  // X86FrameLowering::getFrameIndexReference, not here.
  if (Is64Bit && ArgCount == 2)
    Offset += SlotSize;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

// Provides entry points of CC_X86 and RetCC_X86.
#include "X86GenCallingConv.inc"
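// Editor's note - a hedged illustration, not part of the upstream file.
// The two x86 interrupt-handler shapes accepted by CC_X86_Intr look like
// (attribute spelling per GCC/Clang, names hypothetical):
//
//   __attribute__((interrupt)) void Isr(struct Frame *F);
//   __attribute__((interrupt)) void Isr(struct Frame *F, uword_t ErrorCode);
//
// In the two-argument form the CPU pushes the error code last, so it sits at
// offset 0 and the interrupt frame follows it - the reversed order that the
// comment on CC_X86_Intr describes.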