003 File Manager
Current Path:
/usr/src/contrib/llvm-project/compiler-rt/lib/xray
usr
/
src
/
contrib
/
llvm-project
/
compiler-rt
/
lib
/
xray
/
📁
..
📄
weak_symbols.txt
(88 B)
📄
xray_AArch64.cpp
(4.62 KB)
📄
xray_allocator.h
(9.33 KB)
📄
xray_always_instrument.txt
(255 B)
📄
xray_arm.cpp
(5.87 KB)
📄
xray_basic_flags.cpp
(1.52 KB)
📄
xray_basic_flags.h
(1.18 KB)
📄
xray_basic_flags.inc
(1011 B)
📄
xray_basic_logging.cpp
(17.59 KB)
📄
xray_basic_logging.h
(1.68 KB)
📄
xray_buffer_queue.cpp
(7.35 KB)
📄
xray_buffer_queue.h
(8.79 KB)
📄
xray_defs.h
(967 B)
📄
xray_fdr_controller.h
(11.79 KB)
📄
xray_fdr_flags.cpp
(1.47 KB)
📄
xray_fdr_flags.h
(1.2 KB)
📄
xray_fdr_flags.inc
(1.28 KB)
📄
xray_fdr_log_records.h
(2.5 KB)
📄
xray_fdr_log_writer.h
(8.72 KB)
📄
xray_fdr_logging.cpp
(27.62 KB)
📄
xray_fdr_logging.h
(1.53 KB)
📄
xray_flags.cpp
(2.76 KB)
📄
xray_flags.h
(1.12 KB)
📄
xray_flags.inc
(2.29 KB)
📄
xray_function_call_trie.h
(22.36 KB)
📄
xray_init.cpp
(4.6 KB)
📄
xray_interface.cpp
(16.43 KB)
📄
xray_interface_internal.h
(3.2 KB)
📄
xray_log_interface.cpp
(7.53 KB)
📄
xray_mips.cpp
(7.16 KB)
📄
xray_mips64.cpp
(7.66 KB)
📄
xray_never_instrument.txt
(282 B)
📄
xray_powerpc64.cpp
(3.63 KB)
📄
xray_powerpc64.inc
(1019 B)
📄
xray_profile_collector.cpp
(14.06 KB)
📄
xray_profile_collector.h
(2.73 KB)
📄
xray_profiling.cpp
(17.55 KB)
📄
xray_profiling_flags.cpp
(1.34 KB)
📄
xray_profiling_flags.h
(1.17 KB)
📄
xray_profiling_flags.inc
(1.4 KB)
📄
xray_recursion_guard.h
(1.8 KB)
📄
xray_segmented_array.h
(21.22 KB)
📄
xray_trampoline_AArch64.S
(6.1 KB)
📄
xray_trampoline_arm.S
(3.85 KB)
📄
xray_trampoline_mips.S
(2.67 KB)
📄
xray_trampoline_mips64.S
(3.47 KB)
📄
xray_trampoline_powerpc64.cpp
(420 B)
📄
xray_trampoline_powerpc64_asm.S
(3.91 KB)
📄
xray_trampoline_x86_64.S
(7.06 KB)
📄
xray_tsc.h
(2.75 KB)
📄
xray_utils.cpp
(6.04 KB)
📄
xray_utils.h
(2.18 KB)
📄
xray_x86_64.cpp
(11.73 KB)
📄
xray_x86_64.inc
(997 B)
Editing: xray_trampoline_x86_64.S
//===-- xray_trampoline_x86_64.S --------------------------------*- ASM -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//

#include "../builtins/assembly.h"
#include "../sanitizer_common/sanitizer_asm.h"

// Spill the full caller-visible register state (RFLAGS, %rbp, %xmm0-7, and
// every SysV AMD64 argument/volatile GPR) to the stack so that calling an
// arbitrary C/C++ handler from the middle of an instrumented function's
// prologue/epilogue cannot disturb that function's state.
// Stack cost: 8 (pushfq) + 240 (explicit slots) = 248 bytes.
.macro SAVE_REGISTERS
  pushfq
  subq $240, %rsp
  // CFA = entry %rsp + 8 (return address); after pushfq + subq the stack
  // pointer is 248 bytes lower, so the CFA is %rsp + 256 here.  The old
  // value of 248 under-counted the pushfq by 8 bytes, producing incorrect
  // unwind information through every trampoline.
  CFI_DEF_CFA_OFFSET(256)
  movq %rbp, 232(%rsp)
  movupd %xmm0, 216(%rsp)
  movupd %xmm1, 200(%rsp)
  movupd %xmm2, 184(%rsp)
  movupd %xmm3, 168(%rsp)
  movupd %xmm4, 152(%rsp)
  movupd %xmm5, 136(%rsp)
  movupd %xmm6, 120(%rsp)
  movupd %xmm7, 104(%rsp)
  movq %rdi, 96(%rsp)
  movq %rax, 88(%rsp)
  movq %rdx, 80(%rsp)
  movq %rsi, 72(%rsp)
  movq %rcx, 64(%rsp)
  movq %r8, 56(%rsp)
  movq %r9, 48(%rsp)
  movq %r10, 40(%rsp)
  movq %r11, 32(%rsp)
  movq %r12, 24(%rsp)
  movq %r13, 16(%rsp)
  movq %r14, 8(%rsp)
  movq %r15, 0(%rsp)
.endm

// Exact inverse of SAVE_REGISTERS: reload every spilled register (same slot
// layout) and pop RFLAGS, returning the stack pointer to its entry value.
.macro RESTORE_REGISTERS
  movq  232(%rsp), %rbp
  movupd	216(%rsp), %xmm0
  movupd	200(%rsp), %xmm1
  movupd	184(%rsp), %xmm2
  movupd	168(%rsp), %xmm3
  movupd	152(%rsp), %xmm4
  movupd	136(%rsp), %xmm5
  movupd	120(%rsp), %xmm6
  movupd	104(%rsp), %xmm7
  movq  96(%rsp), %rdi
  movq  88(%rsp), %rax
  movq  80(%rsp), %rdx
  movq  72(%rsp), %rsi
  movq  64(%rsp), %rcx
  movq  56(%rsp), %r8
  movq  48(%rsp), %r9
  movq  40(%rsp), %r10
  movq  32(%rsp), %r11
  movq  24(%rsp), %r12
  movq  16(%rsp), %r13
  movq  8(%rsp), %r14
  movq  0(%rsp), %r15
  addq	$240, %rsp
  popfq
  // Back at trampoline entry layout: CFA = %rsp + 8 (return address only).
  CFI_DEF_CFA_OFFSET(8)
.endm

.macro ALIGNED_CALL_RAX
  // Call the logging handler, after aligning the stack to a 16-byte boundary.
  // The approach we're taking here uses additional stack space to stash the
  // stack pointer twice before aligning the pointer to 16-bytes. If the stack
  // was 8-byte aligned, it will become 16-byte aligned -- when restoring the
  // pointer, we can always look -8 bytes from the current position to get
  // either of the values we've stashed in the first place.
  pushq %rsp
  pushq (%rsp)
  andq $-0x10, %rsp
  callq *%rax
  movq 8(%rsp), %rsp
.endm

  .text
#if !defined(__APPLE__)
  .section .text
  // Record the real file name in debug info (was "xray_trampoline_x86.S").
  .file "xray_trampoline_x86_64.S"
#else
  .section __TEXT,__text
#endif

//===----------------------------------------------------------------------===//

  .globl ASM_SYMBOL(__xray_FunctionEntry)
  ASM_HIDDEN(__xray_FunctionEntry)
  .align 16, 0x90
  ASM_TYPE_FUNCTION(__xray_FunctionEntry)
# LLVM-MCA-BEGIN __xray_FunctionEntry
ASM_SYMBOL(__xray_FunctionEntry):
  CFI_STARTPROC
  SAVE_REGISTERS

  // This load has to be atomic, it's concurrent with __xray_patch().
  // On x86/amd64, a simple (type-aligned) MOV instruction is enough.
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
  testq %rax, %rax
  je .Ltmp0

  // The patched function prologue puts its xray_instr_map index into %r10d.
  movl %r10d, %edi
  xor %esi,%esi
  ALIGNED_CALL_RAX

.Ltmp0:
  RESTORE_REGISTERS
  retq
# LLVM-MCA-END
  ASM_SIZE(__xray_FunctionEntry)
  CFI_ENDPROC

//===----------------------------------------------------------------------===//

  .globl ASM_SYMBOL(__xray_FunctionExit)
  ASM_HIDDEN(__xray_FunctionExit)
  .align 16, 0x90
  ASM_TYPE_FUNCTION(__xray_FunctionExit)
# LLVM-MCA-BEGIN __xray_FunctionExit
ASM_SYMBOL(__xray_FunctionExit):
  CFI_STARTPROC
  // Save the important registers first. Since we're assuming that this
  // function is only jumped into, we only preserve the registers for
  // returning.
  subq $56, %rsp
  // CFA = %rsp + 8 (return address) + 56 = %rsp + 64.
  CFI_DEF_CFA_OFFSET(64)
  movq  %rbp, 48(%rsp)
  movupd  %xmm0, 32(%rsp)
  movupd  %xmm1, 16(%rsp)
  movq  %rax, 8(%rsp)
  movq  %rdx, 0(%rsp)
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
  testq %rax,%rax
  je .Ltmp2

  movl %r10d, %edi
  movl $1, %esi
  ALIGNED_CALL_RAX

.Ltmp2:
  // Restore the important registers.
  movq  48(%rsp), %rbp
  movupd  32(%rsp), %xmm0
  movupd  16(%rsp), %xmm1
  movq  8(%rsp), %rax
  movq  0(%rsp), %rdx
  addq  $56, %rsp
  CFI_DEF_CFA_OFFSET(8)
  retq
# LLVM-MCA-END
  ASM_SIZE(__xray_FunctionExit)
  CFI_ENDPROC

//===----------------------------------------------------------------------===//

  .globl ASM_SYMBOL(__xray_FunctionTailExit)
  ASM_HIDDEN(__xray_FunctionTailExit)
  .align 16, 0x90
  ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
# LLVM-MCA-BEGIN __xray_FunctionTailExit
ASM_SYMBOL(__xray_FunctionTailExit):
  CFI_STARTPROC
  SAVE_REGISTERS

  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
  testq %rax,%rax
  je .Ltmp4

  movl %r10d, %edi
  movl $2, %esi

  ALIGNED_CALL_RAX

.Ltmp4:
  RESTORE_REGISTERS
  retq
# LLVM-MCA-END
  ASM_SIZE(__xray_FunctionTailExit)
  CFI_ENDPROC

//===----------------------------------------------------------------------===//

  .globl ASM_SYMBOL(__xray_ArgLoggerEntry)
  ASM_HIDDEN(__xray_ArgLoggerEntry)
  .align 16, 0x90
  ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
ASM_SYMBOL(__xray_ArgLoggerEntry):
  CFI_STARTPROC
  SAVE_REGISTERS

  // Again, these function pointer loads must be atomic; MOV is fine.
  movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
  testq %rax, %rax
  jne .Larg1entryLog

  // If [arg1 logging handler] not set, defer to no-arg logging.
  movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
  testq %rax, %rax
  je .Larg1entryFail

.Larg1entryLog:

  // First argument will become the third
  movq %rdi, %rdx

  // XRayEntryType::LOG_ARGS_ENTRY into the second
  mov $0x3, %esi

  // 32-bit function ID becomes the first
  movl %r10d, %edi
  ALIGNED_CALL_RAX

.Larg1entryFail:
  RESTORE_REGISTERS
  retq
# LLVM-MCA-END
  ASM_SIZE(__xray_ArgLoggerEntry)
  CFI_ENDPROC

//===----------------------------------------------------------------------===//

  .global ASM_SYMBOL(__xray_CustomEvent)
  ASM_HIDDEN(__xray_CustomEvent)
  .align 16, 0x90
  ASM_TYPE_FUNCTION(__xray_CustomEvent)
# LLVM-MCA-BEGIN __xray_CustomEvent
ASM_SYMBOL(__xray_CustomEvent):
  CFI_STARTPROC
  SAVE_REGISTERS

  // We take two arguments to this trampoline, which should be in rdi and rsi
  // already.
  movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
  testq %rax,%rax
  je .LcustomEventCleanup

  ALIGNED_CALL_RAX

.LcustomEventCleanup:
  RESTORE_REGISTERS
  retq
# LLVM-MCA-END
  ASM_SIZE(__xray_CustomEvent)
  CFI_ENDPROC

//===----------------------------------------------------------------------===//

  .global ASM_SYMBOL(__xray_TypedEvent)
  ASM_HIDDEN(__xray_TypedEvent)
  .align 16, 0x90
  ASM_TYPE_FUNCTION(__xray_TypedEvent)
# LLVM-MCA-BEGIN __xray_TypedEvent
ASM_SYMBOL(__xray_TypedEvent):
  CFI_STARTPROC
  SAVE_REGISTERS

  // We pass three arguments to this trampoline, which should be in rdi, rsi
  // and rdx without our intervention.
  movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
  testq %rax,%rax
  je .LtypedEventCleanup

  ALIGNED_CALL_RAX

.LtypedEventCleanup:
  RESTORE_REGISTERS
  retq
# LLVM-MCA-END
  ASM_SIZE(__xray_TypedEvent)
  CFI_ENDPROC

//===----------------------------------------------------------------------===//

NO_EXEC_STACK_DIRECTIVE
Upload File
Create Folder