arch/x86/kernel/fpu/signal.o: file format elf64-x86-64

Disassembly of section .text:

0000000000000000 :
/*
 * Signal frame handlers.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
   0: 55                   push %rbp
   1: 48 89 e5             mov %rsp,%rbp
   4: 41 54                push %r12
   6: 49 89 fc             mov %rdi,%r12
   9: 53                   push %rbx
   a: 48 89 f3             mov %rsi,%rbx
	if (use_fxsr()) {
		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_32 __user *fp = buf;

		convert_from_fxsr(&env, tsk);
   d: 48 89 fe             mov %rdi,%rsi
  10: 48 8d bd 7c ff ff ff lea -0x84(%rbp),%rdi
{
  17: 48 83 ec 78          sub $0x78,%rsp
  1b: 65 48 8b 04 25 28 00 mov %gs:0x28,%rax
  22: 00 00
  24: 48 89 45 e8          mov %rax,-0x18(%rbp)
  28: 31 c0                xor %eax,%eax
		convert_from_fxsr(&env, tsk);
  2a: e8 00 00 00 00       callq 2f
	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
  2f: 48 89 df             mov %rbx,%rdi
  32: ba 6c 00 00 00       mov $0x6c,%edx
  37: 48 8d b5 7c ff ff ff lea -0x84(%rbp),%rsi
  3e: e8 00 00 00 00       callq 43
		if (__copy_to_user(buf, &env, sizeof(env)) ||
  43: 85 c0                test %eax,%eax
  45: 75 4f                jne 96
		    __put_user(xsave->i387.swd, &fp->status) ||
  47: 41 0f b7 84 24 c2 13 movzwl 0x13c2(%r12),%eax
  4e: 00 00
}

static __always_inline void stac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
  50: 90                   nop
  51: 90                   nop
  52: 90                   nop
  53: 66 89 43 6c          mov %ax,0x6c(%rbx)
  57: 31 c0                xor %eax,%eax
	alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
  59: 90                   nop
  5a: 90                   nop
  5b: 90                   nop
		if (__copy_to_user(buf, &env, sizeof(env)) ||
  5c: 85 c0                test %eax,%eax
  5e: 75 36                jne 96
	alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
  60: 90                   nop
  61: 90                   nop
  62: 90                   nop
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
  63: 66 c7 43 6e 00 00    movw $0x0,0x6e(%rbx)
	alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
  69: 90                   nop
  6a: 90                   nop
  6b: 90                   nop
		    __put_user(xsave->i387.swd, &fp->status) ||
  6c: 85 c0                test %eax,%eax
  6e: 75 26                jne 96
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}
  70: 48 8b 4d e8          mov -0x18(%rbp),%rcx
  74: 65 48 33 0c 25 28 00 xor %gs:0x28,%rcx
  7b: 00 00
  7d: 75 1e                jne 9d
  7f: 48 83 c4 78          add $0x78,%rsp
  83: 5b                   pop %rbx
  84: 41 5c                pop %r12
  86: 5d                   pop %rbp
  87: c3                   retq
		    __put_user(xsave->i387.swd, &fp->status) ||
  88: b8 f2 ff ff ff       mov $0xfffffff2,%eax
  8d: eb ca                jmp 59
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
  8f: b8 f2 ff ff ff       mov $0xfffffff2,%eax
  94: eb d3                jmp 69
			return -1;
  96: b8 ff ff ff ff       mov $0xffffffff,%eax
  9b: eb d3                jmp 70
}
  9d: e8 00 00 00 00       callq a2
  a2: 66 66 2e 0f 1f 84 00 data16 nopw %cs:0x0(%rax,%rax,1)
  a9: 00 00 00 00
  ad: 0f 1f 00             nopl (%rax)

00000000000000b0 <__fpu__restore_sig>:
	} else
		return copy_user_to_fregs(buf);
}

static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
  b0: e8 00 00 00 00       callq b5 <__fpu__restore_sig+0x5>
  b5: 55                   push %rbp
  b6: 48 89 e5             mov %rsp,%rbp
  b9: 41 57                push %r15
  bb: 41 56                push %r14
  bd: 41 55                push %r13
  bf: 41 54                push %r12
  c1: 53                   push %rbx
DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
	return this_cpu_read_stable(current_task);
  c2: 65 4c 8b 2c 25 00 00 mov %gs:0x0,%r13
  c9: 00 00
  cb: 48 81 ec 90 00 00 00 sub $0x90,%rsp
  d2: 65 48 8b 04 25 28 00 mov %gs:0x28,%rax
  d9: 00 00
  db: 48 89 45 d0          mov %rax,-0x30(%rbp)
  df: 31 c0                xor %eax,%eax
	struct
user_i387_ia32_struct *envp = NULL; int state_size = fpu_kernel_xstate_size; int ia32_fxstate = (buf != buf_fx); struct task_struct *tsk = current; struct fpu *fpu = &tsk->thread.fpu; e1: 49 8d 85 80 13 00 00 lea 0x1380(%r13),%rax e8: 48 89 85 48 ff ff ff mov %rax,-0xb8(%rbp) int ret = 0; ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); if (!buf) { ef: 48 85 ff test %rdi,%rdi f2: 0f 84 1c 04 00 00 je 514 <__fpu__restore_sig+0x464> fpu__clear(fpu); return 0; } if (!access_ok(buf, size)) f8: 48 63 d2 movslq %edx,%rdx if (__builtin_constant_p(size)) return unlikely(addr > limit - size); /* Arbitrary sizes? Be careful about overflow */ addr += size; if (unlikely(addr < size)) fb: 31 c9 xor %ecx,%ecx fd: 49 8b 85 58 13 00 00 mov 0x1358(%r13),%rax 104: 48 89 fb mov %rdi,%rbx 107: 48 01 fa add %rdi,%rdx 10a: 0f 92 c1 setb %cl 10d: 48 89 8d 58 ff ff ff mov %rcx,-0xa8(%rbp) 114: 0f 82 f0 03 00 00 jb 50a <__fpu__restore_sig+0x45a> 11a: 48 39 d0 cmp %rdx,%rax 11d: 0f 82 e7 03 00 00 jb 50a <__fpu__restore_sig+0x45a> 123: 49 89 f4 mov %rsi,%r12 * These will statically patch the target code for additional * performance. */ static __always_inline __pure bool _static_cpu_has(u16 bit) { asm_volatile_goto("1: jmp 6f\n" 126: e9 00 00 00 00 jmpq 12b <__fpu__restore_sig+0x7b> if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw))) 12b: 49 8d b4 24 d0 01 00 lea 0x1d0(%r12),%rsi 132: 00 133: 48 8d bd 60 ff ff ff lea -0xa0(%rbp),%rdi 13a: ba 30 00 00 00 mov $0x30,%edx 13f: e8 00 00 00 00 callq 144 <__fpu__restore_sig+0x94> 144: 85 c0 test %eax,%eax 146: 0f 85 dc 02 00 00 jne 428 <__fpu__restore_sig+0x378> if (fx_sw->magic1 != FP_XSTATE_MAGIC1 || 14c: 81 bd 60 ff ff ff 53 cmpl $0x46505853,-0xa0(%rbp) 153: 58 50 46 156: 0f 85 cc 02 00 00 jne 428 <__fpu__restore_sig+0x378> fx_sw->xstate_size < min_xstate_size || 15c: 8b 95 70 ff ff ff mov -0x90(%rbp),%edx if (fx_sw->magic1 != FP_XSTATE_MAGIC1 || 162: 81 fa 3f 02 00 00 cmp $0x23f,%edx 168: 0f 86 ba 02 00 00 jbe 428 <__fpu__restore_sig+0x378> fx_sw->xstate_size < min_xstate_size || 16e: 3b 15 00 00 00 00 cmp 0x0(%rip),%edx # 174 <__fpu__restore_sig+0xc4> 174: 0f 87 ae 02 00 00 ja 428 <__fpu__restore_sig+0x378> fx_sw->xstate_size > fpu_user_xstate_size || 17a: 3b 95 64 ff ff ff cmp -0x9c(%rbp),%edx 180: 0f 87 a2 02 00 00 ja 428 <__fpu__restore_sig+0x378> alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 186: 90 nop 187: 90 nop 188: 90 nop if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)) 189: 90 nop 18a: 90 nop 18b: 90 nop 18c: 8b 95 70 ff ff ff mov -0x90(%rbp),%edx 192: 41 8b 14 14 mov (%r12,%rdx,1),%edx alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 196: 90 nop 197: 90 nop 198: 90 nop || magic2 != FP_XSTATE_MAGIC2) 199: 81 fa 45 58 50 46 cmp $0x46505845,%edx 19f: 0f 85 83 02 00 00 jne 428 <__fpu__restore_sig+0x378> 1a5: 85 c0 test %eax,%eax 1a7: 0f 85 7b 02 00 00 jne 428 <__fpu__restore_sig+0x378> */ state_size = sizeof(struct fxregs_state); fx_only = 1; trace_x86_fpu_xstate_check_failed(fpu); } else { state_size = fx_sw_user.xstate_size; 1ad: 8b 85 70 ff ff ff mov -0x90(%rbp),%eax int fx_only = 0; 1b3: 45 31 f6 xor %r14d,%r14d state_size = fx_sw_user.xstate_size; 1b6: 89 85 54 ff ff ff mov %eax,-0xac(%rbp) xfeatures = fx_sw_user.xfeatures; 1bc: 48 8b 85 68 ff ff ff mov -0x98(%rbp),%rax 1c3: 48 89 85 58 ff ff ff mov %rax,-0xa8(%rbp) 1ca: eb 0f jmp 1db <__fpu__restore_sig+0x12b> int state_size = fpu_kernel_xstate_size; 1cc: 8b 05 00 00 00 00 mov 0x0(%rip),%eax # 1d2 
<__fpu__restore_sig+0x122> int fx_only = 0; 1d2: 45 31 f6 xor %r14d,%r14d int state_size = fpu_kernel_xstate_size; 1d5: 89 85 54 ff ff ff mov %eax,-0xac(%rbp) 1db: 65 48 8b 04 25 00 00 mov %gs:0x0,%rax 1e2: 00 00 */ static __always_inline void set_bit(long nr, volatile unsigned long *addr) { if (IS_IMMEDIATE(nr)) { asm volatile(LOCK_PREFIX "orb %1,%0" 1e4: f0 80 48 01 40 lock orb $0x40,0x1(%rax) */ set_thread_flag(TIF_NEED_FPU_LOAD); __fpu_invalidate_fpregs_state(fpu); if ((unsigned long)buf_fx % 64) fx_only = 1; 1e9: ba 01 00 00 00 mov $0x1,%edx 1ee: 41 f6 c4 3f test $0x3f,%r12b __this_cpu_write(fpu_fpregs_owner_ctx, NULL); } static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu) { fpu->last_cpu = -1; 1f2: 41 c7 85 80 13 00 00 movl $0xffffffff,0x1380(%r13) 1f9: ff ff ff ff 1fd: 44 0f 45 f2 cmovne %edx,%r14d /* * For 32-bit frames with fxstate, copy the fxstate so it can be * reconstructed later. */ if (ia32_fxstate) { 201: 4c 39 e3 cmp %r12,%rbx 204: 0f 84 48 01 00 00 je 352 <__fpu__restore_sig+0x2a2> 20a: 48 8d bd 60 ff ff ff lea -0xa0(%rbp),%rdi 211: 48 89 de mov %rbx,%rsi 214: ba 6c 00 00 00 mov $0x6c,%edx 219: e8 00 00 00 00 callq 21e <__fpu__restore_sig+0x16e> ret = __copy_from_user(&env, buf, sizeof(env)); if (ret) goto err_out; envp = &env; 21e: 48 8d 9d 60 ff ff ff lea -0xa0(%rbp),%rbx if (ret) 225: 85 c0 test %eax,%eax 227: 0f 85 0b 01 00 00 jne 338 <__fpu__restore_sig+0x288> 22d: e9 00 00 00 00 jmpq 232 <__fpu__restore_sig+0x182> 232: 4d 8d bd c0 13 00 00 lea 0x13c0(%r13),%r15 } fpregs_unlock(); } if (use_xsave() && !fx_only) { 239: 45 85 f6 test %r14d,%r14d 23c: 0f 84 00 02 00 00 je 442 <__fpu__restore_sig+0x392> if (unlikely(init_bv)) copy_kernel_to_xregs(&init_fpstate.xsave, init_bv); ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures); } else if (use_fxsr()) { ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size); 242: 48 63 b5 54 ff ff ff movslq -0xac(%rbp),%rsi static __always_inline void check_object_size(const void *ptr, unsigned long n, bool to_user) { if (!__builtin_constant_p(n)) __check_object_size(ptr, n, to_user); 249: 31 d2 xor %edx,%edx 24b: 4c 89 ff mov %r15,%rdi 24e: e8 00 00 00 00 callq 253 <__fpu__restore_sig+0x1a3> 253: 8b 95 54 ff ff ff mov -0xac(%rbp),%edx 259: 4c 89 ff mov %r15,%rdi 25c: 4c 89 e6 mov %r12,%rsi 25f: e8 00 00 00 00 callq 264 <__fpu__restore_sig+0x1b4> 264: 41 89 c4 mov %eax,%r12d if (ret) { ret = -EFAULT; 267: b8 f2 ff ff ff mov $0xfffffff2,%eax if (ret) { 26c: 45 85 e4 test %r12d,%r12d 26f: 0f 85 c3 00 00 00 jne 338 <__fpu__restore_sig+0x288> 275: e9 00 00 00 00 jmpq 27a <__fpu__restore_sig+0x1ca> if (fx_only) 27a: 45 85 f6 test %r14d,%r14d 27d: 0f 84 cb 02 00 00 je 54e <__fpu__restore_sig+0x49e> header->xfeatures = XFEATURE_MASK_FPSSE; 283: 49 c7 85 c0 15 00 00 movq $0x3,0x15c0(%r13) 28a: 03 00 00 00 xsave->i387.mxcsr &= mxcsr_feature_mask; 28e: 8b 05 00 00 00 00 mov 0x0(%rip),%eax # 294 <__fpu__restore_sig+0x1e4> 294: 41 21 85 d8 13 00 00 and %eax,0x13d8(%r13) if (ia32_env) 29b: 48 85 db test %rbx,%rbx 29e: 74 0b je 2ab <__fpu__restore_sig+0x1fb> convert_to_fxsr(&state->fxsave, ia32_env); 2a0: 48 89 de mov %rbx,%rsi 2a3: 4c 89 ff mov %r15,%rdi 2a6: e8 00 00 00 00 callq 2ab <__fpu__restore_sig+0x1fb> 2ab: f3 0f 1e fa endbr64 * The various preempt_count add/sub methods */ static __always_inline void __preempt_count_add(int val) { raw_cpu_add_4(__preempt_count, val); 2af: 65 81 05 00 00 00 00 addl $0x200,%gs:0x0(%rip) # 2ba <__fpu__restore_sig+0x20a> 2b6: 00 02 00 00 2ba: e9 00 00 00 00 jmpq 2bf 
<__fpu__restore_sig+0x20f> sanitize_restored_xstate(&fpu->state, envp, xfeatures, fx_only); fpregs_lock(); if (use_xsave()) { u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE; 2bf: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # 2c6 <__fpu__restore_sig+0x216> XSTATE_XRESTORE(xstate, lmask, hmask); 2c6: 48 c7 c7 00 00 00 00 mov $0x0,%rdi 2cd: 48 89 d0 mov %rdx,%rax u32 hmask = mask >> 32; 2d0: 48 c1 ea 20 shr $0x20,%rdx 2d4: 48 83 e0 fc and $0xfffffffffffffffc,%rax XSTATE_XRESTORE(xstate, lmask, hmask); 2d8: 48 0f ae 2f xrstor64 (%rdi) 2dc: f3 0f 1e fa endbr64 return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); 2e0: 31 c0 xor %eax,%eax 2e2: 49 0f ae 0f fxrstor64 (%r15) goto err_out; fpregs_lock(); ret = copy_kernel_to_fregs_err(&fpu->state.fsave); } if (!ret) 2e6: 85 c0 test %eax,%eax 2e8: 0f 84 ff 00 00 00 je 3ed <__fpu__restore_sig+0x33d> 2ee: f3 0f 1e fa endbr64 __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); } static inline void local_bh_enable(void) { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); 2f2: be 00 02 00 00 mov $0x200,%esi 2f7: 48 c7 c7 00 00 00 00 mov $0x0,%rdi 2fe: 89 85 58 ff ff ff mov %eax,-0xa8(%rbp) 304: e8 00 00 00 00 callq 309 <__fpu__restore_sig+0x259> fpregs_mark_activate(); fpregs_unlock(); err_out: if (ret) 309: 8b 85 58 ff ff ff mov -0xa8(%rbp),%eax 30f: 85 c0 test %eax,%eax 311: 75 25 jne 338 <__fpu__restore_sig+0x288> fpu__clear(fpu); return ret; } 313: 48 8b 4d d0 mov -0x30(%rbp),%rcx 317: 65 48 33 0c 25 28 00 xor %gs:0x28,%rcx 31e: 00 00 320: 0f 85 e1 02 00 00 jne 607 <__fpu__restore_sig+0x557> 326: 48 81 c4 90 00 00 00 add $0x90,%rsp 32d: 5b pop %rbx 32e: 41 5c pop %r12 330: 41 5d pop %r13 332: 41 5e pop %r14 334: 41 5f pop %r15 336: 5d pop %rbp 337: c3 retq fpu__clear(fpu); 338: 48 8b bd 48 ff ff ff mov -0xb8(%rbp),%rdi 33f: 89 85 58 ff ff ff mov %eax,-0xa8(%rbp) 345: e8 00 00 00 00 callq 34a <__fpu__restore_sig+0x29a> 34a: 8b 85 58 ff ff ff mov -0xa8(%rbp),%eax 350: eb c1 jmp 313 <__fpu__restore_sig+0x263> 352: f3 0f 1e fa endbr64 356: 65 81 05 00 00 00 00 addl $0x200,%gs:0x0(%rip) # 361 <__fpu__restore_sig+0x2b1> 35d: 00 02 00 00 } #endif static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; 361: 83 80 78 12 00 00 01 addl $0x1,0x1278(%rax) 368: e9 00 00 00 00 jmpq 36d <__fpu__restore_sig+0x2bd> if (fx_only) { 36d: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # 374 <__fpu__restore_sig+0x2c4> 374: 45 85 f6 test %r14d,%r14d 377: 0f 85 86 00 00 00 jne 403 <__fpu__restore_sig+0x353> u64 init_bv = xfeatures_mask & ~xbv; 37d: 48 8b 85 58 ff ff ff mov -0xa8(%rbp),%rax 384: 48 f7 d0 not %rax if (unlikely(init_bv)) 387: 48 21 d0 and %rdx,%rax 38a: 0f 85 49 02 00 00 jne 5d9 <__fpu__restore_sig+0x529> alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 390: 90 nop 391: 90 nop 392: 90 nop u32 hmask = mask >> 32; 393: 48 8b 85 58 ff ff ff mov -0xa8(%rbp),%rax XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); 39a: 4c 89 e7 mov %r12,%rdi u32 hmask = mask >> 32; 39d: 48 89 c2 mov %rax,%rdx 3a0: 48 c1 ea 20 shr $0x20,%rdx XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); 3a4: 48 0f ae 2f xrstor64 (%rdi) 3a8: 31 c0 xor %eax,%eax alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 3aa: 90 nop 3ab: 90 nop 3ac: 90 nop 3ad: 65 48 8b 14 25 00 00 mov %gs:0x0,%rdx 3b4: 00 00 } static __always_inline void pagefault_disabled_dec(void) { current->pagefault_disabled--; 3b6: 83 aa 78 12 00 00 01 subl $0x1,0x1278(%rdx) if (!ret) { 3bd: 85 c0 test %eax,%eax 3bf: 0f 84 5e 01 00 00 je 523 <__fpu__restore_sig+0x473> 3c5: f3 0f 
1e fa endbr64 3c9: be 00 02 00 00 mov $0x200,%esi 3ce: 48 c7 c7 00 00 00 00 mov $0x0,%rdi 3d5: e8 00 00 00 00 callq 3da <__fpu__restore_sig+0x32a> struct user_i387_ia32_struct *envp = NULL; 3da: 31 db xor %ebx,%ebx static inline void fpregs_unlock(void) { local_bh_enable(); preempt_enable(); } 3dc: e9 4c fe ff ff jmpq 22d <__fpu__restore_sig+0x17d> 3e1: 4d 8d bd c0 13 00 00 lea 0x13c0(%r13),%r15 3e8: e9 55 fe ff ff jmpq 242 <__fpu__restore_sig+0x192> 3ed: 89 85 58 ff ff ff mov %eax,-0xa8(%rbp) fpregs_mark_activate(); 3f3: e8 00 00 00 00 callq 3f8 <__fpu__restore_sig+0x348> 3f8: 8b 85 58 ff ff ff mov -0xa8(%rbp),%eax static inline void local_bh_enable(void) 3fe: e9 eb fe ff ff jmpq 2ee <__fpu__restore_sig+0x23e> u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE; 403: 48 89 d0 mov %rdx,%rax XSTATE_XRESTORE(xstate, lmask, hmask); 406: 48 c7 c7 00 00 00 00 mov $0x0,%rdi u32 hmask = mask >> 32; 40d: 48 c1 ea 20 shr $0x20,%rdx 411: 48 83 e0 fc and $0xfffffffffffffffc,%rax XSTATE_XRESTORE(xstate, lmask, hmask); 415: 48 0f ae 2f xrstor64 (%rdi) return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); 419: 31 c0 xor %eax,%eax 41b: 90 nop 41c: 90 nop 41d: 90 nop 41e: 49 0f ae 0c 24 fxrstor64 (%r12) 423: 90 nop 424: 90 nop 425: 90 nop return copy_user_to_fxregs(buf); 426: eb 85 jmp 3ad <__fpu__restore_sig+0x2fd> #include #include static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm_volatile_goto("1:" 428: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) state_size = sizeof(struct fxregs_state); 42d: c7 85 54 ff ff ff 00 movl $0x200,-0xac(%rbp) 434: 02 00 00 fx_only = 1; 437: 41 be 01 00 00 00 mov $0x1,%r14d 43d: e9 99 fd ff ff jmpq 1db <__fpu__restore_sig+0x12b> u64 init_bv = xfeatures_mask & ~xfeatures; 442: 4c 8b 35 00 00 00 00 mov 0x0(%rip),%r14 # 449 <__fpu__restore_sig+0x399> if (using_compacted_format()) { 449: e8 00 00 00 00 callq 44e <__fpu__restore_sig+0x39e> 44e: 85 c0 test %eax,%eax 450: 0f 85 73 01 00 00 jne 5c9 <__fpu__restore_sig+0x519> ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size); 456: 48 63 b5 54 ff ff ff movslq -0xac(%rbp),%rsi 45d: 31 d2 xor %edx,%edx 45f: 4c 89 ff mov %r15,%rdi 462: e8 00 00 00 00 callq 467 <__fpu__restore_sig+0x3b7> 467: 4c 89 e6 mov %r12,%rsi 46a: 44 8b a5 54 ff ff ff mov -0xac(%rbp),%r12d 471: 4c 89 ff mov %r15,%rdi 474: 44 89 e2 mov %r12d,%edx 477: e8 00 00 00 00 callq 47c <__fpu__restore_sig+0x3cc> if (!ret && state_size > offsetof(struct xregs_state, header)) 47c: 85 c0 test %eax,%eax 47e: 0f 85 b4 fe ff ff jne 338 <__fpu__restore_sig+0x288> 484: 41 81 fc 00 02 00 00 cmp $0x200,%r12d 48b: 0f 87 27 01 00 00 ja 5b8 <__fpu__restore_sig+0x508> if (ret) 491: 85 c0 test %eax,%eax 493: 0f 85 9f fe ff ff jne 338 <__fpu__restore_sig+0x288> 499: e9 00 00 00 00 jmpq 49e <__fpu__restore_sig+0x3ee> header->xfeatures &= xfeatures; 49e: 48 8b 85 58 ff ff ff mov -0xa8(%rbp),%rax 4a5: 49 21 85 c0 15 00 00 and %rax,0x15c0(%r13) xsave->i387.mxcsr &= mxcsr_feature_mask; 4ac: 8b 05 00 00 00 00 mov 0x0(%rip),%eax # 4b2 <__fpu__restore_sig+0x402> 4b2: 41 21 85 d8 13 00 00 and %eax,0x13d8(%r13) if (ia32_env) 4b9: 48 85 db test %rbx,%rbx 4bc: 74 0b je 4c9 <__fpu__restore_sig+0x419> convert_to_fxsr(&state->fxsave, ia32_env); 4be: 48 89 de mov %rbx,%rsi 4c1: 4c 89 ff mov %r15,%rdi 4c4: e8 00 00 00 00 callq 4c9 <__fpu__restore_sig+0x419> 4c9: f3 0f 1e fa endbr64 u64 init_bv = xfeatures_mask & ~xfeatures; 4cd: 48 8b 85 58 ff ff ff mov -0xa8(%rbp),%rax 4d4: 65 81 05 00 00 00 00 addl $0x200,%gs:0x0(%rip) # 4df <__fpu__restore_sig+0x42f> 4db: 
00 02 00 00 4df: 48 f7 d0 not %rax 4e2: 4c 21 f0 and %r14,%rax if (unlikely(init_bv)) 4e5: 48 85 c0 test %rax,%rax 4e8: 0f 85 02 01 00 00 jne 5f0 <__fpu__restore_sig+0x540> u32 hmask = mask >> 32; 4ee: 48 8b 85 58 ff ff ff mov -0xa8(%rbp),%rax XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); 4f5: 4c 89 ff mov %r15,%rdi u32 hmask = mask >> 32; 4f8: 48 89 c2 mov %rax,%rdx 4fb: 48 c1 ea 20 shr $0x20,%rdx XSTATE_OP(XRSTOR, xstate, lmask, hmask, err); 4ff: 48 0f ae 2f xrstor64 (%rdi) 503: 31 c0 xor %eax,%eax if (use_xsave() && !fx_only) { 505: e9 dc fd ff ff jmpq 2e6 <__fpu__restore_sig+0x236> return -EACCES; 50a: b8 f3 ff ff ff mov $0xfffffff3,%eax 50f: e9 ff fd ff ff jmpq 313 <__fpu__restore_sig+0x263> fpu__clear(fpu); 514: 48 89 c7 mov %rax,%rdi 517: e8 00 00 00 00 callq 51c <__fpu__restore_sig+0x46c> return 0; 51c: 31 c0 xor %eax,%eax 51e: e9 f0 fd ff ff jmpq 313 <__fpu__restore_sig+0x263> 523: f3 0f 1e fa endbr64 527: 89 85 58 ff ff ff mov %eax,-0xa8(%rbp) fpregs_mark_activate(); 52d: e8 00 00 00 00 callq 532 <__fpu__restore_sig+0x482> __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); 532: be 00 02 00 00 mov $0x200,%esi 537: 48 c7 c7 00 00 00 00 mov $0x0,%rdi 53e: e8 00 00 00 00 callq 543 <__fpu__restore_sig+0x493> return 0; 543: 8b 85 58 ff ff ff mov -0xa8(%rbp),%eax 549: e9 c5 fd ff ff jmpq 313 <__fpu__restore_sig+0x263> header->xfeatures &= xfeatures; 54e: 48 8b 85 58 ff ff ff mov -0xa8(%rbp),%rax 555: 49 21 85 c0 15 00 00 and %rax,0x15c0(%r13) 55c: e9 2d fd ff ff jmpq 28e <__fpu__restore_sig+0x1de> DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst, TP_PROTO(struct fpu *fpu), TP_ARGS(fpu) ); DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed, 561: 65 8b 05 00 00 00 00 mov %gs:0x0(%rip),%eax # 568 <__fpu__restore_sig+0x4b8> * * Returns 1 if @cpu is set in @cpumask, else returns 0 */ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); 568: 89 c0 mov %eax,%eax static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr) { bool oldbit; asm volatile(__ASM_SIZE(bt) " %2,%1" 56a: 48 0f a3 05 00 00 00 bt %rax,0x0(%rip) # 572 <__fpu__restore_sig+0x4c2> 571: 00 572: 0f 83 b5 fe ff ff jae 42d <__fpu__restore_sig+0x37d> }) static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { __READ_ONCE_SIZE; 578: 4c 8b 35 00 00 00 00 mov 0x0(%rip),%r14 # 57f <__fpu__restore_sig+0x4cf> 57f: 4d 85 f6 test %r14,%r14 582: 74 1f je 5a3 <__fpu__restore_sig+0x4f3> 584: 49 8b 06 mov (%r14),%rax 587: 4d 8d bd 80 13 00 00 lea 0x1380(%r13),%r15 58e: 49 8b 7e 08 mov 0x8(%r14),%rdi 592: 49 83 c6 18 add $0x18,%r14 596: 4c 89 fe mov %r15,%rsi 599: ff d0 callq *%rax 59b: 49 8b 06 mov (%r14),%rax 59e: 48 85 c0 test %rax,%rax 5a1: 75 eb jne 58e <__fpu__restore_sig+0x4de> state_size = sizeof(struct fxregs_state); 5a3: c7 85 54 ff ff ff 00 movl $0x200,-0xac(%rbp) 5aa: 02 00 00 fx_only = 1; 5ad: 41 be 01 00 00 00 mov $0x1,%r14d 5b3: e9 23 fc ff ff jmpq 1db <__fpu__restore_sig+0x12b> ret = validate_xstate_header(&fpu->state.xsave.header); 5b8: 49 8d bd c0 15 00 00 lea 0x15c0(%r13),%rdi 5bf: e8 00 00 00 00 callq 5c4 <__fpu__restore_sig+0x514> 5c4: e9 c8 fe ff ff jmpq 491 <__fpu__restore_sig+0x3e1> ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx); 5c9: 4c 89 e6 mov %r12,%rsi 5cc: 4c 89 ff mov %r15,%rdi 5cf: e8 00 00 00 00 callq 5d4 <__fpu__restore_sig+0x524> 5d4: e9 b8 fe ff ff jmpq 491 <__fpu__restore_sig+0x3e1> u32 hmask = mask >> 32; 5d9: 48 89 c2 mov %rax,%rdx XSTATE_XRESTORE(xstate, 
lmask, hmask); 5dc: 48 c7 c7 00 00 00 00 mov $0x0,%rdi u32 hmask = mask >> 32; 5e3: 48 c1 ea 20 shr $0x20,%rdx XSTATE_XRESTORE(xstate, lmask, hmask); 5e7: 48 0f ae 2f xrstor64 (%rdi) } 5eb: e9 a0 fd ff ff jmpq 390 <__fpu__restore_sig+0x2e0> u32 hmask = mask >> 32; 5f0: 48 89 c2 mov %rax,%rdx XSTATE_XRESTORE(xstate, lmask, hmask); 5f3: 48 c7 c7 00 00 00 00 mov $0x0,%rdi u32 hmask = mask >> 32; 5fa: 48 c1 ea 20 shr $0x20,%rdx XSTATE_XRESTORE(xstate, lmask, hmask); 5fe: 48 0f ae 2f xrstor64 (%rdi) } 602: e9 e7 fe ff ff jmpq 4ee <__fpu__restore_sig+0x43e> } 607: e8 00 00 00 00 callq 60c <__fpu__restore_sig+0x55c> 60c: 0f 1f 40 00 nopl 0x0(%rax) 0000000000000610 : { 610: f3 0f 1e fa endbr64 614: e8 00 00 00 00 callq 619 619: 55 push %rbp if (!access_ok(buf, size)) 61a: 48 63 d2 movslq %edx,%rdx { 61d: 48 89 e5 mov %rsp,%rbp 620: 41 57 push %r15 622: 41 56 push %r14 624: 41 55 push %r13 626: 41 54 push %r12 628: 53 push %rbx 629: 48 83 ec 40 sub $0x40,%rsp 62d: 48 89 7d 98 mov %rdi,-0x68(%rbp) 631: 65 48 8b 04 25 28 00 mov %gs:0x28,%rax 638: 00 00 63a: 48 89 45 d0 mov %rax,-0x30(%rbp) 63e: 31 c0 xor %eax,%eax 640: 65 48 8b 04 25 00 00 mov %gs:0x0,%rax 647: 00 00 649: 48 89 45 a0 mov %rax,-0x60(%rbp) if (!access_ok(buf, size)) 64d: 48 8b 80 58 13 00 00 mov 0x1358(%rax),%rax addr += size; 654: 48 01 fa add %rdi,%rdx 657: 0f 82 db 02 00 00 jb 938 65d: 48 39 d0 cmp %rdx,%rax 660: 0f 82 d2 02 00 00 jb 938 666: 4c 8b 75 a0 mov -0x60(%rbp),%r14 66a: 49 89 f4 mov %rsi,%r12 66d: 49 c7 c7 00 00 00 00 mov $0x0,%r15 } static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu) { return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; 674: 65 48 8b 04 25 00 00 mov %gs:0x0,%rax 67b: 00 00 67d: 4c 89 75 a8 mov %r14,-0x58(%rbp) 681: 4d 89 f5 mov %r14,%r13 684: 48 89 45 b0 mov %rax,-0x50(%rbp) 688: f3 0f 1e fa endbr64 68c: 65 81 05 00 00 00 00 addl $0x200,%gs:0x0(%rip) # 697 693: 00 02 00 00 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; 697: 49 8b 45 00 mov 0x0(%r13),%rax if (test_thread_flag(TIF_NEED_FPU_LOAD)) 69b: f6 c4 40 test $0x40,%ah 69e: 0f 85 09 01 00 00 jne 7ad current->pagefault_disabled++; 6a4: 41 83 86 78 12 00 00 addl $0x1,0x1278(%r14) 6ab: 01 6ac: e9 00 00 00 00 jmpq 6b1 err = __clear_user(&buf->header, sizeof(buf->header)); 6b1: 49 8d bc 24 00 02 00 lea 0x200(%r12),%rdi 6b8: 00 6b9: be 40 00 00 00 mov $0x40,%esi 6be: e8 00 00 00 00 callq 6c3 if (unlikely(err)) 6c3: 85 c0 test %eax,%eax 6c5: 0f 85 b7 01 00 00 jne 882 alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 6cb: 90 nop 6cc: 90 nop 6cd: 90 nop XSTATE_OP(XSAVE, buf, -1, -1, err); 6ce: bb ff ff ff ff mov $0xffffffff,%ebx 6d3: 4c 89 e7 mov %r12,%rdi 6d6: 89 d8 mov %ebx,%eax 6d8: 89 da mov %ebx,%edx 6da: 48 0f ae 27 xsave64 (%rdi) 6de: 31 db xor %ebx,%ebx alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 6e0: 90 nop 6e1: 90 nop 6e2: 90 nop if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size)) 6e3: 85 db test %ebx,%ebx 6e5: 0f 85 a4 00 00 00 jne 78f 6eb: f3 0f 1e fa endbr64 6ef: 65 48 8b 04 25 00 00 mov %gs:0x0,%rax 6f6: 00 00 current->pagefault_disabled--; 6f8: 83 a8 78 12 00 00 01 subl $0x1,0x1278(%rax) 6ff: be 00 02 00 00 mov $0x200,%esi 704: 48 c7 c7 00 00 00 00 mov $0x0,%rdi 70b: e8 00 00 00 00 callq 710 if (ret) { 710: 85 db test %ebx,%ebx 712: 0f 84 74 01 00 00 je 88c aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size; 718: 44 89 e6 mov %r12d,%esi ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages, 71b: 31 d2 xor %edx,%edx 71d: b9 01 00 00 00 mov $0x1,%ecx 
722: 4c 89 e7 mov %r12,%rdi aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size; 725: 81 e6 ff 0f 00 00 and $0xfff,%esi 72b: 03 35 00 00 00 00 add 0x0(%rip),%esi # 731 nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE); 731: 48 63 f6 movslq %esi,%rsi 734: 48 81 c6 ff 0f 00 00 add $0xfff,%rsi 73b: 48 89 f3 mov %rsi,%rbx ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages, 73e: 48 c1 fe 0c sar $0xc,%rsi nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE); 742: 48 c1 eb 0c shr $0xc,%rbx ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages, 746: e8 00 00 00 00 callq 74b if (ret == nr_pages) 74b: 39 c3 cmp %eax,%ebx 74d: 0f 84 35 ff ff ff je 688 return -EFAULT; 753: b8 f2 ff ff ff mov $0xfffffff2,%eax } 758: 48 8b 4d d0 mov -0x30(%rbp),%rcx 75c: 65 48 33 0c 25 28 00 xor %gs:0x28,%rcx 763: 00 00 765: 0f 85 e1 01 00 00 jne 94c 76b: 48 83 c4 40 add $0x40,%rsp 76f: 5b pop %rbx 770: 41 5c pop %r12 772: 41 5d pop %r13 774: 41 5e pop %r14 776: 41 5f pop %r15 778: 5d pop %rbp 779: c3 retq return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); 77a: 31 db xor %ebx,%ebx 77c: 90 nop 77d: 90 nop 77e: 90 nop 77f: 49 0f ae 04 24 fxsave64 (%r12) 784: 90 nop 785: 90 nop 786: 90 nop if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size)) 787: 85 db test %ebx,%ebx 789: 0f 84 5c ff ff ff je 6eb 78f: 8b 35 00 00 00 00 mov 0x0(%rip),%esi # 795 795: 4c 89 e7 mov %r12,%rdi 798: e8 00 00 00 00 callq 79d err = -EFAULT; 79d: 48 85 c0 test %rax,%rax 7a0: b8 f2 ff ff ff mov $0xfffffff2,%eax 7a5: 0f 45 d8 cmovne %eax,%ebx 7a8: e9 3e ff ff ff jmpq 6eb /* * Internal helper, do not use directly. Use switch_fpu_return() instead. */ static inline void __fpregs_load_activate(void) { struct fpu *fpu = ¤t->thread.fpu; 7ad: 49 8d 8d 80 13 00 00 lea 0x1380(%r13),%rcx int cpu = smp_processor_id(); 7b4: 65 8b 05 00 00 00 00 mov %gs:0x0(%rip),%eax # 7bb if (WARN_ON_ONCE(current->mm == NULL)) 7bb: 49 83 bd f8 07 00 00 cmpq $0x0,0x7f8(%r13) 7c2: 00 int cpu = smp_processor_id(); 7c3: 89 45 bc mov %eax,-0x44(%rbp) if (WARN_ON_ONCE(current->mm == NULL)) 7c6: 0f 84 4d 01 00 00 je 919 return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; 7cc: 48 3b 4d b0 cmp -0x50(%rbp),%rcx 7d0: 0f 84 9a 00 00 00 je 870 return; if (!fpregs_state_valid(fpu, cpu)) { copy_kernel_to_fpregs(&fpu->state); 7d6: 49 8d bd c0 13 00 00 lea 0x13c0(%r13),%rdi 7dd: 48 89 7d c8 mov %rdi,-0x38(%rbp) 7e1: e9 00 00 00 00 jmpq 7e6 asm volatile( 7e6: db e2 fnclex 7e8: 0f 77 emms 7ea: db 45 c8 fildl -0x38(%rbp) 7ed: e9 00 00 00 00 jmpq 7f2 XSTATE_XRESTORE(xstate, lmask, hmask); 7f2: b8 ff ff ff ff mov $0xffffffff,%eax 7f7: 89 c2 mov %eax,%edx 7f9: 48 0f ae 2f xrstor64 (%rdi) this_cpu_write(fpu_fpregs_owner_ctx, fpu); 7fd: 65 48 89 0d 00 00 00 mov %rcx,%gs:0x0(%rip) # 805 804: 00 805: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1) fpregs_activate(fpu); fpu->last_cpu = cpu; 80a: 8b 45 bc mov -0x44(%rbp),%eax 80d: 41 89 85 80 13 00 00 mov %eax,0x1380(%r13) asm volatile(LOCK_PREFIX "andb %1,%0" 814: 48 8b 45 a8 mov -0x58(%rbp),%rax 818: f0 80 60 01 bf lock andb $0xbf,0x1(%rax) } 81d: e9 82 fe ff ff jmpq 6a4 kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); 822: 49 0f ae 8d c0 13 00 fxrstor64 0x13c0(%r13) 829: 00 } 82a: eb d1 jmp 7fd DEFINE_EVENT(x86_fpu, x86_fpu_regs_activated, 82c: 65 8b 05 00 00 00 00 mov %gs:0x0(%rip),%eax # 833 833: 89 c0 mov %eax,%eax asm volatile(__ASM_SIZE(bt) " %2,%1" 835: 48 0f a3 05 00 00 00 bt %rax,0x0(%rip) # 83d 83c: 00 83d: 73 cb jae 80a 83f: 48 8b 05 00 00 00 00 mov 0x0(%rip),%rax # 846 846: 
48 85 c0 test %rax,%rax 849: 74 23 je 86e 84b: 48 8b 10 mov (%rax),%rdx 84e: 48 89 c3 mov %rax,%rbx 851: 48 8b 7b 08 mov 0x8(%rbx),%rdi 855: 48 83 c3 18 add $0x18,%rbx 859: 48 89 4d c0 mov %rcx,-0x40(%rbp) 85d: 48 89 ce mov %rcx,%rsi 860: ff d2 callq *%rdx 862: 48 8b 13 mov (%rbx),%rdx 865: 48 8b 4d c0 mov -0x40(%rbp),%rcx 869: 48 85 d2 test %rdx,%rdx 86c: 75 e3 jne 851 86e: eb 9a jmp 80a return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; 870: 8b 45 bc mov -0x44(%rbp),%eax 873: 41 3b 85 80 13 00 00 cmp 0x1380(%r13),%eax 87a: 0f 85 56 ff ff ff jne 7d6 880: eb 92 jmp 814 return -EFAULT; 882: bb f2 ff ff ff mov $0xfffffff2,%ebx 887: e9 03 ff ff ff jmpq 78f if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf)) 88c: 48 8b 45 98 mov -0x68(%rbp),%rax 890: 4c 39 e0 cmp %r12,%rax 893: 0f 84 93 00 00 00 je 92c 899: 48 8b 7d a0 mov -0x60(%rbp),%rdi 89d: 48 89 c6 mov %rax,%rsi 8a0: e8 5b f7 ff ff callq 0 8a5: 85 c0 test %eax,%eax 8a7: 0f 85 95 00 00 00 jne 942 sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved; 8ad: 48 c7 c6 00 00 00 00 mov $0x0,%rsi err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); 8b4: 49 8d bc 24 d0 01 00 lea 0x1d0(%r12),%rdi 8bb: 00 8bc: ba 30 00 00 00 mov $0x30,%edx 8c1: e8 00 00 00 00 callq 8c6 8c6: 89 c2 mov %eax,%edx 8c8: e9 00 00 00 00 jmpq 8cd alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 8cd: 90 nop 8ce: 90 nop 8cf: 90 nop err |= __put_user(FP_XSTATE_MAGIC2, 8d0: 8b 15 00 00 00 00 mov 0x0(%rip),%edx # 8d6 8d6: 41 c7 04 14 45 58 50 movl $0x46505845,(%r12,%rdx,1) 8dd: 46 alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 8de: 90 nop 8df: 90 nop 8e0: 90 nop alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 8e1: 90 nop 8e2: 90 nop 8e3: 90 nop err |= __get_user(xfeatures, (__u32 __user *)&x->header.xfeatures); 8e4: 90 nop 8e5: 90 nop 8e6: 90 nop 8e7: 31 d2 xor %edx,%edx 8e9: 41 8b 8c 24 00 02 00 mov 0x200(%r12),%ecx 8f0: 00 alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 8f1: 90 nop 8f2: 90 nop 8f3: 90 nop 8f4: 09 d3 or %edx,%ebx 8f6: 09 d8 or %ebx,%eax 8f8: 89 c2 mov %eax,%edx alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); 8fa: 90 nop 8fb: 90 nop 8fc: 90 nop xfeatures |= XFEATURE_MASK_FPSSE; 8fd: 83 c9 03 or $0x3,%ecx err |= __put_user(xfeatures, (__u32 __user *)&x->header.xfeatures); 900: 41 89 8c 24 00 02 00 mov %ecx,0x200(%r12) 907: 00 alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP); 908: 90 nop 909: 90 nop 90a: 90 nop if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate)) 90b: 31 c0 xor %eax,%eax 90d: 85 d2 test %edx,%edx 90f: 0f 95 c0 setne %al 912: f7 d8 neg %eax 914: e9 3f fe ff ff jmpq 758 if (WARN_ON_ONCE(current->mm == NULL)) 919: 0f 0b ud2 91b: e9 84 fd ff ff jmpq 6a4 err |= __put_user(FP_XSTATE_MAGIC2, 920: bb f2 ff ff ff mov $0xfffffff2,%ebx 925: eb b7 jmp 8de 927: 83 ca f2 or $0xfffffff2,%edx 92a: eb dc jmp 908 sw_bytes = ia32_frame ? 
&fx_sw_reserved_ia32 : &fx_sw_reserved;
 92c: 48 c7 c6 00 00 00 00 mov $0x0,%rsi
 933: e9 7c ff ff ff       jmpq 8b4
		return -EACCES;
 938: b8 f3 ff ff ff       mov $0xfffffff3,%eax
 93d: e9 16 fe ff ff       jmpq 758
		return -1;
 942: b8 ff ff ff ff       mov $0xffffffff,%eax
 947: e9 0c fe ff ff       jmpq 758
}
 94c: e8 00 00 00 00       callq 951
 951: 66 66 2e 0f 1f 84 00 data16 nopw %cs:0x0(%rax,%rax,1)
 958: 00 00 00 00
 95c: 0f 1f 40 00          nopl 0x0(%rax)

0000000000000960 <fpu__restore_sig>:
/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
 960: f3 0f 1e fa          endbr64
 964: e8 00 00 00 00       callq 969
 969: 55                   push %rbp
 96a: 48 89 e5             mov %rsp,%rbp
 96d: e9 00 00 00 00       jmpq 972
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
 972: 8b 05 00 00 00 00    mov 0x0(%rip),%eax # 978
 978: 8d 50 04             lea 0x4(%rax),%edx
	void __user *buf_fx = buf;
 97b: 49 89 f8             mov %rdi,%r8
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
 97e: 85 f6                test %esi,%esi
 980: 74 07                je 989
		buf_fx = buf + sizeof(struct fregs_state);
 982: 4c 8d 47 70          lea 0x70(%rdi),%r8
		size += sizeof(struct fregs_state);
 986: 83 c2 70             add $0x70,%edx
	}

	return __fpu__restore_sig(buf, buf_fx, size);
 989: 4c 89 c6             mov %r8,%rsi
 98c: e8 1f f7 ff ff       callq b0 <__fpu__restore_sig>
}
 991: 5d                   pop %rbp
 992: c3                   retq
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
 993: 8b 15 00 00 00 00    mov 0x0(%rip),%edx # 999
 999: eb e0                jmp 97b
 99b: 0f 1f 44 00 00       nopl 0x0(%rax,%rax,1)

00000000000009a0 <fpu__alloc_mathframe>:

unsigned long fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
				   unsigned long *buf_fx, unsigned long *size)
{
 9a0: f3 0f 1e fa          endbr64
 9a4: e8 00 00 00 00       callq 9a9
 9a9: 55                   push %rbp
 9aa: 48 89 e5             mov %rsp,%rbp
 9ad: e9 00 00 00 00       jmpq 9b2
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
 9b2: 8b 05 00 00 00 00    mov 0x0(%rip),%eax # 9b8
 9b8: 44 8d 40 04          lea 0x4(%rax),%r8d
	unsigned long frame_size = xstate_sigframe_size();
 9bc: 4d 63 c0             movslq %r8d,%r8
	*buf_fx = sp = round_down(sp - frame_size, 64);
 9bf: 4c 29 c7             sub %r8,%rdi
 9c2: 48 89 f8             mov %rdi,%rax
 9c5: 48 83 e0 c0          and $0xffffffffffffffc0,%rax
 9c9: 48 89 02             mov %rax,(%rdx)
	if (ia32_frame && use_fxsr()) {
 9cc: 85 f6                test %esi,%esi
 9ce: 74 08                je 9d8
		frame_size += sizeof(struct fregs_state);
 9d0: 49 83 c0 70          add $0x70,%r8
		sp -= sizeof(struct fregs_state);
 9d4: 48 83 e8 70          sub $0x70,%rax
	}

	*size = frame_size;
 9d8: 4c 89 01             mov %r8,(%rcx)

	return sp;
}
 9db: 5d                   pop %rbp
 9dc: c3                   retq
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
 9dd: 44 8b 05 00 00 00 00 mov 0x0(%rip),%r8d # 9e4
 9e4: eb d6                jmp 9bc
 9e6: 66 2e 0f 1f 84 00 00 nopw %cs:0x0(%rax,%rax,1)
 9ed: 00 00 00

00000000000009f0 <fpu__init_prepare_fx_sw_frame>:
 * pointed by the fpstate pointer in the sigcontext.
 * This will be saved when ever the FP and extended state context is
 * saved on the user stack during the signal handler delivery to the user.
*/ void fpu__init_prepare_fx_sw_frame(void) { 9f0: f3 0f 1e fa endbr64 9f4: e8 00 00 00 00 callq 9f9 int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE; 9f9: 8b 05 00 00 00 00 mov 0x0(%rip),%eax # 9ff { 9ff: 55 push %rbp fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; a00: c7 05 00 00 00 00 53 movl $0x46505853,0x0(%rip) # a0a a07: 58 50 46 int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE; a0a: 8d 50 04 lea 0x4(%rax),%edx fx_sw_reserved.extended_size = size; fx_sw_reserved.xfeatures = xfeatures_mask; fx_sw_reserved.xstate_size = fpu_user_xstate_size; a0d: 89 05 00 00 00 00 mov %eax,0x0(%rip) # a13 if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) { int fsave_header_size = sizeof(struct fregs_state); fx_sw_reserved_ia32 = fx_sw_reserved; fx_sw_reserved_ia32.extended_size = size + fsave_header_size; a13: 83 c0 74 add $0x74,%eax int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE; a16: 89 15 00 00 00 00 mov %edx,0x0(%rip) # a1c fx_sw_reserved.xfeatures = xfeatures_mask; a1c: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # a23 { a23: 48 89 e5 mov %rsp,%rbp fx_sw_reserved_ia32 = fx_sw_reserved; a26: 48 8b 0d 00 00 00 00 mov 0x0(%rip),%rcx # a2d } } a2d: 5d pop %rbp fx_sw_reserved.xfeatures = xfeatures_mask; a2e: 48 89 15 00 00 00 00 mov %rdx,0x0(%rip) # a35 fx_sw_reserved_ia32 = fx_sw_reserved; a35: 48 89 15 00 00 00 00 mov %rdx,0x0(%rip) # a3c a3c: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # a43 a43: 48 89 0d 00 00 00 00 mov %rcx,0x0(%rip) # a4a a4a: 48 89 15 00 00 00 00 mov %rdx,0x0(%rip) # a51 a51: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # a58 fx_sw_reserved_ia32.extended_size = size + fsave_header_size; a58: 89 05 00 00 00 00 mov %eax,0x0(%rip) # a5e fx_sw_reserved_ia32 = fx_sw_reserved; a5e: 48 89 15 00 00 00 00 mov %rdx,0x0(%rip) # a65 a65: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # a6c a6c: 48 89 15 00 00 00 00 mov %rdx,0x0(%rip) # a73 a73: 48 8b 15 00 00 00 00 mov 0x0(%rip),%rdx # a7a a7a: 48 89 15 00 00 00 00 mov %rdx,0x0(%rip) # a81 } a81: c3 retq Disassembly of section .altinstr_replacement: 0000000000000000 <.altinstr_replacement>: 0: e8 00 00 00 00 callq 5 <.altinstr_replacement+0x5> 5: e8 00 00 00 00 callq a <.altinstr_replacement+0xa> a: 0f 01 cb stac d: 0f 01 ca clac 10: 0f 01 cb stac 13: 0f 01 ca clac 16: e9 00 00 00 00 jmpq 1b <.altinstr_replacement+0x1b> 1b: e8 00 00 00 00 callq 20 <.altinstr_replacement+0x20> 20: e8 00 00 00 00 callq 25 <.altinstr_replacement+0x25> 25: 0f 01 cb stac 28: 0f ae f0 mfence 2b: 0f ae e8 lfence 2e: 0f 01 ca clac 31: e8 00 00 00 00 callq 36 <.altinstr_replacement+0x36> 36: e8 00 00 00 00 callq 3b <.altinstr_replacement+0x3b> 3b: e9 00 00 00 00 jmpq 40 <.altinstr_replacement+0x40> 40: e8 00 00 00 00 callq 45 <.altinstr_replacement+0x45> 45: e8 00 00 00 00 callq 4a <.altinstr_replacement+0x4a> 4a: e9 00 00 00 00 jmpq 4f <.altinstr_replacement+0x4f> 4f: e9 00 00 00 00 jmpq 54 <.altinstr_replacement+0x54> 54: 48 0f c7 1f xrstors64 (%rdi) 58: e9 00 00 00 00 jmpq 5d <.altinstr_replacement+0x5d> 5d: 0f 01 cb stac 60: 0f 01 ca clac 63: 48 0f c7 1f xrstors64 (%rdi) 67: 0f 01 cb stac 6a: 0f 01 ca clac 6d: e8 00 00 00 00 callq 72 <.altinstr_replacement+0x72> 72: e8 00 00 00 00 callq 77 <.altinstr_replacement+0x77> 77: e9 00 00 00 00 jmpq 7c <.altinstr_replacement+0x7c> 7c: 48 0f c7 1f xrstors64 (%rdi) 80: 48 0f c7 1f xrstors64 (%rdi) 84: e9 00 00 00 00 jmpq 89 <.altinstr_replacement+0x89> 89: 0f 01 cb stac 8c: 0f 01 ca clac 8f: 0f 01 cb stac 92: 0f 01 ca clac 95: e9 00 00 00 00 jmpq 9a <.altinstr_replacement+0x9a> 9a: e9 00 00 00 00 jmpq 9f 
<.altinstr_replacement+0x9f> 9f: 48 0f c7 1f xrstors64 (%rdi) a3: e8 00 00 00 00 callq a8 <.altinstr_replacement+0xa8> a8: e8 00 00 00 00 callq ad <.altinstr_replacement+0xad> ad: e9 00 00 00 00 jmpq b2 <.altinstr_replacement+0xb2> b2: 0f 01 cb stac b5: 0f 01 ca clac b8: 0f 01 cb stac bb: 0f ae f0 mfence be: 0f ae e8 lfence c1: 0f 01 ca clac c4: 0f 01 cb stac c7: 0f 01 ca clac ca: e9 00 00 00 00 jmpq cf <.altinstr_replacement+0xcf> cf: e9 00 00 00 00 jmpq d4 <__fpu__restore_sig+0x24> Disassembly of section .altinstr_aux: 0000000000000000 <.altinstr_aux>: 0: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # 7 <.altinstr_aux+0x7> 7: 0f 85 00 00 00 00 jne d <.altinstr_aux+0xd> d: e9 00 00 00 00 jmpq 12 <.altinstr_aux+0x12> 12: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # 19 <.altinstr_aux+0x19> 19: 0f 85 00 00 00 00 jne 1f <.altinstr_aux+0x1f> 1f: e9 00 00 00 00 jmpq 24 <.altinstr_aux+0x24> 24: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # 2b <.altinstr_aux+0x2b> 2b: 0f 85 00 00 00 00 jne 31 <.altinstr_aux+0x31> 31: e9 00 00 00 00 jmpq 36 <.altinstr_aux+0x36> 36: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # 3d <.altinstr_aux+0x3d> 3d: 0f 85 00 00 00 00 jne 43 <.altinstr_aux+0x43> 43: e9 00 00 00 00 jmpq 48 <.altinstr_aux+0x48> 48: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # 4f <.altinstr_aux+0x4f> 4f: 0f 85 00 00 00 00 jne 55 <.altinstr_aux+0x55> 55: e9 00 00 00 00 jmpq 5a <.altinstr_aux+0x5a> 5a: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # 61 <.altinstr_aux+0x61> 61: 0f 85 00 00 00 00 jne 67 <.altinstr_aux+0x67> 67: e9 00 00 00 00 jmpq 6c <.altinstr_aux+0x6c> 6c: 41 f6 47 27 04 testb $0x4,0x27(%r15) 71: 0f 85 00 00 00 00 jne 77 <.altinstr_aux+0x77> 77: e9 00 00 00 00 jmpq 7c <.altinstr_aux+0x7c> 7c: 41 f6 47 60 40 testb $0x40,0x60(%r15) 81: 0f 85 00 00 00 00 jne 87 <.altinstr_aux+0x87> 87: e9 00 00 00 00 jmpq 8c <.altinstr_aux+0x8c> 8c: 41 f6 47 27 04 testb $0x4,0x27(%r15) 91: 0f 85 00 00 00 00 jne 97 <.altinstr_aux+0x97> 97: e9 00 00 00 00 jmpq 9c <.altinstr_aux+0x9c> 9c: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # a3 <.altinstr_aux+0xa3> a3: 0f 85 00 00 00 00 jne a9 <.altinstr_aux+0xa9> a9: e9 00 00 00 00 jmpq ae <.altinstr_aux+0xae> ae: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # b5 <.altinstr_aux+0xb5> b5: 0f 85 00 00 00 00 jne bb <.altinstr_aux+0xbb> bb: e9 00 00 00 00 jmpq c0 <.altinstr_aux+0xc0> c0: f6 05 00 00 00 00 04 testb $0x4,0x0(%rip) # c7 <.altinstr_aux+0xc7> c7: 0f 85 00 00 00 00 jne cd <.altinstr_aux+0xcd> cd: e9 00 00 00 00 jmpq d2 <__fpu__restore_sig+0x22> Disassembly of section .fixup: 0000000000000000 <.fixup>: 0: b8 f2 ff ff ff mov $0xfffffff2,%eax 5: 31 d2 xor %edx,%edx 7: e9 00 00 00 00 jmpq c <.fixup+0xc> c: b8 ff ff ff ff mov $0xffffffff,%eax 11: e9 00 00 00 00 jmpq 16 <.fixup+0x16> 16: b8 fe ff ff ff mov $0xfffffffe,%eax 1b: e9 00 00 00 00 jmpq 20 <.fixup+0x20> 20: b8 ff ff ff ff mov $0xffffffff,%eax 25: e9 00 00 00 00 jmpq 2a <.fixup+0x2a> 2a: b8 fe ff ff ff mov $0xfffffffe,%eax 2f: e9 00 00 00 00 jmpq 34 <.fixup+0x34> 34: bb fe ff ff ff mov $0xfffffffe,%ebx 39: e9 00 00 00 00 jmpq 3e <.fixup+0x3e> 3e: bb ff ff ff ff mov $0xffffffff,%ebx 43: e9 00 00 00 00 jmpq 48 <.fixup+0x48> 48: ba f2 ff ff ff mov $0xfffffff2,%edx 4d: 31 c9 xor %ecx,%ecx 4f: e9 00 00 00 00 jmpq 54
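
A short gloss on the magic words that recur as immediates in the listing: 0x46505853 (FP_XSTATE_MAGIC1) is checked in the software-reserved bytes at offset 0x1d0 of the 512-byte fxsave area, and 0x46505845 (FP_XSTATE_MAGIC2) is read from, and written to, the location immediately after the saved xstate (the movl $0x46505845,(%r12,%rdx,1) in the save path, with %rdx holding fpu_user_xstate_size). The sketch below shows how user code could recognise such an extended signal frame. It is an illustration only, not code from signal.o: struct fpx_sw_bytes and the size bounds are assumed mirrors of the UAPI struct _fpx_sw_bytes layout and of the 0x1d0/0x240 constants visible above.

/*
 * Sketch: detect whether the fxsave/xsave area of a signal frame carries
 * extended xstate, using the same magic words and bounds as the listing.
 */
#include <stdint.h>
#include <string.h>

#define FP_XSTATE_MAGIC1   0x46505853u  /* stored in the sw_reserved bytes   */
#define FP_XSTATE_MAGIC2   0x46505845u  /* stored right after the xstate     */
#define FXSAVE_AREA_SIZE   512
#define SW_RESERVED_OFFSET (FXSAVE_AREA_SIZE - 48)  /* 0x1d0 in the listing  */
#define MIN_XSTATE_SIZE    (FXSAVE_AREA_SIZE + 64)  /* the 0x240 lower bound */

struct fpx_sw_bytes {           /* assumed mirror of UAPI struct _fpx_sw_bytes */
	uint32_t magic1;
	uint32_t extended_size;
	uint64_t xfeatures;
	uint32_t xstate_size;
	uint32_t padding[7];
};

/*
 * Return 1 if buf_fx (the 64-byte aligned fxsave/xsave area of a signal
 * frame, frame_size bytes long) carries extended xstate, 0 otherwise.
 */
static int frame_has_xstate(const void *buf_fx, size_t frame_size)
{
	struct fpx_sw_bytes sw;
	uint32_t magic2;

	memcpy(&sw, (const char *)buf_fx + SW_RESERVED_OFFSET, sizeof(sw));
	if (sw.magic1 != FP_XSTATE_MAGIC1)
		return 0;
	if (sw.xstate_size < MIN_XSTATE_SIZE ||
	    (size_t)sw.xstate_size + sizeof(magic2) > frame_size)
		return 0;
	memcpy(&magic2, (const char *)buf_fx + sw.xstate_size, sizeof(magic2));
	return magic2 == FP_XSTATE_MAGIC2;
}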