/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"
#include <xen/interface/xen.h>

#define nr_syscalls ((syscall_table_size)/4)
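/*
 * syscall_table_size is defined at the end of this file, right after
 * the .rodata syscall table is emitted ("syscall_table_size=(.-sys_call_table)").
 * Each table entry is one 4-byte pointer, hence the division by 4.
 */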

EBX		= 0x00
ECX		= 0x04
EDX		= 0x08
ESI		= 0x0C
EDI		= 0x10
EBP		= 0x14
EAX		= 0x18
DS		= 0x1C
ES		= 0x20
ORIG_EAX	= 0x24
EIP		= 0x28
CS		= 0x2C
EFLAGS		= 0x30
OLDESP		= 0x34
OLDSS		= 0x38
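/*
 * These offsets must match the 'ret_from_system_call' stack layout
 * documented above: SAVE_ALL pushes the registers in exactly this
 * order (last push = offset 0), so EAX(%esp) addresses the saved
 * %eax and ORIG_EAX(%esp) the value pushed on kernel entry.
 */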

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000
/* Pseudo-eflags. */
NMI_MASK	= 0x80000000

#ifndef CONFIG_XEN
#define DISABLE_INTERRUPTS	cli
#define ENABLE_INTERRUPTS	sti
#else
/* Offsets into shared_info_t. */
#define evtchn_upcall_pending	/* 0 */
#define evtchn_upcall_mask	1

#define sizeof_vcpu_shift	6

#ifdef CONFIG_SMP
#define GET_VCPU_INFO	movl TI_cpu(%ebp),%esi ; \
			shl $sizeof_vcpu_shift,%esi ; \
			addl HYPERVISOR_shared_info,%esi
#else
#define GET_VCPU_INFO	movl HYPERVISOR_shared_info,%esi
#endif

#define __DISABLE_INTERRUPTS	movb $1,evtchn_upcall_mask(%esi)
#define __ENABLE_INTERRUPTS	movb $0,evtchn_upcall_mask(%esi)
#define DISABLE_INTERRUPTS	GET_VCPU_INFO ; \
				__DISABLE_INTERRUPTS
#define ENABLE_INTERRUPTS	GET_VCPU_INFO ; \
				__ENABLE_INTERRUPTS
#define __TEST_PENDING		testb $0xFF,evtchn_upcall_pending(%esi)
#endif
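
/*
 * Under Xen there is no cli/sti: "interrupts disabled" just means the
 * per-VCPU evtchn_upcall_mask byte in the shared info page is set, so
 * no trap into the hypervisor is needed.  On SMP, DISABLE_INTERRUPTS
 * above expands to roughly:
 *
 *	movl TI_cpu(%ebp),%esi			# this CPU's index
 *	shl  $sizeof_vcpu_shift,%esi		# * sizeof(vcpu_info_t) (64)
 *	addl HYPERVISOR_shared_info,%esi	# -> our vcpu_info
 *	movb $1,evtchn_upcall_mask(%esi)	# mask event delivery
 */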

#ifdef CONFIG_PREEMPT
#define preempt_stop		cli; TRACE_IRQS_OFF
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;
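
/*
 * Note the push order: %es and %ds first (highest offsets), %ebx last
 * (offset 0), so the frame built here lines up with the EBX..ES
 * offsets defined at the top of the file.  %ds and %es are then
 * reloaded with the flat __USER_DS selector.
 */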

#define RESTORE_INT_REGS \
	popl %ebx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
.section .fixup,"ax"; \
3:	movl $0,(%esp); \
	jmp 1b; \
4:	movl $0,(%esp); \
	jmp 2b; \
.previous; \
.section __ex_table,"a";\
	.align 4; \
	.long 1b,3b; \
	.long 2b,4b; \
.previous
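
/*
 * The 1:/2: pops above can fault if the saved %ds/%es hold a selector
 * that is no longer valid.  The __ex_table entries route such a fault
 * to the .fixup code, which replaces the bad saved value with 0 on
 * the stack and retries the pop, so a bogus user segment cannot wedge
 * the return path.
 */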

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_DEF_CFA esp, OLDESP-EBX;\
	/*CFI_OFFSET cs, CS-OLDESP;*/\
	CFI_OFFSET eip, EIP-OLDESP;\
	/*CFI_OFFSET es, ES-OLDESP;*/\
	/*CFI_OFFSET ds, DS-OLDESP;*/\
	CFI_OFFSET eax, EAX-OLDESP;\
	CFI_OFFSET ebp, EBP-OLDESP;\
	CFI_OFFSET edi, EDI-OLDESP;\
	CFI_OFFSET esi, ESI-OLDESP;\
	CFI_OFFSET edx, EDX-OLDESP;\
	CFI_OFFSET ecx, ECX-OLDESP;\
	CFI_OFFSET ebx, EBX-OLDESP

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	testl $(VM_MASK | 2), %eax
	jz resume_kernel
ENTRY(resume_userspace)
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
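
	/*
	 * check_userspace: %eax holds EFLAGS with its low byte replaced
	 * by the saved CS, so "testl $(VM_MASK | 2)" is nonzero iff we
	 * interrupted vm86 mode (VM set) or ring 2/3 code (RPL bit 1 of
	 * CS set).  Pure kernel returns take the jz to resume_kernel,
	 * which is plain restore_nocheck unless CONFIG_PREEMPT adds the
	 * reschedule check below.
	 */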

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	cli
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl SYSENTER_stack_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable it straight after entry:
	 */
	sti
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
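
	/*
	 * Two guards protect the %ebp dereference above: the cmpl
	 * rejects any pointer whose 4 bytes would not lie entirely
	 * below __PAGE_OFFSET, and the __ex_table entry sends a fault
	 * on the 1: load to syscall_fault, so a bad user %esp yields
	 * -EFAULT instead of an oops.
	 */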

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
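	/*
	 * sysexit resumes user mode with %eip loaded from %edx and
	 * %esp from %ecx, which is why the saved EIP and OLDESP are
	 * moved into exactly those registers here.
	 */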
#ifdef CONFIG_XEN
	TRACE_IRQS_ON
	__ENABLE_INTERRUPTS
sysexit_scrit:	/**** START OF SYSEXIT CRITICAL REGION ****/
	__TEST_PENDING
	jnz 14f				# process more events if necessary...
	movl ESI(%esp), %esi
	sysexit
14:	__DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
sysexit_ecrit:	/**** END OF SYSEXIT CRITICAL REGION ****/
	push %esp
	call evtchn_do_upcall
	add $4,%esp
	jmp ret_from_intr
#else
	TRACE_IRQS_ON
	sti
	sysexit
#endif /* !CONFIG_XEN */
	CFI_ENDPROC


	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
	# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
#ifndef CONFIG_XEN
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	# Warning: OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
#else
restore_nocheck:
	movl EFLAGS(%esp), %eax
	testl $(VM_MASK|NMI_MASK), %eax
	CFI_REMEMBER_STATE
	jnz hypervisor_iret
	shr $9, %eax			# EAX[0] == IRET_EFLAGS.IF
	GET_VCPU_INFO
	andb evtchn_upcall_mask(%esi),%al
	andb $1,%al			# EAX[0] == IRET_EFLAGS.IF & event_mask
	CFI_REMEMBER_STATE
	jnz restore_all_enable_events	# != 0 => enable event delivery
#endif
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
1:	iret
.section .fixup,"ax"
iret_exc:
#ifndef CONFIG_XEN
	TRACE_IRQS_ON
	sti
#endif
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
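
	/*
	 * Like the segment pops in RESTORE_REGS, the iret at 1: can
	 * itself fault (e.g. on a bad return %cs or %ss); the
	 * __ex_table entry diverts that into iret_exc, which hands the
	 * problem to do_iret_error via the common error_code path.
	 */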

	CFI_RESTORE_STATE
#ifndef CONFIG_XEN
ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	subl $8, %esp			# reserve space for switch16 pointer
	CFI_ADJUST_CFA_OFFSET 8
	cli
	TRACE_IRQS_OFF
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
	TRACE_IRQS_IRET
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
#else
	ALIGN
restore_all_enable_events:
	TRACE_IRQS_ON
	__ENABLE_INTERRUPTS
scrit:	/**** START OF CRITICAL REGION ****/
	__TEST_PENDING
	jnz 14f				# process more events if necessary...
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
14:	__DISABLE_INTERRUPTS
	TRACE_IRQS_OFF
	jmp 11f
ecrit:	/**** END OF CRITICAL REGION ****/

	CFI_RESTORE_STATE
hypervisor_iret:
	andl $~NMI_MASK, EFLAGS(%esp)
	RESTORE_REGS
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp hypercall_page + (__HYPERVISOR_iret * 32)
#endif
	CFI_ENDPROC

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS		# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
#ifdef CONFIG_VM86
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
#endif

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS		# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#ifndef CONFIG_XEN
#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	je 28f; \
27:	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4; \
.section .fixup,"ax"; \
28:	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK; \
	jmp 27b; \
.previous

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
.if vector
	CFI_ADJUST_CFA_OFFSET -4
.endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr
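
/*
 * The .rept loop above alternates sections: each iteration emits a
 * tiny stub in .text that pushes the (inverted) vector number and
 * jumps to common_interrupt, plus one ".long 1b" in .data pointing at
 * that stub.  The .data words accumulate after the interrupt label,
 * yielding the interrupt[NR_IRQS] array of stub addresses used when
 * the IDT gates are set up.
 */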

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
#else
#define UNWIND_ESPFIX_STACK
#endif

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	xorl %eax, %eax
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	decl %eax			# eax = -1
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	/*CFI_REL_OFFSET es, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
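
	/*
	 * All the trap stubs below share this pattern: push an error
	 * code (a literal $0 when the CPU supplies none), push the
	 * address of the C handler, and jump to error_code, which
	 * builds the pt_regs frame, stashes -1 in orig_eax, and calls
	 * the handler through *%edi with the pt_regs pointer in %eax
	 * and the error code in %edx.
	 */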

#ifdef CONFIG_XEN
# A note on the "critical region" in our callback handler.
# We want to avoid stacking callback handlers due to events occurring
# during handling of the last event. To do this, we keep events disabled
# until we've done all processing. HOWEVER, we must enable events before
# popping the stack frame (can't be done atomically) and so it would still
# be possible to get enough handler activations to overflow the stack.
# Although unlikely, bugs of that kind are hard to track down, so we'd
# like to avoid the possibility.
# So, on entry to the handler we detect whether we interrupted an
# existing activation in its critical region -- if so, we pop the current
# activation and restart the handler using the previous one.
#
# The sysexit critical region is slightly different. sysexit
# atomically removes the entire stack frame. If we interrupt in the
# critical region we know that the entire frame is present and correct
# so we can simply throw away the new one.
ENTRY(hypervisor_callback)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	movl EIP(%esp),%eax
	cmpl $scrit,%eax
	jb 11f
	cmpl $ecrit,%eax
	jb critical_region_fixup
	cmpl $sysexit_scrit,%eax
	jb 11f
	cmpl $sysexit_ecrit,%eax
	ja 11f
	addl $OLDESP,%esp		# Remove eflags...ebx from stack frame.
11:	push %esp
	CFI_ADJUST_CFA_OFFSET 4
	call evtchn_do_upcall
	add $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_intr
	CFI_ENDPROC

# [How we do the fixup]. We want to merge the current stack frame with the
# just-interrupted frame. How we do this depends on where in the critical
# region the interrupted handler was executing, and so how many saved
# registers are in each frame. We do this quickly using the lookup table
# 'critical_fixup_table'. For each byte offset in the critical region, it
# provides the number of bytes which have already been popped from the
# interrupted stack frame.
critical_region_fixup:
	movzbl critical_fixup_table-scrit(%eax),%ecx # %ecx gets num bytes popped
	cmpb $0xff,%cl			# 0xff => vcpu_info critical region
	jne 15f
	xorl %ecx,%ecx
15:	leal (%esp,%ecx),%esi		# %esi points at end of src region
	leal OLDESP(%esp),%edi		# %edi points at end of dst region
	shrl $2,%ecx			# convert bytes to words
	je 17f				# skip loop if nothing to copy
16:	subl $4,%esi			# pre-decrementing copy loop
	subl $4,%edi
	movl (%esi),%eax
	movl %eax,(%edi)
	loop 16b
17:	movl %edi,%esp			# final %edi is top of merged stack
	jmp 11b

.section .rodata,"a"
critical_fixup_table:
	.byte 0xff,0xff,0xff		# testb $0xff,(%esi) = __TEST_PENDING
	.byte 0xff,0xff			# jnz 14f
	.byte 0x00			# pop %ebx
	.byte 0x04			# pop %ecx
	.byte 0x08			# pop %edx
	.byte 0x0c			# pop %esi
	.byte 0x10			# pop %edi
	.byte 0x14			# pop %ebp
	.byte 0x18			# pop %eax
	.byte 0x1c			# pop %ds
	.byte 0x20			# pop %es
	.byte 0x24,0x24,0x24		# add $4,%esp
	.byte 0x28			# iret
	.byte 0xff,0xff,0xff,0xff	# movb $1,1(%esi)
	.byte 0x00,0x00			# jmp 11b
.previous
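
# Example: if the event upcall interrupted the region just after
# "pop %edx", the saved EIP indexes the 0x0c entry: three words
# (%ebx, %ecx, %edx) of the interrupted frame were already popped,
# so the copy loop above slides those three re-saved words up under
# the rest of the old frame and the two frames merge into one.
# 0xff entries flag the vcpu_info test/mask instructions and are
# treated as zero bytes popped.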

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(failsafe_callback)
	pushl %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	jz 5f
	addl $16,%esp			# EAX != 0 => Category 2 (Bad IRET)
	jmp iret_exc
5:	addl $16,%esp			# EAX == 0 => Category 1 (Bad segment)
	RING0_INT_FRAME
	pushl $0
	SAVE_ALL
	jmp ret_from_exception
.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
#endif
	CFI_ENDPROC

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
#ifndef CONFIG_XEN
	movl %cr0, %eax
	testl $0x4, %eax		# EM (math emulation bit)
	je device_available_emulate
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
device_available_emulate:
#endif
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
	CFI_ENDPROC

#ifndef CONFIG_XEN
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl SYSENTER_stack_esp0+offset(%esp),%esp;	\
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp
#endif /* !CONFIG_XEN */
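
/*
 * The offset argument to FIX_STACK is how many bytes the trap pushed
 * on top of SYSENTER_stack_esp0 before we got here: 12 for a straight
 * debug/NMI hit on the sysenter instruction (eflags, cs, eip), and 24
 * in nmi_debug_stack_check below, where the NMI's three words sit on
 * top of the debug trap's three.
 */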

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
#ifndef CONFIG_XEN
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
#endif /* !CONFIG_XEN */
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
	.previous .text
#ifndef CONFIG_XEN
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_16bit_stack:
	RING0_INT_FRAME
	/* create the pointer to lss back */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	movzwl %sp, %esp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	CFI_ADJUST_CFA_OFFSET -20	# the frame has now moved
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	iret
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
#else
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	orl $NMI_MASK, EFLAGS(%esp)
	jmp restore_all
	CFI_ENDPROC
#endif
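
/*
 * In the Xen case above, setting the NMI_MASK pseudo-flag in the saved
 * eflags forces restore_all to take the hypervisor_iret path: that
 * path clears NMI_MASK again and returns via the __HYPERVISOR_iret
 * hypercall rather than a plain iret.
 */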

KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
	.previous .text

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
	.previous .text

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
	.previous .text

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif

#ifndef CONFIG_XEN
ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif /* !CONFIG_XEN */

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
	CFI_STARTPROC
	movl 4(%esp), %edx
	movl (%esp), %ecx
	leal 4(%esp), %eax
	movl %ebx, EBX(%edx)
	xorl %ebx, %ebx
	movl %ebx, ECX(%edx)
	movl %ebx, EDX(%edx)
	movl %esi, ESI(%edx)
	movl %edi, EDI(%edx)
	movl %ebp, EBP(%edx)
	movl %ebx, EAX(%edx)
	movl $__USER_DS, DS(%edx)
	movl $__USER_DS, ES(%edx)
	movl %ebx, ORIG_EAX(%edx)
	movl %ecx, EIP(%edx)
	movl 12(%esp), %ecx
	movl $__KERNEL_CS, CS(%edx)
	movl %ebx, EFLAGS(%edx)
	movl %eax, OLDESP(%edx)
	movl 8(%esp), %eax
	movl %ecx, 8(%esp)
	movl EBX(%edx), %ebx
	movl $__KERNEL_DS, OLDSS(%edx)
	jmpl *%eax
	CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif

ENTRY(fixup_4gb_segment)
	RING0_EC_FRAME
	pushl $do_fixup_4gb_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

.section .rodata,"a"
.align 4
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)