source: trunk/packages/xen-3.1/xen-3.1/xen/arch/x86/x86_32/entry.S @ 34

/*
 * Hypercall and fault low-level handling routines.
 *
 * Copyright (c) 2002-2004, K A Fraser
 * Copyright (c) 1991, 1992 Linus Torvalds
 *
 * Calling back to a guest OS:
 * ===========================
 *
 * First, we require that all callbacks (either via a supplied
 * interrupt-descriptor-table, or via the special event or failsafe callbacks
 * in the shared-info-structure) are to ring 1. This just makes life easier,
 * in that it means we don't have to do messy GDT/LDT lookups to find
 * out the privilege level of the return code-selector. That code
 * would just be a hassle to write, and would need to account for running
 * off the end of the GDT/LDT, for example. For all callbacks we check
 * that the provided return CS is not == __HYPERVISOR_{CS,DS}. Apart from that
 * we're safe as we don't allow a guest OS to install ring-0 privileges into
 * the GDT/LDT. It's up to the guest OS to ensure all returns via the IDT are
 * to ring 1. If not, we load incorrect SS/ESP values from the TSS (for ring 1
 * rather than the correct ring) and bad things are bound to ensue -- IRET is
 * likely to fault, and we may end up killing the domain (no harm can
 * come to Xen, though).
 *
 * When doing a callback, we check if the return CS is in ring 0. If so,
 * the callback is delayed until the next return to ring != 0.
 * If the return CS is in ring 1, then we create a callback frame
 * starting at return SS/ESP. The base of the frame does an intra-privilege
 * interrupt-return.
 * If the return CS is in ring > 1, we create a callback frame starting
 * at SS/ESP taken from the appropriate section of the current TSS. The base
 * of the frame does an inter-privilege interrupt-return.
 *
 * Note that the "failsafe callback" uses a special stackframe:
 * { return_DS, return_ES, return_FS, return_GS, return_EIP,
 *   return_CS, return_EFLAGS[, return_ESP, return_SS] }
 * That is, original values for DS/ES/FS/GS are placed on stack rather than
 * in DS/ES/FS/GS themselves. Why? It saves us loading them, only to have them
 * saved/restored in guest OS. Furthermore, if we load them we may cause
 * a fault if they are invalid, which is a hassle to deal with. We avoid
 * that problem if we don't load them :-) This property allows us to use
 * the failsafe callback as a fallback: if we ever fault on loading DS/ES/FS/GS
 * on return to ring != 0, we can simply package it up as a return via
 * the failsafe callback, and let the guest OS sort it out (perhaps by
 * killing an application process). Note that we also do this for any
 * faulting IRET -- just let the guest OS handle it via the event
 * callback.
 *
 * We terminate a domain in the following cases:
 *  - creating a callback stack frame (due to bad ring-1 stack).
 *  - faulting IRET on entry to failsafe callback handler.
 * So, each domain must keep its ring-1 %ss/%esp and failsafe callback
 * handler in good order (absolutely no faults allowed!).
 */
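
/*
 * For reference, the normal (non-failsafe) callback frame that
 * create_bounce_frame() below builds on the guest stack is, lowest
 * address first:
 *   { [error_code,] return_EIP, return_CS, return_EFLAGS
 *     [, return_ESP, return_SS] }
 * with saved_upcall_mask carried in bits 16-23 of the return_CS word.
 */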

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

#define GET_GUEST_REGS(reg)                     \
        movl $~(STACK_SIZE-1),reg;              \
        andl %esp,reg;                          \
        orl  $(STACK_SIZE-CPUINFO_sizeof),reg;
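
/*
 * (Worked example.) GET_GUEST_REGS masks %esp down to the base of the
 * per-CPU stack (stacks are STACK_SIZE-aligned), then ORs in
 * STACK_SIZE-CPUINFO_sizeof, leaving reg pointing at the guest's saved
 * cpu_user_regs at the top of the stack. Illustratively taking
 * STACK_SIZE as 0x2000: %esp = 0x3001f00 gives stack base 0x3000000 and
 * reg = 0x3002000-CPUINFO_sizeof.
 */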

#define GET_CURRENT(reg)         \
        movl $STACK_SIZE-4, reg; \
        orl  %esp, reg;          \
        andl $~3,reg;            \
        movl (reg),reg;
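
/*
 * (Worked example.) The pointer to the current struct vcpu lives in the
 * topmost word of the per-CPU stack. OR-ing %esp with STACK_SIZE-4 and
 * rounding down to a 4-byte boundary addresses that word; the final load
 * fetches the vcpu pointer. With the same illustrative values as above
 * (%esp = 0x3001f00, STACK_SIZE 0x2000), reg is 0x3001ffc before the
 * load.
 */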


        ALIGN
restore_all_guest:
        ASSERT_INTERRUPTS_DISABLED
        testl $X86_EFLAGS_VM,UREGS_eflags(%esp)
        jnz  restore_all_vm86
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testl $2,UREGS_cs(%esp)
        jnz   1f
        call  restore_ring0_guest
        jmp   restore_all_vm86
1:
#endif
.Lft1:  mov  UREGS_ds(%esp),%ds
.Lft2:  mov  UREGS_es(%esp),%es
.Lft3:  mov  UREGS_fs(%esp),%fs
.Lft4:  mov  UREGS_gs(%esp),%gs
restore_all_vm86:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
.Lft5:  iret
.section .fixup,"ax"
.Lfx5:  subl  $28,%esp
        pushl 28(%esp)                 # error_code/entry_vector
        movl  %eax,UREGS_eax+4(%esp)
        movl  %ebp,UREGS_ebp+4(%esp)
        movl  %edi,UREGS_edi+4(%esp)
        movl  %esi,UREGS_esi+4(%esp)
        movl  %edx,UREGS_edx+4(%esp)
        movl  %ecx,UREGS_ecx+4(%esp)
        movl  %ebx,UREGS_ebx+4(%esp)
.Lfx1:  SET_XEN_SEGMENTS(a)
        movl  %eax,%fs
        movl  %eax,%gs
        sti
        popl  %esi
        pushfl                         # EFLAGS
        movl  $__HYPERVISOR_CS,%eax
        pushl %eax                     # CS
        movl  $.Ldf1,%eax
        pushl %eax                     # EIP
        pushl %esi                     # error_code/entry_vector
        jmp   handle_exception
.Ldf1:  GET_CURRENT(%ebx)
        jmp   test_all_events
failsafe_callback:
        GET_CURRENT(%ebx)
        leal  VCPU_trap_bounce(%ebx),%edx
        movl  VCPU_failsafe_addr(%ebx),%eax
        movl  %eax,TRAPBOUNCE_eip(%edx)
        movl  VCPU_failsafe_sel(%ebx),%eax
        movw  %ax,TRAPBOUNCE_cs(%edx)
        movb  $TBF_FAILSAFE,TRAPBOUNCE_flags(%edx)
        bt    $_VGCF_failsafe_disables_events,VCPU_guest_context_flags(%ebx)
        jnc   1f
        orb   $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
1:      call  create_bounce_frame
        xorl  %eax,%eax
        movl  %eax,UREGS_ds(%esp)
        movl  %eax,UREGS_es(%esp)
        movl  %eax,UREGS_fs(%esp)
        movl  %eax,UREGS_gs(%esp)
        jmp   test_all_events
.previous
.section __pre_ex_table,"a"
        .long .Lft1,.Lfx1
        .long .Lft2,.Lfx1
        .long .Lft3,.Lfx1
        .long .Lft4,.Lfx1
        .long .Lft5,.Lfx5
.previous
.section __ex_table,"a"
        .long .Ldf1,failsafe_callback
.previous
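/*
 * Recovery path, roughly: a fault on the segment reloads (.Lft1-.Lft4) or
 * on the final IRET (.Lft5) is routed via __pre_ex_table to .Lfx1/.Lfx5,
 * which rebuild a register frame and re-enter handle_exception with a
 * synthesized return address of .Ldf1. The __ex_table entry for .Ldf1
 * then lets the fault handler redirect execution to failsafe_callback,
 * which bounces to the guest's registered failsafe handler with the
 * saved DS/ES/FS/GS slots zeroed.
 */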

        ALIGN
restore_all_xen:
        popl %ebx
        popl %ecx
        popl %edx
        popl %esi
        popl %edi
        popl %ebp
        popl %eax
        addl $4,%esp
        iret

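/*
 * Hypercall entry. For 32-bit PV guests the convention is: hypercall
 * index in %eax, arguments in %ebx, %ecx, %edx, %esi, %edi, %ebp (the
 * order of the shadow-parameter pushes below), entered via a guest
 * software interrupt; the result comes back in the guest's saved %eax.
 */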
        ALIGN
ENTRY(hypercall)
        subl $4,%esp
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL(b)
        sti
        GET_CURRENT(%ebx)
        cmpl  $NR_hypercalls,%eax
        jae   bad_hypercall
        PERFC_INCR(PERFC_hypercalls, %eax, %ebx)
#ifndef NDEBUG
        /* Create shadow parameters and corrupt those not used by this call. */
        pushl %eax
        pushl UREGS_eip+4(%esp)
        pushl 28(%esp) # EBP
        pushl 28(%esp) # EDI
        pushl 28(%esp) # ESI
        pushl 28(%esp) # EDX
        pushl 28(%esp) # ECX
        pushl 28(%esp) # EBX
        movzb hypercall_args_table(,%eax,1),%ecx
        leal  (%esp,%ecx,4),%edi
        subl  $6,%ecx
        negl  %ecx
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
#else
        /*
         * We need shadow parameters even on non-debug builds. We depend on the
         * original versions not being clobbered (needed to create a hypercall
         * continuation). But that isn't guaranteed by the function-call ABI.
         */
        pushl 20(%esp) # EBP
        pushl 20(%esp) # EDI
        pushl 20(%esp) # ESI
        pushl 20(%esp) # EDX
        pushl 20(%esp) # ECX
        pushl 20(%esp) # EBX
#endif
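/*
 * Worked example of the debug-build clobbering above: for
 * do_set_timer_op (index 15, 2 arguments per hypercall_args_table),
 * %edi ends up pointing at shadow slot 2 and %ecx becomes 6-2 = 4, so
 * the four unused shadow slots are filled with 0xDEADBEEF while the two
 * real arguments are preserved.
 */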
        call *hypercall_table(,%eax,4)
        addl  $24,%esp     # Discard the shadow parameters
#ifndef NDEBUG
        /* Deliberately corrupt real parameter regs used by this hypercall. */
        popl  %ecx         # Shadow EIP
        cmpl  %ecx,UREGS_eip+4(%esp)
        popl  %ecx         # Shadow hypercall index
        jne   skip_clobber # If EIP has changed then don't clobber
        movzb hypercall_args_table(,%ecx,1),%ecx
        movl  %esp,%edi
        movl  %eax,%esi
        movl  $0xDEADBEEF,%eax
        rep   stosl
        movl  %esi,%eax
skip_clobber:
#endif
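/*
 * The shadow-EIP comparison above detects hypercall continuations: if
 * the handler rewound the saved EIP so that the guest will re-issue the
 * hypercall, the argument registers hold live state and must not be
 * clobbered -- hence the jump to skip_clobber.
 */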
        movl %eax,UREGS_eax(%esp)       # save the return value

test_all_events:
        xorl %ecx,%ecx
        notl %ecx
        cli                             # tests must not race interrupts
/*test_softirqs:*/
        movl VCPU_processor(%ebx),%eax
        shl  $IRQSTAT_shift,%eax
        test %ecx,irq_stat(%eax,1)
        jnz  process_softirqs
        testb $1,VCPU_nmi_pending(%ebx)
        jnz  process_nmi
test_guest_events:
        movl VCPU_vcpu_info(%ebx),%eax
        testb $0xFF,VCPUINFO_upcall_mask(%eax)
        jnz  restore_all_guest
        testb $0xFF,VCPUINFO_upcall_pending(%eax)
        jz   restore_all_guest
/*process_guest_events:*/
        sti
        leal VCPU_trap_bounce(%ebx),%edx
        movl VCPU_event_addr(%ebx),%eax
        movl %eax,TRAPBOUNCE_eip(%edx)
        movl VCPU_event_sel(%ebx),%eax
        movw %ax,TRAPBOUNCE_cs(%edx)
        movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        jmp  test_all_events
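
/*
 * Event-delivery rule, as encoded above: an upcall is bounced to the
 * guest only when upcall_pending is set and upcall_mask is clear.
 * TBF_INTERRUPT makes create_bounce_frame set upcall_mask, so the guest
 * handler runs with further upcalls masked -- the analogue of the CPU
 * clearing EFLAGS.IF on interrupt delivery.
 */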

        ALIGN
process_softirqs:
        sti
        call do_softirq
        jmp  test_all_events

        ALIGN
process_nmi:
        testb $1,VCPU_nmi_masked(%ebx)
        jnz  test_guest_events
        movb $0,VCPU_nmi_pending(%ebx)
        movl VCPU_nmi_addr(%ebx),%eax
        test %eax,%eax
        jz   test_guest_events
        movb $1,VCPU_nmi_masked(%ebx)
        sti
        leal VCPU_trap_bounce(%ebx),%edx
        movl %eax,TRAPBOUNCE_eip(%edx)
        movw $FLAT_KERNEL_CS,TRAPBOUNCE_cs(%edx)
        movb $TBF_INTERRUPT,TRAPBOUNCE_flags(%edx)
        call create_bounce_frame
        jmp  test_all_events

bad_hypercall:
        movl $-ENOSYS,UREGS_eax(%esp)
        jmp  test_all_events

/* CREATE A BASIC EXCEPTION FRAME ON GUEST OS (RING-1) STACK:            */
/*   {EIP, CS, EFLAGS, [ESP, SS]}                                        */
/* %edx == trap_bounce, %ebx == struct vcpu                              */
/* %eax,%ecx are clobbered. %gs:%esi contain new UREGS_ss/UREGS_esp.     */
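/* Resulting guest-stack layout, lowest address first (DS-GS only with   */
/* TBF_FAILSAFE, error code only with TBF_EXCEPTION_ERRCODE, ESP/SS only */
/* for an inter-privilege return):                                       */
/*   { [DS, ES, FS, GS,] [error_code,] EIP, CS, EFLAGS [, ESP, SS] }     */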
create_bounce_frame:
        ASSERT_INTERRUPTS_ENABLED
        movl UREGS_eflags+4(%esp),%ecx
        movb UREGS_cs+4(%esp),%cl
        testl $(2|X86_EFLAGS_VM),%ecx
        jz   ring1 /* jump if returning to an existing ring-1 activation */
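        /*
         * Overlaying the low byte of the saved EFLAGS with the saved CS
         * (a trick used throughout this file) lets a single TESTL check
         * the CS RPL bits and EFLAGS.VM (bit 17) at once.
         */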
        movl VCPU_kernel_sp(%ebx),%esi
.Lft6:  mov  VCPU_kernel_ss(%ebx),%gs
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz   .Lnvm86_1
        subl $16,%esi       /* push ES/DS/FS/GS (VM86 stack frame) */
        movl UREGS_es+4(%esp),%eax
.Lft7:  movl %eax,%gs:(%esi)
        movl UREGS_ds+4(%esp),%eax
.Lft8:  movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
.Lft9:  movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
.Lft10: movl %eax,%gs:12(%esi)
.Lnvm86_1:
        subl $8,%esi        /* push SS/ESP (inter-priv iret) */
        movl UREGS_esp+4(%esp),%eax
.Lft11: movl %eax,%gs:(%esi)
        movl UREGS_ss+4(%esp),%eax
.Lft12: movl %eax,%gs:4(%esi)
        jmp 1f
ring1:  /* obtain ss/esp from oldss/oldesp -- a ring-1 activation exists */
        movl UREGS_esp+4(%esp),%esi
.Lft13: mov  UREGS_ss+4(%esp),%gs
1:      /* Construct a stack frame: EFLAGS, CS/EIP */
        movb TRAPBOUNCE_flags(%edx),%cl
        subl $12,%esi
        movl UREGS_eip+4(%esp),%eax
.Lft14: movl %eax,%gs:(%esi)
        movl VCPU_vcpu_info(%ebx),%eax
        pushl VCPUINFO_upcall_mask(%eax)
        testb $TBF_INTERRUPT,%cl
        setnz %ch                        # TBF_INTERRUPT -> set upcall mask
        orb  %ch,VCPUINFO_upcall_mask(%eax)
        popl %eax
        shll $16,%eax                    # Bits 16-23: saved_upcall_mask
        movw UREGS_cs+4(%esp),%ax        # Bits  0-15: CS
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        testw $2,%ax
        jnz  .Lft15
        and  $~3,%ax                     # RPL 1 -> RPL 0
#endif
.Lft15: movl %eax,%gs:4(%esi)
        test $0x00FF0000,%eax            # Bits 16-23: saved_upcall_mask
        setz %ch                         # %ch == !saved_upcall_mask
        movl UREGS_eflags+4(%esp),%eax
        andl $~X86_EFLAGS_IF,%eax
        shlb $1,%ch                      # Bit 9 (EFLAGS.IF)
        orb  %ch,%ah                     # Fold EFLAGS.IF into %eax
.Lft16: movl %eax,%gs:8(%esi)
        test $TBF_EXCEPTION_ERRCODE,%cl
        jz   1f
        subl $4,%esi                    # push error_code onto guest frame
        movl TRAPBOUNCE_error_code(%edx),%eax
.Lft17: movl %eax,%gs:(%esi)
1:      testb $TBF_FAILSAFE,%cl
        jz   2f
        subl $16,%esi                # add DS/ES/FS/GS to failsafe stack frame
        testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz   .Lnvm86_2
        xorl %eax,%eax               # VM86: we write zero selector values
.Lft18: movl %eax,%gs:(%esi)
.Lft19: movl %eax,%gs:4(%esi)
.Lft20: movl %eax,%gs:8(%esi)
.Lft21: movl %eax,%gs:12(%esi)
        jmp  2f
.Lnvm86_2:
        movl UREGS_ds+4(%esp),%eax   # non-VM86: write real selector values
.Lft22: movl %eax,%gs:(%esi)
        movl UREGS_es+4(%esp),%eax
.Lft23: movl %eax,%gs:4(%esi)
        movl UREGS_fs+4(%esp),%eax
.Lft24: movl %eax,%gs:8(%esi)
        movl UREGS_gs+4(%esp),%eax
.Lft25: movl %eax,%gs:12(%esi)
2:      testl $X86_EFLAGS_VM,UREGS_eflags+4(%esp)
        jz   .Lnvm86_3
        xorl %eax,%eax      /* zero DS-GS, just as a real CPU would */
        movl %eax,UREGS_ds+4(%esp)
        movl %eax,UREGS_es+4(%esp)
        movl %eax,UREGS_fs+4(%esp)
        movl %eax,UREGS_gs+4(%esp)
.Lnvm86_3:
        /* Rewrite our stack frame and return to ring 1. */
        /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
        andl $~(X86_EFLAGS_VM|X86_EFLAGS_RF|\
                X86_EFLAGS_NT|X86_EFLAGS_TF),UREGS_eflags+4(%esp)
        mov  %gs,UREGS_ss+4(%esp)
        movl %esi,UREGS_esp+4(%esp)
        movzwl TRAPBOUNCE_cs(%edx),%eax
        /* Null selectors (0-3) are not allowed. */
        testl $~3,%eax
        jz   domain_crash_synchronous
        movl %eax,UREGS_cs+4(%esp)
        movl TRAPBOUNCE_eip(%edx),%eax
        movl %eax,UREGS_eip+4(%esp)
        ret
.section __ex_table,"a"
        .long  .Lft6,domain_crash_synchronous ,  .Lft7,domain_crash_synchronous
        .long  .Lft8,domain_crash_synchronous ,  .Lft9,domain_crash_synchronous
        .long .Lft10,domain_crash_synchronous , .Lft11,domain_crash_synchronous
        .long .Lft12,domain_crash_synchronous , .Lft13,domain_crash_synchronous
        .long .Lft14,domain_crash_synchronous , .Lft15,domain_crash_synchronous
        .long .Lft16,domain_crash_synchronous , .Lft17,domain_crash_synchronous
        .long .Lft18,domain_crash_synchronous , .Lft19,domain_crash_synchronous
        .long .Lft20,domain_crash_synchronous , .Lft21,domain_crash_synchronous
        .long .Lft22,domain_crash_synchronous , .Lft23,domain_crash_synchronous
        .long .Lft24,domain_crash_synchronous , .Lft25,domain_crash_synchronous
.previous

domain_crash_synchronous_string:
        .asciz "domain_crash_sync called from entry.S (%lx)\n"

domain_crash_synchronous:
        pushl $domain_crash_synchronous_string
        call  printk
        jmp   __domain_crash_synchronous

        ALIGN
ENTRY(ret_from_intr)
        GET_CURRENT(%ebx)
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   test_all_events
        jmp   restore_all_xen

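/*
 * Exception entry stubs. The top word of the frame packs error_code in
 * bits 0-15 and entry_vector in bits 16-31: vectors without a CPU-pushed
 * error code push $TRAP_xxx<<16 (vector plus a zero error code), while
 * vectors with one overwrite just the upper half of the CPU-pushed
 * error-code slot via movw $TRAP_xxx,2(%esp).
 */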
ENTRY(divide_error)
        pushl $TRAP_divide_error<<16
        ALIGN
handle_exception:
        FIXUP_RING0_GUEST_STACK
        SAVE_ALL_NOSEGREGS(a)
        SET_XEN_SEGMENTS(a)
        testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%esp)
        jz    exception_with_ints_disabled
        sti                             # re-enable interrupts
1:      xorl  %eax,%eax
        movw  UREGS_entry_vector(%esp),%ax
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        GET_CURRENT(%ebx)
        PERFC_INCR(PERFC_exceptions, %eax, %ebx)
        call  *exception_table(,%eax,4)
        addl  $4,%esp
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jz    restore_all_xen
        leal  VCPU_trap_bounce(%ebx),%edx
        testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%edx)
        jz    test_all_events
        call  create_bounce_frame
        movb  $0,TRAPBOUNCE_flags(%edx)
        jmp   test_all_events

exception_with_ints_disabled:
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax   # interrupts disabled outside Xen?
        jnz   FATAL_exception_with_ints_disabled
        pushl %esp
        call  search_pre_exception_table
        addl  $4,%esp
        testl %eax,%eax                 # no fixup code for faulting EIP?
        jz    1b
        movl  %eax,UREGS_eip(%esp)
        movl  %esp,%esi
        subl  $4,%esp
        movl  %esp,%edi
        movl  $UREGS_kernel_sizeof/4,%ecx
        rep;  movsl                     # make room for error_code/entry_vector
        movl  UREGS_error_code(%esp),%eax # error_code/entry_vector
        movl  %eax,UREGS_kernel_sizeof(%esp)
        jmp   restore_all_xen           # return to fixup code
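
/*
 * The frame shuffle above copies the saved-register block down 4 bytes
 * so that, once restore_all_xen returns through the shifted frame, the
 * fixup code named in __pre_ex_table (e.g. .Lfx5) finds the spare
 * error_code/entry_vector word on top of its stack.
 */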

FATAL_exception_with_ints_disabled:
        xorl  %esi,%esi
        movw  UREGS_entry_vector(%esp),%si
        movl  %esp,%edx
        pushl %edx                      # push the cpu_user_regs pointer
        pushl %esi                      # push the trapnr (entry vector)
        call  fatal_trap
        ud2

ENTRY(coprocessor_error)
        pushl $TRAP_copro_error<<16
        jmp   handle_exception

ENTRY(simd_coprocessor_error)
        pushl $TRAP_simd_error<<16
        jmp   handle_exception

ENTRY(device_not_available)
        pushl $TRAP_no_device<<16
        jmp   handle_exception

ENTRY(debug)
        pushl $TRAP_debug<<16
        jmp   handle_exception

ENTRY(int3)
        pushl $TRAP_int3<<16
        jmp   handle_exception

ENTRY(overflow)
        pushl $TRAP_overflow<<16
        jmp   handle_exception

ENTRY(bounds)
        pushl $TRAP_bounds<<16
        jmp   handle_exception

ENTRY(invalid_op)
        pushl $TRAP_invalid_op<<16
        jmp   handle_exception

ENTRY(coprocessor_segment_overrun)
        pushl $TRAP_copro_seg<<16
        jmp   handle_exception

ENTRY(invalid_TSS)
        movw  $TRAP_invalid_tss,2(%esp)
        jmp   handle_exception

ENTRY(segment_not_present)
        movw  $TRAP_no_segment,2(%esp)
        jmp   handle_exception

ENTRY(stack_segment)
        movw  $TRAP_stack_error,2(%esp)
        jmp   handle_exception

ENTRY(general_protection)
        movw  $TRAP_gp_fault,2(%esp)
        jmp   handle_exception

ENTRY(alignment_check)
        movw  $TRAP_alignment_check,2(%esp)
        jmp   handle_exception

ENTRY(page_fault)
        movw  $TRAP_page_fault,2(%esp)
        jmp   handle_exception

ENTRY(machine_check)
        pushl $TRAP_machine_check<<16
        jmp   handle_exception

ENTRY(spurious_interrupt_bug)
        pushl $TRAP_spurious_int<<16
        jmp   handle_exception

ENTRY(early_page_fault)
        SAVE_ALL_NOSEGREGS(a)
        movl  %esp,%edx
        pushl %edx
        call  do_early_page_fault
        addl  $4,%esp
        jmp   restore_all_xen

ENTRY(nmi)
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
        # NMI entry protocol is incompatible with guest kernel in ring 0.
        iret
#else
        # Save state but do not trash the segment registers!
        # We may otherwise be unable to reload them or copy them to ring 1.
        pushl %eax
        SAVE_ALL_NOSEGREGS(a)

        # We can only process the NMI if:
        #  A. We are the outermost Xen activation (in which case we have
        #     the selectors safely saved on our stack)
        #  B. DS and ES contain sane Xen values.
        # In all other cases we bail without touching DS-GS, as we have
        # interrupted an enclosing Xen activation in tricky prologue or
        # epilogue code.
        movl  UREGS_eflags(%esp),%eax
        movb  UREGS_cs(%esp),%al
        testl $(3|X86_EFLAGS_VM),%eax
        jnz   continue_nmi
        movl  %ds,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi
        movl  %es,%eax
        cmpw  $(__HYPERVISOR_DS),%ax
        jne   defer_nmi

continue_nmi:
        SET_XEN_SEGMENTS(d)
        movl  %esp,%edx
        pushl %edx
        call  do_nmi
        addl  $4,%esp
        jmp   ret_from_intr

defer_nmi:
        movl  $FIXMAP_apic_base,%eax
        # apic_wait_icr_idle()
1:      movl  %ss:APIC_ICR(%eax),%ebx
        testl $APIC_ICR_BUSY,%ebx
        jnz   1b
        # __send_IPI_shortcut(APIC_DEST_SELF, TRAP_deferred_nmi)
        movl  $(APIC_DM_FIXED | APIC_DEST_SELF | APIC_DEST_PHYSICAL | \
                TRAP_deferred_nmi),%ss:APIC_ICR(%eax)
        jmp   restore_all_xen
#endif /* !CONFIG_X86_SUPERVISOR_MODE_KERNEL */
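
/*
 * defer_nmi above re-posts the NMI to this CPU as an ordinary vectored
 * interrupt (TRAP_deferred_nmi) via an APIC self-IPI, so it is handled
 * only once the interrupted context's segment state is known-good.
 */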

ENTRY(setup_vm86_frame)
        # Copies the entire stack frame forwards by 16 bytes.
        .macro copy_vm86_words count=18
        .if \count
        pushl ((\count-1)*4)(%esp)
        popl  ((\count-1)*4)+16(%esp)
        copy_vm86_words "(\count-1)"
        .endif
        .endm
        copy_vm86_words
        addl $16,%esp
        ret
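
/*
 * copy_vm86_words recurses at assembly time, expanding into 18
 * pushl/popl pairs; each pair copies one dword of the frame (return
 * address included) to an address 16 bytes higher, and the trailing
 * addl $16,%esp leaves %esp pointing at the relocated frame.
 */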

.data

ENTRY(exception_table)
        .long do_divide_error
        .long do_debug
        .long 0 # nmi
        .long do_int3
        .long do_overflow
        .long do_bounds
        .long do_invalid_op
        .long math_state_restore
        .long 0 # double fault
        .long do_coprocessor_segment_overrun
        .long do_invalid_TSS
        .long do_segment_not_present
        .long do_stack_segment
        .long do_general_protection
        .long do_page_fault
        .long do_spurious_interrupt_bug
        .long do_coprocessor_error
        .long do_alignment_check
        .long do_machine_check
        .long do_simd_coprocessor_error

ENTRY(hypercall_table)
        .long do_set_trap_table     /*  0 */
        .long do_mmu_update
        .long do_set_gdt
        .long do_stack_switch
        .long do_set_callbacks
        .long do_fpu_taskswitch     /*  5 */
        .long do_sched_op_compat
        .long do_platform_op
        .long do_set_debugreg
        .long do_get_debugreg
        .long do_update_descriptor  /* 10 */
        .long do_ni_hypercall
        .long do_memory_op
        .long do_multicall
        .long do_update_va_mapping
        .long do_set_timer_op       /* 15 */
        .long do_event_channel_op_compat
        .long do_xen_version
        .long do_console_io
        .long do_physdev_op_compat
        .long do_grant_table_op     /* 20 */
        .long do_vm_assist
        .long do_update_va_mapping_otherdomain
        .long do_iret
        .long do_vcpu_op
        .long do_ni_hypercall       /* 25 */
        .long do_mmuext_op
        .long do_acm_op
        .long do_nmi_op
        .long do_sched_op
        .long do_callback_op        /* 30 */
        .long do_xenoprof_op
        .long do_event_channel_op
        .long do_physdev_op
        .long do_hvm_op
        .long do_sysctl             /* 35 */
        .long do_domctl
        .long do_kexec_op
        .rept NR_hypercalls-((.-hypercall_table)/4)
        .long do_ni_hypercall
        .endr

ENTRY(hypercall_args_table)
        .byte 1 /* do_set_trap_table    */  /*  0 */
        .byte 4 /* do_mmu_update        */
        .byte 2 /* do_set_gdt           */
        .byte 2 /* do_stack_switch      */
        .byte 4 /* do_set_callbacks     */
        .byte 1 /* do_fpu_taskswitch    */  /*  5 */
        .byte 2 /* do_sched_op_compat   */
        .byte 1 /* do_platform_op       */
        .byte 2 /* do_set_debugreg      */
        .byte 1 /* do_get_debugreg      */
        .byte 4 /* do_update_descriptor */  /* 10 */
        .byte 0 /* do_ni_hypercall      */
        .byte 2 /* do_memory_op         */
        .byte 2 /* do_multicall         */
        .byte 4 /* do_update_va_mapping */
        .byte 2 /* do_set_timer_op      */  /* 15 */
        .byte 1 /* do_event_channel_op_compat */
        .byte 2 /* do_xen_version       */
        .byte 3 /* do_console_io        */
        .byte 1 /* do_physdev_op_compat */
        .byte 3 /* do_grant_table_op    */  /* 20 */
        .byte 2 /* do_vm_assist         */
        .byte 5 /* do_update_va_mapping_otherdomain */
        .byte 0 /* do_iret              */
        .byte 3 /* do_vcpu_op           */
        .byte 0 /* do_ni_hypercall      */  /* 25 */
        .byte 4 /* do_mmuext_op         */
        .byte 1 /* do_acm_op            */
        .byte 2 /* do_nmi_op            */
        .byte 2 /* do_sched_op          */
        .byte 2 /* do_callback_op       */  /* 30 */
        .byte 2 /* do_xenoprof_op       */
        .byte 2 /* do_event_channel_op  */
        .byte 2 /* do_physdev_op        */
        .byte 2 /* do_hvm_op            */
        .byte 1 /* do_sysctl            */  /* 35 */
        .byte 1 /* do_domctl            */
        .byte 2 /* do_kexec_op          */
        .rept NR_hypercalls-(.-hypercall_args_table)
        .byte 0 /* do_ni_hypercall      */
        .endr