/*
 * exits.S: AMD-V architecture-specific exit handling.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, AMD Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

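/*
 * Fetch the 'current' vcpu pointer.  Xen keeps it in the highest 8-byte
 * slot of the per-cpu hypervisor stack: round %rsp up to the last aligned
 * slot of the STACK_SIZE-sized stack and dereference it.
 */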
#define GET_CURRENT(reg)         \
        movq $STACK_SIZE-8, reg; \
        orq  %rsp, reg;          \
        andq $~7, reg;           \
        movq (reg), reg;

/*
 * At VMExit time the processor saves the guest selectors, rsp, rip,
 * and rflags.  Therefore we don't save them, but simply decrement
 * the kernel stack pointer to make it consistent with the stack frame
 * at usual interruption time.  Host rflags is not saved by AMD-V,
 * so we set it to a fixed value (HVM_MONITOR_RFLAGS).
 *
 * We also need the room, especially because the orig_eax field is used
 * by do_IRQ().  Compared with cpu_user_regs, we skip pushing the following:
 *     (10)  u64 gs;
 *     (9)   u64 fs;
 *     (8)   u64 ds;
 *     (7)   u64 es;
 *           <- get_stack_bottom() (= HOST_ESP)
 *     (6)   u64 ss;
 *     (5)   u64 rsp;
 *     (4)   u64 rflags;
 *     (3)   u64 cs;
 *     (2)   u64 rip;
 *     (2/1) u32 entry_vector;
 *     (1/1) u32 error_code;
 */
#define HVM_MONITOR_RFLAGS 0x202 /* IF on */
#define NR_SKIPPED_REGS    6     /* ss, rsp, rflags, cs, rip, error_code/entry_vector */
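/*
 * The register pushes below must mirror the layout of struct cpu_user_regs
 * (%r15 lands at the lowest address), so that the C handlers can treat the
 * resulting frame like any other exception frame.
 */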
#define HVM_SAVE_ALL_NOSEGREGS \
        pushq $HVM_MONITOR_RFLAGS; \
        popfq; \
        subq  $(NR_SKIPPED_REGS*8), %rsp; \
        pushq %rdi; \
        pushq %rsi; \
        pushq %rdx; \
        pushq %rcx; \
        pushq %rax; \
        pushq %r8; \
        pushq %r9; \
        pushq %r10; \
        pushq %r11; \
        pushq %rbx; \
        pushq %rbp; \
        pushq %r12; \
        pushq %r13; \
        pushq %r14; \
        pushq %r15

#define HVM_RESTORE_ALL_NOSEGREGS \
        popq %r15; \
        popq %r14; \
        popq %r13; \
        popq %r12; \
        popq %rbp; \
        popq %rbx; \
        popq %r11; \
        popq %r10; \
        popq %r9; \
        popq %r8; \
        popq %rax; \
        popq %rcx; \
        popq %rdx; \
        popq %rsi; \
        popq %rdi; \
        addq $(NR_SKIPPED_REGS*8), %rsp

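/*
 * The SVM instructions are hand-encoded as raw opcode bytes so that this
 * file assembles even with toolchains that lack the SVM mnemonics.  VMRUN,
 * VMLOAD and VMSAVE all take the physical address of a VMCB in %rax.
 */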
#define VMRUN  .byte 0x0F,0x01,0xD8
#define VMLOAD .byte 0x0F,0x01,0xDA
#define VMSAVE .byte 0x0F,0x01,0xDB
#define STGI   .byte 0x0F,0x01,0xDC
#define CLGI   .byte 0x0F,0x01,0xDD

ENTRY(svm_asm_do_resume)
        GET_CURRENT(%rbx)
        cli                             # tests must not race interrupts
        movl  VCPU_processor(%rbx), %eax
        shl   $IRQSTAT_shift, %rax      # %rax = processor * sizeof(irq_cpustat_t)
        leaq  irq_stat(%rip), %rdx
        testl $~0, (%rdx, %rax, 1)      # any softirq pending on this cpu?
        jnz   svm_process_softirqs
        call  svm_intr_assist           # arrange pending interrupt injection for the guest
        call  svm_load_cr2              # restore the guest's %cr2

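/*
 * Clear the global interrupt flag (GIF) for the world switch.  With GIF
 * clear, physical interrupts are held pending regardless of rflags.IF, so
 * the 'sti' below takes effect only once STGI is executed after the
 * #VMEXIT, at which point any interrupt that arrived meanwhile is taken.
 */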
        CLGI
        sti
        GET_CURRENT(%rbx)
        movq  VCPU_svm_vmcb(%rbx), %rcx
        movq  UREGS_rax(%rsp), %rax
        movq  %rax, VMCB_rax(%rcx)      # VMRUN loads guest %rax from the VMCB
        leaq  root_vmcb_pa(%rip), %rax
        movl  VCPU_processor(%rbx), %ecx
        movq  (%rax,%rcx,8), %rax       # %rax = this cpu's root VMCB (physical)
        VMSAVE                          # save host state not preserved by VMRUN

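/*
 * root_vmcb_pa[] holds the physical address of a per-cpu 'root' (host)
 * VMCB.  VMSAVE with %rax pointing at it stashes the host segment and
 * system-register state that VMRUN does not preserve, to be restored by
 * the matching VMLOAD after the guest exits.
 */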
        movq VCPU_svm_vmcb_pa(%rbx), %rax
        popq %r15
        popq %r14
        popq %r13
        popq %r12
        popq %rbp
        popq %rbx
        popq %r11
        popq %r10
        popq %r9
        popq %r8
        /*
         * Skip the %rax slot: %rax must keep the VMCB physical address for
         * VMRUN.  The guest's %rax is restored by VMRUN from the VMCB.
         */
        addq $8, %rsp
        popq %rcx
        popq %rdx
        popq %rsi
        popq %rdi
        addq $(NR_SKIPPED_REGS*8), %rsp

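/*
 * World switch: %rax holds the guest VMCB physical address.  VMLOAD
 * fetches the guest state that VMRUN does not load itself, VMRUN enters
 * the guest and returns here on the next #VMEXIT, and VMSAVE stashes that
 * extra guest state back into the VMCB before the frame is rebuilt below.
 */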
        VMLOAD
        VMRUN
        VMSAVE
        HVM_SAVE_ALL_NOSEGREGS

        GET_CURRENT(%rbx)
        leaq  root_vmcb_pa(%rip), %rax
        movl  VCPU_processor(%rbx), %ecx
        movq  (%rax,%rcx,8), %rax
        VMLOAD                          # reload the host state saved above

        STGI                            # interrupts are deliverable again
.globl svm_stgi_label;
svm_stgi_label:
        movq  %rsp, %rdi                # arg1: the cpu_user_regs frame
        call  svm_vmexit_handler
        jmp   svm_asm_do_resume

        ALIGN
svm_process_softirqs:
        sti                             # safe to take interrupts again
        call  do_softirq
        jmp   svm_asm_do_resume