/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_phy_mode.c: emulating domain physical mode.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Arun Sharma (arun.sharma@intel.com)
 * Kun Tian (Kevin Tian) (kevin.tian@intel.com)
 * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 */


#include <asm/processor.h>
#include <asm/gcc_intrin.h>
#include <asm/vmx_phy_mode.h>
#include <xen/sched.h>
#include <asm/pgtable.h>
#include <asm/vmmu.h>
int valid_mm_mode[8] = {
    GUEST_PHYS, /* (it, dt, rt) -> (0, 0, 0) */
    INV_MODE,
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (0, 1, 1) */
    INV_MODE,
    GUEST_PHYS, /* (it, dt, rt) -> (1, 0, 1) */
    INV_MODE,
    GUEST_VIRT, /* (it, dt, rt) -> (1, 1, 1) */
};
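/*
 * The array above is indexed by the guest psr's (it, dt, rt) bits packed as
 * (it << 2) | (dt << 1) | rt, as the per-entry comments show (presumably the
 * value computed by MODE_IND() in vmx_phy_mode.h).
 */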

/*
 * Special notes:
 * - Indexed by the (it, dt, rt) bit sequence
 * - Only existing mode transitions are allowed in this table
 * - RSE is placed in lazy mode when emulating guest partial mode
 * - If a gva happens to fall in rr0 or rr4, the only allowed case is
 *   identity mapping (gva == gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
    /* 2004/09/12(Kevin): Allow switch to self */
    /*
     * (it,dt,rt): (0,0,0) -> (1,1,1)
     * This kind of transition usually occurs in the very early
     * stage of the Linux boot-up procedure. Another case is in efi
     * and pal calls. (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (0,0,0) -> (0,1,1)
     * This kind of transition is found when OSYa exits the efi boot
     * service. Because gva == gpa in this case (same region), data
     * accesses can be satisfied even though the itlb entry for
     * physical emulation is hit.
     */
    {SW_SELF, 0, 0, SW_NOP, 0, 0, 0, SW_P2V},
    {0, 0, 0, 0, 0, 0, 0, 0},
    {0, 0, 0, 0, 0, 0, 0, 0},
    /*
     * (it,dt,rt): (0,1,1) -> (1,1,1)
     * This kind of transition is found in OSYa.
     *
     * (it,dt,rt): (0,1,1) -> (0,0,0)
     * This kind of transition is found in OSYa.
     */
    {SW_NOP, 0, 0, SW_SELF, 0, 0, 0, SW_P2V},
    /* (it,dt,rt): (1,0,0) -> (1,1,1) */
    {0, 0, 0, 0, 0, 0, 0, SW_P2V},
    /*
     * (it,dt,rt): (1,0,1) -> (1,1,1)
     * This kind of transition usually occurs when Linux returns
     * from the low level TLB miss handlers.
     * (see "arch/ia64/kernel/ivt.S")
     */
    {0, 0, 0, 0, 0, SW_SELF, 0, SW_P2V},
    {0, 0, 0, 0, 0, 0, 0, 0},
    /*
     * (it,dt,rt): (1,1,1) -> (1,0,1)
     * This kind of transition usually occurs in the Linux low level
     * TLB miss handler. (see "arch/ia64/kernel/ivt.S")
     *
     * (it,dt,rt): (1,1,1) -> (0,0,0)
     * This kind of transition usually occurs in pal and efi calls,
     * which require running in physical mode.
     * (see "arch/ia64/kernel/head.S")
     *
     * (it,dt,rt): (1,1,1) -> (1,0,0)
     */
    {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF},
};
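/*
 * Example: a (0,0,0) -> (1,1,1) transition looks up mm_switch_table[0][7],
 * which is SW_P2V; the reverse (1,1,1) -> (0,0,0) transition looks up
 * mm_switch_table[7][0], which is SW_V2P.
 */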

void
physical_mode_init(VCPU *vcpu)
{
    vcpu->arch.mode_flags = GUEST_IN_PHY;
}

extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *, void *);

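/*
 * Handle a TLB miss taken while the guest runs in emulated physical mode:
 * insert an identity mapping (pa == va) with the PHY_PAGE_WB attributes into
 * the VHPT, using the page size from the current region register.
 */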
void
physical_tlb_miss(VCPU *vcpu, u64 vadr, int type)
{
    u64 pte;
    ia64_rr rr;
    rr.rrval = ia64_get_rr(vadr);
    pte = vadr & _PAGE_PPN_MASK;
    pte = pte | PHY_PAGE_WB;
    thash_vhpt_insert(vcpu, pte, (rr.ps << 2), vadr, type);
    return;
}

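/*
 * Initialize the guest's virtual region registers to a default value.
 * 0x38 presumably encodes ps = 14 (16KB pages) with rid 0 in the ia64 rr
 * layout, and region 7 gets 0x738 (same ps, rid 7); the ve bit of the
 * metaphysical rr0/rr4 is set so the VHPT is walked even in guest
 * physical mode.
 */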
void
vmx_init_all_rr(VCPU *vcpu)
{
    VMX(vcpu, vrr[VRN0]) = 0x38;
    // enable vhpt in guest physical mode
    vcpu->arch.metaphysical_rr0 |= 1;
    vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, vrr[VRN1]) = 0x38;
    VMX(vcpu, vrr[VRN2]) = 0x38;
    VMX(vcpu, vrr[VRN3]) = 0x38;
    VMX(vcpu, vrr[VRN4]) = 0x38;
    // enable vhpt in guest physical mode
    vcpu->arch.metaphysical_rr4 |= 1;
    vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(vcpu, 0x38);
    VMX(vcpu, vrr[VRN5]) = 0x38;
    VMX(vcpu, vrr[VRN6]) = 0x38;
    VMX(vcpu, vrr[VRN7]) = 0x738;
}

extern void *pal_vaddr;

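/*
 * Load the vcpu's region registers into hardware on context switch:
 * rr0/rr4 get either the metaphysical or the saved virtual values depending
 * on the current guest mode; the remaining regions are translated from the
 * guest's vrr values.
 */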
void
vmx_load_all_rr(VCPU *vcpu)
{
    unsigned long psr;

    local_irq_save(psr);

    /* WARNING: virtual mode and physical mode must not coexist
     * in the same region
     */
    if (is_physical_mode(vcpu)) {
        if (vcpu->arch.mode_flags & GUEST_PHY_EMUL) {
            panic_domain(vcpu_regs(vcpu),
                         "Unexpected domain switch in phy emul\n");
        }
        ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
        ia64_dv_serialize_data();
        ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
        ia64_dv_serialize_data();
    } else {
        ia64_set_rr((VRN0 << VRN_SHIFT),
                    vcpu->arch.metaphysical_saved_rr0);
        ia64_dv_serialize_data();
        ia64_set_rr((VRN4 << VRN_SHIFT),
                    vcpu->arch.metaphysical_saved_rr4);
        ia64_dv_serialize_data();
    }

    /* rr1-rr6 are reloaded here; switching rr7 is postponed to
     * vmx_switch_rr7(), the last step before resuming back to the guest. */
    ia64_set_rr((VRN1 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN1])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN2 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN2])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN3 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN3])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN5 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN5])));
    ia64_dv_serialize_data();
    ia64_set_rr((VRN6 << VRN_SHIFT),
                vrrtomrr(vcpu, VMX(vcpu, vrr[VRN6])));
    ia64_dv_serialize_data();
    vmx_switch_rr7(vrrtomrr(vcpu, VMX(vcpu, vrr[VRN7])),
                   (void *)vcpu->domain->shared_info,
                   (void *)vcpu->arch.privregs,
                   (void *)vcpu->arch.vhpt.hash, pal_vaddr);
    ia64_set_pta(VMX(vcpu, mpta));
    vmx_ia64_set_dcr(vcpu);

    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
}

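/*
 * Enter the physical-emulation RIDs: called on a V -> P mode switch (SW_V2P)
 * and from recover_if_physical_mode() below.
 */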
void
switch_to_physical_rid(VCPU *vcpu)
{
    u64 psr;
    /* Switch rr[0] and rr[4] to the metaphysical RIDs used for
     * physical mode emulation */
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rr0);
    ia64_srlz_d();
    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rr4);
    ia64_srlz_d();

    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

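/*
 * Restore the guest's saved virtual-mode RIDs into rr[0] and rr[4]:
 * called on a P -> V mode switch (SW_P2V) and from prepare_if_physical_mode().
 */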
void
switch_to_virtual_rid(VCPU *vcpu)
{
    u64 psr;
    psr = ia64_clear_ic();
    ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    ia64_srlz_d();
    ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    ia64_srlz_d();
    ia64_set_psr(psr);
    ia64_srlz_i();
    return;
}

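/*
 * Look up the action for an opsr -> npsr transition in mm_switch_table,
 * indexed by the (it, dt, rt) bits of each psr.
 */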
static int mm_switch_action(IA64_PSR opsr, IA64_PSR npsr)
{
    return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

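/*
 * Perform the virtual <-> physical mode switch required by the psr change,
 * as determined by mm_switch_table.
 */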
void
switch_mm_mode(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    int act;
    act = mm_switch_action(old_psr, new_psr);
    perfc_incra(vmx_switch_mm_mode, act);
    switch (act) {
    case SW_V2P:
//        printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        switch_to_physical_rid(vcpu);
        /*
         * Set RSE to enforced lazy mode, to prevent active RSE
         * save/restore while in guest physical mode.
         */
        vcpu->arch.mode_flags |= GUEST_IN_PHY;
        break;
    case SW_P2V:
//        printk("P -> V mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        switch_to_virtual_rid(vcpu);
        /*
         * Recover the old mode, which was saved when entering
         * guest physical mode.
         */
        vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
        break;
    case SW_SELF:
        printk("Switch to self-0x%lx!!! MM mode doesn't change...\n",
               old_psr.val);
        break;
    case SW_NOP:
//        printk("No action required for mode transition: (0x%lx -> 0x%lx)\n",
//               old_psr.val, new_psr.val);
        break;
    default:
        /* Sanity check */
        panic_domain(vcpu_regs(vcpu), "Unexpected virtual <--> physical mode "
                     "transition,old:%lx,new:%lx\n", old_psr.val, new_psr.val);
        break;
    }
    return;
}


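/*
 * Only the it/dt/rt bits affect the translation mode, so a full
 * switch_mm_mode() is needed only when one of them changes.
 */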
void
check_mm_mode_switch(VCPU *vcpu, IA64_PSR old_psr, IA64_PSR new_psr)
{
    if ((old_psr.dt != new_psr.dt) ||
        (old_psr.it != new_psr.it) ||
        (old_psr.rt != new_psr.rt)) {
        switch_mm_mode(vcpu, old_psr, new_psr);
    }

    return;
}


/*
 * In physical mode, inserting a tc/tr entry for region 0 or 4 uses
 * RID[0]/RID[4], which are reserved for physical mode emulation.
 * However, what those inserted tc/tr entries want is the rid for
 * virtual mode. So the original virtual rid needs to be restored
 * before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */

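/*
 * Presumed call pattern in the emulation paths for the operations listed
 * above (the callers live outside this file):
 *
 *     prepare_if_physical_mode(vcpu);   // temporarily restore virtual RIDs
 *     ... insert/purge/translate using the guest's virtual rid ...
 *     recover_if_physical_mode(vcpu);   // back to physical-emulation RIDs
 */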
void
prepare_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu)) {
        vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
        switch_to_virtual_rid(vcpu);
    }
    return;
}

/* Recover always follows prepare */
void
recover_if_physical_mode(VCPU *vcpu)
{
    if (is_physical_mode(vcpu))
        switch_to_physical_rid(vcpu);
    vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
    return;
}