/* -*- Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
/*
 * vmx_virt.c:
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Fred yang (fred.yang@intel.com)
 * Shaofan Li (Susue Li) <susie.li@intel.com>
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 */
#include <asm/bundle.h>
#include <asm/vmx_vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h> // Debug only
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/smp.h>
#include <asm/vmx.h>
#include <asm/virt_event.h>
#include <asm/vmx_phy_mode.h>

#ifdef BYPASS_VMAL_OPCODE
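/*
 * Decode a privileged/virtualization-sensitive instruction into one of the
 * EVENT_* causes that vmx_emulate() dispatches on, based on the bundle slot
 * type and the instruction's major opcode and extension fields.  Only built
 * when BYPASS_VMAL_OPCODE is defined, i.e. when the cause reported with the
 * virtualization fault is ignored and the bundle is decoded in software.
 */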
static void
ia64_priv_decoder(IA64_SLOT_TYPE slot_type, INST64 inst, u64 * cause)
{
    *cause=0;
    switch (slot_type) {
    case M:
        if (inst.generic.major==0){
            if(inst.M28.x3==0){
                if(inst.M44.x4==6){
                    *cause=EVENT_SSM;
                }else if(inst.M44.x4==7){
                    *cause=EVENT_RSM;
                }else if(inst.M30.x4==8&&inst.M30.x2==2){
                    *cause=EVENT_MOV_TO_AR_IMM;
                }
            }
        }
        else if(inst.generic.major==1){
            if(inst.M28.x3==0){
                if(inst.M32.x6==0x2c){
                    *cause=EVENT_MOV_TO_CR;
                }else if(inst.M33.x6==0x24){
                    *cause=EVENT_MOV_FROM_CR;
                }else if(inst.M35.x6==0x2d){
                    *cause=EVENT_MOV_TO_PSR;
                }else if(inst.M36.x6==0x25){
                    *cause=EVENT_MOV_FROM_PSR;
                }else if(inst.M29.x6==0x2A){
                    *cause=EVENT_MOV_TO_AR;
                }else if(inst.M31.x6==0x22){
                    *cause=EVENT_MOV_FROM_AR;
                }else if(inst.M45.x6==0x09){
                    *cause=EVENT_PTC_L;
                }else if(inst.M45.x6==0x0A){
                    *cause=EVENT_PTC_G;
                }else if(inst.M45.x6==0x0B){
                    *cause=EVENT_PTC_GA;
                }else if(inst.M45.x6==0x0C){
                    *cause=EVENT_PTR_D;
                }else if(inst.M45.x6==0x0D){
                    *cause=EVENT_PTR_I;
                }else if(inst.M46.x6==0x1A){
                    *cause=EVENT_THASH;
                }else if(inst.M46.x6==0x1B){
                    *cause=EVENT_TTAG;
                }else if(inst.M46.x6==0x1E){
                    *cause=EVENT_TPA;
                }else if(inst.M46.x6==0x1F){
                    *cause=EVENT_TAK;
                }else if(inst.M47.x6==0x34){
                    *cause=EVENT_PTC_E;
                }else if(inst.M41.x6==0x2E){
                    *cause=EVENT_ITC_D;
                }else if(inst.M41.x6==0x2F){
                    *cause=EVENT_ITC_I;
                }else if(inst.M42.x6==0x00){
                    *cause=EVENT_MOV_TO_RR;
                }else if(inst.M42.x6==0x01){
                    *cause=EVENT_MOV_TO_DBR;
                }else if(inst.M42.x6==0x02){
                    *cause=EVENT_MOV_TO_IBR;
                }else if(inst.M42.x6==0x03){
                    *cause=EVENT_MOV_TO_PKR;
                }else if(inst.M42.x6==0x04){
                    *cause=EVENT_MOV_TO_PMC;
                }else if(inst.M42.x6==0x05){
                    *cause=EVENT_MOV_TO_PMD;
                }else if(inst.M42.x6==0x0E){
                    *cause=EVENT_ITR_D;
                }else if(inst.M42.x6==0x0F){
                    *cause=EVENT_ITR_I;
                }else if(inst.M43.x6==0x10){
                    *cause=EVENT_MOV_FROM_RR;
                }else if(inst.M43.x6==0x11){
                    *cause=EVENT_MOV_FROM_DBR;
                }else if(inst.M43.x6==0x12){
                    *cause=EVENT_MOV_FROM_IBR;
                }else if(inst.M43.x6==0x13){
                    *cause=EVENT_MOV_FROM_PKR;
                }else if(inst.M43.x6==0x14){
                    *cause=EVENT_MOV_FROM_PMC;
                /*
                }else if(inst.M43.x6==0x15){
                    *cause=EVENT_MOV_FROM_PMD;
                */
                }else if(inst.M43.x6==0x17){
                    *cause=EVENT_MOV_FROM_CPUID;
                }
            }
        }
        break;
    case B:
        if(inst.generic.major==0){
            if(inst.B8.x6==0x02){
                *cause=EVENT_COVER;
            }else if(inst.B8.x6==0x08){
                *cause=EVENT_RFI;
            }else if(inst.B8.x6==0x0c){
                *cause=EVENT_BSW_0;
            }else if(inst.B8.x6==0x0d){
                *cause=EVENT_BSW_1;
            }
        }
    case I:
    case F:
    case L:
    case ILLEGAL:
        break;
    }
}
#endif

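/*
 * rsm/ssm: the 24-bit PSR mask is split across the i, i2 and imm fields of
 * the M44 format; reassemble it before clearing/setting the guest PSR bits.
 */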
static IA64FAULT vmx_emul_rsm(VCPU *vcpu, INST64 inst)
{
    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    return vmx_vcpu_reset_psr_sm(vcpu,imm24);
}

static IA64FAULT vmx_emul_ssm(VCPU *vcpu, INST64 inst)
{
    u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
    return vmx_vcpu_set_psr_sm(vcpu,imm24);
}

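/*
 * mov rX=psr: only PSR bits {0..31} and {35,36} are handed back to the
 * guest; all other bits read as zero.
 */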
static IA64FAULT vmx_emul_mov_from_psr(VCPU *vcpu, INST64 inst)
{
    u64 tgt = inst.M33.r1;
    u64 val;

    /*
    if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
        return vcpu_set_gr(vcpu, tgt, val);
    else return fault;
    */
    val = vmx_vcpu_get_psr(vcpu);
    val = (val & MASK(0, 32)) | (val & MASK(35, 2));
    return vcpu_set_gr(vcpu, tgt, val, 0);
}

/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
static IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
{
    u64 val;

    if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
        panic_domain(vcpu_regs(vcpu),"get_psr nat bit fault\n");

    return vmx_vcpu_set_psr_l(vcpu, val);
}


/**************************************************************************
 Privileged operation emulation routines
**************************************************************************/

static IA64FAULT vmx_emul_rfi(VCPU *vcpu, INST64 inst)
{
    IA64_PSR vpsr;
    REGS *regs;
#ifdef CHECK_FAULT
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    regs=vcpu_regs(vcpu);
    vpsr.val=regs->cr_ipsr;
    if ( vpsr.is == 1 ) {
        panic_domain(regs,"We do not support IA-32 instructions yet");
    }

    return vmx_vcpu_rfi(vcpu);
}

static IA64FAULT vmx_emul_bsw0(VCPU *vcpu, INST64 inst)
{
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return vcpu_bsw0(vcpu);
}

static IA64FAULT vmx_emul_bsw1(VCPU *vcpu, INST64 inst)
{
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return vcpu_bsw1(vcpu);
}

static IA64FAULT vmx_emul_cover(VCPU *vcpu, INST64 inst)
{
    return vmx_vcpu_cover(vcpu);
}

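/*
 * ptc.l/ptc.g/ptc.ga emulation: r3 carries the purge address and bits 2..7
 * of r2 carry the page-size field handed down to the vmx_vcpu_ptc_*
 * handlers; ptc.e takes only the address in r3.
 */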
static IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;

    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return vmx_vcpu_ptc_l(vcpu,r3,bits(r2,2,7));
}

static IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
{
    u64 r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;

    vpsr.val=vmx_vcpu_get_psr(vcpu);
    ISR isr;
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M47.r3,&r3)){
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
    return vmx_vcpu_ptc_e(vcpu,r3);
}

static IA64FAULT vmx_emul_ptc_g(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return vmx_vcpu_ptc_g(vcpu,r3,bits(r2,2,7));
}

static IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
        ISR isr;
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,r3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return vmx_vcpu_ptc_ga(vcpu,r3,bits(r2,2,7));
}

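/*
 * Common NaT/privilege/address checks shared by the ptr.d and ptr.i
 * emulation paths; on success the purge address (r3) and page-size
 * operand (r2) are returned through pr3/pr2.
 */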
static IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
{
    IA64FAULT ret1, ret2;

#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r3,pr3);
    ret2 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pr2);
#ifdef VMAL_NO_FAULT_CHECK
    if ( ret1 != IA64_NO_FAULT || ret2 != IA64_NO_FAULT ) {
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu,*pr3) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

static IA64FAULT vmx_emul_ptr_d(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
        return IA64_FAULT;
    return vmx_vcpu_ptr_d(vcpu,r3,bits(r2,2,7));
}

static IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INST64 inst)
{
    u64 r2,r3;
    if ( ptr_fault_check(vcpu, inst, &r2, &r3 ) == IA64_FAULT )
        return IA64_FAULT;
    return vmx_vcpu_ptr_i(vcpu,r3,bits(r2,2,7));
}


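/*
 * thash/ttag: compute the VHPT hash address / translation tag for the
 * guest virtual address in r3 and deposit the result into r1.
 */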
static IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(unimplemented_gva(vcpu, r3)){
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_thash(vcpu, r3, &r1);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}


static IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
#endif
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(unimplemented_gva(vcpu, r3)){
        vcpu_set_gr(vcpu, inst.M46.r1, 0, 1);
        return IA64_NO_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_ttag(vcpu, r3, &r1);
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}


static IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(vpsr.cpl!=0){
        visr.val=0;
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if (unimplemented_gva(vcpu,r3) ) {
        // inject unimplemented_data_address_fault
        visr.val = set_isr_ei_ni(vcpu);
        visr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, visr.val);
        // FAULT_UNIMPLEMENTED_DATA_ADDRESS.
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT

    if(vmx_vcpu_tpa(vcpu, r3, &r1)){
        return IA64_FAULT;
    }
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}

static IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
{
    u64 r1,r3;
#ifdef CHECK_FAULT
    ISR visr;
    IA64_PSR vpsr;
    int fault=IA64_NO_FAULT;
    visr.val=0;
    if(check_target_register(vcpu, inst.M46.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(vpsr.cpl!=0){
        vcpu_set_isr(vcpu, visr.val);
        return IA64_FAULT;
    }
#endif
    if(vcpu_get_gr_nat(vcpu, inst.M46.r3, &r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,1);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif
    }
    if(vmx_vcpu_tak(vcpu, r3, &r1)){
        return IA64_FAULT;
    }
    vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
    return(IA64_NO_FAULT);
}


/************************************
 * Insert translation register/cache
 ************************************/

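/*
 * itr.d/itr.i: r3 selects the TR slot and r2 supplies the pte; the itir
 * and ifa values are read from the corresponding control registers before
 * the insert is forwarded to vmx_vcpu_itr_{d,i}().
 */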
static IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte, slot;
#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic ) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    ISR isr;
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if(is_reserved_rr_register(vcpu, slot)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    if (vcpu_get_itir(vcpu,&itir)){
        return(IA64_FAULT);
    }
    if (vcpu_get_ifa(vcpu,&ifa)){
        return(IA64_FAULT);
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_itir_field(vcpu, itir)) {
        // TODO
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu,ifa) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    return (vmx_vcpu_itr_d(vcpu,slot,pte,itir,ifa));
}

static IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte, slot;
#ifdef VMAL_NO_FAULT_CHECK
    ISR isr;
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic ) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&slot)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&pte)){
#ifdef VMAL_NO_FAULT_CHECK
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif // VMAL_NO_FAULT_CHECK
    }
#ifdef VMAL_NO_FAULT_CHECK
    if(is_reserved_rr_register(vcpu, slot)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    if (vcpu_get_itir(vcpu,&itir)){
        return(IA64_FAULT);
    }
    if (vcpu_get_ifa(vcpu,&ifa)){
        return(IA64_FAULT);
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (is_reserved_itir_field(vcpu, itir)) {
        // TODO
        return IA64_FAULT;
    }
    if (unimplemented_gva(vcpu,ifa) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    return (vmx_vcpu_itr_i(vcpu,slot,pte,itir,ifa));
}

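/*
 * Shared operand/fault checks for itc.d and itc.i: the pte comes from r2,
 * while itir and ifa are read from the control registers.
 */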
static IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst,
                                 u64 *itir, u64 *ifa, u64 *pte)
{
    IA64FAULT ret1;

#ifdef VMAL_NO_FAULT_CHECK
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.ic ) {
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }

    u64 fault;
    ISR isr;
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    ret1 = vcpu_get_gr_nat(vcpu,inst.M45.r2,pte);
#ifdef VMAL_NO_FAULT_CHECK
    if( ret1 != IA64_NO_FAULT ){
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK

    if (vcpu_get_itir(vcpu,itir)){
        return(IA64_FAULT);
    }
    if (vcpu_get_ifa(vcpu,ifa)){
        return(IA64_FAULT);
    }
#ifdef VMAL_NO_FAULT_CHECK
    if (unimplemented_gva(vcpu,*ifa) ) {
        isr.val = set_isr_ei_ni(vcpu);
        isr.code = IA64_RESERVED_REG_FAULT;
        vcpu_set_isr(vcpu, isr.val);
        unimpl_daddr(vcpu);
        return IA64_FAULT;
    }
#endif // VMAL_NO_FAULT_CHECK
    return IA64_NO_FAULT;
}

static IA64FAULT vmx_emul_itc_d(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte;

    if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
        return IA64_FAULT;
    }

    return (vmx_vcpu_itc_d(vcpu,pte,itir,ifa));
}

static IA64FAULT vmx_emul_itc_i(VCPU *vcpu, INST64 inst)
{
    u64 itir, ifa, pte;

    if ( itc_fault_check(vcpu, inst, &itir, &ifa, &pte) == IA64_FAULT ) {
        return IA64_FAULT;
    }

    return (vmx_vcpu_itc_i(vcpu,pte,itir,ifa));

}

/*************************************
 * Moves to semi-privileged registers
 *************************************/

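/*
 * Only ar.itc (AR 44) is virtualized in this path; a guest access to any
 * other application register here brings the domain down.
 */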
static IA64FAULT vmx_emul_mov_to_ar_imm(VCPU *vcpu, INST64 inst)
{
    // I27 and M30 are identical for these fields
    u64 imm;

    if(inst.M30.ar3!=44){
        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    }
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(inst.M30.s){
        imm = -inst.M30.imm;
    }else{
        imm = inst.M30.imm;
    }
    return (vmx_vcpu_set_itc(vcpu, imm));
}

static IA64FAULT vmx_emul_mov_to_ar_reg(VCPU *vcpu, INST64 inst)
{
    // I26 and M29 are identical for these fields
    u64 r2;
    if(inst.M29.ar3!=44){
        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    }
    if(vcpu_get_gr_nat(vcpu,inst.M29.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    return (vmx_vcpu_set_itc(vcpu, r2));
}


static IA64FAULT vmx_emul_mov_from_ar_reg(VCPU *vcpu, INST64 inst)
{
    // I28 and M31 are identical for these fields
    u64 r1;
    if(inst.M31.ar3!=44){
        panic_domain(vcpu_regs(vcpu),"Can't support ar register other than itc");
    }
#ifdef CHECK_FAULT
    if(check_target_register(vcpu,inst.M31.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.si && vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    vmx_vcpu_get_itc(vcpu,&r1);
    vcpu_set_gr(vcpu,inst.M31.r1,r1,0);
    return IA64_NO_FAULT;
}


/********************************
 * Moves to privileged registers
 ********************************/

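/*
 * mov-to indirect register file emulation (rr, pkr, dbr, ibr, pmc, pmd):
 * r3 selects the register index and r2 supplies the new value.
 */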
static IA64FAULT vmx_emul_mov_to_pkr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_pkr(vcpu,r3,r2));
}

static IA64FAULT vmx_emul_mov_to_rr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_rr(vcpu,r3,r2));
}

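/*
 * Guest writes to the debug/instruction breakpoint registers are not
 * virtualized yet: note the early return below, which silently ignores
 * them instead of forwarding to vmx_vcpu_set_dbr/set_ibr.
 */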
static IA64FAULT vmx_emul_mov_to_dbr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
    return IA64_NO_FAULT;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_dbr(vcpu,r3,r2));
}

static IA64FAULT vmx_emul_mov_to_ibr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
    return IA64_NO_FAULT;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_ibr(vcpu,r3,r2));
}

static IA64FAULT vmx_emul_mov_to_pmc(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_pmc(vcpu,r3,r2));
}

static IA64FAULT vmx_emul_mov_to_pmd(VCPU *vcpu, INST64 inst)
{
    u64 r3,r2;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M42.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M42.r2,&r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
    return (vmx_vcpu_set_pmd(vcpu,r3,r2));
}


/**********************************
 * Moves from privileged registers
 **********************************/

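/*
 * mov-from indirect register file emulation (rr, pkr, dbr, ibr, pmc,
 * cpuid): r3 holds the register index and the value read is deposited
 * into the target general register r1.
 */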
static IA64FAULT vmx_emul_mov_from_rr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_rr_register(vcpu,r3>>VRN_SHIFT)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
    }
#endif //CHECK_FAULT
    vcpu_get_rr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

static IA64FAULT vmx_emul_mov_from_pkr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_pkr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

static IA64FAULT vmx_emul_mov_from_dbr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_dbr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

static IA64FAULT vmx_emul_mov_from_ibr(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_ibr(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

static IA64FAULT vmx_emul_mov_from_pmc(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if (vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }

#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_pmc(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

static IA64FAULT vmx_emul_mov_from_cpuid(VCPU *vcpu, INST64 inst)
{
    u64 r3,r1;
#ifdef CHECK_FAULT
    if(check_target_register(vcpu, inst.M43.r1)){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu,inst.M43.r3,&r3)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if(is_reserved_indirect_register(vcpu,r3)){
        set_rsv_reg_field_isr(vcpu);
        rsv_reg_field(vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    vmx_vcpu_get_cpuid(vcpu,r3,&r1);
    return vcpu_set_gr(vcpu, inst.M43.r1, r1,0);
}

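/*
 * mov cr[cr3]=r2: dispatch on the control register number.  CRs with side
 * effects go through their vcpu_set_xxx / vmx_vcpu_set_xxx handlers; any
 * other CR is simply stored into the VPD copy of the register file.
 */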
static IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
{
    u64 r2;
    extern u64 cr_igfld_mask(int index, u64 value);
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(is_reserved_cr(inst.M32.cr3)||(vpsr.ic&&is_interruption_control_cr(inst.M32.cr3))){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT
    if(vcpu_get_gr_nat(vcpu, inst.M32.r2, &r2)){
#ifdef CHECK_FAULT
        set_isr_reg_nat_consumption(vcpu,0,0);
        rnat_comsumption(vcpu);
        return IA64_FAULT;
#endif //CHECK_FAULT
    }
#ifdef CHECK_FAULT
    if ( check_cr_rsv_fields (inst.M32.cr3, r2)) {
        /* Inject Reserved Register/Field fault
         * into guest */
        set_rsv_reg_field_isr (vcpu,0);
        rsv_reg_field (vcpu);
        return IA64_FAULT;
    }
#endif //CHECK_FAULT
    r2 = cr_igfld_mask(inst.M32.cr3,r2);
    switch (inst.M32.cr3) {
    case 0: return vcpu_set_dcr(vcpu,r2);
    case 1: return vmx_vcpu_set_itm(vcpu,r2);
    case 2: return vmx_vcpu_set_iva(vcpu,r2);
    case 8: return vmx_vcpu_set_pta(vcpu,r2);
    case 16:return vcpu_set_ipsr(vcpu,r2);
    case 17:return vcpu_set_isr(vcpu,r2);
    case 19:return vcpu_set_iip(vcpu,r2);
    case 20:return vcpu_set_ifa(vcpu,r2);
    case 21:return vcpu_set_itir(vcpu,r2);
    case 22:return vcpu_set_iipa(vcpu,r2);
    case 23:return vcpu_set_ifs(vcpu,r2);
    case 24:return vcpu_set_iim(vcpu,r2);
    case 25:return vcpu_set_iha(vcpu,r2);
    case 64:printk("SET LID to 0x%lx\n", r2);
            return IA64_NO_FAULT;
    case 65:return IA64_NO_FAULT;
    case 66:return vmx_vcpu_set_tpr(vcpu,r2);
    case 67:return vmx_vcpu_set_eoi(vcpu,r2);
    case 68:return IA64_NO_FAULT;
    case 69:return IA64_NO_FAULT;
    case 70:return IA64_NO_FAULT;
    case 71:return IA64_NO_FAULT;
    case 72:return vmx_vcpu_set_itv(vcpu,r2);
    case 73:return vmx_vcpu_set_pmv(vcpu,r2);
    case 74:return vmx_vcpu_set_cmcv(vcpu,r2);
    case 80:return vmx_vcpu_set_lrr0(vcpu,r2);
    case 81:return vmx_vcpu_set_lrr1(vcpu,r2);
    default:VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
            return IA64_NO_FAULT;
    }
}


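/*
 * Helpers for mov-from-cr: read control register 'cr' through the matching
 * vcpu/vmx_vcpu accessor and, on success, write the value into the target
 * general register 'tgt'; otherwise propagate the fault.
 */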
#define cr_get(cr) \
    ((fault=vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
        vcpu_set_gr(vcpu, tgt, val,0):fault;

#define vmx_cr_get(cr) \
    ((fault=vmx_vcpu_get_##cr(vcpu,&val))==IA64_NO_FAULT)?\
        vcpu_set_gr(vcpu, tgt, val,0):fault;

static IA64FAULT vmx_emul_mov_from_cr(VCPU *vcpu, INST64 inst)
{
    u64 tgt = inst.M33.r1;
    u64 val;
    IA64FAULT fault;
#ifdef CHECK_FAULT
    IA64_PSR vpsr;
    vpsr.val=vmx_vcpu_get_psr(vcpu);
    if(is_reserved_cr(inst.M33.cr3)||is_read_only_cr(inst.M33.cr3)||
        (vpsr.ic&&is_interruption_control_cr(inst.M33.cr3))){
        set_illegal_op_isr(vcpu);
        illegal_op(vcpu);
        return IA64_FAULT;
    }
    if ( vpsr.cpl != 0) {
        /* Inject Privileged Operation fault into guest */
        set_privileged_operation_isr (vcpu, 0);
        privilege_op (vcpu);
        return IA64_FAULT;
    }
#endif // CHECK_FAULT

//  from_cr_cnt[inst.M33.cr3]++;
    switch (inst.M33.cr3) {
    case 0: return cr_get(dcr);
    case 1: return vmx_cr_get(itm);
    case 2: return vmx_cr_get(iva);
    case 8: return vmx_cr_get(pta);
    case 16:return cr_get(ipsr);
    case 17:return cr_get(isr);
    case 19:return cr_get(iip);
    case 20:return cr_get(ifa);
    case 21:return cr_get(itir);
    case 22:return cr_get(iipa);
    case 23:return cr_get(ifs);
    case 24:return cr_get(iim);
    case 25:return cr_get(iha);
    case 64:return vmx_cr_get(lid);
    case 65:
        vmx_vcpu_get_ivr(vcpu,&val);
        return vcpu_set_gr(vcpu,tgt,val,0);
    case 66:return vmx_cr_get(tpr);
    case 67:return vcpu_set_gr(vcpu,tgt,0L,0);
    case 68:return vmx_cr_get(irr0);
    case 69:return vmx_cr_get(irr1);
    case 70:return vmx_cr_get(irr2);
    case 71:return vmx_cr_get(irr3);
    case 72:return vmx_cr_get(itv);
    case 73:return vmx_cr_get(pmv);
    case 74:return vmx_cr_get(cmcv);
    case 80:return vmx_cr_get(lrr0);
    case 81:return vmx_cr_get(lrr1);
    default: return IA64_NO_FAULT;
    }
}


//#define BYPASS_VMAL_OPCODE
extern IA64_SLOT_TYPE slot_types[0x20][3];
unsigned long
__vmx_get_domain_bundle(u64 iip, IA64_BUNDLE *pbundle)
{
    return fetch_code(current, iip, pbundle);
}

/** Emulate a privileged operation.
 *
 * @param vcpu virtual cpu
 * @cause the reason that caused the virtualization fault
 * @opcode the instruction code that caused the virtualization fault
 */

void
vmx_emulate(VCPU *vcpu, REGS *regs)
{
    IA64FAULT status;
    INST64 inst;
    u64 iip, cause, opcode;
    iip = regs->cr_iip;
    cause = VMX(vcpu,cause);
    opcode = VMX(vcpu,opcode);

#ifdef VTLB_DEBUG
    check_vtlb_sanity(vmx_vcpu_get_vtlb(vcpu));
    dump_vtlb(vmx_vcpu_get_vtlb(vcpu));
#endif
#if 0
    if ( (cause == 0xff && opcode == 0x1e000000000) || cause == 0 ) {
        printk ("VMAL decode error: cause - %lx; op - %lx\n",
                cause, opcode );
        return;
    }
#endif
#ifdef BYPASS_VMAL_OPCODE
    // make a local copy of the bundle containing the privop
    IA64_BUNDLE bundle;
    int slot;
    IA64_SLOT_TYPE slot_type;
    IA64_PSR vpsr;
    __vmx_get_domain_bundle(iip, &bundle);
    slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
    if (!slot) inst.inst = bundle.slot0;
    else if (slot == 1)
        inst.inst = bundle.slot1a + (bundle.slot1b<<18);
    else if (slot == 2) inst.inst = bundle.slot2;
    else printk("priv_handle_op: illegal slot: %d\n", slot);
    slot_type = slot_types[bundle.template][slot];
    ia64_priv_decoder(slot_type, inst, &cause);
    if(cause==0){
        panic_domain(regs,"This instruction at 0x%lx slot %d can't be virtualized", iip, slot);
    }
#else
    inst.inst=opcode;
#endif /* BYPASS_VMAL_OPCODE */
    /*
     * Switch to actual virtual rid in rr0 and rr4,
     * which is required by some tlb related instructions.
     */
    prepare_if_physical_mode(vcpu);

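    /*
     * Dispatch on the decoded cause: bump the matching perf counter and run
     * the per-instruction emulation handler; the handler's IA64FAULT result
     * decides below whether the guest iip is advanced.
     */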
    switch(cause) {
    case EVENT_RSM:
        perfc_incr(vmx_rsm);
        status=vmx_emul_rsm(vcpu, inst);
        break;
    case EVENT_SSM:
        perfc_incr(vmx_ssm);
        status=vmx_emul_ssm(vcpu, inst);
        break;
    case EVENT_MOV_TO_PSR:
        perfc_incr(vmx_mov_to_psr);
        status=vmx_emul_mov_to_psr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PSR:
        perfc_incr(vmx_mov_from_psr);
        status=vmx_emul_mov_from_psr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_CR:
        perfc_incr(vmx_mov_from_cr);
        status=vmx_emul_mov_from_cr(vcpu, inst);
        break;
    case EVENT_MOV_TO_CR:
        perfc_incr(vmx_mov_to_cr);
        status=vmx_emul_mov_to_cr(vcpu, inst);
        break;
    case EVENT_BSW_0:
        perfc_incr(vmx_bsw0);
        status=vmx_emul_bsw0(vcpu, inst);
        break;
    case EVENT_BSW_1:
        perfc_incr(vmx_bsw1);
        status=vmx_emul_bsw1(vcpu, inst);
        break;
    case EVENT_COVER:
        perfc_incr(vmx_cover);
        status=vmx_emul_cover(vcpu, inst);
        break;
    case EVENT_RFI:
        perfc_incr(vmx_rfi);
        status=vmx_emul_rfi(vcpu, inst);
        break;
    case EVENT_ITR_D:
        perfc_incr(vmx_itr_d);
        status=vmx_emul_itr_d(vcpu, inst);
        break;
    case EVENT_ITR_I:
        perfc_incr(vmx_itr_i);
        status=vmx_emul_itr_i(vcpu, inst);
        break;
    case EVENT_PTR_D:
        perfc_incr(vmx_ptr_d);
        status=vmx_emul_ptr_d(vcpu, inst);
        break;
    case EVENT_PTR_I:
        perfc_incr(vmx_ptr_i);
        status=vmx_emul_ptr_i(vcpu, inst);
        break;
    case EVENT_ITC_D:
        perfc_incr(vmx_itc_d);
        status=vmx_emul_itc_d(vcpu, inst);
        break;
    case EVENT_ITC_I:
        perfc_incr(vmx_itc_i);
        status=vmx_emul_itc_i(vcpu, inst);
        break;
    case EVENT_PTC_L:
        perfc_incr(vmx_ptc_l);
        status=vmx_emul_ptc_l(vcpu, inst);
        break;
    case EVENT_PTC_G:
        perfc_incr(vmx_ptc_g);
        status=vmx_emul_ptc_g(vcpu, inst);
        break;
    case EVENT_PTC_GA:
        perfc_incr(vmx_ptc_ga);
        status=vmx_emul_ptc_ga(vcpu, inst);
        break;
    case EVENT_PTC_E:
        perfc_incr(vmx_ptc_e);
        status=vmx_emul_ptc_e(vcpu, inst);
        break;
    case EVENT_MOV_TO_RR:
        perfc_incr(vmx_mov_to_rr);
        status=vmx_emul_mov_to_rr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_RR:
        perfc_incr(vmx_mov_from_rr);
        status=vmx_emul_mov_from_rr(vcpu, inst);
        break;
    case EVENT_THASH:
        perfc_incr(vmx_thash);
        status=vmx_emul_thash(vcpu, inst);
        break;
    case EVENT_TTAG:
        perfc_incr(vmx_ttag);
        status=vmx_emul_ttag(vcpu, inst);
        break;
    case EVENT_TPA:
        perfc_incr(vmx_tpa);
        status=vmx_emul_tpa(vcpu, inst);
        break;
    case EVENT_TAK:
        perfc_incr(vmx_tak);
        status=vmx_emul_tak(vcpu, inst);
        break;
    case EVENT_MOV_TO_AR_IMM:
        perfc_incr(vmx_mov_to_ar_imm);
        status=vmx_emul_mov_to_ar_imm(vcpu, inst);
        break;
    case EVENT_MOV_TO_AR:
        perfc_incr(vmx_mov_to_ar_reg);
        status=vmx_emul_mov_to_ar_reg(vcpu, inst);
        break;
    case EVENT_MOV_FROM_AR:
        perfc_incr(vmx_mov_from_ar_reg);
        status=vmx_emul_mov_from_ar_reg(vcpu, inst);
        break;
    case EVENT_MOV_TO_DBR:
        perfc_incr(vmx_mov_to_dbr);
        status=vmx_emul_mov_to_dbr(vcpu, inst);
        break;
    case EVENT_MOV_TO_IBR:
        perfc_incr(vmx_mov_to_ibr);
        status=vmx_emul_mov_to_ibr(vcpu, inst);
        break;
    case EVENT_MOV_TO_PMC:
        perfc_incr(vmx_mov_to_pmc);
        status=vmx_emul_mov_to_pmc(vcpu, inst);
        break;
    case EVENT_MOV_TO_PMD:
        perfc_incr(vmx_mov_to_pmd);
        status=vmx_emul_mov_to_pmd(vcpu, inst);
        break;
    case EVENT_MOV_TO_PKR:
        perfc_incr(vmx_mov_to_pkr);
        status=vmx_emul_mov_to_pkr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_DBR:
        perfc_incr(vmx_mov_from_dbr);
        status=vmx_emul_mov_from_dbr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_IBR:
        perfc_incr(vmx_mov_from_ibr);
        status=vmx_emul_mov_from_ibr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PMC:
        perfc_incr(vmx_mov_from_pmc);
        status=vmx_emul_mov_from_pmc(vcpu, inst);
        break;
    case EVENT_MOV_FROM_PKR:
        perfc_incr(vmx_mov_from_pkr);
        status=vmx_emul_mov_from_pkr(vcpu, inst);
        break;
    case EVENT_MOV_FROM_CPUID:
        perfc_incr(vmx_mov_from_cpuid);
        status=vmx_emul_mov_from_cpuid(vcpu, inst);
        break;
    case EVENT_VMSW:
        printk ("Unimplemented instruction %ld\n", cause);
        status=IA64_FAULT;
        break;
    default:
        panic_domain(regs,"unknown cause %ld, iip: %lx, ipsr: %lx\n",
                     cause,regs->cr_iip,regs->cr_ipsr);
        break;
    };

#if 0
    if (status == IA64_FAULT)
        panic("Emulation failed with cause %d:\n", cause);
#endif

    if ( status == IA64_NO_FAULT && cause !=EVENT_RFI ) {
        vcpu_increment_iip(vcpu);
    }

    recover_if_physical_mode(vcpu);
    return;

}

---|