/*
 * Privileged operation "API" handling functions.
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/processor.h>
#include <asm/delay.h>		// Debug only
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/bundle.h>
#include <xen/perfc.h>

long priv_verbose = 0;
unsigned long privop_trace = 0;

/* Set to 1 to handle privified instructions from the privify tool. */
#ifndef CONFIG_PRIVIFY
static const int privify_en = 0;
#else
static const int privify_en = 1;
#endif
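
/*
 * Privified instructions encode a privileged operation in a form that
 * traps cheaply. As the NOTEs below describe, the convention assumed
 * throughout this file is that the privify tool shifts a general
 * register operand up by 64, so an operand gr > 63 really means
 * r(gr - 64) and selects the privified variant of the instruction.
 */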

/**************************************************************************
Privileged operation emulation routines
**************************************************************************/

static IA64FAULT priv_rfi(VCPU * vcpu, INST64 inst)
{
	return vcpu_rfi(vcpu);
}

static IA64FAULT priv_bsw0(VCPU * vcpu, INST64 inst)
{
	return vcpu_bsw0(vcpu);
}

static IA64FAULT priv_bsw1(VCPU * vcpu, INST64 inst)
{
	return vcpu_bsw1(vcpu);
}

static IA64FAULT priv_cover(VCPU * vcpu, INST64 inst)
{
	return vcpu_cover(vcpu);
}

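/*
 * For the TLB purge instructions below, the architected "address range"
 * operand in r2 carries log2(range) in bits 7:2, so the decoding these
 * handlers share is (a restatement of the code, not new behavior):
 *
 *	log_range = (r2 & 0xfc) >> 2;	// log2 of the purge size
 *	addr_range = 1 << log_range;	// size in bytes, where needed
 */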
static IA64FAULT priv_ptc_l(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 log_range;

	log_range = ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_l(vcpu, vadr, log_range);
}

static IA64FAULT priv_ptc_e(VCPU * vcpu, INST64 inst)
{
	unsigned int src = inst.M28.r3;

	// NOTE: ptc_e with source gr > 63 is emulated as a fc r(y-64)
	if (privify_en && src > 63)
		return vcpu_fc(vcpu, vcpu_get_gr(vcpu, src - 64));
	return vcpu_ptc_e(vcpu, vcpu_get_gr(vcpu, src));
}

static IA64FAULT priv_ptc_g(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_g(vcpu, vadr, addr_range);
}

static IA64FAULT priv_ptc_ga(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 addr_range;

	addr_range = 1 << ((vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2);
	return vcpu_ptc_ga(vcpu, vadr, addr_range);
}

static IA64FAULT priv_ptr_d(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 log_range;

	log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
	return vcpu_ptr_d(vcpu, vadr, log_range);
}

static IA64FAULT priv_ptr_i(VCPU * vcpu, INST64 inst)
{
	u64 vadr = vcpu_get_gr(vcpu, inst.M45.r3);
	u64 log_range;

	log_range = (vcpu_get_gr(vcpu, inst.M45.r2) & 0xfc) >> 2;
	return vcpu_ptr_i(vcpu, vadr, log_range);
}

static IA64FAULT priv_tpa(VCPU * vcpu, INST64 inst)
{
	u64 padr;
	unsigned int fault;
	unsigned int src = inst.M46.r3;

	// NOTE: tpa with source gr > 63 is emulated as a ttag rx=r(y-64)
	if (privify_en && src > 63)
		fault = vcpu_ttag(vcpu, vcpu_get_gr(vcpu, src - 64), &padr);
	else
		fault = vcpu_tpa(vcpu, vcpu_get_gr(vcpu, src), &padr);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M46.r1, padr, 0);
	else
		return fault;
}

static IA64FAULT priv_tak(VCPU * vcpu, INST64 inst)
{
	u64 key;
	unsigned int fault;
	unsigned int src = inst.M46.r3;

	// NOTE: tak with source gr > 63 is emulated as a thash rx=r(y-64)
	if (privify_en && src > 63)
		fault = vcpu_thash(vcpu, vcpu_get_gr(vcpu, src - 64), &key);
	else
		fault = vcpu_tak(vcpu, vcpu_get_gr(vcpu, src), &key);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M46.r1, key, 0);
	else
		return fault;
}

/************************************
 * Insert translation register/cache
 ************************************/

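/*
 * The insert handlers below read the translation attributes from the
 * guest's shadow cr.itir and cr.ifa rather than from the instruction,
 * mirroring the sequence a guest kernel would use on bare hardware,
 * e.g. (illustrative only, not taken from any particular guest):
 *
 *	mov cr.itir = rX	// page size / protection key
 *	mov cr.ifa = rY		// virtual address to map
 *	itr.d dtr[rS] = rP	// traps here, arrives at priv_itr_d
 */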
static IA64FAULT priv_itr_d(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte, slot;

	//if (!vcpu_get_psr_ic(vcpu))
	//	return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M42.r2);
	slot = vcpu_get_gr(vcpu, inst.M42.r3);

	return vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

static IA64FAULT priv_itr_i(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte, slot;

	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M42.r2);
	slot = vcpu_get_gr(vcpu, inst.M42.r3);

	return vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

static IA64FAULT priv_itc_d(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte;

	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M41.r2);

	return vcpu_itc_d(vcpu, pte, itir, ifa);
}

static IA64FAULT priv_itc_i(VCPU * vcpu, INST64 inst)
{
	u64 fault, itir, ifa, pte;

	//if (!vcpu_get_psr_ic(vcpu)) return IA64_ILLOP_FAULT;
	fault = vcpu_get_itir(vcpu, &itir);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	fault = vcpu_get_ifa(vcpu, &ifa);
	if (fault != IA64_NO_FAULT)
		return IA64_ILLOP_FAULT;
	pte = vcpu_get_gr(vcpu, inst.M41.r2);

	return vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/

static IA64FAULT priv_mov_to_ar_imm(VCPU * vcpu, INST64 inst)
{
	// I27 and M30 are identical for these fields
	u64 ar3 = inst.M30.ar3;
	u64 imm = vcpu_get_gr(vcpu, inst.M30.imm);
	return vcpu_set_ar(vcpu, ar3, imm);
}

static IA64FAULT priv_mov_to_ar_reg(VCPU * vcpu, INST64 inst)
{
	// I26 and M29 are identical for these fields
	u64 ar3 = inst.M29.ar3;

	if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8) {
		// privified mov from kr
		u64 val;
		if (vcpu_get_ar(vcpu, ar3, &val) != IA64_ILLOP_FAULT)
			return vcpu_set_gr(vcpu, inst.M29.r2 - 64, val, 0);
		else
			return IA64_ILLOP_FAULT;
	} else {
		u64 r2 = vcpu_get_gr(vcpu, inst.M29.r2);
		return vcpu_set_ar(vcpu, ar3, r2);
	}
}

/********************************
 * Moves to privileged registers
 ********************************/

static IA64FAULT priv_mov_to_pkr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_pkr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_rr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_rr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_dbr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_dbr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_ibr(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_ibr(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_pmc(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_pmc(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_pmd(VCPU * vcpu, INST64 inst)
{
	u64 r3 = vcpu_get_gr(vcpu, inst.M42.r3);
	u64 r2 = vcpu_get_gr(vcpu, inst.M42.r2);
	return vcpu_set_pmd(vcpu, r3, r2);
}

static IA64FAULT priv_mov_to_cr(VCPU * vcpu, INST64 inst)
{
	u64 val = vcpu_get_gr(vcpu, inst.M32.r2);
	perfc_incra(mov_to_cr, inst.M32.cr3);
	switch (inst.M32.cr3) {
	case 0:
		return vcpu_set_dcr(vcpu, val);
	case 1:
		return vcpu_set_itm(vcpu, val);
	case 2:
		return vcpu_set_iva(vcpu, val);
	case 8:
		return vcpu_set_pta(vcpu, val);
	case 16:
		return vcpu_set_ipsr(vcpu, val);
	case 17:
		return vcpu_set_isr(vcpu, val);
	case 19:
		return vcpu_set_iip(vcpu, val);
	case 20:
		return vcpu_set_ifa(vcpu, val);
	case 21:
		return vcpu_set_itir(vcpu, val);
	case 22:
		return vcpu_set_iipa(vcpu, val);
	case 23:
		return vcpu_set_ifs(vcpu, val);
	case 24:
		return vcpu_set_iim(vcpu, val);
	case 25:
		return vcpu_set_iha(vcpu, val);
	case 64:
		return vcpu_set_lid(vcpu, val);
	case 65:
		return IA64_ILLOP_FAULT;
	case 66:
		return vcpu_set_tpr(vcpu, val);
	case 67:
		return vcpu_set_eoi(vcpu, val);
	case 68:
		return IA64_ILLOP_FAULT;
	case 69:
		return IA64_ILLOP_FAULT;
	case 70:
		return IA64_ILLOP_FAULT;
	case 71:
		return IA64_ILLOP_FAULT;
	case 72:
		return vcpu_set_itv(vcpu, val);
	case 73:
		return vcpu_set_pmv(vcpu, val);
	case 74:
		return vcpu_set_cmcv(vcpu, val);
	case 80:
		return vcpu_set_lrr0(vcpu, val);
	case 81:
		return vcpu_set_lrr1(vcpu, val);
	default:
		return IA64_ILLOP_FAULT;
	}
}

static IA64FAULT priv_rsm(VCPU * vcpu, INST64 inst)
{
	u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
	return vcpu_reset_psr_sm(vcpu, imm24);
}

static IA64FAULT priv_ssm(VCPU * vcpu, INST64 inst)
{
	u64 imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21) | inst.M44.imm;
	return vcpu_set_psr_sm(vcpu, imm24);
}

/**
 * @todo Check for reserved bits and return IA64_RSVDREG_FAULT.
 */
static IA64FAULT priv_mov_to_psr(VCPU * vcpu, INST64 inst)
{
	u64 val = vcpu_get_gr(vcpu, inst.M35.r2);
	return vcpu_set_psr_l(vcpu, val);
}

/**********************************
 * Moves from privileged registers
 **********************************/

static IA64FAULT priv_mov_from_rr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;
	u64 reg;

	reg = vcpu_get_gr(vcpu, inst.M43.r3);
	if (privify_en && inst.M43.r1 > 63) {
		// privified mov from cpuid
		fault = vcpu_get_cpuid(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
	} else {
		fault = vcpu_get_rr(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	}
	return fault;
}

static IA64FAULT priv_mov_from_pkr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_pkr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	else
		return fault;
}

static IA64FAULT priv_mov_from_dbr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_dbr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	else
		return fault;
}

static IA64FAULT priv_mov_from_ibr(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_ibr(vcpu, vcpu_get_gr(vcpu, inst.M43.r3), &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	else
		return fault;
}

static IA64FAULT priv_mov_from_pmc(VCPU * vcpu, INST64 inst)
{
	u64 val;
	IA64FAULT fault;
	u64 reg;

	reg = vcpu_get_gr(vcpu, inst.M43.r3);
	if (privify_en && inst.M43.r1 > 63) {
		// privified mov from pmd
		fault = vcpu_get_pmd(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1 - 64, val, 0);
	} else {
		fault = vcpu_get_pmc(vcpu, reg, &val);
		if (fault == IA64_NO_FAULT)
			return vcpu_set_gr(vcpu, inst.M43.r1, val, 0);
	}
	return fault;
}

#define cr_get(cr) \
	((fault = vcpu_get_##cr(vcpu,&val)) == IA64_NO_FAULT) ? \
		vcpu_set_gr(vcpu, tgt, val, 0) : fault;

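/*
 * Illustrative expansion of the macro above: cr_get(dcr) becomes
 *
 *	((fault = vcpu_get_dcr(vcpu, &val)) == IA64_NO_FAULT) ?
 *		vcpu_set_gr(vcpu, tgt, val, 0) : fault;
 *
 * i.e. read the control register into val and, on success, write it to
 * the instruction's target general register (fault and val are locals
 * of the caller).
 */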
static IA64FAULT priv_mov_from_cr(VCPU * vcpu, INST64 inst)
{
	u64 tgt = inst.M33.r1;
	u64 val;
	IA64FAULT fault;

	perfc_incra(mov_from_cr, inst.M33.cr3);
	switch (inst.M33.cr3) {
	case 0:
		return cr_get(dcr);
	case 1:
		return cr_get(itm);
	case 2:
		return cr_get(iva);
	case 8:
		return cr_get(pta);
	case 16:
		return cr_get(ipsr);
	case 17:
		return cr_get(isr);
	case 19:
		return cr_get(iip);
	case 20:
		return cr_get(ifa);
	case 21:
		return cr_get(itir);
	case 22:
		return cr_get(iipa);
	case 23:
		return cr_get(ifs);
	case 24:
		return cr_get(iim);
	case 25:
		return cr_get(iha);
	case 64:
		return cr_get(lid);
	case 65:
		return cr_get(ivr);
	case 66:
		return cr_get(tpr);
	case 67:
		return vcpu_set_gr(vcpu, tgt, 0L, 0);
	case 68:
		return cr_get(irr0);
	case 69:
		return cr_get(irr1);
	case 70:
		return cr_get(irr2);
	case 71:
		return cr_get(irr3);
	case 72:
		return cr_get(itv);
	case 73:
		return cr_get(pmv);
	case 74:
		return cr_get(cmcv);
	case 80:
		return cr_get(lrr0);
	case 81:
		return cr_get(lrr1);
	default:
		return IA64_ILLOP_FAULT;
	}
	return IA64_ILLOP_FAULT;
}

static IA64FAULT priv_mov_from_psr(VCPU * vcpu, INST64 inst)
{
	u64 tgt = inst.M33.r1;
	u64 val;
	IA64FAULT fault;

	fault = vcpu_get_psr(vcpu, &val);
	if (fault == IA64_NO_FAULT)
		return vcpu_set_gr(vcpu, tgt, val, 0);
	else
		return fault;
}

/**************************************************************************
Privileged operation decode and dispatch routines
**************************************************************************/

static const IA64_SLOT_TYPE slot_types[0x20][3] = {
	{M, I, I}, {M, I, I}, {M, I, I}, {M, I, I},
	{M, I, ILLEGAL}, {M, I, ILLEGAL},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{M, M, I}, {M, M, I}, {M, M, I}, {M, M, I},
	{M, F, I}, {M, F, I},
	{M, M, F}, {M, M, F},
	{M, I, B}, {M, I, B},
	{M, B, B}, {M, B, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{B, B, B}, {B, B, B},
	{M, M, B}, {M, M, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL},
	{M, F, B}, {M, F, B},
	{ILLEGAL, ILLEGAL, ILLEGAL}, {ILLEGAL, ILLEGAL, ILLEGAL}
};
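
/*
 * slot_types is indexed by the 5-bit template field of an instruction
 * bundle (hence the 0x20 rows) and gives the execution-unit type of
 * each of the bundle's three slots; ILLEGAL marks undefined templates.
 */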

// pointer to privileged emulation function
typedef IA64FAULT(*PPEFCN) (VCPU * vcpu, INST64 inst);

static const PPEFCN Mpriv_funcs[64] = {
	priv_mov_to_rr, priv_mov_to_dbr, priv_mov_to_ibr, priv_mov_to_pkr,
	priv_mov_to_pmc, priv_mov_to_pmd, 0, 0,
	0, priv_ptc_l, priv_ptc_g, priv_ptc_ga,
	priv_ptr_d, priv_ptr_i, priv_itr_d, priv_itr_i,
	priv_mov_from_rr, priv_mov_from_dbr, priv_mov_from_ibr,
	priv_mov_from_pkr,
	priv_mov_from_pmc, 0, 0, 0,
	0, 0, 0, 0,
	0, 0, priv_tpa, priv_tak,
	0, 0, 0, 0,
	priv_mov_from_cr, priv_mov_from_psr, 0, 0,
	0, 0, 0, 0,
	priv_mov_to_cr, priv_mov_to_psr, priv_itc_d, priv_itc_i,
	0, 0, 0, 0,
	priv_ptc_e, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};
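
/*
 * Mpriv_funcs is indexed by the 6-bit x6 opcode-extension field of an
 * M-unit instruction (see the x6 = inst.M29.x6 dispatch below); zero
 * entries are opcodes with no emulation handler.
 */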

static IA64FAULT priv_handle_op(VCPU * vcpu, REGS * regs, int privlvl)
{
	IA64_BUNDLE bundle;
	int slot;
	IA64_SLOT_TYPE slot_type;
	INST64 inst;
	PPEFCN pfunc;
	unsigned long ipsr = regs->cr_ipsr;
	u64 iip = regs->cr_iip;
	int x6;

	// make a local copy of the bundle containing the privop
	if (!vcpu_get_domain_bundle(vcpu, regs, iip, &bundle)) {
		//return vcpu_force_data_miss(vcpu, regs->cr_iip);
		return vcpu_force_inst_miss(vcpu, regs->cr_iip);
	}
#if 0
	if (iip == 0xa000000100001820) {
		static int firstpagefault = 1;
		if (firstpagefault) {
			printk("*** First time to domain page fault!\n");
			firstpagefault = 0;
		}
	}
#endif
	if (privop_trace) {
		static long i = 400;
		//if (i > 0) printk("priv_handle_op: at 0x%lx\n", iip);
		if (i > 0)
			printk("priv_handle_op: privop trace at 0x%lx, "
			       "itc=%lx, itm=%lx\n",
			       iip, ia64_get_itc(), ia64_get_itm());
		i--;
	}
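	/*
	 * An IA64 bundle is 128 bits: a 5-bit template followed by three
	 * 41-bit instruction slots, which is what the shifts below unpack
	 * (slot 1 straddles the two 64-bit words):
	 *
	 *   bits   0..4    template
	 *   bits   5..45   slot 0
	 *   bits  46..86   slot 1
	 *   bits  87..127  slot 2
	 */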
	slot = ((struct ia64_psr *)&ipsr)->ri;
	if (!slot)
		inst.inst = (bundle.i64[0] >> 5) & MASK_41;
	else if (slot == 1)
		inst.inst =
		    ((bundle.i64[0] >> 46) | bundle.i64[1] << 18) & MASK_41;
	else if (slot == 2)
		inst.inst = (bundle.i64[1] >> 23) & MASK_41;
	else
		printk("priv_handle_op: illegal slot: %d\n", slot);

	slot_type = slot_types[bundle.template][slot];
	if (priv_verbose) {
		printk("priv_handle_op: checking bundle at 0x%lx "
		       "(op=0x%016lx) slot %d (type=%d)\n",
		       iip, (u64) inst.inst, slot, slot_type);
	}
	if (slot_type == B && inst.generic.major == 0 && inst.B8.x6 == 0x0) {
		// break instr for privified cover
	} else if (privlvl != 2)
		return IA64_ILLOP_FAULT;
	switch (slot_type) {
	case M:
		if (inst.generic.major == 0) {
#if 0
			if (inst.M29.x6 == 0 && inst.M29.x3 == 0) {
				privcnt.cover++;
				return priv_cover(vcpu, inst);
			}
#endif
			if (inst.M29.x3 != 0)
				break;
			if (inst.M30.x4 == 8 && inst.M30.x2 == 2) {
				perfc_incr(mov_to_ar_imm);
				return priv_mov_to_ar_imm(vcpu, inst);
			}
			if (inst.M44.x4 == 6) {
				perfc_incr(ssm);
				return priv_ssm(vcpu, inst);
			}
			if (inst.M44.x4 == 7) {
				perfc_incr(rsm);
				return priv_rsm(vcpu, inst);
			}
			break;
		} else if (inst.generic.major != 1)
			break;
		x6 = inst.M29.x6;
		if (x6 == 0x2a) {
			if (privify_en && inst.M29.r2 > 63 && inst.M29.ar3 < 8)
				perfc_incr(mov_from_ar); // privified mov from kr
			else
				perfc_incr(mov_to_ar_reg);
			return priv_mov_to_ar_reg(vcpu, inst);
		}
		if (inst.M29.x3 != 0)
			break;
		if (!(pfunc = Mpriv_funcs[x6]))
			break;
		if (x6 == 0x1e || x6 == 0x1f) {	// tpa or tak are "special"
			if (privify_en && inst.M46.r3 > 63) {
				if (x6 == 0x1e)
					x6 = 0x1b;
				else
					x6 = 0x1a;
			}
		}
		if (privify_en && x6 == 52 && inst.M28.r3 > 63)
			perfc_incr(fc);
		else if (privify_en && x6 == 16 && inst.M43.r3 > 63)
			perfc_incr(cpuid);
		else
			perfc_incra(misc_privop, x6);
		return (*pfunc) (vcpu, inst);
		break;
	case B:
		if (inst.generic.major != 0)
			break;
		if (inst.B8.x6 == 0x08) {
			IA64FAULT fault;
			perfc_incr(rfi);
			fault = priv_rfi(vcpu, inst);
			if (fault == IA64_NO_FAULT)
				fault = IA64_RFI_IN_PROGRESS;
			return fault;
		}
		if (inst.B8.x6 == 0x0c) {
			perfc_incr(bsw0);
			return priv_bsw0(vcpu, inst);
		}
		if (inst.B8.x6 == 0x0d) {
			perfc_incr(bsw1);
			return priv_bsw1(vcpu, inst);
		}
		if (inst.B8.x6 == 0x0) {
			// break instr for privified cover
			perfc_incr(cover);
			return priv_cover(vcpu, inst);
		}
		break;
	case I:
		if (inst.generic.major != 0)
			break;
#if 0
		if (inst.I26.x6 == 0 && inst.I26.x3 == 0) {
			perfc_incr(cover);
			return priv_cover(vcpu, inst);
		}
#endif
		if (inst.I26.x3 != 0)
			break;	// I26.x3 == I27.x3
		if (inst.I26.x6 == 0x2a) {
			if (privify_en && inst.I26.r2 > 63 && inst.I26.ar3 < 8)
				perfc_incr(mov_from_ar); // privified mov from kr
			else
				perfc_incr(mov_to_ar_reg);
			return priv_mov_to_ar_reg(vcpu, inst);
		}
		if (inst.I27.x6 == 0x0a) {
			perfc_incr(mov_to_ar_imm);
			return priv_mov_to_ar_imm(vcpu, inst);
		}
		break;
	default:
		break;
	}
	//printk("We who are about to die salute you\n");
	printk("priv_handle_op: can't handle privop at 0x%lx (op=0x%016lx) "
	       "slot %d (type=%d), ipsr=0x%lx\n",
	       iip, (u64) inst.inst, slot, slot_type, ipsr);
	//printk("vtop(0x%lx)==0x%lx\n", iip, tr_vtop(iip));
	//thread_mozambique("privop fault\n");
	return IA64_ILLOP_FAULT;
}

/** Emulate a privileged operation.
 *
 * This should probably return 0 on success and the "trap number"
 * (e.g. illegal operation for bad register, priv op for an
 * instruction that isn't allowed, etc.) on "failure"
 *
 * @param vcpu virtual cpu
 * @param regs guest register state
 * @param isr interruption status register (cr.isr) value
 * @return fault
 */
IA64FAULT priv_emulate(VCPU * vcpu, REGS * regs, u64 isr)
{
	IA64FAULT fault;
	u64 ipsr = regs->cr_ipsr;
	u64 isrcode = (isr >> 4) & 0xf;
	int privlvl;

	// handle privops masked as illops? and breaks (6)
	if (isrcode != 1 && isrcode != 2 && isrcode != 0 && isrcode != 6) {
		printk("priv_emulate: isrcode != 0, 1, 2 or 6\n");
		printk("priv_emulate: returning ILLOP, not implemented!\n");
		while (1) ;
		return IA64_ILLOP_FAULT;
	}
	//if (isrcode != 1 && isrcode != 2) return 0;
	privlvl = ia64_get_cpl(ipsr);
	// it's OK for a privified-cover to be executed in user-land
	fault = priv_handle_op(vcpu, regs, privlvl);
	if ((fault == IA64_NO_FAULT) || (fault == IA64_EXTINT_VECTOR)) {
		// success!!
		// update iip/ipsr to point to the next instruction
		(void)vcpu_increment_iip(vcpu);
	}
	if (fault == IA64_ILLOP_FAULT)
		printk("priv_emulate: priv_handle_op fails, "
		       "isr=0x%lx iip=%lx\n", isr, regs->cr_iip);
	return fault;
}

/* hyperprivops are generally executed in assembly (with physical psr.ic off)
 * so this code is primarily used for debugging them */
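/*
 * A guest requests a hyperprivop by executing break with an immediate
 * in (0, HYPERPRIVOP_MAX]; the return value tells the trap handler
 * whether to advance iip past the break (1) or leave it alone (0,
 * e.g. after an rfi, which sets iip itself).
 */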
int ia64_hyperprivop(unsigned long iim, REGS * regs)
{
	struct vcpu *v = current;
	u64 val;
	u64 itir, ifa;

	if (!iim || iim > HYPERPRIVOP_MAX) {
		panic_domain(regs, "bad hyperprivop: iim=%lx, iip=0x%lx\n",
			     iim, regs->cr_iip);
		return 1;
	}
	perfc_incra(slow_hyperprivop, iim);
	switch (iim) {
	case HYPERPRIVOP_RFI:
		vcpu_rfi(v);
		return 0;	// don't update iip
	case HYPERPRIVOP_RSM_DT:
		vcpu_reset_psr_dt(v);
		return 1;
	case HYPERPRIVOP_SSM_DT:
		vcpu_set_psr_dt(v);
		return 1;
	case HYPERPRIVOP_COVER:
		vcpu_cover(v);
		return 1;
	case HYPERPRIVOP_ITC_D:
		vcpu_get_itir(v, &itir);
		vcpu_get_ifa(v, &ifa);
		vcpu_itc_d(v, regs->r8, itir, ifa);
		return 1;
	case HYPERPRIVOP_ITC_I:
		vcpu_get_itir(v, &itir);
		vcpu_get_ifa(v, &ifa);
		vcpu_itc_i(v, regs->r8, itir, ifa);
		return 1;
	case HYPERPRIVOP_SSM_I:
		vcpu_set_psr_i(v);
		return 1;
	case HYPERPRIVOP_GET_IVR:
		vcpu_get_ivr(v, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_GET_TPR:
		vcpu_get_tpr(v, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_TPR:
		vcpu_set_tpr(v, regs->r8);
		return 1;
	case HYPERPRIVOP_EOI:
		vcpu_set_eoi(v, 0L);
		return 1;
	case HYPERPRIVOP_SET_ITM:
		vcpu_set_itm(v, regs->r8);
		return 1;
	case HYPERPRIVOP_THASH:
		vcpu_thash(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_PTC_GA:
		vcpu_ptc_ga(v, regs->r8, (1L << ((regs->r9 & 0xfc) >> 2)));
		return 1;
	case HYPERPRIVOP_ITR_D:
		vcpu_get_itir(v, &itir);
		vcpu_get_ifa(v, &ifa);
		vcpu_itr_d(v, regs->r8, regs->r9, itir, ifa);
		return 1;
	case HYPERPRIVOP_GET_RR:
		vcpu_get_rr(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_RR:
		vcpu_set_rr(v, regs->r8, regs->r9);
		return 1;
	case HYPERPRIVOP_SET_KR:
		vcpu_set_ar(v, regs->r8, regs->r9);
		return 1;
	case HYPERPRIVOP_FC:
		vcpu_fc(v, regs->r8);
		return 1;
	case HYPERPRIVOP_GET_CPUID:
		vcpu_get_cpuid(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_GET_PMD:
		vcpu_get_pmd(v, regs->r8, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_GET_EFLAG:
		vcpu_get_ar(v, 24, &val);
		regs->r8 = val;
		return 1;
	case HYPERPRIVOP_SET_EFLAG:
		vcpu_set_ar(v, 24, regs->r8);
		return 1;
	case HYPERPRIVOP_RSM_BE:
		vcpu_reset_psr_sm(v, IA64_PSR_BE);
		return 1;
	case HYPERPRIVOP_GET_PSR:
		vcpu_get_psr(v, &val);
		regs->r8 = val;
		return 1;
	}
	return 0;
}