/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *	Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

#define IA64_PSR_CPL1	(__IA64_UL(1) << IA64_PSR_CPL1_BIT)
// note IA64_PSR_PK removed from following, why is this necessary?
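/*
 * Machine PSR bits forced on/off in regs->cr_ipsr when an interruption
 * is reflected to the guest.  Note that these are machine PSR settings;
 * the guest's virtual psr.ic and psr.i are tracked separately, in
 * PSCB(v, interrupt_collection_enabled) and the event-channel upcall
 * mask respectively.
 */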
#define DELIVER_PSR_SET	(IA64_PSR_IC | IA64_PSR_I | \
			 IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_CPL1 | \
			 IA64_PSR_IT | IA64_PSR_BN)

#define DELIVER_PSR_CLR	(IA64_PSR_AC | IA64_PSR_DFL | IA64_PSR_DFH | \
			 IA64_PSR_SP | IA64_PSR_DI | IA64_PSR_SI | \
			 IA64_PSR_DB | IA64_PSR_LP | IA64_PSR_TB | \
			 IA64_PSR_CPL | IA64_PSR_MC | IA64_PSR_IS | \
			 IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD | \
			 IA64_PSR_SS | IA64_PSR_RI | IA64_PSR_ED | IA64_PSR_IA)

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// This should never panic the domain... if it does, the stack may have
// been overrun.
void check_bad_nested_interruption(unsigned long isr, struct pt_regs *regs,
                                   unsigned long vector)
{
	struct vcpu *v = current;

	if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
		panic_domain(regs,
		             "psr.dt off, trying to deliver nested dtlb!\n");
	}
	vector &= ~0xf;
	if (vector != IA64_DATA_TLB_VECTOR &&
	    vector != IA64_ALT_DATA_TLB_VECTOR &&
	    vector != IA64_VHPT_TRANS_VECTOR) {
		panic_domain(regs, "psr.ic off, delivering fault=%lx,"
		             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
		             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
		             isr, PSCB(v, iip));
	}
}

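/*
 * Reflect an interruption to the guest: record the interruption state
 * (isr, iip, ipsr, precover ifs) in the PSCB, switch to register bank 0,
 * and redirect execution to the guest's IVT entry for the given vector,
 * with the machine PSR adjusted as on a real interruption delivery.
 */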
void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                          unsigned long vector)
{
	struct vcpu *v = current;

	if (!PSCB(v, interrupt_collection_enabled))
		check_bad_nested_interruption(isr, regs, vector);
	PSCB(v, unat) = regs->ar_unat;	// not sure if this is really needed?
	PSCB(v, precover_ifs) = regs->cr_ifs;
	PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
	vcpu_bsw0(v);
	PSCB(v, isr) = isr;
	PSCB(v, iip) = regs->cr_iip;
	PSCB(v, ifs) = 0;

	regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
	if (PSCB(v, dcr) & IA64_DCR_BE)
		regs->cr_ipsr |= IA64_PSR_BE;

	if (PSCB(v, hpsr_dfh))
		regs->cr_ipsr |= IA64_PSR_DFH;
	PSCB(v, vpsr_dfh) = 0;
	v->vcpu_info->evtchn_upcall_mask = 1;
	PSCB(v, interrupt_collection_enabled) = 0;

	perfc_incra(slow_reflect, vector >> 8);
}

static unsigned long pending_false_positive = 0;

void reflect_extint(struct pt_regs *regs)
{
	unsigned long isr = regs->cr_ipsr & IA64_PSR_RI;
	struct vcpu *v = current;
	static int first_extint = 1;

	if (first_extint) {
		printk("Delivering first extint to domain: isr=0x%lx, "
		       "iip=0x%lx\n", isr, regs->cr_iip);
		first_extint = 0;
	}
	if (vcpu_timer_pending_early(v))
		printk("*#*#*#* about to deliver early timer to domain %d!!\n",
		       v->domain->domain_id);
	PSCB(current, itir) = 0;
	reflect_interruption(isr, regs, IA64_EXTINT_VECTOR);
}

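/*
 * Deliver a pending event-channel upcall: like reflect_interruption(),
 * but execution is redirected to the guest's registered event callback
 * rather than to an IVT entry.
 */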
void reflect_event(void)
{
	struct vcpu *v = current;
	struct pt_regs *regs;
	unsigned long isr;

	if (!event_pending(v))
		return;

	/* Sanity check */
	if (is_idle_vcpu(v)) {
		//printk("WARN: invocation to reflect_event in nested xen\n");
		return;
	}

	regs = vcpu_regs(v);

	isr = regs->cr_ipsr & IA64_PSR_RI;

	if (!PSCB(v, interrupt_collection_enabled))
		printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
		       "isr=%lx,viip=0x%lx\n",
		       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
	PSCB(v, unat) = regs->ar_unat;	// not sure if this is really needed?
	PSCB(v, precover_ifs) = regs->cr_ifs;
	PSCB(v, ipsr) = vcpu_get_ipsr_int_state(v, regs->cr_ipsr);
	vcpu_bsw0(v);
	PSCB(v, isr) = isr;
	PSCB(v, iip) = regs->cr_iip;
	PSCB(v, ifs) = 0;

	regs->cr_iip = v->arch.event_callback_ip;
	regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
	if (PSCB(v, dcr) & IA64_DCR_BE)
		regs->cr_ipsr |= IA64_PSR_BE;

	if (PSCB(v, hpsr_dfh))
		regs->cr_ipsr |= IA64_PSR_DFH;
	PSCB(v, vpsr_dfh) = 0;
	v->vcpu_info->evtchn_upcall_mask = 1;
	PSCB(v, interrupt_collection_enabled) = 0;
}

// ONLY gets called from ia64_leave_kernel
// ONLY call with interrupts disabled?? (else might miss one?)
// NEVER successful if already reflecting a trap/fault because psr.i==0
void deliver_pending_interrupt(struct pt_regs *regs)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	// FIXME: Will this work properly if doing an RFI???
	if (!is_idle_domain(d) && user_mode(regs)) {
		if (vcpu_deliverable_interrupts(v))
			reflect_extint(regs);
		else if (PSCB(v, pending_interruption))
			++pending_false_positive;
	}
}

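/*
 * "Lazy cover": if the guest faulted with psr.ic off, save cr.ifs into
 * the PSCB and clear it in the saved regs, then retry the faulting
 * instruction with cr.ifs off (deferring the guest's cover).
 */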
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
	if (!PSCB(v, interrupt_collection_enabled)) {
		PSCB(v, ifs) = regs->cr_ifs;
		regs->cr_ifs = 0;
		perfc_incr(lazy_cover);
		return 1;	// retry same instruction with cr.ifs off
	}
	return 0;
}

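/*
 * Handle a TLB miss taken while a domain was running: translate the
 * address through the guest TLB/VHPT, insert the corresponding machine
 * translation, and otherwise reflect the fault to the guest (or spin if
 * the fault was Xen's own and no exception-table fixup exists).
 */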
void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
	unsigned long iip = regs->cr_iip, iha;
	// FIXME: should validate address here
	unsigned long pteval;
	unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
	IA64FAULT fault;
	int is_ptc_l_needed = 0;
	u64 logps;

	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA)
	        && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to a speculative load or lfetch.fault;
		 * set the "ed" bit in the psr to ensure forward progress.
		 * (The target register will get a NaT for ld.s; lfetch will
		 * be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

 again:
	fault = vcpu_translate(current, address, is_data, &pteval,
	                       &itir, &iha);
	if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
		struct p2m_entry entry;
		unsigned long m_pteval;
		m_pteval = translate_domain_pte(pteval, address, itir,
		                                &logps, &entry);
		vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
		                 m_pteval, pteval, logps, &entry);
		if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
		    p2m_entry_retry(&entry)) {
			/* The dtlb was purged in the meantime and it was the
			   matching entry.  Undo the work. */
			vcpu_flush_tlb_vhpt_range(address, logps);

			// The stale entry we inserted above may remain in the
			// TLB cache.  Don't purge it now; hope the next itc
			// purges it.
			is_ptc_l_needed = 1;
			goto again;
		}
		return;
	}

	if (is_ptc_l_needed)
		vcpu_ptc_l(current, address, logps);
	if (!user_mode(regs)) {
		/* The fault occurred inside Xen. */
		if (!ia64_done_with_exception(regs)) {
			// This should never happen.  If it does, a region 0
			// address may indicate a bad Xen pointer.
			printk("*** xen_handle_domain_access: exception table"
			       " lookup failed, iip=0x%lx, addr=0x%lx, "
			       "spinning...\n", iip, address);
			panic_domain(regs, "*** xen_handle_domain_access: "
			             "exception table lookup failed, "
			             "iip=0x%lx, addr=0x%lx, spinning...\n",
			             iip, address);
		}
		return;
	}

	if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
		return;

	if (!PSCB(current, interrupt_collection_enabled)) {
		check_bad_nested_interruption(isr, regs, fault);
		//printk("Delivering NESTED DATA TLB fault\n");
		fault = IA64_DATA_NESTED_TLB_VECTOR;
		regs->cr_iip =
		    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
		regs->cr_ipsr =
		    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;

		if (PSCB(current, hpsr_dfh))
			regs->cr_ipsr |= IA64_PSR_DFH;
		PSCB(current, vpsr_dfh) = 0;
		perfc_incra(slow_reflect, fault >> 8);
		return;
	}

	PSCB(current, itir) = itir;
	PSCB(current, iha) = iha;
	PSCB(current, ifa) = address;
	reflect_interruption(isr, regs, fault);
}

fpswa_interface_t *fpswa_interface = 0;

void trap_init(void)
{
	if (ia64_boot_param->fpswa)
		/* FPSWA fixup: make the interface pointer a virtual address */
		fpswa_interface = __va(ia64_boot_param->fpswa);
	else
		printk("No FPSWA supported.\n");
}

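/*
 * Invoke the EFI FPSWA (floating-point software assist) handler to
 * emulate the faulting/trapping FP operation.  Returns a status of -1
 * if no FPSWA interface is available.
 */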
static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
	fp_state_t fp_state;
	fpswa_ret_t ret;

	if (!fpswa_interface)
		return (fpswa_ret_t) {-1, 0, 0, 0};

	memset(&fp_state, 0, sizeof(fp_state_t));

	/*
	 * Compute fp_state.  Only FP registers f6-f11 are used by the
	 * kernel, so set those bits in the mask and set the low volatile
	 * pointer to point to these registers.
	 */
	fp_state.bitmask_low64 = 0xfc0;	/* bit6..bit11 */

	fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *)&regs->f6;
	/*
	 * unsigned long (*EFI_FPSWA) (
	 *      unsigned long trap_type,
	 *      void *Bundle,
	 *      unsigned long *pipsr,
	 *      unsigned long *pfsr,
	 *      unsigned long *pisr,
	 *      unsigned long *ppreds,
	 *      unsigned long *pifs,
	 *      void *fp_state);
	 */
	ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
	                                 ipsr, fpsr, isr, pr, ifs, &fp_state);

	return ret;
}


/*
 * Handle floating-point assist faults and traps for a domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
	struct vcpu *v = current;
	IA64_BUNDLE bundle;
	unsigned long fault_ip;
	fpswa_ret_t ret;

	fault_ip = regs->cr_iip;
	/*
	 * By the time an FP trap is taken, the trapping instruction has
	 * completed, so if ipsr.ri == 0 the trapping instruction is in
	 * the previous bundle.
	 */
	if (!fp_fault && (ia64_psr(regs)->ri == 0))
		fault_ip -= 16;

	if (VMX_DOMAIN(current)) {
		if (IA64_RETRY == __vmx_get_domain_bundle(fault_ip, &bundle))
			return IA64_RETRY;
	} else
		bundle = __get_domain_bundle(fault_ip);

	if (!bundle.i64[0] && !bundle.i64[1]) {
		printk("%s: floating-point bundle at 0x%lx not mapped\n",
		       __FUNCTION__, fault_ip);
		return -1;
	}

	ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
	                 &isr, &regs->pr, &regs->cr_ifs, regs);

	if (ret.status) {
		PSCBX(v, fpswa_ret) = ret;
		printk("%s(%s): fp_emulate() returned %ld\n",
		       __FUNCTION__, fp_fault ? "fault" : "trap", ret.status);
	}

	return ret.status;
}

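/*
 * Fault handler for faults taken by Xen itself: print diagnostics for
 * the known vectors (handling lfetch.fault specially), then dump the
 * registers and panic.
 */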
void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
	struct pt_regs *regs = (struct pt_regs *)&stack;
	unsigned long code;
	static const char *const reason[] = {
		"IA-64 Illegal Operation fault",
		"IA-64 Privileged Operation fault",
		"IA-64 Privileged Register fault",
		"IA-64 Reserved Register/Field fault",
		"Disabled Instruction Set Transition fault",
		"Unknown fault 5", "Unknown fault 6",
		"Unknown fault 7", "Illegal Hazard fault",
		"Unknown fault 9", "Unknown fault 10",
		"Unknown fault 11", "Unknown fault 12",
		"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
	};

	printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
	       "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
	       regs->cr_iip, regs->cr_ipsr, isr);

	if ((isr & IA64_ISR_NA) &&
	    ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
		/*
		 * This fault was due to lfetch.fault; set the "ed" bit in
		 * the psr to cancel the lfetch.
		 */
		ia64_psr(regs)->ed = 1;
		printk("ia64_fault: handled lfetch.fault\n");
		return;
	}

	switch (vector) {
	case 0:
		printk("VHPT Translation.\n");
		break;

	case 4:
		printk("Alt DTLB.\n");
		break;

	case 6:
		printk("Instruction Key Miss.\n");
		break;

	case 7:
		printk("Data Key Miss.\n");
		break;

	case 8:
		printk("Dirty-bit.\n");
		break;

	case 20:
		printk("Page Not Found.\n");
		break;

	case 21:
		printk("Key Permission.\n");
		break;

	case 22:
		printk("Instruction Access Rights.\n");
		break;

	case 24:	/* General Exception */
		code = (isr >> 4) & 0xf;
		printk("General Exception: %s%s.\n", reason[code],
		       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
		                      " (data access)") : "");
		if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
			printk("%s[%d]: possible hazard @ ip=%016lx "
			       "(pr = %016lx)\n", current->comm, current->pid,
			       regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
			printk("ia64_fault: returning on hazard\n");
			return;
		}
		break;

	case 25:
		printk("Disabled FP-Register.\n");
		break;

	case 26:
		printk("NaT consumption.\n");
		break;

	case 29:
		printk("Debug.\n");
		break;

	case 30:
		printk("Unaligned Reference.\n");
		break;

	case 31:
		printk("Unsupported data reference.\n");
		break;

	case 32:
		printk("Floating-Point Fault.\n");
		break;

	case 33:
		printk("Floating-Point Trap.\n");
		break;

	case 34:
		printk("Lower Privilege Transfer Trap.\n");
		break;

	case 35:
		printk("Taken Branch Trap.\n");
		break;

	case 36:
		printk("Single Step Trap.\n");
		break;

	case 45:
		printk("IA-32 Exception.\n");
		break;

	case 46:
		printk("IA-32 Intercept.\n");
		break;

	case 47:
		printk("IA-32 Interrupt.\n");
		break;

	default:
		printk("Fault %lu\n", vector);
		break;
	}

	show_registers(regs);
	panic("Fault in Xen.\n");
}

unsigned long running_on_sim = 0;

/* Also read in hyperprivop.S */
int first_break = 0;

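/*
 * Break-instruction fault handler: dispatches simulator SSC requests,
 * debugger breaks (under CRASH_DEBUG), hypercalls (break with the
 * domain's breakimm), and hyperprivops; any other break is reflected
 * to the guest.
 */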
void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
	struct domain *d = current->domain;
	struct vcpu *v = current;
	IA64FAULT vector;

	/* FIXME: don't hardcode constant */
	if ((iim == 0x80001 || iim == 0x80002)
	    && ia64_get_cpl(regs->cr_ipsr) == 2) {
		do_ssc(vcpu_get_gr(current, 36), regs);
	}
#ifdef CRASH_DEBUG
	else if ((iim == 0 || iim == CDB_BREAK_NUM) && !user_mode(regs)) {
		if (iim == 0)
			show_registers(regs);
		debugger_trap_fatal(0 /* don't care */, regs);
	}
#endif
	else if (iim == d->arch.breakimm && ia64_get_cpl(regs->cr_ipsr) == 2) {
		/* by default, do not continue */
		v->arch.hypercall_continuation = 0;

		if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
			if (!PSCBX(v, hypercall_continuation))
				vcpu_increment_iip(current);
		} else
			reflect_interruption(isr, regs, vector);
	} else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
	           && ia64_get_cpl(regs->cr_ipsr) == 2) {
		if (ia64_hyperprivop(iim, regs))
			vcpu_increment_iip(current);
	} else {
		if (iim == 0)
			die_if_kernel("bug check", regs, iim);
		PSCB(v, iim) = iim;
		reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
	}
}

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
	IA64FAULT vector;

	vector = priv_emulate(current, regs, isr);
	if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
		// Note: if a path results in a vector to reflect that
		// requires iha/itir (e.g. vcpu_force_data_miss), they must
		// be set there.
		reflect_interruption(isr, regs, vector);
	}
}

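/*
 * Map a hardware fault vector taken while a domain was running onto the
 * corresponding guest interruption vector and reflect it, handling a
 * few cases (disabled FP registers, NaT consumption, FP faults/traps)
 * specially along the way.
 */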
void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
	struct vcpu *v = current;
	unsigned long check_lazy_cover = 0;
	unsigned long psr = regs->cr_ipsr;
	unsigned long status;

	/* The following faults should never be seen from Xen itself. */
	BUG_ON(!(psr & IA64_PSR_CPL));

	switch (vector) {
	case 8:
		vector = IA64_DIRTY_BIT_VECTOR;
		break;
	case 9:
		vector = IA64_INST_ACCESS_BIT_VECTOR;
		break;
	case 10:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_BIT_VECTOR;
		break;
	case 20:
		check_lazy_cover = 1;
		vector = IA64_PAGE_NOT_PRESENT_VECTOR;
		break;
	case 22:
		vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
		break;
	case 23:
		check_lazy_cover = 1;
		vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
		break;
	case 24:
		vector = IA64_GENEX_VECTOR;
		break;
	case 25:
		if (PSCB(v, hpsr_dfh)) {
			PSCB(v, hpsr_dfh) = 0;
			PSCB(v, hpsr_mfh) = 1;
			if (__ia64_per_cpu_var(fp_owner) != v)
				__ia64_load_fpu(v->arch._thread.fph);
		}
		if (!PSCB(v, vpsr_dfh)) {
			regs->cr_ipsr &= ~IA64_PSR_DFH;
			return;
		}
		vector = IA64_DISABLED_FPREG_VECTOR;
		break;
	case 26:
		if (((isr >> 4L) & 0xfL) == 1) {
			/* Fault is due to a register NaT consumption fault. */
			//regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
			printk("ia64_handle_reflection: handling regNaT "
			       "fault\n");
			vector = IA64_NAT_CONSUMPTION_VECTOR;
			break;
		}
#if 1
		// Pass NULL-pointer dereferences through with no error,
		// but retain the debug output below for non-zero ifa.
		if (!ifa) {
			vector = IA64_NAT_CONSUMPTION_VECTOR;
			break;
		}
#endif
#ifdef CONFIG_PRIVIFY
		/* Some privified operations are coded using reg+64 instead
		   of reg. */
		printk("*** NaT fault... attempting to handle as privop\n");
		printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
		       isr, ifa, regs->cr_iip, psr);
		//regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
		// Certain NaT faults are higher priority than privop faults.
		vector = priv_emulate(v, regs, isr);
		if (vector == IA64_NO_FAULT) {
			printk("*** Handled privop masquerading as NaT "
			       "fault\n");
			return;
		}
#endif
		vector = IA64_NAT_CONSUMPTION_VECTOR;
		break;
	case 27:
		//printk("*** Handled speculation vector, itc=%lx!\n",
		//       ia64_get_itc());
		PSCB(current, iim) = iim;
		vector = IA64_SPECULATION_VECTOR;
		break;
	case 30:
		// FIXME: Should we handle unaligned refs in Xen??
		vector = IA64_UNALIGNED_REF_VECTOR;
		break;
	case 32:
		status = handle_fpu_swa(1, regs, isr);
		if (!status) {
			vcpu_increment_iip(v);
			return;
		}
		// Failed to fetch the bundle.
		if (IA64_RETRY == status)
			return;
		printk("ia64_handle_reflection: handling FP fault\n");
		vector = IA64_FP_FAULT_VECTOR;
		break;
	case 33:
		status = handle_fpu_swa(0, regs, isr);
		if (!status)
			return;
		// Failed to fetch the bundle.
		if (IA64_RETRY == status)
			return;
		printk("ia64_handle_reflection: handling FP trap\n");
		vector = IA64_FP_TRAP_VECTOR;
		break;
	case 34:
		printk("ia64_handle_reflection: handling lowerpriv trap\n");
		vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
		break;
	case 35:
		printk("ia64_handle_reflection: handling taken branch trap\n");
		vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
		break;
	case 36:
		printk("ia64_handle_reflection: handling single step trap\n");
		vector = IA64_SINGLE_STEP_TRAP_VECTOR;
		break;

	default:
		printk("ia64_handle_reflection: unhandled vector=0x%lx\n",
		       vector);
		while (vector)
			/* spin */;
		return;
	}
	if (check_lazy_cover && (isr & IA64_ISR_IR) &&
	    handle_lazy_cover(v, regs))
		return;
	PSCB(current, ifa) = ifa;
	PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
	reflect_interruption(isr, regs, vector);
}

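/*
 * Dirty-bit fault taken while shadow (dirty-page) tracking is enabled:
 * mark the page dirty in the shadow bitmap and decide whether the fault
 * also needs to be reflected to the guest.
 */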
void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct domain *d = current->domain;
	unsigned long gpfn;
	unsigned long pte = 0;
	struct vhpt_lf_entry *vlfe;

	/* There are two jobs to do:
	   - marking the page as dirty (the metaphysical address must be
	     extracted to do that);
	   - reflecting the fault or not (the virtual Dirty bit must be
	     extracted to decide).
	   Unfortunately, this information is not immediately available!
	 */

	/* Extract the metaphysical address.
	   Try to get it from the VHPT and M2P, as we need the flags. */
	vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
	pte = vlfe->page_flags;
	if (vlfe->ti_tag == ia64_ttag(ifa)) {
		/* The VHPT entry is valid. */
		gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
		BUG_ON(gpfn == INVALID_M2P_ENTRY);
	} else {
		unsigned long itir, iha;
		IA64FAULT fault;

		/* The VHPT entry is not valid. */
		vlfe = NULL;

		/* FIXME: give tpa a chance, as the TC was valid. */

		fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

		/* Try again! */
		if (fault != IA64_NO_FAULT) {
			/* This will trigger a dtlb miss. */
			ia64_ptcl(ifa, PAGE_SHIFT << 2);
			return;
		}
		gpfn = ((pte & _PAGE_PPN_MASK) >> PAGE_SHIFT);
		if (pte & _PAGE_D)
			pte |= _PAGE_VIRT_D;
	}

	/* Set the dirty bit in the bitmap. */
	shadow_mark_page_dirty(d, gpfn);

	/* Update the local TC/VHPT and decide whether or not the fault
	   should be reflected.
	   SMP note: we almost ignore the other processors.  The shadow
	   bitmap has been atomically updated.  If the dirty fault happens
	   on another processor, it will do its job.
	 */

	if (pte != 0) {
		/* We know how to handle the fault. */

		if (pte & _PAGE_VIRT_D) {
			/* Rewrite the VHPT entry.
			   There is no race here because only the
			   cpu VHPT owner can write page_flags. */
			if (vlfe)
				vlfe->page_flags = pte | _PAGE_D;

			/* Purge the TC locally.
			   It will be reloaded from the VHPT iff the
			   VHPT entry is still valid. */
			ia64_ptcl(ifa, PAGE_SHIFT << 2);

			atomic64_inc(&d->arch.shadow_fault_count);
		} else {
			/* Reflect.
			   In this case there is no need to purge. */
			ia64_handle_reflection(ifa, regs, isr, 0, 8);
		}
	} else {
		/* We don't know whether or not the fault must be
		   reflected.  The VHPT entry is not valid. */
		/* FIXME: in metaphysical mode, we could do an ITC now. */
		ia64_ptcl(ifa, PAGE_SHIFT << 2);
	}
}