/*
 * arch/ia64/xen/ivt.S
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *		entry offset ----/     /         /                  /  /
 *		entry number ---------/         /                  /  /
 *		size of the entry -------------/                  /  /
 *		vector name -------------------------------------/  /
 *		interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
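/*
 * Layout arithmetic, for orientation: an IA-64 bundle is 16 bytes, so a
 * 64-bundle entry occupies 0x400 bytes and a 16-bundle entry 0x100 bytes.
 * Entry 7 in the example above therefore starts at 7 * 0x400 = 0x1c00, and
 * the full table is 20 * 0x400 + 48 * 0x100 = 0x8000 bytes = 32KB, matching
 * the alignment requirement stated above.
 */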

#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#ifdef CONFIG_XEN
#define ia64_ivt xen_ivt
#endif

#if 1
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
  /*
   * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
   * needed for something else before enabling this...
   */
# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;	mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#define MINSTATE_VIRT	/* needed by minstate.h */
#include "xenminstate.h"

#define FAULT(n)								\
	mov r31=pr;								\
	mov r19=n;;			/* prepare to save predicates */	\
	br.sptk.many dispatch_to_fault_handler

	.section .text.ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as the page table entries above the pte level have
	 * valid mappings for the faulting address.  The TLB entry for the
	 * original miss gets inserted only if the pte entry indicates that the
	 * page is present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
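	/*
	 * Roughly, the physical-mode walk below mirrors the generic page-table
	 * helpers (illustrative sketch only, not the code the kernel runs here):
	 *
	 *	pgd = pgd_offset(...);      pud = pud_offset(pgd, ifa);
	 *	pmd = pmd_offset(pud, ifa); pte = pte_offset(pmd, ifa);
	 *
	 * with the pud level present only under CONFIG_PGTABLE_4, region 5 rooted
	 * at swapper_pg_dir, and the other regions rooted at the per-task page
	 * table found via IA64_KR(PT_BASE).
	 */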
---|
100 | #ifdef CONFIG_XEN |
---|
101 | movl r16=XSI_IFA |
---|
102 | ;; |
---|
103 | ld8 r16=[r16] |
---|
104 | #ifdef CONFIG_HUGETLB_PAGE |
---|
105 | movl r18=PAGE_SHIFT |
---|
106 | movl r25=XSI_ITIR |
---|
107 | ;; |
---|
108 | ld8 r25=[r25] |
---|
109 | #endif |
---|
110 | ;; |
---|
111 | #else |
---|
112 | mov r16=cr.ifa // get address that caused the TLB miss |
---|
113 | #ifdef CONFIG_HUGETLB_PAGE |
---|
114 | movl r18=PAGE_SHIFT |
---|
115 | mov r25=cr.itir |
---|
116 | #endif |
---|
117 | #endif |
---|
118 | ;; |
---|
119 | #ifdef CONFIG_XEN |
---|
120 | XEN_HYPER_RSM_PSR_DT; |
---|
121 | #else |
---|
122 | rsm psr.dt // use physical addressing for data |
---|
123 | #endif |
---|
124 | mov r31=pr // save the predicate registers |
---|
125 | mov r19=IA64_KR(PT_BASE) // get page table base address |
---|
126 | shl r21=r16,3 // shift bit 60 into sign bit |
---|
127 | shr.u r17=r16,61 // get the region number into r17 |
---|
128 | ;; |
---|
129 | shr.u r22=r21,3 |
---|
130 | #ifdef CONFIG_HUGETLB_PAGE |
---|
131 | extr.u r26=r25,2,6 |
---|
132 | ;; |
---|
133 | cmp.ne p8,p0=r18,r26 |
---|
134 | sub r27=r26,r18 |
---|
135 | ;; |
---|
136 | (p8) dep r25=r18,r25,2,6 |
---|
137 | (p8) shr r22=r22,r27 |
---|
138 | #endif |
---|
139 | ;; |
---|
140 | cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5? |
---|
141 | shr.u r18=r22,PGDIR_SHIFT // get bottom portion of pgd index bit |
---|
142 | ;; |
---|
143 | (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place |
---|
144 | |
---|
145 | srlz.d |
---|
146 | LOAD_PHYSICAL(p6, r19, swapper_pg_dir) // region 5 is rooted at swapper_pg_dir |
---|
147 | |
---|
148 | .pred.rel "mutex", p6, p7 |
---|
149 | (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT |
---|
150 | (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3 |
---|
151 | ;; |
---|
152 | (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=pgd_offset for region 5 |
---|
153 | (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=pgd_offset for region[0-4] |
---|
154 | cmp.eq p7,p6=0,r21 // unused address bits all zeroes? |
---|
155 | #ifdef CONFIG_PGTABLE_4 |
---|
156 | shr.u r28=r22,PUD_SHIFT // shift pud index into position |
---|
157 | #else |
---|
158 | shr.u r18=r22,PMD_SHIFT // shift pmd index into position |
---|
159 | #endif |
---|
160 | ;; |
---|
161 | ld8 r17=[r17] // get *pgd (may be 0) |
---|
162 | ;; |
---|
163 | (p7) cmp.eq p6,p7=r17,r0 // was pgd_present(*pgd) == NULL? |
---|
164 | #ifdef CONFIG_PGTABLE_4 |
---|
165 | dep r28=r28,r17,3,(PAGE_SHIFT-3) // r28=pud_offset(pgd,addr) |
---|
166 | ;; |
---|
167 | shr.u r18=r22,PMD_SHIFT // shift pmd index into position |
---|
168 | (p7) ld8 r29=[r28] // get *pud (may be 0) |
---|
169 | ;; |
---|
170 | (p7) cmp.eq.or.andcm p6,p7=r29,r0 // was pud_present(*pud) == NULL? |
---|
171 | dep r17=r18,r29,3,(PAGE_SHIFT-3) // r17=pmd_offset(pud,addr) |
---|
172 | #else |
---|
173 | dep r17=r18,r17,3,(PAGE_SHIFT-3) // r17=pmd_offset(pgd,addr) |
---|
174 | #endif |
---|
175 | ;; |
---|
176 | (p7) ld8 r20=[r17] // get *pmd (may be 0) |
---|
177 | shr.u r19=r22,PAGE_SHIFT // shift pte index into position |
---|
178 | ;; |
---|
179 | (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was pmd_present(*pmd) == NULL? |
---|
180 | dep r21=r19,r20,3,(PAGE_SHIFT-3) // r21=pte_offset(pmd,addr) |
---|
181 | ;; |
---|
182 | (p7) ld8 r18=[r21] // read *pte |
---|
183 | #ifdef CONFIG_XEN |
---|
184 | movl r19=XSI_ISR |
---|
185 | ;; |
---|
186 | ld8 r19=[r19] |
---|
187 | #else |
---|
188 | mov r19=cr.isr // cr.isr bit 32 tells us if this is an insn miss |
---|
189 | #endif |
---|
190 | ;; |
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
#ifdef CONFIG_XEN
	movl r22=XSI_IHA
	;;
	ld8 r22=[r22]
#else
	mov r22=cr.iha				// get the VHPT address that caused the TLB miss
#endif
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
#ifdef CONFIG_XEN
	mov r24=r8
	mov r8=r18
	;;
(p10)	XEN_HYPER_ITC_I
	;;
(p11)	XEN_HYPER_ITC_D
	;;
	mov r8=r24
	;;
#else
(p10)	itc.i r18				// insert the instruction TLB entry
(p11)	itc.d r18				// insert the data TLB entry
#endif
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
#ifdef CONFIG_XEN
	movl r24=XSI_IFA
	;;
	st8 [r24]=r22
	;;
#else
	mov cr.ifa=r22
#endif

#ifdef CONFIG_HUGETLB_PAGE
(p8)	mov cr.itir=r25				// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
#ifdef CONFIG_XEN
(p7)	mov r25=r8
(p7)	mov r8=r24
	;;
(p7)	XEN_HYPER_ITC_D
	;;
(p7)	mov r8=r25
	;;
#else
(p7)	itc.d r24
#endif
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 * r28 = equivalent of pud_offset(pgd, ifa)
	 * r17 = equivalent of pmd_offset(pud, ifa)
	 * r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 * r29 = *pud
	 * r20 = *pmd
	 * r18 = *pte
	 */
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#ifdef CONFIG_PGTABLE_4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#ifdef CONFIG_PGTABLE_4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
#ifdef CONFIG_XEN
	XEN_HYPER_RFI
	dv_serialize_data
#else
	rfi
#endif
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
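	/*
	 * Note on the nested-miss path (see nested_dtlb_miss below): before the
	 * PTE read at label 1, b0 is saved in r29, the predicates in r31 and the
	 * continuation point in r30, so a nested miss can walk the page table in
	 * physical mode and branch back to label 1 with r17 pointing at the PTE.
	 */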
#ifdef CONFIG_XEN
	movl r16=XSI_IFA
	;;
	ld8 r16=[r16]
#else
	mov r16=cr.ifa				// get virtual address
#endif
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
#ifdef CONFIG_XEN
	movl r17=XSI_IHA
	;;
	ld8 r17=[r17]				// get virtual address of L3 PTE
#else
	mov r17=cr.iha				// get virtual address of PTE
#endif
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
#ifdef CONFIG_XEN
	mov r19=r8
	mov r8=r18
	;;
	XEN_HYPER_ITC_I
	;;
	mov r8=r19
#else
	itc.i r18
#endif
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
#ifdef CONFIG_XEN
	XEN_HYPER_RFI
	dv_serialize_data
#else
	rfi
#endif
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
#ifdef CONFIG_XEN
	movl r16=XSI_IFA
	;;
	ld8 r16=[r16]
#else
	mov r16=cr.ifa				// get virtual address
#endif
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
#ifdef CONFIG_XEN
	movl r17=XSI_IHA
	;;
	ld8 r17=[r17]				// get virtual address of L3 PTE
#else
	mov r17=cr.iha				// get virtual address of PTE
#endif
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
#ifdef CONFIG_XEN
	mov r19=r8
	mov r8=r18
	;;
	XEN_HYPER_ITC_D
	;;
	mov r8=r19
	;;
#else
	itc.d r18
#endif
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
#ifdef CONFIG_XEN
	XEN_HYPER_RFI
	dv_serialize_data
#else
	rfi
#endif
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
#ifdef CONFIG_XEN
	movl r31=XSI_IPSR
	;;
	ld8 r21=[r31],XSI_IFA_OFS-XSI_IPSR_OFS	// get ipsr, point to ifa
	movl r17=PAGE_KERNEL
	;;
	ld8 r16=[r31]				// get ifa
#else
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r21=cr.ipsr
#endif
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
#ifndef CONFIG_XEN
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
#endif
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
#ifdef CONFIG_XEN
	mov r18=r8
	mov r8=r19
	;;
	XEN_HYPER_ITC_I
	;;
	mov r8=r18
	;;
	mov pr=r31,-1
	;;
	XEN_HYPER_RFI;
#else
	itc.i r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
#endif
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
#ifdef CONFIG_XEN
	movl r31=XSI_IPSR
	;;
	ld8 r21=[r31],XSI_ISR_OFS-XSI_IPSR_OFS	// get ipsr, point to isr
	movl r17=PAGE_KERNEL
	;;
	ld8 r20=[r31],XSI_IFA_OFS-XSI_ISR_OFS	// get isr, point to ifa
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	;;
	ld8 r16=[r31]				// get ifa
#else
	mov r16=cr.ifa				// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	mov r20=cr.isr
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r21=cr.ipsr
#endif
	mov r31=pr
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
#ifndef CONFIG_XEN
(p8)	thash r17=r16
	;;
(p8)	mov cr.iha=r17
#endif
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	shr.u r18=r16,57			// move address bit 61 to bit 4
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	or r19=r19,r17				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p6)	mov cr.ipsr=r21
	;;
#ifdef CONFIG_XEN
(p7)	mov r18=r8
(p7)	mov r8=r19
	;;
(p7)	XEN_HYPER_ITC_D
	;;
(p7)	mov r8=r18
	;;
	mov pr=r31,-1
	;;
	XEN_HYPER_RFI;
#else
(p7)	itc.d r19		// insert the TLB entry
	mov pr=r31,-1
	rfi
#endif
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we look up the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 * Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 * Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
	 */
#ifdef CONFIG_XEN
	XEN_HYPER_RSM_PSR_DT;
#else
	rsm psr.dt				// switch to using physical data addressing
#endif
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
#ifdef CONFIG_XEN
	movl r18=XSI_ITIR
	;;
	ld8 r18=[r18]
#else
	mov r18=cr.itir
#endif
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#ifdef CONFIG_PGTABLE_4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#ifdef CONFIG_PGTABLE_4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr);
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	//-----------------------------------------------------------------------------------
	// call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
ENTRY(page_fault)
#ifdef CONFIG_XEN
	XEN_HYPER_SSM_PSR_DT
#else
	ssm psr.dt
	;;
	srlz.i
#endif
	;;
	SAVE_MIN_WITH_COVER
	alloc r15=ar.pfs,0,0,3,0
#ifdef CONFIG_XEN
	movl r3=XSI_ISR
	;;
	ld8 out1=[r3],XSI_IFA_OFS-XSI_ISR_OFS	// get vcr.isr, point to ifa
	;;
	ld8 out0=[r3]				// get vcr.ifa
	mov r14=1
	;;
	add r3=XSI_PSR_IC_OFS-XSI_IFA_OFS, r3	// point to vpsr.ic
	;;
	st4 [r3]=r14				// vpsr.ic = 1
	adds r3=8,r2				// set up second base pointer
	;;
#else
	mov out0=cr.ifa
	mov out1=cr.isr
	adds r3=8,r2				// set up second base pointer
	;;
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i					// guarantee that interruption collection is on
	;;
#endif
#ifdef CONFIG_XEN

#define MASK_TO_PEND_OFS	(-1)

(p15)	movl r14=XSI_PSR_I_ADDR
	;;
(p15)	ld8 r14=[r14]
	;;
(p15)	st1 [r14]=r0,MASK_TO_PEND_OFS		// if (p15) vpsr.i = 1
	;;	// if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
(p15)	ld1 r14=[r14]				// if (vcpu->vcpu_info->evtchn_upcall_pending)
	;;
(p15)	cmp.ne p15,p0=r14,r0
	;;
(p15)	XEN_HYPER_SSM_I
#else
(p15)	ssm psr.i				// restore psr.i
#endif
	movl r14=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r14
	;;
	adds out2=16,r12			// out2 = pointer to pt_regs
	br.call.sptk.many b6=ia64_do_page_fault	// ignore return address
END(page_fault)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
	 * up the physical address of the L3 PTE and then continue at label 1 below.
	 */
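	/*
	 * On SMP the update below is done with cmpxchg8.acq against the value
	 * saved in ar.ccv, so a PTE that was changed (or globally purged) between
	 * the read and the update is detected and the stale translation is purged
	 * with ptc.l rather than silently overwritten.
	 */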
#ifdef CONFIG_XEN
	movl r16=XSI_IFA
	;;
	ld8 r16=[r16]
	;;
#else
	mov r16=cr.ifa				// get the address that caused the fault
#endif
	movl r30=1f				// load continuation point in case of nested fault
	;;
#ifdef CONFIG_XEN
	mov r18=r8;
	mov r8=r16;
	XEN_HYPER_THASH;;
	mov r17=r8;
	mov r8=r18;;
#else
	thash r17=r16				// compute virtual address of L3 PTE
#endif
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only update if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only compare if page is present
	;;
#ifdef CONFIG_XEN
(p6)	mov r18=r8
(p6)	mov r8=r25
	;;
(p6)	XEN_HYPER_ITC_D
	;;
(p6)	mov r8=r18
#else
(p6)	itc.d r25				// install updated PTE
#endif
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
#ifdef CONFIG_XEN
	XEN_HYPER_RFI
	dv_serialize_data
#else
	rfi
#endif
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
#ifdef CONFIG_XEN
	movl r16=XSI_IFA
	;;
	ld8 r16=[r16]
	;;
#else
	mov r16=cr.ifa				// get the address that caused the fault
#endif
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	mov r17=cr.ipsr
	;;
	mov r18=cr.iip
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
#ifdef CONFIG_XEN
	mov r18=r8;
	mov r8=r16;
	XEN_HYPER_THASH;;
	mov r17=r8;
	mov r8=r18;;
#else
	thash r17=r16				// compute virtual address of L3 PTE
#endif
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page present
	;;
#ifdef CONFIG_XEN
	mov r26=r8
	mov r8=r25
	;;
(p6)	XEN_HYPER_ITC_I
	;;
	mov r8=r26
	;;
#else
(p6)	itc.i r25				// install updated PTE
#endif
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.i r18				// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
#ifdef CONFIG_XEN
	XEN_HYPER_RFI
	dv_serialize_data
#else
	rfi
#endif
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
#ifdef CONFIG_XEN
	movl r16=XSI_IFA
	;;
	ld8 r16=[r16]
	;;
#else
	mov r16=cr.ifa				// get the address that caused the fault
#endif
	movl r30=1f				// load continuation point in case of nested fault
	;;
#ifdef CONFIG_XEN
	mov r18=r8
	mov r8=r16
	XEN_HYPER_THASH
	;;
	mov r17=r8
	mov r8=r18
	;;
#else
	thash r17=r16				// compute virtual address of L3 PTE
#endif
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6 = r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page is present
	;;
#ifdef CONFIG_XEN
	mov r26=r8
	mov r8=r25
	;;
(p6)	XEN_HYPER_ITC_D
	;;
	mov r8=r26
	;;
#else
(p6)	itc.d r25				// install updated PTE
#endif
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	itc.d r18				// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
#ifdef CONFIG_XEN
	XEN_HYPER_RFI
	dv_serialize_data
#else
	rfi
#endif
END(daccess_bit)

	.org ia64_ivt+0x2c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
ENTRY(break_fault)
	/*
	 * The streamlined system call entry/exit paths only save/restore the initial part
	 * of pt_regs.  This implies that the callers of system-calls must adhere to the
	 * normal procedure calling conventions.
	 *
	 *   Registers to be saved & restored:
	 *	CR registers: cr.ipsr, cr.iip, cr.ifs
	 *	AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
	 *	others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
	 *   Registers to be restored only:
	 *	r8-r11: output value from the system call.
	 *
	 * During system call exit, scratch registers (including r15) are modified/cleared
	 * to prevent leaking bits from kernel to user level.
	 */
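	/*
	 * In short: a break whose immediate (cr.iim) equals __IA64_BREAK_SYSCALL
	 * is treated as a system call; anything else is routed to non_syscall.
	 * The syscall number arrives in r15 biased by 1024, which is why the code
	 * below subtracts 1024 before indexing sys_call_table and bounds-checks
	 * the result against NR_syscalls.
	 */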
	DBG_FAULT(11)
	mov.m r16=IA64_KR(CURRENT)		// M2 r16 <- current task (12 cyc)
#ifdef CONFIG_XEN
	movl r22=XSI_IPSR
	;;
	ld8 r29=[r22],XSI_IIM_OFS-XSI_IPSR_OFS	// get ipsr, point to iim
#else
	mov r29=cr.ipsr				// M2 (12 cyc)
#endif
	mov r31=pr				// I0 (2 cyc)

#ifdef CONFIG_XEN
	;;
	ld8 r17=[r22],XSI_IIP_OFS-XSI_IIM_OFS
#else
	mov r17=cr.iim				// M2 (2 cyc)
#endif
	mov.m r27=ar.rsc			// M2 (12 cyc)
	mov r18=__IA64_BREAK_SYSCALL		// A

	mov.m ar.rsc=0				// M2
	mov.m r21=ar.fpsr			// M2 (12 cyc)
	mov r19=b6				// I0 (2 cyc)
	;;
	mov.m r23=ar.bspstore			// M2 (12 cyc)
	mov.m r24=ar.rnat			// M2 (5 cyc)
	mov.i r26=ar.pfs			// I0 (2 cyc)

	invala					// M0|1
	nop.m 0					// M
	mov r20=r1				// A			save r1

	nop.m 0
	movl r30=sys_call_table			// X

#ifdef CONFIG_XEN
	ld8 r28=[r22]
#else
	mov r28=cr.iip				// M2 (2 cyc)
#endif
	cmp.eq p0,p7=r18,r17			// I0   is this a system call?
(p7)	br.cond.spnt non_syscall		// B    no ->
	//
	// From this point on, we are definitely on the syscall-path
	// and we can use (non-banked) scratch registers.
	//
	///////////////////////////////////////////////////////////////////////
	mov r1=r16				// A    move task-pointer to "addl"-addressable reg
	mov r2=r16				// A    setup r2 for ia64_syscall_setup
	add r9=TI_FLAGS+IA64_TASK_SIZE,r16	// A	r9 = &current_thread_info()->flags

	adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
	adds r15=-1024,r15			// A    subtract 1024 from syscall number
	mov r3=NR_syscalls - 1
	;;
	ld1.bias r17=[r16]			// M0|1 r17 = current->thread.on_ustack flag
	ld4 r9=[r9]				// M0|1 r9 = current_thread_info()->flags
	extr.u r8=r29,41,2			// I0   extract ei field from cr.ipsr

	shladd r30=r15,3,r30			// A    r30 = sys_call_table + 8*(syscall-1024)
	addl r22=IA64_RBS_OFFSET,r1		// A    compute base of RBS
	cmp.leu p6,p7=r15,r3			// A    syscall number in range?
	;;

	lfetch.fault.excl.nt1 [r22]		// M0|1 prefetch RBS
(p6)	ld8 r30=[r30]				// M0|1 load address of syscall entry point
	tnat.nz.or p7,p0=r15			// I0	is syscall nr a NaT?

	mov.m ar.bspstore=r22			// M2   switch to kernel RBS
	cmp.eq p8,p9=2,r8			// A    ipsr.ei==2?
	;;

(p8)	mov r8=0				// A    clear ei to 0
(p7)	movl r30=sys_ni_syscall			// X

(p8)	adds r28=16,r28				// A    switch cr.iip to next bundle
(p9)	adds r8=1,r8				// A    increment ei to next slot
	nop.i 0
	;;

	mov.m r25=ar.unat			// M2 (5 cyc)
	dep r29=r8,r29,41,2			// I0   insert new ei into cr.ipsr
	adds r15=1024,r15			// A    restore original syscall number
	//
	// If any of the above loads miss in L1D, we'll stall here until
	// the data arrives.
	//
	///////////////////////////////////////////////////////////////////////
	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
	mov b6=r30				// I0   setup syscall handler branch reg early
	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?

	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
	mov r18=ar.bsp				// M2 (12 cyc)
(pKStk)	br.cond.spnt .break_fixup		// B	we're already in kernel-mode -- fix up RBS
	;;
.back_from_break_fixup:
(pUStk)	addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1	// A    compute base of memory stack
	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
	br.call.sptk.many b7=ia64_syscall_setup	// B
1:
	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
	nop 0
#ifdef CONFIG_XEN
	mov r2=b0; br.call.sptk b0=xen_bsw1;; mov b0=r2;;
#else
	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
#endif
	;;

#ifdef CONFIG_XEN
	movl r16=XSI_PSR_IC
	mov r3=1
	;;
	st4 [r16]=r3,XSI_PSR_I_ADDR_OFS-XSI_PSR_IC_OFS	// vpsr.ic = 1
#else
	ssm psr.ic | PSR_DEFAULT_BITS		// M2	now it's safe to re-enable intr.-collection
#endif
	movl r3=ia64_ret_from_syscall		// X
	;;

	srlz.i					// M0   ensure interruption collection is on
	mov rp=r3				// I0   set the real return addr
(p10)	br.cond.spnt.many ia64_ret_from_syscall	// B    return if bad call-frame or r15 is a NaT

#ifdef CONFIG_XEN
(p15)	ld8 r16=[r16]				// vpsr.i
	;;
(p15)	st1 [r16]=r0,MASK_TO_PEND_OFS		// if (p15) vpsr.i = 1
	;;	// if (p15) (vcpu->vcpu_info->evtchn_upcall_mask)=0
(p15)	ld1 r2=[r16]				// if (vcpu->vcpu_info->evtchn_upcall_pending)
	;;
(p15)	cmp.ne.unc p6,p0=r2,r0
	;;
(p6)	XEN_HYPER_SSM_I				// do a real ssm psr.i
#else
(p15)	ssm psr.i				// M2   restore psr.i
#endif
(p14)	br.call.sptk.many b6=b6			// B    invoke syscall-handler (ignore return addr)
	br.cond.spnt.many ia64_trace_syscall	// B	do syscall-tracing thingamagic
	// NOT REACHED
///////////////////////////////////////////////////////////////////////
	// On entry, we optimistically assumed that we're coming from user-space.
	// For the rare cases where a system-call is done from within the kernel,
	// we fix things up at this point:
.break_fixup:
	add r1=-IA64_PT_REGS_SIZE,sp		// A    allocate space for pt_regs structure
	mov ar.rnat=r24				// M2	restore kernel's AR.RNAT
	;;
	mov ar.bspstore=r23			// M2	restore kernel's AR.BSPSTORE
	br.cond.sptk .back_from_break_fixup
END(break_fault)

	.org ia64_ivt+0x3000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
ENTRY(interrupt)
	DBG_FAULT(12)
	mov r31=pr		// prepare to save predicates
	;;
	SAVE_MIN_WITH_COVER	// uses r31; defines r2 and r3
#ifdef CONFIG_XEN
	movl r3=XSI_PSR_IC
	mov r14=1
	;;
	st4 [r3]=r14
#else
	ssm psr.ic | PSR_DEFAULT_BITS
#endif
	;;
	adds r3=8,r2		// set up second base pointer for SAVE_REST
	srlz.i			// ensure everybody knows psr.ic is back on
	;;
	SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
#ifdef CONFIG_XEN
	;;
	br.call.sptk.many rp=xen_get_ivr
	;;
	mov out0=r8		// pass cr.ivr as first arg
#else
	mov out0=cr.ivr		// pass cr.ivr as first arg
#endif
	add out1=16,sp		// pass pointer to pt_regs as second arg
	;;
	srlz.d			// make sure we see the effect of cr.ivr
	movl r14=ia64_leave_kernel
	;;
	mov rp=r14
	br.call.sptk.many b6=ia64_handle_irq
END(interrupt)

	.org ia64_ivt+0x3400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3400 Entry 13 (size 64 bundles) Reserved
	DBG_FAULT(13)
	FAULT(13)

	.org ia64_ivt+0x3800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3800 Entry 14 (size 64 bundles) Reserved
	DBG_FAULT(14)
	FAULT(14)

	/*
	 * There is no particular reason for this code to be here, other than that
	 * there happens to be space here that would go unused otherwise.  If this
	 * fault ever gets "unreserved", simply move the following code to a more
	 * suitable spot...
	 *
	 * ia64_syscall_setup() is a separate subroutine so that it can
	 *	allocate stacked registers so it can safely demine any
	 *	potential NaT values from the input registers.
	 *
	 * On entry:
	 *	- executing on bank 0 or bank 1 register set (doesn't matter)
	 *	-  r1: stack pointer
	 *	-  r2: current task pointer
	 *	-  r3: preserved
	 *	- r11: original contents (saved ar.pfs to be saved)
	 *	- r12: original contents (sp to be saved)
	 *	- r13: original contents (tp to be saved)
	 *	- r15: original contents (syscall # to be saved)
	 *	- r18: saved bsp (after switching to kernel stack)
	 *	- r19: saved b6
	 *	- r20: saved r1 (gp)
	 *	- r21: saved ar.fpsr
	 *	- r22: kernel's register backing store base (krbs_base)
	 *	- r23: saved ar.bspstore
	 *	- r24: saved ar.rnat
	 *	- r25: saved ar.unat
	 *	- r26: saved ar.pfs
	 *	- r27: saved ar.rsc
	 *	- r28: saved cr.iip
	 *	- r29: saved cr.ipsr
	 *	- r31: saved pr
	 *	-  b0: original contents (to be saved)
	 * On exit:
	 *	-  p10: TRUE if syscall is invoked with more than 8 out
	 *		registers or r15's NaT is true
	 *	-  r1: kernel's gp
	 *	-  r3: preserved (same as on entry)
	 *	-  r8: -EINVAL if p10 is true
	 *	- r12: points to kernel stack
	 *	- r13: points to current task
	 *	- r14: preserved (same as on entry)
	 *	- p13: preserved
	 *	- p15: TRUE if interrupts need to be re-enabled
	 *	- ar.fpsr: set to kernel settings
	 *	-  b6: preserved (same as on entry)
	 */
#ifndef CONFIG_XEN
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
#endif
	st8 [r1]=r19				// save b6
	add r16=PT(CR_IPSR),r1			// initialize first base pointer
	add r17=PT(R11),r1			// initialize second base pointer
	;;
	alloc r19=ar.pfs,8,0,0,0		// ensure in0-in7 are writable
	st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR)	// save cr.ipsr
	tnat.nz p8,p0=in0

	st8.spill [r17]=r11,PT(CR_IIP)-PT(R11)	// save r11
	tnat.nz p9,p0=in1
(pKStk)	mov r18=r0				// make sure r18 isn't NaT
	;;

	st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS)	// save ar.pfs
	st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP)	// save cr.iip
	mov r28=b0				// save b0 (2 cyc)
	;;

	st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT)	// save ar.unat
	dep r19=0,r19,38,26			// clear all bits but 0..37 [I0]
(p8)	mov in0=-1
	;;

	st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS)	// store ar.pfs.pfm in cr.ifs
	extr.u r11=r19,7,7	// I0		// get sol of ar.pfs
	and r8=0x7f,r19		// A		// get sof of ar.pfs

	st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
	tbit.nz p15,p0=r29,IA64_PSR_I_BIT	// I0
(p9)	mov in1=-1
	;;

(pUStk)	sub r18=r18,r22				// r18=RSE.ndirty*8
	tnat.nz p10,p0=in2
	add r11=8,r11
	;;
(pKStk)	adds r16=PT(PR)-PT(AR_RNAT),r16		// skip over ar_rnat field
(pKStk)	adds r17=PT(B0)-PT(AR_BSPSTORE),r17	// skip over ar_bspstore field
	tnat.nz p11,p0=in3
	;;
(p10)	mov in2=-1
	tnat.nz p12,p0=in4			// [I0]
(p11)	mov in3=-1
	;;
(pUStk)	st8 [r16]=r24,PT(PR)-PT(AR_RNAT)	// save ar.rnat
(pUStk)	st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE)	// save ar.bspstore
	shl r18=r18,16				// compute ar.rsc to be used for "loadrs"
	;;
	st8 [r16]=r31,PT(LOADRS)-PT(PR)		// save predicates
	st8 [r17]=r28,PT(R1)-PT(B0)		// save b0
	tnat.nz p13,p0=in5			// [I0]
	;;
	st8 [r16]=r18,PT(R12)-PT(LOADRS)	// save ar.rsc value for "loadrs"
	st8.spill [r17]=r20,PT(R13)-PT(R1)	// save original r1
(p12)	mov in4=-1
	;;

.mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12)	// save r12
.mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13)		// save r13
(p13)	mov in5=-1
	;;
	st8 [r16]=r21,PT(R8)-PT(AR_FPSR)	// save ar.fpsr
	tnat.nz p13,p0=in6
	cmp.lt p10,p9=r11,r8	// frame size can't be more than local+8
	;;
	mov r8=1
(p9)	tnat.nz p10,p0=r15
	adds r12=-16,r1		// switch to kernel memory stack (with 16 bytes of scratch)

	st8.spill [r17]=r15			// save r15
	tnat.nz p8,p0=in7
	nop.i 0

	mov r13=r2				// establish `current'
	movl r1=__gp				// establish kernel global pointer
	;;
	st8 [r16]=r8		// ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13)	mov in6=-1
(p8)	mov in7=-1

	cmp.eq pSys,pNonSys=r0,r0		// set pSys=1, pNonSys=0
	movl r17=FPSR_DEFAULT
	;;
	mov.m ar.fpsr=r17			// set ar.fpsr to kernel default value
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)
#endif

	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x3c00 Entry 15 (size 64 bundles) Reserved
	DBG_FAULT(15)
	FAULT(15)

	/*
	 * Squatting in this space ...
	 *
	 * This special case dispatcher for illegal operation faults allows preserved
	 * registers to be modified through a callback function (asm only) that is handed
	 * back from the fault handler in r8.  Up to three arguments can be passed to the
	 * callback function by returning an aggregate with the callback as its first
	 * element, followed by the arguments.
	 */
ENTRY(dispatch_illegal_op_fault)
	.prologue
	.body
	SAVE_MIN_WITH_COVER
	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i		// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i	// restore psr.i
	adds r3=8,r2	// set up second base pointer for SAVE_REST
	;;
	alloc r14=ar.pfs,0,0,1,0	// must be first in insn group
	mov out0=ar.ec
	;;
	SAVE_REST
	PT_REGS_UNWIND_INFO(0)
	;;
	br.call.sptk.many rp=ia64_illegal_op_fault
.ret0:	;;
	alloc r14=ar.pfs,0,0,3,0	// must be first in insn group
	mov out0=r9
	mov out1=r10
	mov out2=r11
	movl r15=ia64_leave_kernel
	;;
	mov rp=r15
	mov b6=r8
	;;
	cmp.ne p6,p0=0,r8
(p6)	br.call.dpnt.many b6=b6		// call returns to ia64_leave_kernel
	br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)

	.org ia64_ivt+0x4000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4000 Entry 16 (size 64 bundles) Reserved
	DBG_FAULT(16)
	FAULT(16)

	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles) Reserved
	DBG_FAULT(17)
	FAULT(17)

ENTRY(non_syscall)
	mov ar.rsc=r27			// restore ar.rsc before SAVE_MIN_WITH_COVER
	;;
	SAVE_MIN_WITH_COVER

	// There is no particular reason for this code to be here, other than that
	// there happens to be space here that would go unused otherwise.  If this
	// fault ever gets "unreserved", simply move the following code to a more
	// suitable spot...

	alloc r14=ar.pfs,0,0,2,0
	mov out0=cr.iim
	add out1=16,sp
	adds r3=8,r2			// set up second base pointer for SAVE_REST

	ssm psr.ic | PSR_DEFAULT_BITS
	;;
	srlz.i				// guarantee that interruption collection is on
	;;
(p15)	ssm psr.i			// restore psr.i
	movl r15=ia64_leave_kernel
	;;
	SAVE_REST
	mov rp=r15
	;;
	br.call.sptk.many b6=ia64_bad_break	// avoid WAW on CFM and ignore return addr
END(non_syscall)
---|
1469 | |
---|
1470 | .org ia64_ivt+0x4800 |
---|
1471 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1472 | // 0x4800 Entry 18 (size 64 bundles) Reserved |
---|
1473 | DBG_FAULT(18) |
---|
1474 | FAULT(18) |
---|
1475 | |
---|
1476 | /* |
---|
1477 | * There is no particular reason for this code to be here, other than that |
---|
1478 | * there happens to be space here that would go unused otherwise. If this |
---|
1479 | * fault ever gets "unreserved", simply move the following code to a more |
---|
1480 | * suitable spot... |
---|
1481 | */ |
---|
1482 | |
---|
1483 | ENTRY(dispatch_unaligned_handler) |
---|
1484 | SAVE_MIN_WITH_COVER |
---|
1485 | ;; |
---|
1486 | alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!) |
---|
1487 | mov out0=cr.ifa |
---|
1488 | adds out1=16,sp |
---|
1489 | |
---|
1490 | ssm psr.ic | PSR_DEFAULT_BITS |
---|
1491 | ;; |
---|
1492 | srlz.i // guarantee that interruption collection is on |
---|
1493 | ;; |
---|
1494 | (p15) ssm psr.i // restore psr.i |
---|
1495 | adds r3=8,r2 // set up second base pointer |
---|
1496 | ;; |
---|
1497 | SAVE_REST |
---|
1498 | movl r14=ia64_leave_kernel |
---|
1499 | ;; |
---|
1500 | mov rp=r14 |
---|
1501 | br.sptk.many ia64_prepare_handle_unaligned |
---|
1502 | END(dispatch_unaligned_handler) |
---|
1503 | |
---|
1504 | .org ia64_ivt+0x4c00 |
---|
1505 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1506 | // 0x4c00 Entry 19 (size 64 bundles) Reserved |
---|
1507 | DBG_FAULT(19) |
---|
1508 | FAULT(19) |
---|
1509 | |
---|
1510 | /* |
---|
1511 | * There is no particular reason for this code to be here, other than that |
---|
1512 | * there happens to be space here that would go unused otherwise. If this |
---|
1513 | * fault ever gets "unreserved", simply move the following code to a more |
---|
1514 | * suitable spot... |
---|
1515 | */ |
---|
1516 | |
---|
1517 | ENTRY(dispatch_to_fault_handler) |
---|
1518 | /* |
---|
1519 | * Input: |
---|
1520 | * psr.ic: off |
---|
1521 | * r19: fault vector number (e.g., 24 for General Exception) |
---|
1522 | * r31: contains saved predicates (pr) |
---|
1523 | */ |
---|
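/*
 * Under CONFIG_XEN the interruption state below is read from the
 * hypervisor-maintained shadow area rather than from the cr.* registers.
 * Roughly, in C (an illustrative sketch only, not part of the build):
 *
 *	out1 = *(unsigned long *)XSI_ISR;	// cr.isr shadow
 *	out2 = *(unsigned long *)XSI_IFA;	// cr.ifa shadow
 *	out3 = *(unsigned long *)XSI_IIM;	// cr.iim shadow
 *	out4 = *(unsigned long *)XSI_ITIR;	// cr.itir shadow
 */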
1524 | SAVE_MIN_WITH_COVER_R19 |
---|
1525 | alloc r14=ar.pfs,0,0,5,0 |
---|
1526 | mov out0=r15 |
---|
1527 | #ifdef CONFIG_XEN |
---|
1528 | movl out1=XSI_ISR |
---|
1529 | ;; |
---|
1530 | adds out2=XSI_IFA-XSI_ISR,out1 |
---|
1531 | adds out3=XSI_IIM-XSI_ISR,out1 |
---|
1532 | adds out4=XSI_ITIR-XSI_ISR,out1 |
---|
1533 | ;; |
---|
1534 | ld8 out1=[out1] |
---|
1535 | ld8 out2=[out2] |
---|
1536 | ld8 out3=[out3] |
---|
1537 | ld8 out4=[out4] |
---|
1538 | ;; |
---|
1539 | #else |
---|
1540 | mov out1=cr.isr |
---|
1541 | mov out2=cr.ifa |
---|
1542 | mov out3=cr.iim |
---|
1543 | mov out4=cr.itir |
---|
1544 | ;; |
---|
1545 | #endif |
---|
1546 | ssm psr.ic | PSR_DEFAULT_BITS |
---|
1547 | ;; |
---|
1548 | srlz.i // guarantee that interruption collection is on |
---|
1549 | ;; |
---|
1550 | (p15) ssm psr.i // restore psr.i |
---|
1551 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
---|
1552 | ;; |
---|
1553 | SAVE_REST |
---|
1554 | movl r14=ia64_leave_kernel |
---|
1555 | ;; |
---|
1556 | mov rp=r14 |
---|
1557 | br.call.sptk.many b6=ia64_fault |
---|
1558 | END(dispatch_to_fault_handler) |
---|
1559 | |
---|
1560 | // |
---|
1561 | // --- End of long entries, Beginning of short entries |
---|
1562 | // |
---|
1563 | |
---|
1564 | .org ia64_ivt+0x5000 |
---|
1565 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1566 | // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49) |
---|
1567 | ENTRY(page_not_present) |
---|
1568 | DBG_FAULT(20) |
---|
1569 | mov r16=cr.ifa |
---|
1570 | rsm psr.dt |
---|
1571 | /* |
---|
1572 | * The Linux page fault handler doesn't expect non-present pages to be in |
---|
1573 | * the TLB. Flush the existing entry now, so we meet that expectation. |
---|
1574 | */ |
---|
1575 | mov r17=PAGE_SHIFT<<2 |
---|
1576 | ;; |
---|
1577 | ptc.l r16,r17 |
---|
1578 | ;; |
---|
1579 | mov r31=pr |
---|
1580 | srlz.d |
---|
1581 | br.sptk.many page_fault |
---|
1582 | END(page_not_present) |
---|
1583 | |
---|
1584 | .org ia64_ivt+0x5100 |
---|
1585 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1586 | // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52) |
---|
1587 | ENTRY(key_permission) |
---|
1588 | DBG_FAULT(21) |
---|
1589 | mov r16=cr.ifa |
---|
1590 | rsm psr.dt |
---|
1591 | mov r31=pr |
---|
1592 | ;; |
---|
1593 | srlz.d |
---|
1594 | br.sptk.many page_fault |
---|
1595 | END(key_permission) |
---|
1596 | |
---|
1597 | .org ia64_ivt+0x5200 |
---|
1598 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1599 | // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26) |
---|
1600 | ENTRY(iaccess_rights) |
---|
1601 | DBG_FAULT(22) |
---|
1602 | mov r16=cr.ifa |
---|
1603 | rsm psr.dt |
---|
1604 | mov r31=pr |
---|
1605 | ;; |
---|
1606 | srlz.d |
---|
1607 | br.sptk.many page_fault |
---|
1608 | END(iaccess_rights) |
---|
1609 | |
---|
1610 | .org ia64_ivt+0x5300 |
---|
1611 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1612 | // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53) |
---|
1613 | ENTRY(daccess_rights) |
---|
1614 | DBG_FAULT(23) |
---|
1615 | #ifdef CONFIG_XEN |
---|
1616 | movl r16=XSI_IFA |
---|
1617 | ;; |
---|
1618 | ld8 r16=[r16] |
---|
1619 | ;; |
---|
1620 | XEN_HYPER_RSM_PSR_DT |
---|
1621 | #else |
---|
1622 | mov r16=cr.ifa |
---|
1623 | rsm psr.dt |
---|
1624 | #endif |
---|
1625 | mov r31=pr |
---|
1626 | ;; |
---|
1627 | srlz.d |
---|
1628 | br.sptk.many page_fault |
---|
1629 | END(daccess_rights) |
---|
1630 | |
---|
1631 | .org ia64_ivt+0x5400 |
---|
1632 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1633 | // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39) |
---|
1634 | ENTRY(general_exception) |
---|
1635 | DBG_FAULT(24) |
---|
1636 | mov r16=cr.isr |
---|
1637 | mov r31=pr |
---|
1638 | ;; |
---|
1639 | cmp4.eq p6,p0=0,r16 |
---|
1640 | (p6) br.sptk.many dispatch_illegal_op_fault |
---|
1641 | ;; |
---|
1642 | mov r19=24 // fault number |
---|
1643 | br.sptk.many dispatch_to_fault_handler |
---|
1644 | END(general_exception) |
---|
1645 | |
---|
1646 | .org ia64_ivt+0x5500 |
---|
1647 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1648 | // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35) |
---|
1649 | ENTRY(disabled_fp_reg) |
---|
1650 | DBG_FAULT(25) |
---|
1651 | rsm psr.dfh // ensure we can access fph |
---|
1652 | ;; |
---|
1653 | srlz.d |
---|
1654 | mov r31=pr |
---|
1655 | mov r19=25 |
---|
1656 | br.sptk.many dispatch_to_fault_handler |
---|
1657 | END(disabled_fp_reg) |
---|
1658 | |
---|
1659 | .org ia64_ivt+0x5600 |
---|
1660 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1661 | // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50) |
---|
1662 | ENTRY(nat_consumption) |
---|
1663 | DBG_FAULT(26) |
---|
1664 | |
---|
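	/*
	 * In outline (illustrative C only): a NaT consumption raised by an
	 * lfetch is harmless, so it is skipped by setting psr.ed; anything
	 * else is a genuine fault.
	 *
	 *	if (isr.na && isr.code == IA64_ISR_CODE_LFETCH) {
	 *		ipsr.ed = 1;	// faulting lfetch becomes a no-op
	 *		rfi();		// and retry
	 *	} else
	 *		FAULT(26);
	 */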
1665 | mov r16=cr.ipsr |
---|
1666 | mov r17=cr.isr |
---|
1667 | mov r31=pr // save PR |
---|
1668 | ;; |
---|
1669 | and r18=0xf,r17 // r18 = cr.isr.code{3:0} |
---|
1670 | tbit.z p6,p0=r17,IA64_ISR_NA_BIT |
---|
1671 | ;; |
---|
1672 | cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18 |
---|
1673 | dep r16=-1,r16,IA64_PSR_ED_BIT,1 |
---|
1674 | (p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH) |
---|
1675 | ;; |
---|
1676 | mov cr.ipsr=r16 // set cr.ipsr.ed |
---|
1677 | mov pr=r31,-1 |
---|
1678 | ;; |
---|
1679 | rfi |
---|
1680 | |
---|
1681 | 1: mov pr=r31,-1 |
---|
1682 | ;; |
---|
1683 | FAULT(26) |
---|
1684 | END(nat_consumption) |
---|
1685 | |
---|
1686 | .org ia64_ivt+0x5700 |
---|
1687 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1688 | // 0x5700 Entry 27 (size 16 bundles) Speculation (40) |
---|
1689 | ENTRY(speculation_vector) |
---|
1690 | DBG_FAULT(27) |
---|
1691 | /* |
---|
1692 | * A [f]chk.[as] instruction needs to take the branch to the recovery code but |
---|
1693 | * this part of the architecture is not implemented in hardware on some CPUs, such |
---|
1694 | * as Itanium. Thus, in general we need to emulate the behavior. IIM contains |
---|
1695 | * the relative target (not yet sign extended). So after sign extending it we |
---|
1696 | * simply add it to IIP. We also need to reset the EI field of the IPSR to zero, |
---|
1697 | * i.e., the slot to restart into. |
---|
1698 | * |
---|
1699 | * cr.iim contains zero_ext(imm21) |
---|
1700 | */ |
---|
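	/*
	 * In C terms the fix-up below is (illustrative only; iim, iip and
	 * ipsr stand for the values read from the corresponding cr registers):
	 *
	 *	long off = ((long)(iim << 43)) >> 39;	// sign_ext(imm21) * 16
	 *	iip += off;				// bundle-aligned target
	 *	ipsr.ri = 0;				// restart at slot 0 (the "EI" field)
	 */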
1701 | mov r18=cr.iim |
---|
1702 | ;; |
---|
1703 | mov r17=cr.iip |
---|
1704 | shl r18=r18,43 // put sign bit in position (43=64-21) |
---|
1705 | ;; |
---|
1706 | |
---|
1707 | mov r16=cr.ipsr |
---|
1708 | shr r18=r18,39 // sign extend (39=43-4) |
---|
1709 | ;; |
---|
1710 | |
---|
1711 | add r17=r17,r18 // now add the offset |
---|
1712 | ;; |
---|
1713 | mov cr.iip=r17 |
---|
1714 | dep r16=0,r16,41,2 // clear EI |
---|
1715 | ;; |
---|
1716 | |
---|
1717 | mov cr.ipsr=r16 |
---|
1718 | ;; |
---|
1719 | |
---|
1720 | #ifdef CONFIG_XEN |
---|
1721 | XEN_HYPER_RFI; |
---|
1722 | #else |
---|
1723 | rfi // and go back |
---|
1724 | #endif |
---|
1725 | END(speculation_vector) |
---|
1726 | |
---|
1727 | .org ia64_ivt+0x5800 |
---|
1728 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1729 | // 0x5800 Entry 28 (size 16 bundles) Reserved |
---|
1730 | DBG_FAULT(28) |
---|
1731 | FAULT(28) |
---|
1732 | |
---|
1733 | .org ia64_ivt+0x5900 |
---|
1734 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1735 | // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56) |
---|
1736 | ENTRY(debug_vector) |
---|
1737 | DBG_FAULT(29) |
---|
1738 | FAULT(29) |
---|
1739 | END(debug_vector) |
---|
1740 | |
---|
1741 | .org ia64_ivt+0x5a00 |
---|
1742 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1743 | // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57) |
---|
1744 | ENTRY(unaligned_access) |
---|
1745 | DBG_FAULT(30) |
---|
1746 | mov r31=pr // prepare to save predicates |
---|
1747 | ;; |
---|
1748 | br.sptk.many dispatch_unaligned_handler |
---|
1749 | END(unaligned_access) |
---|
1750 | |
---|
1751 | .org ia64_ivt+0x5b00 |
---|
1752 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1753 | // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57) |
---|
1754 | ENTRY(unsupported_data_reference) |
---|
1755 | DBG_FAULT(31) |
---|
1756 | FAULT(31) |
---|
1757 | END(unsupported_data_reference) |
---|
1758 | |
---|
1759 | .org ia64_ivt+0x5c00 |
---|
1760 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1761 | // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64) |
---|
1762 | ENTRY(floating_point_fault) |
---|
1763 | DBG_FAULT(32) |
---|
1764 | FAULT(32) |
---|
1765 | END(floating_point_fault) |
---|
1766 | |
---|
1767 | .org ia64_ivt+0x5d00 |
---|
1768 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1769 | // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66) |
---|
1770 | ENTRY(floating_point_trap) |
---|
1771 | DBG_FAULT(33) |
---|
1772 | FAULT(33) |
---|
1773 | END(floating_point_trap) |
---|
1774 | |
---|
1775 | .org ia64_ivt+0x5e00 |
---|
1776 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1777 | // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66) |
---|
1778 | ENTRY(lower_privilege_trap) |
---|
1779 | DBG_FAULT(34) |
---|
1780 | FAULT(34) |
---|
1781 | END(lower_privilege_trap) |
---|
1782 | |
---|
1783 | .org ia64_ivt+0x5f00 |
---|
1784 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1785 | // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68) |
---|
1786 | ENTRY(taken_branch_trap) |
---|
1787 | DBG_FAULT(35) |
---|
1788 | FAULT(35) |
---|
1789 | END(taken_branch_trap) |
---|
1790 | |
---|
1791 | .org ia64_ivt+0x6000 |
---|
1792 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1793 | // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69) |
---|
1794 | ENTRY(single_step_trap) |
---|
1795 | DBG_FAULT(36) |
---|
1796 | FAULT(36) |
---|
1797 | END(single_step_trap) |
---|
1798 | |
---|
1799 | .org ia64_ivt+0x6100 |
---|
1800 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1801 | // 0x6100 Entry 37 (size 16 bundles) Reserved |
---|
1802 | DBG_FAULT(37) |
---|
1803 | FAULT(37) |
---|
1804 | |
---|
1805 | .org ia64_ivt+0x6200 |
---|
1806 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1807 | // 0x6200 Entry 38 (size 16 bundles) Reserved |
---|
1808 | DBG_FAULT(38) |
---|
1809 | FAULT(38) |
---|
1810 | |
---|
1811 | .org ia64_ivt+0x6300 |
---|
1812 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1813 | // 0x6300 Entry 39 (size 16 bundles) Reserved |
---|
1814 | DBG_FAULT(39) |
---|
1815 | FAULT(39) |
---|
1816 | |
---|
1817 | .org ia64_ivt+0x6400 |
---|
1818 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1819 | // 0x6400 Entry 40 (size 16 bundles) Reserved |
---|
1820 | DBG_FAULT(40) |
---|
1821 | FAULT(40) |
---|
1822 | |
---|
1823 | .org ia64_ivt+0x6500 |
---|
1824 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1825 | // 0x6500 Entry 41 (size 16 bundles) Reserved |
---|
1826 | DBG_FAULT(41) |
---|
1827 | FAULT(41) |
---|
1828 | |
---|
1829 | .org ia64_ivt+0x6600 |
---|
1830 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1831 | // 0x6600 Entry 42 (size 16 bundles) Reserved |
---|
1832 | DBG_FAULT(42) |
---|
1833 | FAULT(42) |
---|
1834 | |
---|
1835 | .org ia64_ivt+0x6700 |
---|
1836 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1837 | // 0x6700 Entry 43 (size 16 bundles) Reserved |
---|
1838 | DBG_FAULT(43) |
---|
1839 | FAULT(43) |
---|
1840 | |
---|
1841 | .org ia64_ivt+0x6800 |
---|
1842 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1843 | // 0x6800 Entry 44 (size 16 bundles) Reserved |
---|
1844 | DBG_FAULT(44) |
---|
1845 | FAULT(44) |
---|
1846 | |
---|
1847 | .org ia64_ivt+0x6900 |
---|
1848 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1849 | // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77) |
---|
1850 | ENTRY(ia32_exception) |
---|
1851 | DBG_FAULT(45) |
---|
1852 | FAULT(45) |
---|
1853 | END(ia32_exception) |
---|
1854 | |
---|
1855 | .org ia64_ivt+0x6a00 |
---|
1856 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1857 | // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) |
---|
1858 | ENTRY(ia32_intercept) |
---|
1859 | DBG_FAULT(46) |
---|
1860 | #ifdef CONFIG_IA32_SUPPORT |
---|
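	/*
	 * Sketch of the check below (illustrative C only): a system-flag
	 * intercept (ISR.code == 2) whose only visible effect was toggling
	 * eflags.ac is simply resumed; everything else is a real fault.
	 *
	 *	if (isr.code == 2 && ((ar_eflag ^ old_eflag) & (1UL << 18)))
	 *		rfi();		// only AC changed: resume
	 *	else
	 *		FAULT(46);
	 */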
1861 | mov r31=pr |
---|
1862 | mov r16=cr.isr |
---|
1863 | ;; |
---|
1864 | extr.u r17=r16,16,8 // get ISR.code |
---|
1865 | mov r18=ar.eflag |
---|
1866 | mov r19=cr.iim // old eflag value |
---|
1867 | ;; |
---|
1868 | cmp.ne p6,p0=2,r17 |
---|
1869 | (p6) br.cond.spnt 1f // not a system flag fault |
---|
1870 | xor r16=r18,r19 |
---|
1871 | ;; |
---|
1872 | extr.u r17=r16,18,1 // get the eflags.ac bit |
---|
1873 | ;; |
---|
1874 | cmp.eq p6,p0=0,r17 |
---|
1875 | (p6) br.cond.spnt 1f // eflags.ac bit didn't change |
---|
1876 | ;; |
---|
1877 | mov pr=r31,-1 // restore predicate registers |
---|
1878 | #ifdef CONFIG_XEN |
---|
1879 | XEN_HYPER_RFI; |
---|
1880 | #else |
---|
1881 | rfi |
---|
1882 | #endif |
---|
1883 | |
---|
1884 | 1: |
---|
1885 | #endif // CONFIG_IA32_SUPPORT |
---|
1886 | FAULT(46) |
---|
1887 | END(ia32_intercept) |
---|
1888 | |
---|
1889 | .org ia64_ivt+0x6b00 |
---|
1890 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1891 | // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) |
---|
1892 | ENTRY(ia32_interrupt) |
---|
1893 | DBG_FAULT(47) |
---|
1894 | #ifdef CONFIG_IA32_SUPPORT |
---|
1895 | mov r31=pr |
---|
1896 | br.sptk.many dispatch_to_ia32_handler |
---|
1897 | #else |
---|
1898 | FAULT(47) |
---|
1899 | #endif |
---|
1900 | END(ia32_interrupt) |
---|
1901 | |
---|
1902 | .org ia64_ivt+0x6c00 |
---|
1903 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1904 | // 0x6c00 Entry 48 (size 16 bundles) Reserved |
---|
1905 | DBG_FAULT(48) |
---|
1906 | FAULT(48) |
---|
1907 | |
---|
1908 | .org ia64_ivt+0x6d00 |
---|
1909 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1910 | // 0x6d00 Entry 49 (size 16 bundles) Reserved |
---|
1911 | DBG_FAULT(49) |
---|
1912 | FAULT(49) |
---|
1913 | |
---|
1914 | .org ia64_ivt+0x6e00 |
---|
1915 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1916 | // 0x6e00 Entry 50 (size 16 bundles) Reserved |
---|
1917 | DBG_FAULT(50) |
---|
1918 | FAULT(50) |
---|
1919 | |
---|
1920 | .org ia64_ivt+0x6f00 |
---|
1921 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1922 | // 0x6f00 Entry 51 (size 16 bundles) Reserved |
---|
1923 | DBG_FAULT(51) |
---|
1924 | FAULT(51) |
---|
1925 | |
---|
1926 | .org ia64_ivt+0x7000 |
---|
1927 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1928 | // 0x7000 Entry 52 (size 16 bundles) Reserved |
---|
1929 | DBG_FAULT(52) |
---|
1930 | FAULT(52) |
---|
1931 | |
---|
1932 | .org ia64_ivt+0x7100 |
---|
1933 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1934 | // 0x7100 Entry 53 (size 16 bundles) Reserved |
---|
1935 | DBG_FAULT(53) |
---|
1936 | FAULT(53) |
---|
1937 | |
---|
1938 | .org ia64_ivt+0x7200 |
---|
1939 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1940 | // 0x7200 Entry 54 (size 16 bundles) Reserved |
---|
1941 | DBG_FAULT(54) |
---|
1942 | FAULT(54) |
---|
1943 | |
---|
1944 | .org ia64_ivt+0x7300 |
---|
1945 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1946 | // 0x7300 Entry 55 (size 16 bundles) Reserved |
---|
1947 | DBG_FAULT(55) |
---|
1948 | FAULT(55) |
---|
1949 | |
---|
1950 | .org ia64_ivt+0x7400 |
---|
1951 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1952 | // 0x7400 Entry 56 (size 16 bundles) Reserved |
---|
1953 | DBG_FAULT(56) |
---|
1954 | FAULT(56) |
---|
1955 | |
---|
1956 | .org ia64_ivt+0x7500 |
---|
1957 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1958 | // 0x7500 Entry 57 (size 16 bundles) Reserved |
---|
1959 | DBG_FAULT(57) |
---|
1960 | FAULT(57) |
---|
1961 | |
---|
1962 | .org ia64_ivt+0x7600 |
---|
1963 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1964 | // 0x7600 Entry 58 (size 16 bundles) Reserved |
---|
1965 | DBG_FAULT(58) |
---|
1966 | FAULT(58) |
---|
1967 | |
---|
1968 | .org ia64_ivt+0x7700 |
---|
1969 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1970 | // 0x7700 Entry 59 (size 16 bundles) Reserved |
---|
1971 | DBG_FAULT(59) |
---|
1972 | FAULT(59) |
---|
1973 | |
---|
1974 | .org ia64_ivt+0x7800 |
---|
1975 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1976 | // 0x7800 Entry 60 (size 16 bundles) Reserved |
---|
1977 | DBG_FAULT(60) |
---|
1978 | FAULT(60) |
---|
1979 | |
---|
1980 | .org ia64_ivt+0x7900 |
---|
1981 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1982 | // 0x7900 Entry 61 (size 16 bundles) Reserved |
---|
1983 | DBG_FAULT(61) |
---|
1984 | FAULT(61) |
---|
1985 | |
---|
1986 | .org ia64_ivt+0x7a00 |
---|
1987 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1988 | // 0x7a00 Entry 62 (size 16 bundles) Reserved |
---|
1989 | DBG_FAULT(62) |
---|
1990 | FAULT(62) |
---|
1991 | |
---|
1992 | .org ia64_ivt+0x7b00 |
---|
1993 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
1994 | // 0x7b00 Entry 63 (size 16 bundles) Reserved |
---|
1995 | DBG_FAULT(63) |
---|
1996 | FAULT(63) |
---|
1997 | |
---|
1998 | .org ia64_ivt+0x7c00 |
---|
1999 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
2000 | // 0x7c00 Entry 64 (size 16 bundles) Reserved |
---|
2001 | DBG_FAULT(64) |
---|
2002 | FAULT(64) |
---|
2003 | |
---|
2004 | .org ia64_ivt+0x7d00 |
---|
2005 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
2006 | // 0x7d00 Entry 65 (size 16 bundles) Reserved |
---|
2007 | DBG_FAULT(65) |
---|
2008 | FAULT(65) |
---|
2009 | |
---|
2010 | .org ia64_ivt+0x7e00 |
---|
2011 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
2012 | // 0x7e00 Entry 66 (size 16 bundles) Reserved |
---|
2013 | DBG_FAULT(66) |
---|
2014 | FAULT(66) |
---|
2015 | |
---|
2016 | .org ia64_ivt+0x7f00 |
---|
2017 | ///////////////////////////////////////////////////////////////////////////////////////// |
---|
2018 | // 0x7f00 Entry 67 (size 16 bundles) Reserved |
---|
2019 | DBG_FAULT(67) |
---|
2020 | FAULT(67) |
---|
2021 | |
---|
2022 | #ifdef CONFIG_IA32_SUPPORT |
---|
2023 | |
---|
2024 | /* |
---|
2025 | * There is no particular reason for this code to be here, other than that |
---|
2026 | * there happens to be space here that would go unused otherwise. If this |
---|
2027 | * fault ever gets "unreserved", simply move the following code to a more |
---|
2028 | * suitable spot... |
---|
2029 | */ |
---|
2030 | |
---|
2031 | // IA32 interrupt entry point |
---|
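// In C terms the argument marshalling below is roughly (illustrative only):
//
//	out0 = regs->r11;	/* ebx */	out1 = regs->r9;	/* ecx */
//	out2 = regs->r10;	/* edx */	out3 = regs->r14;	/* esi */
//	out4 = regs->r15;	/* edi */	out5 = regs->r13;	/* ebp */
//	eax  = regs->r8;	/* syscall number, bounds-checked against  */
//				/* IA32_NR_syscalls; invalid numbers fall  */
//				/* back to table entry 0 (ni_syscall)      */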
2032 | |
---|
2033 | ENTRY(dispatch_to_ia32_handler) |
---|
2034 | SAVE_MIN |
---|
2035 | ;; |
---|
2036 | mov r14=cr.isr |
---|
2037 | ssm psr.ic | PSR_DEFAULT_BITS |
---|
2038 | ;; |
---|
2039 | srlz.i // guarantee that interruption collection is on |
---|
2040 | ;; |
---|
2041 | (p15) ssm psr.i |
---|
2042 | adds r3=8,r2 // Base pointer for SAVE_REST |
---|
2043 | ;; |
---|
2044 | SAVE_REST |
---|
2045 | ;; |
---|
2046 | mov r15=0x80 |
---|
2047 | shr r14=r14,16 // Get interrupt number |
---|
2048 | ;; |
---|
2049 | cmp.ne p6,p0=r14,r15 |
---|
2050 | (p6) br.call.dpnt.many b6=non_ia32_syscall |
---|
2051 | |
---|
2052 | adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions |
---|
2053 | adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp |
---|
2054 | ;; |
---|
2055 | cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0 |
---|
2056 | ld8 r8=[r14] // get r8 |
---|
2057 | ;; |
---|
2058 | st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP) |
---|
2059 | ;; |
---|
2060 | alloc r15=ar.pfs,0,0,6,0 // must be first in an insn group |
---|
2061 | ;; |
---|
2062 | ld4 r8=[r14],8 // r8 == eax (syscall number) |
---|
2063 | mov r15=IA32_NR_syscalls |
---|
2064 | ;; |
---|
2065 | cmp.ltu.unc p6,p7=r8,r15 |
---|
2066 | ld4 out1=[r14],8 // r9 == ecx |
---|
2067 | ;; |
---|
2068 | ld4 out2=[r14],8 // r10 == edx |
---|
2069 | ;; |
---|
2070 | ld4 out0=[r14] // r11 == ebx |
---|
2071 | adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp |
---|
2072 | ;; |
---|
2073 | ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp |
---|
2074 | ;; |
---|
2075 | ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi |
---|
2076 | adds r2=TI_FLAGS+IA64_TASK_SIZE,r13 |
---|
2077 | ;; |
---|
2078 | ld4 out4=[r14] // r15 == edi |
---|
2079 | movl r16=ia32_syscall_table |
---|
2080 | ;; |
---|
2081 | (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number |
---|
2082 | ld4 r2=[r2] // r2 = current_thread_info()->flags |
---|
2083 | ;; |
---|
2084 | ld8 r16=[r16] |
---|
2085 | and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit |
---|
2086 | ;; |
---|
2087 | mov b6=r16 |
---|
2088 | movl r15=ia32_ret_from_syscall |
---|
2089 | cmp.eq p8,p0=r2,r0 |
---|
2090 | ;; |
---|
2091 | mov rp=r15 |
---|
2092 | (p8) br.call.sptk.many b6=b6 |
---|
2093 | br.cond.sptk ia32_trace_syscall |
---|
2094 | |
---|
2095 | non_ia32_syscall: |
---|
2096 | alloc r15=ar.pfs,0,0,2,0 |
---|
2097 | mov out0=r14 // interrupt # |
---|
2098 | add out1=16,sp // pointer to pt_regs |
---|
2099 | ;; // avoid WAW on CFM |
---|
2100 | br.call.sptk.many rp=ia32_bad_interrupt |
---|
2101 | .ret1: movl r15=ia64_leave_kernel |
---|
2102 | ;; |
---|
2103 | mov rp=r15 |
---|
2104 | br.ret.sptk.many rp |
---|
2105 | END(dispatch_to_ia32_handler) |
---|
2106 | #endif /* CONFIG_IA32_SUPPORT */ |
---|
2107 | |
---|
2108 | #ifdef CONFIG_XEN |
---|
2109 | .section .text,"ax" |
---|
2110 | GLOBAL_ENTRY(xen_event_callback) |
---|
2111 | mov r31=pr // prepare to save predicates |
---|
2112 | ;; |
---|
2113 | SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3 |
---|
2114 | ;; |
---|
2115 | movl r3=XSI_PSR_IC |
---|
2116 | mov r14=1 |
---|
2117 | ;; |
---|
2118 | st4 [r3]=r14 |
---|
2119 | ;; |
---|
2120 | adds r3=8,r2 // set up second base pointer for SAVE_REST |
---|
2121 | srlz.i // ensure everybody knows psr.ic is back on |
---|
2122 | ;; |
---|
2123 | SAVE_REST |
---|
2124 | ;; |
---|
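	/*
	 * The loop starting at 1: below is, in effect (illustrative C):
	 *
	 *	do
	 *		evtchn_do_upcall(regs);
	 *	while (vcpu_info->evtchn_upcall_pending);
	 */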
2125 | 1: |
---|
2126 | alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group |
---|
2127 | add out0=16,sp // pass pointer to pt_regs as first arg |
---|
2128 | ;; |
---|
2129 | br.call.sptk.many b0=evtchn_do_upcall |
---|
2130 | ;; |
---|
2131 | movl r20=XSI_PSR_I_ADDR |
---|
2132 | ;; |
---|
2133 | ld8 r20=[r20] |
---|
2134 | ;; |
---|
2135 | adds r20=-1,r20 // vcpu_info->evtchn_upcall_pending |
---|
2136 | ;; |
---|
2137 | ld1 r20=[r20] |
---|
2138 | ;; |
---|
2139 | cmp.ne p6,p0=r20,r0 // if there are pending events, |
---|
2140 | (p6) br.spnt.few 1b // call evtchn_do_upcall again. |
---|
2141 | br.sptk.many ia64_leave_kernel |
---|
2142 | END(xen_event_callback) |
---|
2143 | |
---|
2144 | |
---|
2145 | /* |
---|
2146 | * There is no particular reason for this code to be here, other than that |
---|
2147 | * there happens to be space here that would go unused otherwise. If this |
---|
2148 | * fault ever gets "unreserved", simply move the following code to a more |
---|
2149 | * suitable spot... |
---|
2150 | */ |
---|
2151 | |
---|
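/*
 * xen_bsw1: switch to register bank 1 and refill r16-r31 from the
 * hypervisor-maintained XSI_BANK1_R16 save area (NaT bits come from
 * XSI_B1NAT), preserving ar.unat around the ld8.fill sequence.
 */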
2152 | GLOBAL_ENTRY(xen_bsw1) |
---|
2153 | /* FIXME: THIS CODE IS NOT NaT SAFE! */ |
---|
2154 | mov r14=ar.unat |
---|
2155 | movl r30=XSI_B1NAT |
---|
2156 | ;; |
---|
2157 | ld8 r30=[r30];; |
---|
2158 | mov ar.unat=r30 |
---|
2159 | movl r30=XSI_BANKNUM; |
---|
2160 | mov r31=1;; |
---|
2161 | st4 [r30]=r31; |
---|
2162 | movl r30=XSI_BANK1_R16; |
---|
2163 | movl r31=XSI_BANK1_R16+8;; |
---|
2164 | ld8.fill r16=[r30],16; ld8.fill r17=[r31],16;; |
---|
2165 | ld8.fill r18=[r30],16; ld8.fill r19=[r31],16;; |
---|
2166 | ld8.fill r20=[r30],16; ld8.fill r21=[r31],16;; |
---|
2167 | ld8.fill r22=[r30],16; ld8.fill r23=[r31],16;; |
---|
2168 | ld8.fill r24=[r30],16; ld8.fill r25=[r31],16;; |
---|
2169 | ld8.fill r26=[r30],16; ld8.fill r27=[r31],16;; |
---|
2170 | ld8.fill r28=[r30],16; ld8.fill r29=[r31],16;; |
---|
2171 | ld8.fill r30=[r30]; ld8.fill r31=[r31];; |
---|
2172 | mov ar.unat=r14 |
---|
2173 | br.ret.sptk.many b0 |
---|
2174 | END(xen_bsw1) |
---|
2175 | |
---|
2176 | |
---|
2177 | #endif |
---|