/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) IBM Corp. 2005, 2006
 *
 * Authors: Jimi Xenidis <jimix@watson.ibm.com>
 *          Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/config.h>
#include <asm/asm-offsets.h>
#include <asm/reg_defs.h>
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/percpu.h>

.macro SAVE_GPR regno uregs
std \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro SAVE_GPRS from to uregs
.ifge \to-\from
SAVE_GPR \from, \uregs
SAVE_GPRS "(\from+1)", \to, \uregs
.endif
.endm

.macro LOAD_GPR regno uregs
ld \regno, (UREGS_gprs + GPR_WIDTH * \regno)(\uregs)
.endm

.macro LOAD_GPRS from to uregs
.ifge \to-\from
LOAD_GPR \from, \uregs
LOAD_GPRS "(\from+1)", \to, \uregs
.endif
.endm
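
/* For illustration: these macros recurse at assembly time. Assuming
 * reg_defs.h defines the rN names as plain register numbers (which the
 * GPR_WIDTH * \regno arithmetic already requires), "SAVE_GPRS r2, r4, r1"
 * unrolls to:
 *
 *   std r2, (UREGS_gprs + GPR_WIDTH * 2)(r1)
 *   std r3, (UREGS_gprs + GPR_WIDTH * 3)(r1)
 *   std r4, (UREGS_gprs + GPR_WIDTH * 4)(r1)
 *
 * The quoted "(\from+1)" argument is re-evaluated by .ifge on each
 * recursion, which stops once from exceeds to. */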

.macro GET_STACK parea srr1
/* get processor area pointer and save off a couple registers there */
mtspr SPRN_HSPRG1, \parea
mfspr \parea, SPRN_HSPRG0
std r1, PAREA_r1(\parea)
mfcr r1
std r1, PAREA_cr(\parea)
mfspr r1, \srr1
rldicl. r1, r1, 4, 63 /* test (H)SRR1:HV */
/* assume we interrupted the guest, in which case we start at the top of this
 * processor's hypervisor stack (as found in parea). */
ld r1, PAREA_stack(\parea)
beq 1f
/* nope, we interrupted the hypervisor. continue on that stack. */
ld r1, PAREA_r1(\parea)
1:
.endm
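
/* For illustration: GET_STACK is invoked as "GET_STACK r13 SPRN_xSRR1".
 * The interrupted context's r13 is parked in HSPRG1 (EXCEPTION_HEAD later
 * moves it into the stack frame), while HSPRG0 is assumed to hold this
 * processor's parea pointer at all times. The rldicl. rotates the saved
 * (H)SRR1 left by 4 and masks everything but the low bit, leaving MSR:HV
 * (bit 3): zero means we interrupted the guest, non-zero means Xen. */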

/* SAVE_C_STATE: set up enough state to jump to C code
 * r14-r31 are non-volatile in the C ABI, so not saved here
 */
.macro SAVE_C_STATE uregs
SAVE_GPRS r2, r12, \uregs /* save r2-r12 */

mflr r0
std r0, UREGS_lr(\uregs) /* save LR */
mfxer r0
std r0, UREGS_xer(\uregs) /* save XER */
.endm

.macro LOAD_C_STATE uregs
ld r0, UREGS_lr(\uregs) /* load LR */
mtlr r0
ld r0, UREGS_xer(\uregs) /* load XER */
mtxer r0
lwz r0, UREGS_cr(\uregs) /* load CR */
mtcr r0

LOAD_GPRS r2, r12, \uregs /* load r2-r12 */
.endm

.macro LOADADDR reg symbol
lis \reg,\symbol@highest
ori \reg,\reg,\symbol@higher
rldicr \reg,\reg,32,31
oris \reg,\reg,\symbol@h
ori \reg,\reg,\symbol@l
.endm
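
/* For illustration: LOADADDR materializes a full 64-bit absolute address
 * in five instructions. For a hypothetical symbol at 0x0000112233445566:
 *
 *   lis    reg, 0x0000       # @highest: bits 63-48
 *   ori    reg, reg, 0x1122  # @higher:  bits 47-32
 *   rldicr reg, reg, 32, 31  # shift the upper half into place
 *   oris   reg, reg, 0x3344  # @h:       bits 31-16
 *   ori    reg, reg, 0x5566  # @l:       bits 15-0
 */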

.macro CALL_CFUNC reg
ld r2, 8(\reg) /* load function's TOC value */
ld \reg, 0(\reg)
mtctr \reg
bctrl
nop
.endm
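
/* For illustration: under the 64-bit PowerPC ELF ABI a C function symbol
 * names a descriptor rather than code: the doubleword at offset 0 is the
 * entry point and the one at offset 8 is the function's TOC pointer, so
 * CALL_CFUNC loads r2 from the descriptor before branching. Typical use:
 *
 *   LOADADDR r12, do_hcall  # r12 = address of do_hcall's descriptor
 *   CALL_CFUNC r12          # set r2, then branch-and-link to the entry
 */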

.macro EXCEPTION_HEAD parea continue
/* make room for cpu_user_regs */
subi r1, r1, STACK_VOLATILE_AREA + UREGS_sizeof

/* get all we need from the processor_area */
std r0, UREGS_r0(r1) /* save r0 so we can use it as scratch */
ld r0, PAREA_r1(\parea)
std r0, UREGS_r1(r1) /* save R1 */
ld r0, PAREA_cr(\parea)
stw r0, UREGS_cr(r1) /* save CR */
mfspr r0, SPRN_HSPRG1
std r0, UREGS_r13(r1) /* save R13 from HSPRG1 */

/* Only _one_ larx reservation is allowed at a time. Any further larx is
 * rejected until the earlier one (if any) completes. Since we may have
 * interrupted a larx in the domain, or in Xen, we need to clear any
 * reservation that may currently exist. We could probably skip this
 * for hcalls. */
ldx r0, 0, r1
stdcx. r0, 0, r1

/* save CTR and use it to jump */
mfctr r0
std r0, UREGS_ctr(r1)
LOADADDR r0, \continue
mtctr r0
.endm
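
/* For illustration: the ldx/stdcx. pair works because a stdcx. always
 * clears the reservation, whether or not its store succeeds. Loading the
 * target doubleword first makes the store harmless; if the stdcx. does
 * go through, it simply rewrites the same data. */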

/* For normal exceptions. */
.macro EXCEPTION_SAVE_STATE uregs
SAVE_C_STATE \uregs

/* save DEC */
mfdec r0
ld r3, PAREA_vcpu(r13)
stw r0, VCPU_dec(r3)

/* save PC, MSR */
mfspr r0, SPRN_SRR0
std r0, UREGS_pc(\uregs)
mfspr r0, SPRN_SRR1
std r0, UREGS_msr(\uregs)
li r0, -1 /* we clobbered the OS's SRR0/SRR1 to get here. */
std r0, UREGS_srr0(\uregs)
std r0, UREGS_srr1(\uregs)

/* done with processor_area; re-enable MSR:RI */
mfmsr r0
ori r0, r0, MSR_RI@l
mtmsrd r0
.endm

/* For exceptions that use HSRR0/1 (preserving the OS's SRR0/1). */
.macro H_EXCEPTION_SAVE_STATE uregs
SAVE_C_STATE \uregs

/* save DEC */
mfdec r0
ld r3, PAREA_vcpu(r13)
stw r0, VCPU_dec(r3)

/* save PC, MSR */
mfspr r0, SPRN_HSRR0
std r0, UREGS_pc(\uregs)
mfspr r0, SPRN_HSRR1
std r0, UREGS_msr(\uregs)
mfspr r0, SPRN_SRR0
std r0, UREGS_srr0(\uregs)
mfspr r0, SPRN_SRR1
std r0, UREGS_srr1(\uregs)

/* done with processor_area; re-enable MSR:RI */
mfmsr r0
ori r0, r0, MSR_RI@l
mtmsrd r0
.endm
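
/* Note on MSR:RI in the two macros above: hardware clears RI at interrupt
 * entry, and an exception taken while RI is still clear would overwrite
 * (H)SRR0/(H)SRR1 before we had saved them. Re-enabling RI only after the
 * PC/MSR/SRR values are on the stack marks the state as recoverable. */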

/* Hypervisor exception handling code; copied to physical address zero. */
.align 3
.globl exception_vectors
exception_vectors:

. = 0x0 # wild branch to 0
zero:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x0 /* exception vector for GDB stub */
bctr

/* The following byte array holds any per-CPU state flags that need to be
 * used across interrupts. Currently it is only used to track Cache
 * Inhibited Mode when a Machine Check occurs. */
/* NOTE: This array is indexed by PIR, NOT CPUID */
. = MCK_CPU_STAT_BASE
.space NR_CPUS
. = MCK_GOOD_HID4
.quad 0
. = 0x100 # System Reset
ex_reset:
/* XXX thread initialization */
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x100 /* exception vector for GDB stub */
bctr

. = 0x200 # Machine Check
ex_machcheck:
/* Restore HID4 to a known state early; we do not recover from machine
 * checks yet, but when we do we should be able to restore HID4 to its
 * proper value. */
mtspr SPRN_HSPRG1, r13
ld r13, MCK_GOOD_HID4(0)
sync
mtspr SPRN_HID4, r13
isync
/* Hopefully we don't have to worry about the ERAT */
mfspr r13, SPRN_HSPRG1
/* and now back to our regularly scheduled program */
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_machcheck_continued
li r0, 0x200 /* exception vector for GDB stub */
bctr
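
/* Note: exception_vectors is copied to physical address zero, so the
 * absolute-addressed "ld r13, MCK_GOOD_HID4(0)" above reads the .quad
 * stored at offset MCK_GOOD_HID4 within that copied region. */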

. = 0x300
ex_dsi:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x300 /* exception vector for GDB stub */
bctr

. = 0x380
ex_data_slb:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x380 /* exception vector for GDB stub */
bctr

. = 0x400
ex_isi:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x400 /* exception vector for GDB stub */
bctr

. = 0x480
ex_inst_slb:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x480 /* exception vector for GDB stub */
bctr

. = 0x500
ex_external:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_external_continued
bctr

. = 0x600
ex_alignment:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x600 /* exception vector for GDB stub */
bctr

. = 0x700
ex_program:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x700 /* exception vector for GDB stub */
bctr

. = 0x800
ex_float:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0x800 /* exception vector for GDB stub */
bctr

. = 0x900
ex_dec:
/* delivered to hypervisor when MSR:EE is set... */
#ifdef SLOW_TRAP
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_dec_continued
bctr
#else
/* XXX for now just reset DEC and return */
mtspr SPRN_HSPRG1, r3
lis r3, 0x7fff
mtdec r3
mfspr r3, SPRN_HSPRG1
rfid
#endif
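
/* For illustration: the non-SLOW_TRAP path above never leaves the vector;
 * it parks r3 in HSPRG1, rewinds the decrementer to 0x7fff0000 (the value
 * "lis r3, 0x7fff" produces), restores r3 and returns with rfid. */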

. = 0x980
ex_hdec:
GET_STACK r13 SPRN_HSRR1
EXCEPTION_HEAD r13 ex_hdec_continued
bctr

. = 0xc00
ex_syscall:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_hcall_continued
bctr

. = 0xd00
ex_trace:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0xd00 /* exception vector for GDB stub */
bctr

. = 0xe00
ex_fp:
GET_STACK r13 SPRN_SRR1
EXCEPTION_HEAD r13 ex_program_continued
li r0, 0xe00 /* exception vector for GDB stub */
bctr

.align 3
.globl exception_vectors_end

exception_vectors_end:
/* put some stuff here so we see the next symbol */
.long 0xdeadbeef
.long 0xdeadbeef

.macro FAST_RESUME
LOAD_C_STATE r1 /* restore most C volatiles */

ld r0, UREGS_ctr(r1)
mtctr r0

/* clear MSR:RI/EE so we can safely set SRR0/SRR1 */
li r0, 0
mtmsrd r0, 1

ld r0, UREGS_pc(r1)
mtspr SPRN_HSRR0, r0
ld r0, UREGS_msr(r1)
mtspr SPRN_HSRR1, r0

ld r0, UREGS_srr0(r1)
mtspr SPRN_SRR0, r0
ld r0, UREGS_srr1(r1)
mtspr SPRN_SRR1, r0

ld r13, UREGS_r13(r1)
ld r0, UREGS_r0(r1)
ld r1, UREGS_r1(r1)
HRFID
b . /* prevent speculative icache fetch */
.endm
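
/* Note on ordering in FAST_RESUME: "mtmsrd r0, 1" with r0 = 0 updates only
 * MSR:EE and MSR:RI, and both must be clear before HSRR0/HSRR1 and
 * SRR0/SRR1 are loaded, since any interrupt taken in that window would
 * overwrite them. The "b ." after HRFID never executes; it only stops
 * speculative instruction fetch from running past the return. */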

/* Not a whole lot just yet */
ex_machcheck_continued:


/* We enter with the exception number in r0. The EXCEPTION_SAVE_STATE macro
 * clobbers r0 though, so we have to move it around a little bit. Not ideal,
 * but hopefully program exception is not performance-critical... Maybe there's
 * a better way, but this works for now. */
ex_program_continued:
SAVE_GPRS r14, r31, r1 /* save all the non-volatiles */

/* save these for debugging; not needed for restore */
mfspr r14, SPRN_HID4
std r14, UREGS_hid4(r1)
mfdar r14
std r14, UREGS_dar(r1)
mfdsisr r14
stw r14, UREGS_dsisr(r1)

mr r14, r0
EXCEPTION_SAVE_STATE r1
mr r4, r14
LOADADDR r12, program_exception
mr r3, r1 /* pass pointer to cpu_user_regs */
subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
CALL_CFUNC r12

/* reload state and rfid */
addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
LOAD_GPRS r14, r31, r1
FAST_RESUME

ex_external_continued:
EXCEPTION_SAVE_STATE r1
LOADADDR r12, do_external
mr r3, r1 /* pass pointer to cpu_user_regs */
subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
CALL_CFUNC r12

addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
b fast_resume

ex_hcall_continued:
/* We have to save the non-volatiles here in case of a blocking hcall (which
 * will end up in context_switch()). */
SAVE_GPRS r14, r31, r1
EXCEPTION_SAVE_STATE r1
LOADADDR r12, do_hcall
mr r3, r1 /* pass pointer to cpu_user_regs */
subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
CALL_CFUNC r12 /* call hcall handler */

/* test for pending softirqs, and loop until there are no more. */
mfmsr r14
ori r14, r14, MSR_EE
xori r15, r14, MSR_EE

hcall_test_all_events:
mtmsrd r15, 1 /* disable interrupts */
ld r3, PAREA_vcpu(r13)
lwz r3, VCPU_processor(r3)
LOADADDR r4, irq_stat
sldi r3, r3, IRQSTAT_shift
add r4, r3, r4
ld r5, IRQSTAT_pending(r4)
cmpldi r5, 0
beq hcall_out /* no more softirqs; exit loop */

LOADADDR r6, do_softirq
mtmsrd r14, 1 /* enable interrupts */
CALL_CFUNC r6 /* process softirqs */
b hcall_test_all_events /* look for more */

hcall_out:
addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 */
b fast_resume
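
/* For illustration: the softirq loop above precomputes two MSR images,
 * r14 with EE set and r15 with EE clear, so each pass can toggle external
 * interrupts with a single mtmsrd. Interrupts are disabled while
 * IRQSTAT_pending is sampled so no softirq can be raised between the
 * final check and the resume. */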


ex_dec_continued:
EXCEPTION_SAVE_STATE r1
LOADADDR r12, do_dec
mr r3, r1 /* pass pointer to cpu_user_regs */
subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
CALL_CFUNC r12

addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
b fast_resume

ex_hdec_continued:
/* When we get an HDEC, we (almost?) always context_switch, so we need to
 * save the nonvolatiles. */
SAVE_GPRS r14, r31, r1
H_EXCEPTION_SAVE_STATE r1

LOADADDR r12, do_timer
mr r3, r1
subi r1, r1, STACK_FRAME_OVERHEAD /* make a "caller" stack frame */
CALL_CFUNC r12

/* if we are resuming into hypervisor, don't handle softirqs */
ld r10, (UREGS_msr + STACK_FRAME_OVERHEAD)(r1)
rldicl. r11, r10, 4, 63 /* test SRR1:HV */
bne hdec_out

/* test for pending softirqs, and loop until there are no more. */
mfmsr r14
ori r14, r14, MSR_EE
xori r15, r14, MSR_EE
test_all_events:
mtmsrd r15, 1 /* disable interrupts */
ld r3, PAREA_vcpu(r13)
lwz r3, VCPU_processor(r3)
LOADADDR r4, irq_stat
sldi r3, r3, IRQSTAT_shift
add r4, r3, r4
ld r5, IRQSTAT_pending(r4)
cmpldi r5, 0
beq hdec_out /* no more softirqs; exit loop */

LOADADDR r6, do_softirq
mtmsrd r14, 1 /* enable interrupts */
CALL_CFUNC r6 /* process softirqs */
b test_all_events /* look for more */

hdec_out:
addi r1, r1, STACK_FRAME_OVERHEAD /* restore stack to cpu_user_regs */
LOAD_GPRS r14, r15, r1 /* we clobbered r14/r15 in the loop */

/* r1 points to the to-be-restored cpu_user_regs. These could be mid-hypervisor
 * stack (returning into elsewhere in Xen) or at the top of the stack
 * (restoring the domain). */
_GLOBAL(full_resume)
/* disable MSR:EE, since we could have come from do_softirq() */
mfmsr r7
ori r7, r7, MSR_EE
xori r7, r7, MSR_EE
mtmsrd r7, 1

LOAD_GPRS r14, r31, r1 /* restore all non-volatiles */

fast_resume:
ld r10, UREGS_msr(r1)
rldicl. r11, r10, 4, 63 /* test SRR1:HV */
bne 1f /* returning to hypervisor */

/* check for pending irqs */
mr r3, r1
subi r1, r1, STACK_FRAME_OVERHEAD
bl .deliver_ee
addi r1, r1, STACK_FRAME_OVERHEAD

/* if we took a DEC in hypervisor mode, we don't want to reload the DEC
 * until we return to the domain. MSR_EE is clear, so the domain will take
 * any impending DEC. */
ld r3, PAREA_vcpu(r13)
lwz r0, VCPU_dec(r3)
mtdec r0

1:
FAST_RESUME
/* not reached */
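
/* Note: "bl .deliver_ee" branches directly to the function's code entry
 * point (the dot symbol of the 64-bit ELF ABI) instead of going through
 * its descriptor with CALL_CFUNC; that works here because caller and
 * callee share Xen's TOC in r2. */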

/* move all of the below somewhere else */

_GLOBAL(papr_hcall_jump)
mtctr r4
bctr
/* return to caller via LR */

/* XXX don't need to load all the registers */
_GLOBAL(xen_hvcall_jump)
mtctr r4
ld r10, (UREGS_gprs + GPR_WIDTH * 11)(r3)
ld r9, (UREGS_gprs + GPR_WIDTH * 10)(r3)
ld r8, (UREGS_gprs + GPR_WIDTH * 9)(r3)
ld r7, (UREGS_gprs + GPR_WIDTH * 8)(r3)
ld r6, (UREGS_gprs + GPR_WIDTH * 7)(r3)
ld r5, (UREGS_gprs + GPR_WIDTH * 6)(r3)
ld r4, (UREGS_gprs + GPR_WIDTH * 5)(r3)
ld r3, (UREGS_gprs + GPR_WIDTH * 4)(r3)
bctr
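
/* For illustration: xen_hvcall_jump receives the saved register frame in
 * r3 and the handler in r4. The handler goes to CTR before r4 is
 * overwritten, then the guest's hcall arguments move from the saved
 * r4-r11 image into the ABI argument registers r3-r10; r3 itself is
 * reloaded last because it is still the base pointer for the loads. */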

_GLOBAL(_reset_stack_and_jump)
ld r2, 8(r3)
ld r3, 0(r3)
mtctr r3
mr r1, r4
bctr

_GLOBAL(sleep)
mfmsr r3
ori r4, r3, MSR_EE
oris r4, r4, MSR_POW@h
sync
mtmsrd r4
isync
mtmsrd r3
blr
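
/* For illustration: sleep() enters power-save by setting MSR:POW, with
 * MSR:EE also set so an external interrupt can wake the processor;
 * execution continues past the first mtmsrd on wakeup, and the final
 * "mtmsrd r3" restores the original MSR before returning. */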

/* The primary processor issues a firmware call to spin us up at this
 * address, passing our CPU number in r3. We only need a function
 * entry point instead of a descriptor since this is never called from
 * C code.
 */
.globl spin_start
spin_start:
/* We discovered by experiment that the ERAT must be flushed early. */
isync
slbia
isync

/* Do a cache flush for our text, in case the loader didn't */
LOADADDR(r9, _start)
LOADADDR(r8, _etext)
4: dcbf r0,r9
icbi r0,r9
addi r9,r9,0x20 /* up to a 4 way set per line */
cmpld cr0,r9,r8
blt 4b
sync
isync
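
/* Note: stepping by 0x20 bytes is conservative and correct for any cache
 * line size of 32 bytes or more. dcbf pushes each line of the new text to
 * memory and icbi invalidates any stale icache copy; the sync/isync pair
 * then makes the flushed text visible to instruction fetch. */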

/* Write our processor number as an acknowledgment that we're alive. */
LOADADDR(r14, __spin_ack)
stw r3, 0(r14)
sync
/* If NR_CPUS is too small, we should just spin forever. */
LOADADDR(r15, NR_CPUS)
cmpd r3, r15
blt 2f
b .
/* Find our index in the array of processor_area struct pointers. */
2: LOADADDR(r14, global_cpu_table)
mulli r15, r3, 8
add r14, r14, r15
/* Spin until the pointer for our processor goes valid. */
1: ld r15, 0(r14)
cmpldi r15, 0
beq 1b
/* Dereference the pointer and load our stack pointer. */
isync
ld r1, PAREA_stack(r15)
li r14, STACK_FRAME_OVERHEAD
sub r1, r1, r14
/* Load up the TOC and entry point for the C function to be called. */
LOADADDR(r14, secondary_cpu_init)
ld r2, 8(r14)
ld r11, 0(r14)
mtctr r11
/* Warning: why do we need this synchronizing instruction on 970FX? */
isync
/* Jump into C code now. */
bctrl
nop
b .