[34] | 1 | /* |
---|
| 2 | * ia64/kernel/entry.S |
---|
| 3 | * |
---|
| 4 | * Kernel entry points. |
---|
| 5 | * |
---|
| 6 | * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co |
---|
| 7 | * David Mosberger-Tang <davidm@hpl.hp.com> |
---|
| 8 | * Copyright (C) 1999, 2002-2003 |
---|
| 9 | * Asit Mallick <Asit.K.Mallick@intel.com> |
---|
| 10 | * Don Dugger <Don.Dugger@intel.com> |
---|
| 11 | * Suresh Siddha <suresh.b.siddha@intel.com> |
---|
| 12 | * Fenghua Yu <fenghua.yu@intel.com> |
---|
| 13 | * Copyright (C) 1999 VA Linux Systems |
---|
| 14 | * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> |
---|
| 15 | */ |
---|
| 16 | /* |
---|
| 17 | * ia64_switch_to now places correct virtual mapping in TR2 for |
---|
| 18 | * kernel stack. This allows us to handle interrupts without changing |
---|
| 19 | * to physical mode. |
---|
| 20 | * |
---|
| 21 | * Jonathan Nicklin <nicklin@missioncriticallinux.com> |
---|
| 22 | * Patrick O'Rourke <orourke@missioncriticallinux.com> |
---|
| 23 | * 11/07/2000 |
---|
| 24 | */ |
---|
| 25 | /* |
---|
| 26 | * Global (preserved) predicate usage on syscall entry/exit path: |
---|
| 27 | * |
---|
| 28 | * pKStk: See entry.h. |
---|
| 29 | * pUStk: See entry.h. |
---|
| 30 | * pSys: See entry.h. |
---|
| 31 | * pNonSys: !pSys |
---|
| 32 | */ |
---|
| 33 | |
---|
| 34 | |
---|
| 35 | #include <asm/asmmacro.h> |
---|
| 36 | #include <asm/cache.h> |
---|
| 37 | #include <asm/errno.h> |
---|
| 38 | #include <asm/kregs.h> |
---|
| 39 | #include <asm/asm-offsets.h> |
---|
| 40 | #include <asm/pgtable.h> |
---|
| 41 | #include <asm/percpu.h> |
---|
| 42 | #include <asm/processor.h> |
---|
| 43 | #include <asm/thread_info.h> |
---|
| 44 | #include <asm/unistd.h> |
---|
| 45 | |
---|
| 46 | #include "minstate.h" |
---|
| 47 | |
---|
| 48 | /* |
---|
| 49 | * execve() is special because in case of success, we need to |
---|
| 50 | * setup a null register window frame. |
---|
| 51 | */ |
---|
| 52 | ENTRY(ia64_execve) |
---|
| 53 | /* |
---|
| 54 | * Allocate 8 input registers since ptrace() may clobber them |
---|
| 55 | */ |
---|
| 56 | .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8) |
---|
| 57 | alloc loc1=ar.pfs,8,2,4,0 |
---|
| 58 | mov loc0=rp |
---|
| 59 | .body |
---|
| 60 | mov out0=in0 // filename |
---|
| 61 | ;; // stop bit between alloc and call |
---|
| 62 | mov out1=in1 // argv |
---|
| 63 | mov out2=in2 // envp |
---|
| 64 | add out3=16,sp // regs |
---|
| 65 | br.call.sptk.many rp=sys_execve |
---|
| 66 | .ret0: |
---|
| 67 | #ifdef CONFIG_IA32_SUPPORT |
---|
| 68 | /* |
---|
| 69 | * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers |
---|
| 70 | * from pt_regs. |
---|
| 71 | */ |
---|
| 72 | adds r16=PT(CR_IPSR)+16,sp |
---|
| 73 | ;; |
---|
| 74 | ld8 r16=[r16] |
---|
| 75 | #endif |
---|
| 76 | cmp4.ge p6,p7=r8,r0 |
---|
| 77 | mov ar.pfs=loc1 // restore ar.pfs |
---|
| 78 | sxt4 r8=r8 // return 64-bit result |
---|
| 79 | ;; |
---|
| 80 | stf.spill [sp]=f0 |
---|
| 81 | (p6) cmp.ne pKStk,pUStk=r0,r0 // a successful execve() lands us in user-mode... |
---|
| 82 | mov rp=loc0 |
---|
| 83 | (p6) mov ar.pfs=r0 // clear ar.pfs on success |
---|
| 84 | (p7) br.ret.sptk.many rp |
---|
| 85 | |
---|
| 86 | /* |
---|
| 87 | * In theory, we'd have to zap this state only to prevent leaking of |
---|
| 88 | * security sensitive state (e.g., if current->mm->dumpable is zero). However, |
---|
| 89 | * this executes in less than 20 cycles even on Itanium, so it's not worth |
---|
| 90 | * optimizing for...). |
---|
| 91 | */ |
---|
| 92 | mov ar.unat=0; mov ar.lc=0 |
---|
| 93 | mov r4=0; mov f2=f0; mov b1=r0 |
---|
| 94 | mov r5=0; mov f3=f0; mov b2=r0 |
---|
| 95 | mov r6=0; mov f4=f0; mov b3=r0 |
---|
| 96 | mov r7=0; mov f5=f0; mov b4=r0 |
---|
| 97 | ldf.fill f12=[sp]; mov f13=f0; mov b5=r0 |
---|
| 98 | ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0 |
---|
| 99 | ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0 |
---|
| 100 | ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0 |
---|
| 101 | ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 |
---|
| 102 | ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 |
---|
| 103 | ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 |
---|
| 104 | #ifdef CONFIG_IA32_SUPPORT |
---|
| 105 | tbit.nz p6,p0=r16, IA64_PSR_IS_BIT |
---|
| 106 | movl loc0=ia64_ret_from_ia32_execve |
---|
| 107 | ;; |
---|
| 108 | (p6) mov rp=loc0 |
---|
| 109 | #endif |
---|
| 110 | br.ret.sptk.many rp |
---|
| 111 | END(ia64_execve) |
---|
| 112 | |
---|
| 113 | /* |
---|
| 114 | * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr, |
---|
| 115 | * u64 tls) |
---|
| 116 | */ |
---|
GLOBAL_ENTRY(sys_clone2)
	/*
	 * Clone with an explicit user-stack base AND size; hands everything
	 * off to do_fork() with a switch_stack built on the kernel stack.
	 *
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp	// r2 = &pt_regs.r16 (TLS slot)
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1				// ustack_base
	mov out3=in2				// ustack_size
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT	// p6 = caller passed CLONE_SETTLS
	mov out4=in3	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
	mov out5=in4	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)
---|
| 144 | |
---|
| 145 | /* |
---|
| 146 | * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls) |
---|
| 147 | * Deprecated. Use sys_clone2() instead. |
---|
| 148 | */ |
---|
GLOBAL_ENTRY(sys_clone)
	/*
	 * Legacy clone entry point: identical to sys_clone2 except the
	 * user-stack size is fixed at 16 (see out3 below) instead of being
	 * a caller-supplied argument.
	 *
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp	// r2 = &pt_regs.r16 (TLS slot)
	mov loc0=rp
	mov loc1=r16				// save ar.pfs across do_fork
	.body
	mov out1=in1				// ustack_base
	mov out3=16	// stacksize (compensates for 16-byte scratch area)
	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT	// p6 = caller passed CLONE_SETTLS
	mov out4=in2	// parent_tidptr: valid only w/CLONE_PARENT_SETTID
	;;
(p6)	st8 [r2]=in4				// store TLS in r13 (tp)
	mov out5=in3	// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
	mov out0=in0				// out0 = clone_flags
	br.call.sptk.many rp=do_fork
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
---|
| 176 | |
---|
| 177 | /* |
---|
| 178 | * prev_task <- ia64_switch_to(struct task_struct *next) |
---|
| 179 | * With Ingo's new scheduler, interrupts are disabled when this routine gets |
---|
| 180 | * called. The code starting at .map relies on this. The rest of the code |
---|
| 181 | * doesn't care about the interrupt masking status. |
---|
| 182 | */ |
---|
GLOBAL_ENTRY(__ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body
	/*
	 * in0 = "next" task; r13 = "current" task.
	 * Saves current's kernel sp, re-wires the TR mapping for next's
	 * kernel stack if necessary, then loads next's context.
	 */
	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13	// r22 = &current->thread.ksp
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)		// granule # currently wired in the TR
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0	// r21 = &next->thread.ksp
	dep r20=0,in0,61,3		// physical address of "next"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT	// r26 = granule # of "next"
	cmp.eq p7,p6=r25,in0		// p7 = switching to init_task (no remap needed)
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27		// p7 = next's granule already wired
(p6)	br.cond.dpnt .map		// p6 = need a fresh TR mapping
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
	mov IA64_KR(CURRENT)=in0	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
	rsm psr.ic			// interrupts (psr.i) are already disabled here
	movl r25=PAGE_KERNEL
	;;
	srlz.d				// serialize: make psr.ic=0 take effect
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2	// itir: page size = one granule
	;;
	mov cr.itir=r25
	mov cr.ifa=in0			// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	mov IA64_KR(CURRENT_STACK)=r26	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	ssm psr.ic			// reenable the psr.ic bit
	;;
	srlz.d
	br.cond.sptk .done
END(__ia64_switch_to)
---|
| 238 | |
---|
| 239 | /* |
---|
| 240 | * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This |
---|
| 241 | * means that we may get an interrupt with "sp" pointing to the new kernel stack while |
---|
| 242 | * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc, |
---|
| 243 | * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a |
---|
| 244 | * problem. Also, we don't need to specify unwind information for preserved registers |
---|
| 245 | * that are not modified in save_switch_stack as the right unwind information is already |
---|
| 246 | * specified at the call-site of save_switch_stack. |
---|
| 247 | */ |
---|
| 248 | |
---|
| 249 | /* |
---|
| 250 | * save_switch_stack: |
---|
| 251 | * - r16 holds ar.pfs |
---|
| 252 | * - b7 holds address to return to |
---|
| 253 | * - rp (b0) holds return address to save |
---|
| 254 | */ |
---|
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
	/*
	 * Spill the preserved state (r4-r7, f2-f5, f12-f31, b0-b5, and the
	 * relevant ar.* registers) into the switch_stack at sp+16.
	 * r2/r3 and r14/r15 walk the structure in interleaved pairs so
	 * stores dual-issue; the lfetches prewarm the cache lines.
	 */
#ifdef CONFIG_ITANIUM
	adds r2=16+128,sp
	adds r3=16+64,sp
	adds r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,16		// spill r4
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
#else
	add r2=16+3*128,sp
	add r3=16,sp
	add r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
	lfetch.fault.excl.nt1 [r3],128		// prefetch offset 0x010
	;;
	lfetch.fault.excl.nt1 [r3],128		// prefetch offset 0x090
	lfetch.fault.excl.nt1 [r2],128		// prefetch offset 0x190
	;;
	lfetch.fault.excl.nt1 [r3]		// prefetch offset 0x110
	lfetch.fault.excl.nt1 [r2]		// prefetch offset 0x210
	adds r15=SW(R5)+16,sp
#endif
	;;
	st8.spill [r15]=r5,SW(R7)-SW(R5)	// spill r5
	mov.m ar.rsc=0		// put RSE in mode: enforced lazy, little endian, pl 0
	add r2=SW(F2)+16,sp	// r2 = &sw->f2
	;;
	st8.spill [r14]=r6,SW(B0)-SW(R6)	// spill r6
	mov.m r18=ar.fpsr	// preserve fpsr
	add r3=SW(F3)+16,sp	// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	mov.m r19=ar.rnat	// safe to read now that RSE is in enforced-lazy mode
	mov r21=b0

	stf.spill [r3]=f3,32
	st8.spill [r15]=r7,SW(B2)-SW(R7)	// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov.m r29=ar.unat
	mov.m r20=ar.bspstore
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,SW(B1)-SW(B0)		// save b0
	st8 [r15]=r23,SW(B3)-SW(B2)		// save b2
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r22,SW(B4)-SW(B1)		// save b1
	st8 [r15]=r24,SW(AR_PFS)-SW(B3)		// save b3
	mov r21=ar.lc		// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,SW(B5)-SW(B4)		// save b4
	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)	// save ar.pfs (passed in by caller in r16)
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r26				// save b5
	st8 [r15]=r21				// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
	stf.spill [r3]=f31,SW(PR)-SW(F31)
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)	// save ar.unat
	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
	mov r21=pr
	;;
	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
	st8 [r3]=r21				// save predicate registers
	;;
	st8 [r2]=r20				// save ar.bspstore
	st8 [r14]=r18				// save fpsr
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)
---|
| 371 | |
---|
| 372 | /* |
---|
| 373 | * load_switch_stack: |
---|
| 374 | * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK) |
---|
| 375 | * - b7 holds address to return to |
---|
| 376 | * - must not touch r8-r11 |
---|
| 377 | */ |
---|
GLOBAL_ENTRY(load_switch_stack)
	.prologue
	.altrp b7

	.body
	/*
	 * Inverse of save_switch_stack: reload the preserved state from the
	 * switch_stack at sp+16.  The RSE must be in enforced-lazy mode
	 * (ar.rsc=0) while ar.bspstore/ar.rnat are being restored; ar.rnat
	 * restore MUST come after bspstore but before ar.rsc is set back.
	 */
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0				// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16

	mov ar.unat=r18				// restore caller's unat
	mov ar.rnat=r30				// must restore after bspstore but before rsc!
	mov ar.fpsr=r19				// restore fpsr
	mov ar.rsc=3				// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)
---|
| 471 | |
---|
GLOBAL_ENTRY(prefetch_stack)
	/*
	 * Warm the caches ahead of a context switch: prefetch (for exclusive
	 * ownership) the switch_stack area about to be written below sp, and
	 * prefetch (read) the next task's saved kernel stack.
	 * in0 = "next" task_struct.
	 */
	add r14 = -IA64_SWITCH_STACK_SIZE, sp	// r14 = switch_stack to be stored
	add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0	// r15 = &next->thread.ksp
	;;
	ld8 r16 = [r15]				// load next's stack pointer
	lfetch.fault.excl [r14], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault [r16], 128
	br.ret.sptk.many rp
END(prefetch_stack)
---|
| 494 | |
---|
GLOBAL_ENTRY(execve)
	/*
	 * Kernel-internal execve(): issue the system call via break, just
	 * like user space would.
	 */
	mov r15=__NR_execve			// put syscall number in place
	break __BREAK_SYSCALL
	br.ret.sptk.many rp
END(execve)
---|
| 500 | |
---|
GLOBAL_ENTRY(clone)
	/*
	 * Kernel-internal clone(): issue the system call via break, just
	 * like user space would.
	 */
	mov r15=__NR_clone			// put syscall number in place
	break __BREAK_SYSCALL
	br.ret.sptk.many rp
END(clone)
---|
| 506 | |
---|
| 507 | /* |
---|
| 508 | * Invoke a system call, but do some tracing before and after the call. |
---|
| 509 | * We MUST preserve the current register frame throughout this routine |
---|
| 510 | * because some system calls (such as ia64_execve) directly |
---|
| 511 | * manipulate ar.pfs. |
---|
| 512 | */ |
---|
GLOBAL_ENTRY(__ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]
	// the syscall number may have changed, so re-load it and re-calculate the
	// syscall entry-point:
	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
	adds r15=-1024,r15			// syscall numbers are biased by 1024
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3			// p6 = valid syscall #, p7 = out of range
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6			// do the syscall
.strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	;;					// avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
	br.cond.sptk .work_pending_syscall_end

strace_error:
	ld8 r3=[r2]				// load pt_regs.r8
	sub r9=0,r8				// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1				// flag failure in r10...
(p6)	mov r8=r9				// ...and return positive errno in r8
	br.cond.sptk .strace_save_retval
END(__ia64_trace_syscall)
---|
| 586 | |
---|
| 587 | /* |
---|
| 588 | * When traced and returning from sigreturn, we invoke syscall_trace but then |
---|
| 589 | * go straight to ia64_leave_kernel rather than ia64_leave_syscall. |
---|
| 590 | */ |
---|
GLOBAL_ENTRY(ia64_strace_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4:	br.cond.sptk ia64_leave_kernel		// sigreturn path: leave via kernel exit
END(ia64_strace_leave_kernel)
---|
| 603 | |
---|
GLOBAL_ENTRY(__ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after do_fork()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13	// r2 = &current_thread_info()->flags
	;;
	ld4 r2=[r2]
	;;
	mov r8=0				// child's syscall return value is 0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2	// being traced/audited?
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval	// yes: report return value to tracer
	;;					// added stop bits to prevent r8 dependency
END(__ia64_ret_from_clone)
---|
| 631 | // fall through |
---|
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	mov r10=r0				// clear error indication in r10
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
	;;
	// don't fall through, ia64_leave_syscall may be #define'd
	br.cond.sptk.few ia64_leave_syscall
	;;
END(ia64_ret_from_syscall)
---|
| 643 | /* |
---|
| 644 | * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't |
---|
| 645 | * need to switch to bank 0 and doesn't restore the scratch registers. |
---|
| 646 | * To avoid leaking kernel bits, the scratch registers are set to |
---|
| 647 | * the following known-to-be-safe values: |
---|
| 648 | * |
---|
| 649 | * r1: restored (global pointer) |
---|
| 650 | * r2: cleared |
---|
| 651 | * r3: 1 (when returning to user-level) |
---|
| 652 | * r8-r11: restored (syscall return value(s)) |
---|
| 653 | * r12: restored (user-level stack pointer) |
---|
| 654 | * r13: restored (user-level thread pointer) |
---|
| 655 | * r14: set to __kernel_syscall_via_epc |
---|
| 656 | * r15: restored (syscall #) |
---|
| 657 | * r16-r17: cleared |
---|
| 658 | * r18: user-level b6 |
---|
| 659 | * r19: cleared |
---|
| 660 | * r20: user-level ar.fpsr |
---|
| 661 | * r21: user-level b0 |
---|
| 662 | * r22: cleared |
---|
| 663 | * r23: user-level ar.bspstore |
---|
| 664 | * r24: user-level ar.rnat |
---|
| 665 | * r25: user-level ar.unat |
---|
| 666 | * r26: user-level ar.pfs |
---|
| 667 | * r27: user-level ar.rsc |
---|
| 668 | * r28: user-level ip |
---|
| 669 | * r29: user-level psr |
---|
| 670 | * r30: user-level cfm |
---|
| 671 | * r31: user-level pr |
---|
| 672 | * f6-f11: cleared |
---|
| 673 | * pr: restored (user-level pr) |
---|
| 674 | * b0: restored (user-level rp) |
---|
| 675 | * b6: restored |
---|
| 676 | * b7: set to __kernel_syscall_via_epc |
---|
| 677 | * ar.unat: restored (user-level ar.unat) |
---|
| 678 | * ar.pfs: restored (user-level ar.pfs) |
---|
| 679 | * ar.rsc: restored (user-level ar.rsc) |
---|
| 680 | * ar.rnat: restored (user-level ar.rnat) |
---|
| 681 | * ar.bspstore: restored (user-level ar.bspstore) |
---|
| 682 | * ar.fpsr: restored (user-level ar.fpsr) |
---|
| 683 | * ar.ccv: cleared |
---|
| 684 | * ar.csd: cleared |
---|
| 685 | * ar.ssd: cleared |
---|
| 686 | */ |
---|
GLOBAL_ENTRY(__ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be check for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	rsm psr.i				// disable interrupts
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
(pUStk)	rsm psr.i
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.work_processed_syscall:
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	nop.i 0
	;;
	mov r16=ar.bsp				// M2  get existing backing store pointer
	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
(p6)	br.cond.spnt .work_pending_syscall
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0		//      bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala			// M0|1 invalidate ALAT
	rsm psr.i | psr.ic	// M2   turn off interrupts and interruption collection
	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs

	ld8 r29=[r2],16		// M0|1 load cr.ipsr
	ld8 r28=[r3],16		// M0|1 load cr.iip
	mov r22=r0		// A    clear r22
	;;
	ld8 r30=[r2],16		// M0|1 load cr.ifs
	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk)	add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
(pKStk)	mov r22=psr			// M2   read PSR now that interrupts are disabled
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)	// M0|1 load b0
	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// M0|1 load ar.rsc
	mov f6=f0			// F    clear f6
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
	ld8 r31=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
	mov f7=f0				// F    clear f7
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
	ld8.fill r1=[r3],16			// M0|1 load r1
(pUStk)	mov r17=1				// A    marker for thread.on_ustack
	;;
(pUStk)	st1 [r14]=r17				// M2|3 set current->thread.on_ustack
	ld8.fill r13=[r3],16			// M0|1 restore r13 (tp)
	mov f8=f0				// F    clear f8
	;;
	ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
	ld8.fill r15=[r3]			// M0|1 restore r15
	mov b6=r18				// I0   restore b6

	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
	mov f9=f0				// F    clear f9
(pKStk)	br.cond.dpnt.many skip_rbs_switch	// B    kernel return: no RBS switch

	srlz.d			// M0   ensure interruption collection is off (for cover)
	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
	cover			// B    add current frame into dirty partition & set cr.ifs
	;;
(pUStk)	ld4 r17=[r17]		// M0|1 r17 = cpu_data->phys_stacked_size_p8
	mov r19=ar.bsp		// M2   get new backing store pointer
	mov f10=f0		// F    clear f10

	nop.m 0
	movl r14=__kernel_syscall_via_epc // X
	;;
	mov.m ar.csd=r0		// M2   clear ar.csd
	mov.m ar.ccv=r0		// M2   clear ar.ccv
	mov b7=r14		// I0   clear b7 (hint with __kernel_syscall_via_epc)

	mov.m ar.ssd=r0		// M2   clear ar.ssd
	mov f11=f0		// F    clear f11
	br.cond.sptk.many rbs_switch	// B
END(__ia64_leave_syscall)
---|
| 796 | |
---|
#ifdef CONFIG_IA32_SUPPORT
	/*
	 * Return path used after a successful IA-32 execve(): deposit the
	 * syscall return value into the pt_regs r8 slot, clear the r10 slot
	 * (no error), and leave the kernel via the full ia64_leave_kernel
	 * path (not the streamlined syscall exit).
	 */
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
	PT_REGS_UNWIND_INFO(0)
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	;;
	.mem.offset 0,0
	st8.spill [r2]=r8	// store return value in slot for r8 and set unat bit
	.mem.offset 8,0
	st8.spill [r3]=r0	// clear error indication in slot for r10 and set unat bit
	;;
	// don't fall through, ia64_leave_kernel may be #define'd
	br.cond.sptk.few ia64_leave_kernel
	;;
END(ia64_ret_from_ia32_execve)
#endif /* CONFIG_IA32_SUPPORT */
---|
	/*
	 * Exit path back to user- or fsys-mode from an interruption or
	 * non-streamlined syscall.  Restores the full pt_regs state saved on
	 * the kernel stack, switches the register backing store (RBS) back to
	 * the user backing store when returning to user-level, scrubs the
	 * "invalid" RBS partition so no kernel register contents leak to
	 * user-space, and finally does an rfi.
	 */
GLOBAL_ENTRY(__ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be check for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	rsm psr.i				// disable interrupts
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else
(pUStk)	rsm psr.i
	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.work_processed_kernel:
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
	adds r21=PT(PR)+16,r12
	;;

	lfetch [r21],PT(CR_IPSR)-PT(PR)		// prefetch pt_regs lines we restore below
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8		// load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
	adds r30=PT(AR_CCV)+16,r12
(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]		// load ar.ccv
(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
	;;
	ld8 r29=[r2],16		// load b7
	ld8 r30=[r3],16		// load ar.csd
(p6)	br.cond.spnt .work_pending
	;;
	// restore the bank-1 scratch registers from pt_regs:
	ld8 r31=[r2],16		// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	rsm psr.i | psr.ic	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	// restore the scratch FP registers saved in pt_regs:
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
	bsw.0			// switch back to bank 0 (no stop bit required beforehand...)
	;;
(pUStk)	mov r18=IA64_KR(CURRENT)// M2 (12 cycle read latency)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

(pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled
	nop.i 0
	nop.i 0
	;;
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r21=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
(pUStk)	mov r17=1
	;;
	ld8.fill r3=[r16]
(pUStk)	st1 [r18]=r17	// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0
	;;
	ld4 r17=[r17]		// r17 = cpu_data->phys_stacked_size_p8
(pKStk)	br.cond.dpnt skip_rbs_switch	// staying in the kernel: no RBS switch needed

	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	cover				// add current frame into dirty partition and set cr.ifs
	;;
	mov r19=ar.bsp			// get new backing store pointer
rbs_switch:
	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16			// calculate total byte size of dirty partition
	add r18=64,r18			// don't force in0-in7 into memory...
	;;
	shl r19=r19,16			// shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17		// in0 = bytes of stacked regs left to clear
	mov in1=0			// in1 = recursion depth counter
	;;
	TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
 { .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1			// increment recursion count
	nop.f 0
	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi	// cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi	// cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1			// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs				// reload the dirty partition from the kernel RBS
	;;
skip_rbs_switch:
	mov ar.unat=r25		// M2
(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
	;;
(pUStk)	mov ar.bspstore=r23	// M2
(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
	;;
	mov cr.ipsr=r29		// M2
	mov ar.pfs=r26		// I0
(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise

(p9)	mov cr.ifs=r30		// M2
	mov b0=r21		// I0
(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise

	mov ar.fpsr=r20		// M2
	mov cr.iip=r28		// M2
	nop 0
	;;
(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
	nop 0
(pLvSys)mov r2=r0

	mov ar.rsc=r27		// M2
	mov pr=r31,-1		// I0
	rfi			// B  return from interruption

	/*
	 * On entry:
	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
	 *	r31 = current->thread_info->flags
	 * On exit:
	 *	p6 = TRUE if work-pending-check needs to be redone
	 */
.work_pending_syscall:
	add r2=-8,r2		// make room to preserve the syscall return values
	add r3=-8,r3
	;;
	st8 [r2]=r8
	st8 [r3]=r10
.work_pending:
	tbit.z p6,p0=r31,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
(p6)	br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
	;;
(pKStk) st4 [r20]=r21
	ssm psr.i		// enable interrupts
#endif
	br.call.spnt.many rp=schedule
.ret9:	cmp.eq p6,p0=r0,r0				// p6 <- 1
	rsm psr.i		// disable interrupts
	;;
#ifdef CONFIG_PREEMPT
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
(pKStk)	st4 [r20]=r0		// preempt_count() <- 0
#endif
(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel	// re-check

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0				// p6 <- 0
(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel	// don't re-check

.work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]		// reload syscall return values saved above
	ld8 r10=[r3]
	br.cond.sptk.many .work_processed_syscall	// re-check

END(__ia64_leave_kernel)
---|
| 1145 | |
---|
ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
	 * lead us to mistake a negative return value as a failed syscall.  Those syscall
	 * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
	 * pt_regs.r8 is zero, we assume that the call completed successfully.
	 *
	 * NOTE(review): relies on the syscall exit path having left r2 pointing
	 * at pt_regs.r8 and r8 holding the syscall's raw return value — set up
	 * by the caller, not visible here; confirm against the ivt/syscall path.
	 */
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2]		// load pt_regs.r8
	;;
	cmp.eq p6,p7=r3,r0	// is pt_regs.r8==0?
	;;
(p7)	mov r10=-1		// flag the error in r10
(p7)	sub r8=0,r8		// negate return value to get errno
	br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
---|
| 1162 | |
---|
/*
 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
 * in case a system call gets restarted.
 */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0	// 8 ins preserved, 2 locals, 1 out
	mov loc0=rp			// save return address across the call
	mov out0=r8			// Address of previous task
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1			// restore caller's frame marker
	mov rp=loc0
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
---|
| 1178 | |
---|
/*
 * Setup stack and call do_notify_resume_user().  Note that pSys and pNonSys need to
 * be set up by the caller.  We declare 8 input registers so the system call
 * args get preserved, in case we need to restart a system call.
 */
GLOBAL_ENTRY(notify_resume_user)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat			// save caller's unat; restored below
	mov loc0=rp				// save return address
	mov out0=0				// there is no "oldset"
	adds out1=8,sp				// out1=&sigscratch->ar_pfs
(pSys)	mov out2=1				// out2==1 => we're in a syscall
	;;
(pNonSys) mov out2=0				// out2==0 => not a syscall
	.fframe 16
	.spillsp ar.unat, 16
	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
	st8 [out1]=loc1,-8			// save ar.pfs, out1=&sigscratch
	.body
	br.call.sptk.many rp=do_notify_resume_user
.ret15:	.restore sp
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new unat from sigscratch->scratch_unat
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(notify_resume_user)
---|
| 1210 | |
---|
/*
 * rt_sigsuspend(mask, sigsetsize): set up a sigscratch area on the stack
 * and hand off to the C implementation ia64_rt_sigsuspend().
 */
GLOBAL_ENTRY(sys_rt_sigsuspend)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat			// save caller's unat; restored below
	mov loc0=rp				// save return address
	mov out0=in0				// mask
	mov out1=in1				// sigsetsize
	adds out2=8,sp				// out2=&sigscratch->ar_pfs
	;;
	.fframe 16
	.spillsp ar.unat, 16
	st8 [sp]=r9,-16				// allocate space for ar.unat and save it
	st8 [out2]=loc1,-8			// save ar.pfs, out2=&sigscratch
	.body
	br.call.sptk.many rp=ia64_rt_sigsuspend
.ret17:	.restore sp
	adds sp=16,sp				// pop scratch stack space
	;;
	ld8 r9=[sp]				// load new unat from sw->caller_unat
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(sys_rt_sigsuspend)
---|
| 1236 | |
---|
ENTRY(sys_rt_sigreturn)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	alloc r2=ar.pfs,8,0,1,0
	.prologue
	PT_REGS_SAVES(16)
	adds sp=-16,sp
	.body
	cmp.eq pNonSys,pSys=r0,r0		// sigreturn isn't a normal syscall...
	;;
	/*
	 * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
	 * syscall-entry path does not save them we save them here instead.  Note: we
	 * don't need to save any other registers that are not saved by the stream-lined
	 * syscall path, because restore_sigcontext() restores them.
	 */
	adds r16=PT(F6)+32,sp
	adds r17=PT(F7)+32,sp
	;;
 	stf.spill [r16]=f6,32
 	stf.spill [r17]=f7,32
	;;
 	stf.spill [r16]=f8,32
 	stf.spill [r17]=f9,32
	;;
 	stf.spill [r16]=f10
 	stf.spill [r17]=f11
	adds out0=16,sp				// out0 = &sigscratch
	br.call.sptk.many rp=ia64_rt_sigreturn
.ret19:	.restore sp,0
	adds sp=16,sp
	;;
	ld8 r9=[sp]				// load new ar.unat
	mov.sptk b7=r8,__ia64_leave_kernel	// r8 = continuation point; hint full kernel exit
	;;
	mov ar.unat=r9
	br.many b7				// exit via address returned by ia64_rt_sigreturn
END(sys_rt_sigreturn)
---|
| 1277 | |
---|
/*
 * Save a full switch_stack, call the C unaligned-access handler, then
 * reload the switch_stack and return (rp goes to ia64_leave_kernel).
 */
GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
	.prologue
	/*
	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
	 */
	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp				// goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)
---|
| 1290 | |
---|
	//
	// unw_init_running(void (*callback)(info, arg), void *arg)
	//
	// Saves a switch_stack plus an unwind frame-info area on the stack,
	// initializes the frame info, then invokes the callback with it.
	//
#	define EXTRA_FRAME_SIZE	((UNW_FRAME_INFO_SIZE+15)&~15)	// 16-byte aligned

GLOBAL_ENTRY(unw_init_running)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,3,3,0
	;;
	ld8 loc2=[in0],8		// loc2 = callback entry point (in0 -> gp slot)
	mov loc0=rp			// save return address
	mov r16=loc1
	DO_SAVE_SWITCH_STACK
	.body

	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
	adds sp=-EXTRA_FRAME_SIZE,sp
	.body
	;;
	adds out0=16,sp				// &info
	mov out1=r13				// current
	adds out2=16+EXTRA_FRAME_SIZE,sp	// &switch_stack
	br.call.sptk.many rp=unw_init_frame_info
1:	adds out0=16,sp				// &info
	mov b6=loc2
	mov loc2=gp				// save gp across indirect function call
	;;
	ld8 gp=[in0]				// load callback's gp from its fdesc
	mov out1=in1				// arg
	br.call.sptk.many rp=b6			// invoke the callback function
1:	mov gp=loc2				// restore gp

	// For now, we don't allow changing registers from within
	// unw_init_running; if we ever want to allow that, we'd
	// have to do a load_switch_stack here:
	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(unw_init_running)
---|
| 1335 | |
---|
| 1336 | .rodata |
---|
| 1337 | .align 8 |
---|
| 1338 | .globl sys_call_table |
---|
| 1339 | sys_call_table: |
---|
| 1340 | data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S. |
---|
| 1341 | data8 sys_exit // 1025 |
---|
| 1342 | data8 sys_read |
---|
| 1343 | data8 sys_write |
---|
| 1344 | data8 sys_open |
---|
| 1345 | data8 sys_close |
---|
| 1346 | data8 sys_creat // 1030 |
---|
| 1347 | data8 sys_link |
---|
| 1348 | data8 sys_unlink |
---|
| 1349 | data8 ia64_execve |
---|
| 1350 | data8 sys_chdir |
---|
| 1351 | data8 sys_fchdir // 1035 |
---|
| 1352 | data8 sys_utimes |
---|
| 1353 | data8 sys_mknod |
---|
| 1354 | data8 sys_chmod |
---|
| 1355 | data8 sys_chown |
---|
| 1356 | data8 sys_lseek // 1040 |
---|
| 1357 | data8 sys_getpid |
---|
| 1358 | data8 sys_getppid |
---|
| 1359 | data8 sys_mount |
---|
| 1360 | data8 sys_umount |
---|
| 1361 | data8 sys_setuid // 1045 |
---|
| 1362 | data8 sys_getuid |
---|
| 1363 | data8 sys_geteuid |
---|
| 1364 | data8 sys_ptrace |
---|
| 1365 | data8 sys_access |
---|
| 1366 | data8 sys_sync // 1050 |
---|
| 1367 | data8 sys_fsync |
---|
| 1368 | data8 sys_fdatasync |
---|
| 1369 | data8 sys_kill |
---|
| 1370 | data8 sys_rename |
---|
| 1371 | data8 sys_mkdir // 1055 |
---|
| 1372 | data8 sys_rmdir |
---|
| 1373 | data8 sys_dup |
---|
| 1374 | data8 sys_pipe |
---|
| 1375 | data8 sys_times |
---|
| 1376 | data8 ia64_brk // 1060 |
---|
| 1377 | data8 sys_setgid |
---|
| 1378 | data8 sys_getgid |
---|
| 1379 | data8 sys_getegid |
---|
| 1380 | data8 sys_acct |
---|
| 1381 | data8 sys_ioctl // 1065 |
---|
| 1382 | data8 sys_fcntl |
---|
| 1383 | data8 sys_umask |
---|
| 1384 | data8 sys_chroot |
---|
| 1385 | data8 sys_ustat |
---|
| 1386 | data8 sys_dup2 // 1070 |
---|
| 1387 | data8 sys_setreuid |
---|
| 1388 | data8 sys_setregid |
---|
| 1389 | data8 sys_getresuid |
---|
| 1390 | data8 sys_setresuid |
---|
| 1391 | data8 sys_getresgid // 1075 |
---|
| 1392 | data8 sys_setresgid |
---|
| 1393 | data8 sys_getgroups |
---|
| 1394 | data8 sys_setgroups |
---|
| 1395 | data8 sys_getpgid |
---|
| 1396 | data8 sys_setpgid // 1080 |
---|
| 1397 | data8 sys_setsid |
---|
| 1398 | data8 sys_getsid |
---|
| 1399 | data8 sys_sethostname |
---|
| 1400 | data8 sys_setrlimit |
---|
| 1401 | data8 sys_getrlimit // 1085 |
---|
| 1402 | data8 sys_getrusage |
---|
| 1403 | data8 sys_gettimeofday |
---|
| 1404 | data8 sys_settimeofday |
---|
| 1405 | data8 sys_select |
---|
| 1406 | data8 sys_poll // 1090 |
---|
| 1407 | data8 sys_symlink |
---|
| 1408 | data8 sys_readlink |
---|
| 1409 | data8 sys_uselib |
---|
| 1410 | data8 sys_swapon |
---|
| 1411 | data8 sys_swapoff // 1095 |
---|
| 1412 | data8 sys_reboot |
---|
| 1413 | data8 sys_truncate |
---|
| 1414 | data8 sys_ftruncate |
---|
| 1415 | data8 sys_fchmod |
---|
| 1416 | data8 sys_fchown // 1100 |
---|
| 1417 | data8 ia64_getpriority |
---|
| 1418 | data8 sys_setpriority |
---|
| 1419 | data8 sys_statfs |
---|
| 1420 | data8 sys_fstatfs |
---|
| 1421 | data8 sys_gettid // 1105 |
---|
| 1422 | data8 sys_semget |
---|
| 1423 | data8 sys_semop |
---|
| 1424 | data8 sys_semctl |
---|
| 1425 | data8 sys_msgget |
---|
| 1426 | data8 sys_msgsnd // 1110 |
---|
| 1427 | data8 sys_msgrcv |
---|
| 1428 | data8 sys_msgctl |
---|
| 1429 | data8 sys_shmget |
---|
| 1430 | data8 sys_shmat |
---|
| 1431 | data8 sys_shmdt // 1115 |
---|
| 1432 | data8 sys_shmctl |
---|
| 1433 | data8 sys_syslog |
---|
| 1434 | data8 sys_setitimer |
---|
| 1435 | data8 sys_getitimer |
---|
| 1436 | data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */ |
---|
| 1437 | data8 sys_ni_syscall /* was: ia64_oldlstat */ |
---|
| 1438 | data8 sys_ni_syscall /* was: ia64_oldfstat */ |
---|
| 1439 | data8 sys_vhangup |
---|
| 1440 | data8 sys_lchown |
---|
| 1441 | data8 sys_remap_file_pages // 1125 |
---|
| 1442 | data8 sys_wait4 |
---|
| 1443 | data8 sys_sysinfo |
---|
| 1444 | data8 sys_clone |
---|
| 1445 | data8 sys_setdomainname |
---|
| 1446 | data8 sys_newuname // 1130 |
---|
| 1447 | data8 sys_adjtimex |
---|
| 1448 | data8 sys_ni_syscall /* was: ia64_create_module */ |
---|
| 1449 | data8 sys_init_module |
---|
| 1450 | data8 sys_delete_module |
---|
| 1451 | data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */ |
---|
| 1452 | data8 sys_ni_syscall /* was: sys_query_module */ |
---|
| 1453 | data8 sys_quotactl |
---|
| 1454 | data8 sys_bdflush |
---|
| 1455 | data8 sys_sysfs |
---|
| 1456 | data8 sys_personality // 1140 |
---|
| 1457 | data8 sys_ni_syscall // sys_afs_syscall |
---|
| 1458 | data8 sys_setfsuid |
---|
| 1459 | data8 sys_setfsgid |
---|
| 1460 | data8 sys_getdents |
---|
| 1461 | data8 sys_flock // 1145 |
---|
| 1462 | data8 sys_readv |
---|
| 1463 | data8 sys_writev |
---|
| 1464 | data8 sys_pread64 |
---|
| 1465 | data8 sys_pwrite64 |
---|
| 1466 | data8 sys_sysctl // 1150 |
---|
| 1467 | data8 sys_mmap |
---|
| 1468 | data8 sys_munmap |
---|
| 1469 | data8 sys_mlock |
---|
| 1470 | data8 sys_mlockall |
---|
| 1471 | data8 sys_mprotect // 1155 |
---|
| 1472 | data8 ia64_mremap |
---|
| 1473 | data8 sys_msync |
---|
| 1474 | data8 sys_munlock |
---|
| 1475 | data8 sys_munlockall |
---|
| 1476 | data8 sys_sched_getparam // 1160 |
---|
| 1477 | data8 sys_sched_setparam |
---|
| 1478 | data8 sys_sched_getscheduler |
---|
| 1479 | data8 sys_sched_setscheduler |
---|
| 1480 | data8 sys_sched_yield |
---|
| 1481 | data8 sys_sched_get_priority_max // 1165 |
---|
| 1482 | data8 sys_sched_get_priority_min |
---|
| 1483 | data8 sys_sched_rr_get_interval |
---|
| 1484 | data8 sys_nanosleep |
---|
| 1485 | data8 sys_nfsservctl |
---|
| 1486 | data8 sys_prctl // 1170 |
---|
| 1487 | data8 sys_getpagesize |
---|
| 1488 | data8 sys_mmap2 |
---|
| 1489 | data8 sys_pciconfig_read |
---|
| 1490 | data8 sys_pciconfig_write |
---|
| 1491 | data8 sys_perfmonctl // 1175 |
---|
| 1492 | data8 sys_sigaltstack |
---|
| 1493 | data8 sys_rt_sigaction |
---|
| 1494 | data8 sys_rt_sigpending |
---|
| 1495 | data8 sys_rt_sigprocmask |
---|
| 1496 | data8 sys_rt_sigqueueinfo // 1180 |
---|
| 1497 | data8 sys_rt_sigreturn |
---|
| 1498 | data8 sys_rt_sigsuspend |
---|
| 1499 | data8 sys_rt_sigtimedwait |
---|
| 1500 | data8 sys_getcwd |
---|
| 1501 | data8 sys_capget // 1185 |
---|
| 1502 | data8 sys_capset |
---|
| 1503 | data8 sys_sendfile64 |
---|
| 1504 | data8 sys_ni_syscall // sys_getpmsg (STREAMS) |
---|
| 1505 | data8 sys_ni_syscall // sys_putpmsg (STREAMS) |
---|
| 1506 | data8 sys_socket // 1190 |
---|
| 1507 | data8 sys_bind |
---|
| 1508 | data8 sys_connect |
---|
| 1509 | data8 sys_listen |
---|
| 1510 | data8 sys_accept |
---|
| 1511 | data8 sys_getsockname // 1195 |
---|
| 1512 | data8 sys_getpeername |
---|
| 1513 | data8 sys_socketpair |
---|
| 1514 | data8 sys_send |
---|
| 1515 | data8 sys_sendto |
---|
| 1516 | data8 sys_recv // 1200 |
---|
| 1517 | data8 sys_recvfrom |
---|
| 1518 | data8 sys_shutdown |
---|
| 1519 | data8 sys_setsockopt |
---|
| 1520 | data8 sys_getsockopt |
---|
| 1521 | data8 sys_sendmsg // 1205 |
---|
| 1522 | data8 sys_recvmsg |
---|
| 1523 | data8 sys_pivot_root |
---|
| 1524 | data8 sys_mincore |
---|
| 1525 | data8 sys_madvise |
---|
| 1526 | data8 sys_newstat // 1210 |
---|
| 1527 | data8 sys_newlstat |
---|
| 1528 | data8 sys_newfstat |
---|
| 1529 | data8 sys_clone2 |
---|
| 1530 | data8 sys_getdents64 |
---|
| 1531 | data8 sys_getunwind // 1215 |
---|
| 1532 | data8 sys_readahead |
---|
| 1533 | data8 sys_setxattr |
---|
| 1534 | data8 sys_lsetxattr |
---|
| 1535 | data8 sys_fsetxattr |
---|
| 1536 | data8 sys_getxattr // 1220 |
---|
| 1537 | data8 sys_lgetxattr |
---|
| 1538 | data8 sys_fgetxattr |
---|
| 1539 | data8 sys_listxattr |
---|
| 1540 | data8 sys_llistxattr |
---|
| 1541 | data8 sys_flistxattr // 1225 |
---|
| 1542 | data8 sys_removexattr |
---|
| 1543 | data8 sys_lremovexattr |
---|
| 1544 | data8 sys_fremovexattr |
---|
| 1545 | data8 sys_tkill |
---|
| 1546 | data8 sys_futex // 1230 |
---|
| 1547 | data8 sys_sched_setaffinity |
---|
| 1548 | data8 sys_sched_getaffinity |
---|
| 1549 | data8 sys_set_tid_address |
---|
| 1550 | data8 sys_fadvise64_64 |
---|
| 1551 | data8 sys_tgkill // 1235 |
---|
| 1552 | data8 sys_exit_group |
---|
| 1553 | data8 sys_lookup_dcookie |
---|
| 1554 | data8 sys_io_setup |
---|
| 1555 | data8 sys_io_destroy |
---|
| 1556 | data8 sys_io_getevents // 1240 |
---|
| 1557 | data8 sys_io_submit |
---|
| 1558 | data8 sys_io_cancel |
---|
| 1559 | data8 sys_epoll_create |
---|
| 1560 | data8 sys_epoll_ctl |
---|
| 1561 | data8 sys_epoll_wait // 1245 |
---|
| 1562 | data8 sys_restart_syscall |
---|
| 1563 | data8 sys_semtimedop |
---|
| 1564 | data8 sys_timer_create |
---|
| 1565 | data8 sys_timer_settime |
---|
| 1566 | data8 sys_timer_gettime // 1250 |
---|
| 1567 | data8 sys_timer_getoverrun |
---|
| 1568 | data8 sys_timer_delete |
---|
| 1569 | data8 sys_clock_settime |
---|
| 1570 | data8 sys_clock_gettime |
---|
| 1571 | data8 sys_clock_getres // 1255 |
---|
| 1572 | data8 sys_clock_nanosleep |
---|
| 1573 | data8 sys_fstatfs64 |
---|
| 1574 | data8 sys_statfs64 |
---|
| 1575 | data8 sys_mbind |
---|
| 1576 | data8 sys_get_mempolicy // 1260 |
---|
| 1577 | data8 sys_set_mempolicy |
---|
| 1578 | data8 sys_mq_open |
---|
| 1579 | data8 sys_mq_unlink |
---|
| 1580 | data8 sys_mq_timedsend |
---|
| 1581 | data8 sys_mq_timedreceive // 1265 |
---|
| 1582 | data8 sys_mq_notify |
---|
| 1583 | data8 sys_mq_getsetattr |
---|
| 1584 | data8 sys_ni_syscall // reserved for kexec_load |
---|
| 1585 | data8 sys_ni_syscall // reserved for vserver |
---|
| 1586 | data8 sys_waitid // 1270 |
---|
| 1587 | data8 sys_add_key |
---|
| 1588 | data8 sys_request_key |
---|
| 1589 | data8 sys_keyctl |
---|
| 1590 | data8 sys_ioprio_set |
---|
| 1591 | data8 sys_ioprio_get // 1275 |
---|
| 1592 | data8 sys_move_pages |
---|
| 1593 | data8 sys_inotify_init |
---|
| 1594 | data8 sys_inotify_add_watch |
---|
| 1595 | data8 sys_inotify_rm_watch |
---|
| 1596 | data8 sys_migrate_pages // 1280 |
---|
| 1597 | data8 sys_openat |
---|
| 1598 | data8 sys_mkdirat |
---|
| 1599 | data8 sys_mknodat |
---|
| 1600 | data8 sys_fchownat |
---|
| 1601 | data8 sys_futimesat // 1285 |
---|
| 1602 | data8 sys_newfstatat |
---|
| 1603 | data8 sys_unlinkat |
---|
| 1604 | data8 sys_renameat |
---|
| 1605 | data8 sys_linkat |
---|
| 1606 | data8 sys_symlinkat // 1290 |
---|
| 1607 | data8 sys_readlinkat |
---|
| 1608 | data8 sys_fchmodat |
---|
| 1609 | data8 sys_faccessat |
---|
| 1610 | data8 sys_ni_syscall // reserved for pselect |
---|
| 1611 | data8 sys_ni_syscall // 1295 reserved for ppoll |
---|
| 1612 | data8 sys_unshare |
---|
| 1613 | data8 sys_splice |
---|
| 1614 | data8 sys_ni_syscall // reserved for set_robust_list |
---|
| 1615 | data8 sys_ni_syscall // reserved for get_robust_list |
---|
| 1616 | data8 sys_sync_file_range // 1300 |
---|
| 1617 | data8 sys_tee |
---|
| 1618 | data8 sys_vmsplice |
---|
| 1619 | |
---|
| 1620 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
---|