/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/mtrr.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>
#include <asm/idle.h>
#ifdef CONFIG_XEN
#include <xen/evtchn.h>
#endif

#ifndef CONFIG_XEN
/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector the target CPUs look into
 * the right per cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to this right now.
 * In future when interrupts are split into per CPU domains this could be
 * fixed, at the cost of triggering multiple IPIs in some cases.
 */
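
/*
 * Concretely (an illustrative sketch mirroring flush_tlb_others()
 * below): the sender slot is picked as
 *
 *	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 *
 * so with 8 vectors on a 16-CPU box, CPU 3 and CPU 11 both signal via
 * INVALIDATE_TLB_VECTOR_START + 3 and serialize on that slot's
 * tlbstate_lock.
 */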

union smp_flush_state {
	struct {
		cpumask_t flush_cpumask;
		struct mm_struct *flush_mm;
		unsigned long flush_va;
#define FLUSH_ALL	-1ULL
		spinlock_t tlbstate_lock;
	};
	char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;

/*
 * State is put into the per CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per cpu data segment.
 */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
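
/*
 * Illustrative note (an assumption about the usual cache geometry, not
 * something this file spells out): the pad/____cacheline_aligned pair
 * makes each flush_state entry occupy whole cache lines, so e.g.
 *
 *	union smp_flush_state fs[2];
 *	(char *)&fs[1] - (char *)&fs[0] >= SMP_CACHE_BYTES
 *
 * and a remote CPU spinning on one entry never dirties a neighbour's.
 */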
#endif

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
	if (read_pda(mmu_state) == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}

#ifndef CONFIG_XEN
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */
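
/*
 * For reference, steps 1b1)-1b3) correspond roughly to this sketch
 * (illustrative only; the real code lives in switch_mm() in
 * asm/mmu_context.h):
 *
 *	write_pda(mmu_state, TLBSTATE_OK);
 *	if (!cpu_test_and_set(cpu, mm->cpu_vm_mask))
 *		load_cr3(mm->pgd);	[bit was 0: leave_mm() ran, reload]
 */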

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
	int cpu;
	int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
	f = &per_cpu(flush_state, sender);

	if (!cpu_isset(cpu, f->flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */

	if (f->flush_mm == read_pda(active_mm)) {
		if (read_pda(mmu_state) == TLBSTATE_OK) {
			if (f->flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	cpu_clear(cpu, f->flush_cpumask);
}

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
	f = &per_cpu(flush_state, sender);

	/*
	 * Could avoid this lock when
	 * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
	 * probably not worth checking this for a cache-hot lock.
	 */
	spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

	/* We have to send the IPI only to CPUs affected. */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

	f->flush_mm = NULL;
	f->flush_va = 0;
	spin_unlock(&f->tlbstate_lock);
}

int __cpuinit init_smp_flush(void)
{
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
	}
	return 0;
}

core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
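
/*
 * Usage sketch (illustrative only, not taken from a caller in this
 * file): after changing a single user PTE one would do something like
 *
 *	set_pte(ptep, newpte);
 *	flush_tlb_page(vma, address);
 *
 * so the local CPU flushes just that va, and the other CPUs in
 * mm->cpu_vm_mask get a single-page invalidate IPI instead of a full
 * flush.
 */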

static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (read_pda(mmu_state) == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
#else
asmlinkage void smp_invalidate_interrupt(void)
{ return; }
void flush_tlb_current_task(void)
{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
void flush_tlb_mm(struct mm_struct *mm)
{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
void flush_tlb_all(void)
{ xen_tlb_flush_all(); }
#endif /* Xen */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;
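
/*
 * Handshake sketch (a summary of the code below, not extra protocol):
 *
 *	initiator				target CPU(s)
 *	call_data = &data; wmb();
 *	send IPI -------------------->		read call_data->{func,info,wait}
 *	spin until started == cpus   <--	mb(); atomic_inc(&started);
 *						(*func)(info);
 *	spin until finished == cpus  <--	mb(); atomic_inc(&finished);
 *
 * started guarantees the targets have copied the arguments out of the
 * initiator's on-stack call_data; finished is only waited on when
 * wait != 0.
 */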

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

/*
 * this function sends a 'generic call function' IPI to one other CPU
 * in the system.
 *
 * cpu is a standard Linux logical CPU number.
 */
static void
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			   int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = 1;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to the other CPU and wait for it to respond */
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
		cpu_relax();
}

/*
 * smp_call_function_single - Run a function on another CPU
 * @cpu: The target CPU.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or has executed it.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	/* prevent preemption and reschedule on another processor */
	int me = get_cpu();

	if (cpu == me) {
		WARN_ON(1);
		put_cpu();
		return -EBUSY;
	}
	spin_lock_bh(&call_lock);
	__smp_call_function_single(cpu, func, info, nonatomic, wait);
	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
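
/*
 * Usage sketch (illustrative; remote_tick is hypothetical):
 *
 *	static void remote_tick(void *unused)
 *	{
 *		printk(KERN_DEBUG "tick on CPU %d\n", smp_processor_id());
 *	}
 *
 *	smp_call_function_single(1, remote_tick, NULL, 0, 1);
 *
 * As with smp_call_function() below, this must not be called with
 * interrupts disabled, since the initiator spins waiting for the
 * target to take the IPI.
 */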

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
#ifndef CONFIG_XEN
		cpu_relax();
#else
		barrier();
#endif

	if (!wait)
		return;

	while (atomic_read(&data.finished) != cpus)
#ifndef CONFIG_XEN
		cpu_relax();
#else
		barrier();
#endif
}

/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute func or have executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	spin_lock(&call_lock);
	__smp_call_function(func, info, nonatomic, wait);
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
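
/*
 * Usage sketch (illustrative; drain_local_counters is hypothetical):
 *
 *	preempt_disable();
 *	smp_call_function(drain_local_counters, NULL, 0, 1);
 *	drain_local_counters(NULL);	[then run it locally too]
 *	preempt_enable();
 *
 * i.e. the classic pattern of IPI'ing every other CPU and then calling
 * the function directly on the local one.
 */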

void smp_stop_cpu(void)
{
	unsigned long flags;

	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_save(flags);
#ifndef CONFIG_XEN
	disable_local_APIC();
#endif
	local_irq_restore(flags);
}

static void smp_really_stop_cpu(void *dummy)
{
	smp_stop_cpu();
	for (;;)
		halt();
}

void smp_send_stop(void)
{
	int nolock = 0;
#ifndef CONFIG_XEN
	if (reboot_force)
		return;
#endif
	/* Don't deadlock on the call lock in panic */
	if (!spin_trylock(&call_lock)) {
		/* ignore locking because we have panicked anyway */
		nolock = 1;
	}
	__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);

	local_irq_disable();
#ifndef CONFIG_XEN
	disable_local_APIC();
#endif
	local_irq_enable();
}

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
#ifndef CONFIG_XEN
asmlinkage void smp_reschedule_interrupt(void)
#else
asmlinkage irqreturn_t smp_reschedule_interrupt(void)
#endif
{
#ifndef CONFIG_XEN
	ack_APIC_irq();
#else
	return IRQ_HANDLED;
#endif
}

#ifndef CONFIG_XEN
asmlinkage void smp_call_function_interrupt(void)
#else
asmlinkage irqreturn_t smp_call_function_interrupt(void)
#endif
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

#ifndef CONFIG_XEN
	ack_APIC_irq();
#endif
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	exit_idle();
	irq_enter();
	(*func)(info);
	irq_exit();
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
#ifdef CONFIG_XEN
	return IRQ_HANDLED;
#endif
}

int safe_smp_processor_id(void)
{
#ifdef CONFIG_XEN
	return smp_processor_id();
#else
	unsigned apicid, i;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
		return apicid;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apicid)
			return i;
	}

	/*
	 * No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
	 * or called too early. Either way, we must be CPU 0.
	 */
	if (x86_cpu_to_apicid[0] == BAD_APICID)
		return 0;

	return 0; /* Should not happen */
#endif
}
---|