source: trunk/packages/xen-3.1/xen-3.1/xen/arch/x86/smp.c @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 8.1 KB
/*
 *      Intel SMP support routines.
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *      This code is released under the GNU General Public License version 2 or
 *      later.
 */

#include <xen/config.h>
#include <xen/irq.h>
#include <xen/sched.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/spinlock.h>
#include <asm/current.h>
#include <asm/smp.h>
#include <asm/mc146818rtc.h>
#include <asm/flushtlb.h>
#include <asm/smpboot.h>
#include <asm/hardirq.h>
#include <asm/ipi.h>
#include <asm/hvm/support.h>
#include <mach_apic.h>

/*
 *      Some notes on x86 processor bugs affecting SMP operation:
 *
 *      Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *      The Linux implications for SMP are handled as follows:
 *
 *      Pentium III / [Xeon]
 *              None of the E1AP-E3AP errata are visible to the user.
 *
 *      E1AP.   see PII A1AP
 *      E2AP.   see PII A2AP
 *      E3AP.   see PII A3AP
 *
 *      Pentium II / [Xeon]
 *              None of the A1AP-A3AP errata are visible to the user.
 *
 *      A1AP.   see PPro 1AP
 *      A2AP.   see PPro 2AP
 *      A3AP.   see PPro 7AP
 *
 *      Pentium Pro
 *              None of 1AP-9AP errata are visible to the normal user,
 *      except occasional delivery of 'spurious interrupt' as trap #15.
 *      This is very rare and a non-problem.
 *
 *      1AP.    Linux maps APIC as non-cacheable
 *      2AP.    worked around in hardware
 *      3AP.    fixed in C0 and above steppings microcode update.
 *              Linux does not use excessive STARTUP_IPIs.
 *      4AP.    worked around in hardware
 *      5AP.    symmetric IO mode (normal Linux operation) not affected.
 *              'noapic' mode has vector 0xf filled out properly.
 *      6AP.    'noapic' mode might be affected - fixed in later steppings
 *      7AP.    We do not assume writes to the LVT deasserting IRQs
 *      8AP.    We do not enable low power mode (deep sleep) during MP bootup
 *      9AP.    We do not use mixed mode
 */

/*
 * The following functions deal with sending IPIs between CPUs.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
    return APIC_DM_FIXED | shortcut | vector;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
    return SET_APIC_DEST_FIELD(mask);
}
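
/*
 * Illustrative sketch of how the senders below use these two helpers, where
 * 'dest' stands for either a physical APIC ID or a logical CPU bitmask:
 *
 *     apic_write_around(APIC_ICR2, __prepare_ICR2(dest));
 *     apic_write_around(APIC_ICR, __prepare_ICR(0, vector) | APIC_DEST_LOGICAL);
 *
 * ICR2 carries the destination; the subsequent write to ICR, which holds the
 * vector and delivery mode, is what actually dispatches the IPI.
 */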

static inline void check_IPI_mask(cpumask_t cpumask)
{
    /*
     * Sanity, and necessary. An IPI with no target generates a send accept
     * error with Pentium and P6 APICs.
     */
    ASSERT(cpus_subset(cpumask, cpu_online_map));
    ASSERT(!cpus_empty(cpumask));
}

void send_IPI_mask_flat(cpumask_t cpumask, int vector)
{
    unsigned long mask = cpus_addr(cpumask)[0];
    unsigned long cfg;
    unsigned long flags;

    check_IPI_mask(cpumask);

    local_irq_save(flags);

    /*
     * Wait for idle.
     */
    apic_wait_icr_idle();

    /*
     * prepare target chip field
     */
    cfg = __prepare_ICR2(mask);
    apic_write_around(APIC_ICR2, cfg);

    /*
     * program the ICR
     */
    cfg = __prepare_ICR(0, vector) | APIC_DEST_LOGICAL;

    /*
     * Send the IPI. The write to APIC_ICR fires this off.
     */
    apic_write_around(APIC_ICR, cfg);

    local_irq_restore(flags);
}
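
/*
 * Flat logical delivery, as above, packs all targets into a single ICR2
 * bitmask so one IPI reaches every CPU in the mask at once, but flat mode
 * can address at most eight CPUs. send_IPI_mask_phys() below instead sends
 * one IPI per target CPU, addressed by physical APIC ID.
 */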

void send_IPI_mask_phys(cpumask_t mask, int vector)
{
    unsigned long cfg, flags;
    unsigned int query_cpu;

    check_IPI_mask(mask);

    /*
     * Hack. The clustered APIC addressing mode doesn't allow us to send
     * to an arbitrary mask, so I do a unicast to each CPU instead. This
     * should be modified to do 1 message per cluster ID - mbligh
     */

    local_irq_save(flags);

    for_each_cpu_mask( query_cpu, mask )
    {
        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(cpu_physical_id(query_cpu));
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector) | APIC_DEST_PHYSICAL;

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
    }

    local_irq_restore(flags);
}

static DEFINE_SPINLOCK(flush_lock);
static cpumask_t flush_cpumask;
static unsigned long flush_va;

fastcall void smp_invalidate_interrupt(void)
{
    ack_APIC_irq();
    perfc_incr(ipis);
    irq_enter();
    if ( !__sync_lazy_execstate() )
    {
        if ( flush_va == FLUSHVA_ALL )
            local_flush_tlb();
        else
            local_flush_tlb_one(flush_va);
    }
    cpu_clear(smp_processor_id(), flush_cpumask);
    irq_exit();
}

void __flush_tlb_mask(cpumask_t mask, unsigned long va)
{
    ASSERT(local_irq_is_enabled());

    if ( cpu_isset(smp_processor_id(), mask) )
    {
        if ( va == FLUSHVA_ALL )
            local_flush_tlb();
        else
            local_flush_tlb_one(va);
        cpu_clear(smp_processor_id(), mask);
    }

    if ( !cpus_empty(mask) )
    {
        spin_lock(&flush_lock);
        flush_cpumask = mask;
        flush_va      = va;
        send_IPI_mask(mask, INVALIDATE_TLB_VECTOR);
        while ( !cpus_empty(flush_cpumask) )
            cpu_relax();
        spin_unlock(&flush_lock);
    }
}
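
/*
 * Usage sketch (illustrative): flush one linear address 'va', or the whole
 * TLB, on every online CPU including the caller:
 *
 *     __flush_tlb_mask(cpu_online_map, va);
 *     __flush_tlb_mask(cpu_online_map, FLUSHVA_ALL);
 *
 * Interrupts must be enabled (asserted above): the function spins until
 * every remote CPU in the mask has run smp_invalidate_interrupt() and
 * cleared itself from flush_cpumask.
 */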

/* Call with no locks held and interrupts enabled (e.g., softirq context). */
void new_tlbflush_clock_period(void)
{
    cpumask_t allbutself;

    /* Flush everyone else. We definitely flushed just before entry. */
    allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    __flush_tlb_mask(allbutself, FLUSHVA_ALL);

    /* No need for atomicity: we are the only possible updater. */
    ASSERT(tlbflush_clock == 0);
    tlbflush_clock++;
}

static void flush_tlb_all_pge_ipi(void *info)
{
    local_flush_tlb_pge();
}

void flush_tlb_all_pge(void)
{
    smp_call_function(flush_tlb_all_pge_ipi, 0, 1, 1);
    local_flush_tlb_pge();
}

void smp_send_event_check_mask(cpumask_t mask)
{
    cpu_clear(smp_processor_id(), mask);
    if ( !cpus_empty(mask) )
        send_IPI_mask(mask, EVENT_CHECK_VECTOR);
}

/*
 * Structure and data for smp_call_function()/on_selected_cpus().
 */

struct call_data_struct {
    void (*func) (void *info);
    void *info;
    int wait;
    atomic_t started;
    atomic_t finished;
    cpumask_t selected;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

int smp_call_function(
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    cpumask_t allbutself = cpu_online_map;
    cpu_clear(smp_processor_id(), allbutself);
    return on_selected_cpus(allbutself, func, info, retry, wait);
}

int on_selected_cpus(
    cpumask_t selected,
    void (*func) (void *info),
    void *info,
    int retry,
    int wait)
{
    struct call_data_struct data;
    unsigned int nr_cpus = cpus_weight(selected);

    ASSERT(local_irq_is_enabled());

    if ( nr_cpus == 0 )
        return 0;

    data.func = func;
    data.info = info;
    data.wait = wait;
    atomic_set(&data.started, 0);
    atomic_set(&data.finished, 0);
    data.selected = selected;

    spin_lock(&call_lock);

    call_data = &data;
    wmb();

    send_IPI_mask(selected, CALL_FUNCTION_VECTOR);

    while ( atomic_read(wait ? &data.finished : &data.started) != nr_cpus )
        cpu_relax();

    spin_unlock(&call_lock);

    return 0;
}
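
/*
 * Usage sketch (illustrative; 'drain_fn' is a made-up callback): run a
 * function on every other online CPU and wait for completion:
 *
 *     static void drain_fn(void *unused) { local_flush_tlb(); }
 *     ...
 *     smp_call_function(drain_fn, NULL, 1, 1);
 *
 * With wait == 0 the caller only waits until every target has picked up the
 * call (incremented 'started'); with wait == 1 it waits until every target
 * has finished running the callback.
 */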

static void stop_this_cpu (void *dummy)
{
    cpu_clear(smp_processor_id(), cpu_online_map);

    local_irq_disable();
    disable_local_APIC();
    hvm_disable();

    for ( ; ; )
        __asm__ __volatile__ ( "hlt" );
}

void smp_send_stop(void)
{
    /* Stop all other CPUs in the system. */
    smp_call_function(stop_this_cpu, NULL, 1, 0);

    local_irq_disable();
    disable_local_APIC();
    local_irq_enable();
}

fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
    ack_APIC_irq();
    perfc_incr(ipis);
}

fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
{
    void (*func)(void *info) = call_data->func;
    void *info = call_data->info;

    ack_APIC_irq();
    perfc_incr(ipis);

    if ( !cpu_isset(smp_processor_id(), call_data->selected) )
        return;

    irq_enter();

    if ( call_data->wait )
    {
        (*func)(info);
        mb();
        atomic_inc(&call_data->finished);
    }
    else
    {
        mb();
        atomic_inc(&call_data->started);
        (*func)(info);
    }

    irq_exit();
}