| 1 | /* |
|---|
| 2 | * include/asm-x86_64/irqflags.h |
|---|
| 3 | * |
|---|
| 4 | * IRQ flags handling |
|---|
| 5 | * |
|---|
| 6 | * This file gets included from lowlevel asm headers too, to provide |
|---|
| 7 | * wrapped versions of the local_irq_*() APIs, based on the |
|---|
| 8 | * raw_local_irq_*() functions from the lowlevel headers. |
|---|
| 9 | */ |
|---|
| 10 | #ifndef _ASM_IRQFLAGS_H |
|---|
| 11 | #define _ASM_IRQFLAGS_H |
|---|
| 12 | |
|---|
| 13 | #ifndef __ASSEMBLY__ |
|---|
| 14 | /* |
|---|
| 15 | * Interrupt control: |
|---|
| 16 | */ |
|---|
| 17 | |
|---|
| 18 | /* |
|---|
| 19 | * The use of 'barrier' in the following reflects their use as local-lock |
|---|
| 20 | * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following |
|---|
| 21 | * critical operations are executed. All critical operations must complete |
|---|
| 22 | * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also |
|---|
| 23 | * includes these barriers, for example. |
|---|
| 24 | */ |
|---|
| 25 | |
|---|
/*
 * Read the current event-channel upcall mask for this vcpu.  A nonzero
 * mask means event ("interrupt") delivery is disabled; this value plays
 * the role of the EFLAGS image on native x86.
 */
#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
|---|
| 27 | |
|---|
/* Store the current flags (the upcall mask) into the lvalue @flags. */
#define raw_local_save_flags(flags) \
	do { (flags) = __raw_local_save_flags(); } while (0)
|---|
| 30 | |
|---|
/*
 * Restore a previously saved mask value @x.  If the restored mask
 * re-enables event delivery (mask becomes 0), re-check
 * evtchn_upcall_pending: an event may have been raised while we were
 * masked, and the callback must be forced by hand to deliver it.
 * The barrier()s pin the ordering — preceding critical-section
 * accesses complete before the mask write, and the mask write happens
 * before the pending check (see the comment below).
 */
#define raw_local_irq_restore(x)					\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = current_vcpu_info();					\
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
			force_evtchn_callback();			\
	}								\
} while (0)
|---|
| 42 | |
|---|
| 43 | #ifdef CONFIG_X86_VSMP |
|---|
| 44 | |
|---|
| 45 | /* |
|---|
| 46 | * Interrupt control for the VSMP architecture: |
|---|
| 47 | */ |
|---|
| 48 | |
|---|
/*
 * VSMP: disable interrupts.  Clear the bit at the EFLAGS.IF position
 * (bit 9) and set the bit at the AC position (bit 18) in the saved
 * flags image, then restore it.
 */
static inline void raw_local_irq_disable(void)
{
	unsigned long cur = __raw_local_save_flags();

	cur &= ~(1UL << 9);	/* bit 9: IF position — clear */
	cur |= 1UL << 18;	/* bit 18: AC position — set */
	raw_local_irq_restore(cur);
}
|---|
| 55 | |
|---|
/*
 * VSMP: enable interrupts.  Set the bit at the EFLAGS.IF position
 * (bit 9) and clear the bit at the AC position (bit 18) in the saved
 * flags image, then restore it.
 */
static inline void raw_local_irq_enable(void)
{
	unsigned long cur = __raw_local_save_flags();

	cur |= 1UL << 9;	/* bit 9: IF position — set */
	cur &= ~(1UL << 18);	/* bit 18: AC position — clear */
	raw_local_irq_restore(cur);
}
|---|
| 62 | |
|---|
/*
 * VSMP: interrupts count as disabled when the IF-position bit (9) is
 * clear or the AC-position bit (18) is set in the flags image.
 * Returns 0 or 1.
 */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
	const unsigned long if_bit = 1UL << 9;
	const unsigned long ac_bit = 1UL << 18;

	return ((flags & if_bit) == 0) || ((flags & ac_bit) != 0);
}
|---|
| 67 | |
|---|
| 68 | #else /* CONFIG_X86_VSMP */ |
|---|
| 69 | |
|---|
/*
 * Mask event delivery — the paravirt equivalent of "cli".  The
 * barrier() keeps the following critical-section accesses from being
 * reordered above the mask write.
 */
#define raw_local_irq_disable()						\
do {									\
	current_vcpu_info()->evtchn_upcall_mask = 1;			\
	barrier();							\
} while (0)
|---|
| 75 | |
|---|
/*
 * Unmask event delivery — the paravirt equivalent of "sti".  After
 * clearing the mask we must re-check evtchn_upcall_pending: an event
 * may have been raised while masked, so force the callback to deliver
 * it.  The barrier()s enforce critical-section / mask-write / pending-
 * check ordering.
 */
#define raw_local_irq_enable()						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = current_vcpu_info();					\
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
		force_evtchn_callback();				\
} while (0)
|---|
| 86 | |
|---|
/*
 * Non-VSMP: any nonzero saved upcall mask means events (interrupts)
 * were disabled when the flags were captured.  Returns 0 or 1.
 */
static inline int raw_irqs_disabled_flags(unsigned long flags)
{
	return flags ? 1 : 0;
}
|---|
| 91 | |
|---|
| 92 | #endif |
|---|
| 93 | |
|---|
| 94 | /* |
|---|
| 95 | * For spinlocks, etc.: |
|---|
| 96 | */ |
|---|
| 97 | |
|---|
/*
 * Save-and-disable for spinlock paths: capture the current mask, then
 * mask event delivery.  Evaluates to the old flags value (GCC
 * statement expression), which callers later hand back to
 * raw_local_irq_restore().
 */
#define __raw_local_irq_save()						\
({									\
	unsigned long flags = __raw_local_save_flags();			\
									\
	raw_local_irq_disable();					\
									\
	flags;								\
})
|---|
| 106 | |
|---|
/* Save current flags into the lvalue @flags, then disable interrupts. */
#define raw_local_irq_save(flags) \
	do { (flags) = __raw_local_irq_save(); } while (0)
|---|
| 109 | |
|---|
/*
 * Test whether interrupts are currently disabled on this vcpu:
 * sample the live flags and interpret them with
 * raw_irqs_disabled_flags().
 */
#define raw_irqs_disabled()						\
({									\
	unsigned long flags = __raw_local_save_flags();			\
									\
	raw_irqs_disabled_flags(flags);					\
})
|---|
| 116 | |
|---|
| 117 | /* |
|---|
| 118 | * Used in the idle loop; sti takes one instruction cycle |
|---|
| 119 | * to complete: |
|---|
| 120 | */ |
|---|
| 121 | void raw_safe_halt(void); |
|---|
| 122 | |
|---|
| 123 | /* |
|---|
| 124 | * Used when interrupts are already enabled or to |
|---|
| 125 | * shutdown the processor: |
|---|
| 126 | */ |
|---|
| 127 | void halt(void); |
|---|
| 128 | |
|---|
| 129 | #else /* __ASSEMBLY__: */ |
|---|
/*
 * Assembly-side irq-flags tracing hooks: with CONFIG_TRACE_IRQFLAGS
 * these expand to calls into the trace_hardirqs_*_thunk helpers;
 * otherwise they expand to nothing.
 */
# ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
# else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
# endif
|---|
| 137 | #endif |
|---|
| 138 | |
|---|
| 139 | #endif |
|---|