#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

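/*
 * XEN: lazy TLB switching is compiled out (see the "no lazy tlb"
 * blocks below), so entering lazy TLB mode is a no-op here.
 */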
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

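/*
 * Called by the scheduler ahead of switch_to(); the segment
 * save/clear must happen at this point rather than in __switch_to()
 * (see the comment in the body).
 */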
#define prepare_arch_switch(next)	__prepare_arch_switch()

static inline void __prepare_arch_switch(void)
{
	/*
	 * Save away %fs and %gs. No need to save %es and %ds, as those
	 * are always kernel segments while inside the kernel. Must
	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
	 */
	asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
		: "=m" (current->thread.fs),
		  "=m" (current->thread.gs));
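	/*
	 * Then load the null selector into %fs and %gs so that no
	 * stale selector can still reference the old LDT once it is
	 * torn down or replaced.
	 */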
	asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
		: : "r" (0) );
}

extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
extern void mm_pin_all(void);
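
/*
 * Under Xen a pgd must be pinned (validated and registered read-only
 * with the hypervisor) before it can be installed as the page-table
 * base, unless the domain runs with writable page tables; switch_mm()
 * BUG()s if that invariant is violated.
 */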
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	struct mmuext_op _op[2], *op = _op;

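	/*
	 * Batch up to two MMU extended operations (new page-table base
	 * plus an optional LDT switch) and issue them in a single
	 * hypercall below.
	 */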
	if (likely(prev != next)) {
		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
		       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));

		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables: load_cr3(next->pgd) */
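		/*
		 * MMUEXT_NEW_BASEPTR takes a machine frame number,
		 * hence the pfn_to_mfn() translation of the pgd's
		 * pseudo-physical frame.
		 */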
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
		op++;

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt)) {
			/* load_LDT_nolock(&next->context, cpu) */
			op->cmd = MMUEXT_SET_LDT;
			op->arg1.linear_addr = (unsigned long)next->context.ldt;
			op->arg2.nr_ents = next->context.size;
			op++;
		}

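		/* Issue the batched operation(s) in one hypercall. */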
		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
	}
#if 0 /* XEN: no lazy tlb */
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}

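/*
 * Clear %fs and %gs, as in __prepare_arch_switch(), so that no stale
 * selector can still reference the outgoing mm's LDT.
 */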
#define deactivate_mm(tsk, mm) \
	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))

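/*
 * Pin the incoming pgd on first use so the PG_pinned invariant
 * checked in switch_mm() holds, then switch to it.
 */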
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
		mm_pin(next);
	switch_mm(prev, next, NULL);
}

#endif /* __I386_SCHED_H */