#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * possibly do the LDT unload here?
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

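/*
 * enter_lazy_tlb() is called when this CPU starts running a task that has
 * no user mm (e.g. a kernel thread).  On native SMP it only downgrades the
 * per-CPU PDA mmu_state from TLBSTATE_OK to TLBSTATE_LAZY, so that
 * leave_mm() can later stop TLB-flush IPIs for the borrowed mm.  Under Xen
 * this bookkeeping is compiled out, presumably because remote flushes are
 * driven through hypercalls rather than IPIs there.
 */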
---|
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

#define prepare_arch_switch(next)	__prepare_arch_switch()

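/*
 * __prepare_arch_switch() runs before switch_mm() reloads cr3 and the LDT
 * for the incoming task.  After the user segment selectors have been saved
 * into current->thread, any non-null ones are cleared, apparently so that
 * stale selectors need not be preserved across the address-space switch
 * and cannot fault against an LDT that is about to be replaced.
 */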
---|
static inline void __prepare_arch_switch(void)
{
	/*
	 * Save away %es, %ds, %fs and %gs. Must happen before reload
	 * of cr3/ldt (i.e., not in __switch_to).
	 */
	__asm__ __volatile__ (
		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
		: "=m" (current->thread.es),
		  "=m" (current->thread.ds),
		  "=m" (current->thread.fsindex),
		  "=m" (current->thread.gsindex) );

	if (current->thread.ds)
		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );

	if (current->thread.es)
		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );

	if (current->thread.fsindex) {
		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
		current->thread.fs = 0;
	}

	if (current->thread.gsindex) {
		load_gs_index(0);
		current->thread.gs = 0;
	}
}

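/*
 * Xen requires a page table to be registered ("pinned") with the
 * hypervisor, which validates it and keeps it read-only to the guest,
 * before it may be installed as a base pointer.  mm_pin()/mm_unpin()
 * perform that handshake for a single mm; mm_pin_all() pins every mm in
 * the system (used, for instance, around save/suspend).
 */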
---|
extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);

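/*
 * %cr3 must hold a machine (host-physical) address, so the pgd's
 * pseudo-physical address is translated with phys_to_machine() before
 * being written; in this paravirtualized kernel the privileged write
 * itself is expected to trap to and be validated by the hypervisor.
 */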
---|
static inline void load_cr3(pgd_t *pgd)
{
	asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
		     "memory");
}

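/*
 * switch_mm() batches the address-space switch into a single
 * HYPERVISOR_mmuext_op() hypercall: the kernel base pointer
 * (MMUEXT_NEW_BASEPTR), the separate user-mode base pointer kept by the
 * x86_64 Xen port (MMUEXT_NEW_USER_BASEPTR), and, only if it changed,
 * the LDT (MMUEXT_SET_LDT).  The comment above each op names the native
 * operation it replaces; _op[3] is sized for all three being queued.
 */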
---|
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	struct mmuext_op _op[3], *op = _op;

	if (likely(prev != next)) {
		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
		       !next->context.pinned);

		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* load_cr3(next->pgd) */
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
		op++;

		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
		op->cmd = MMUEXT_NEW_USER_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
		op++;

		if (unlikely(next->context.ldt != prev->context.ldt)) {
			/* load_LDT_nolock(&next->context, cpu) */
			op->cmd = MMUEXT_SET_LDT;
			op->arg1.linear_addr = (unsigned long)next->context.ldt;
			op->arg2.nr_ents = next->context.size;
			op++;
		}

		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
	}
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			out_of_line_bug();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			xen_new_user_pt(__pa(__user_pgd(next->pgd)));
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}

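/*
 * deactivate_mm() drops the old user context (exec path): the user %gs
 * base is cleared via load_gs_index(0) and %fs is loaded with the null
 * selector, so no stale LDT-based selectors survive into the new image.
 */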
---|
#define deactivate_mm(tsk,mm)	do {			\
	load_gs_index(0);				\
	asm volatile("movl %0,%%fs"::"r"(0));		\
} while (0)

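/*
 * activate_mm() installs a brand-new address space.  The new mm's page
 * tables must already be pinned before Xen will accept them as a base
 * pointer, hence the mm_pin() call ahead of switch_mm() (see the BUG_ON
 * in switch_mm()).
 */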
---|
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	if (!next->context.pinned)
		mm_pin(next);
	switch_mm(prev, next, NULL);
}

#endif /* __X86_64_MMU_CONTEXT_H */