#ifndef _ASM_GENERIC_PGTABLE_H
#define _ASM_GENERIC_PGTABLE_H

#ifndef __HAVE_ARCH_PTEP_ESTABLISH
/*
 * Establish a new mapping:
 *  - flush the old one
 *  - update the page tables
 *  - inform the TLB about the new one
 *
 * We hold the mm semaphore for reading and vma->vm_mm->page_table_lock.
 *
 * Note: the old pte is known to not be writable, so we don't need to
 * worry about dirty bits etc getting lost.
 */
#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
	flush_tlb_page(__vma, __address);				\
} while (0)
#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	set_pte_atomic(__ptep, __entry);				\
	flush_tlb_page(__vma, __address);				\
} while (0)
#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
#endif
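/*
 * Illustrative sketch only (modelled on the COW fault path in
 * mm/memory.c; "new_page" and "entry" are the caller's locals):
 *
 *	entry = maybe_mkwrite(pte_mkdirty(mk_pte(new_page,
 *						 vma->vm_page_prot)), vma);
 *	ptep_establish(vma, address, page_table, entry);
 *	update_mmu_cache(vma, address, entry);
 */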

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Largely same as above, but only sets the access flags (dirty,
 * accessed, and writable). Furthermore, we know it always gets set
 * to a "more permissive" setting, which allows most architectures
 * to optimize this.
 */
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									  \
	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	  \
	flush_tlb_page(__vma, __address);				  \
} while (0)
#endif
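/*
 * Sketch of the intended use (an assumption drawn from the comment
 * above): a fault handler that only needs to upgrade permissions on a
 * present pte might do
 *
 *	entry = pte_mkyoung(*ptep);
 *	if (write_access)
 *		entry = pte_mkdirty(entry);
 *	ptep_set_access_flags(vma, address, ptep, entry, write_access);
 */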

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_young(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address),			\
			   (__ptep), pte_mkold(__pte));			\
	r;								\
})
#endif
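/*
 * Note: the generic version above clears the accessed bit without
 * flushing the TLB, so a stale "young" entry can survive in the TLB;
 * callers that need the flush use ptep_clear_flush_young() below.
 */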

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young;							\
	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
	if (__young)							\
		flush_tlb_page(__vma, __address);			\
	__young;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
({									\
	pte_t __pte = *(__ptep);					\
	int r = 1;							\
	if (!pte_dirty(__pte))						\
		r = 0;							\
	else								\
		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
			   pte_mkclean(__pte));				\
	r;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty;							\
	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
	if (__dirty)							\
		flush_tlb_page(__vma, __address);			\
	__dirty;							\
})
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define ptep_get_and_clear(__mm, __address, __ptep)			\
({									\
	pte_t __pte = *(__ptep);					\
	pte_clear((__mm), (__address), (__ptep));			\
	__pte;								\
})
#endif
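/*
 * Note: the read-then-clear sequence above relies on page_table_lock
 * for exclusion against software updates. Architectures whose hardware
 * can set the dirty/accessed bits concurrently typically override this
 * with an atomic (e.g. xchg-based) version so those updates are not
 * lost.
 */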

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define ptep_clear_flush(__vma, __address, __ptep)			\
({									\
	pte_t __pte;							\
	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
	flush_tlb_page(__vma, __address);				\
	__pte;								\
})
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif
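/*
 * Typically called at fork() time (see copy_page_range() in
 * mm/memory.c) to make privately-mapped pages read-only in both mms,
 * so that a later write faults and triggers copy-on-write.
 */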

#ifndef __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(pte_val(A) == pte_val(B))
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(page) (0)
#define pte_maybe_dirty(pte)		pte_dirty(pte)
#else
#define pte_maybe_dirty(pte)		(1)
#endif
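/*
 * Rationale: an architecture that defines page_test_and_clear_dirty()
 * (s390 keeps dirty state in the page's storage key) can have a dirty
 * page whose pte_dirty() is clear, so pte_maybe_dirty() must then
 * conservatively report 1.
 */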

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
#define page_test_and_clear_young(page) (0)
#endif

#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
#endif

#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
#define lazy_mmu_prot_update(pte)	do { } while (0)
#endif
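/*
 * lazy_mmu_prot_update() is a no-op by default; an architecture such
 * as ia64 hooks it to defer work (e.g. making the icache coherent for
 * executable pages) until after a pte's protections have changed.
 */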

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
#endif
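/*
 * Worked example of the wrap-safe comparison used above (assuming
 * 32-bit longs and a 4MB PGDIR_SIZE): with addr == 0xffc00000 and
 * end == 0xffffffff, addr + PGDIR_SIZE overflows and __boundary
 * becomes 0. A naive "__boundary < end" would pick 0 and corrupt the
 * walk; subtracting 1 from both sides maps 0 to ULONG_MAX, so the
 * comparison fails and end is returned instead.
 */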

#ifndef __ASSEMBLY__
/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);
void pud_clear_bad(pud_t *);
void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}
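/*
 * Sketch of the canonical walk these helpers support (modelled on the
 * range walkers in mm/memory.c; walk_pud_range() is a hypothetical
 * callee that repeats the same pattern one level down):
 *
 *	pgd = pgd_offset(mm, addr);
 *	do {
 *		next = pgd_addr_end(addr, end);
 *		if (pgd_none_or_clear_bad(pgd))
 *			continue;
 *		walk_pud_range(pgd, addr, next);
 *	} while (pgd++, addr = next, addr != end);
 */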
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_GENERIC_PGTABLE_H */