/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#include <xen/features.h>
#include <asm/hypervisor.h>

static void pgd_test_and_unpin(pgd_t *pgd);

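/*
 * Dump a per-node summary of page usage, followed by the global dirty,
 * writeback, mapped, slab and pagetable counters.
 */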
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
	       global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
	printk(KERN_INFO "%lu pages pagetables\n",
	       global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a virtual page frame with a given machine page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
			   pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte_ma(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

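/*
 * Under Xen the top of the fixmap must stay below the hypervisor hole,
 * so __FIXADDR_TOP is a variable rather than a compile-time constant.
 * set_fixaddr_top() may lower it, but only before the first fixmap
 * entry has been instantiated.
 */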
static int nr_fixmaps = 0;
unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
EXPORT_SYMBOL(__FIXADDR_TOP);

void __init set_fixaddr_top(unsigned long top)
{
	BUG_ON(nr_fixmaps > 0);
	hypervisor_virt_start = top;
	__FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
}

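/*
 * Most fixmap entries are handed to us as machine addresses; only the
 * WP test page, the F00F workaround IDT and the vDSO refer to
 * pseudo-physical frames and go through set_pte_pfn() instead.
 */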
void __set_fixmap(enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	switch (idx) {
	case FIX_WP_TEST:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
	case FIX_VDSO:
		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
		break;
	default:
		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
		break;
	}
	nr_fixmaps++;
}

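/*
 * Kernel PTE pages come from lowmem and are immediately write-protected
 * so the hypervisor will accept them as pagetable pages (skipped when
 * writable pagetables are available).
 */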
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (pte)
		make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
	return pte;
}

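/*
 * User PTE pages may come from highmem. They are marked foreign with
 * pte_free() as their destructor, so freeing them comes back through
 * us rather than going straight to the page allocator.
 */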
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte) {
		SetPageForeign(pte, pte_free);
		init_page_count(pte);
	}
	return pte;
}

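/*
 * Destructor for foreign PTE pages: make the page writable again
 * (lowmem) or clear its pinned flag (highmem), then drop the foreign
 * state and really free it.
 */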
void pte_free(struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	if (!PageHighMem(pte)) {
		unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);

		if (!pte_write(*virt_to_ptep(va)))
			if (HYPERVISOR_update_va_mapping(
				va, pfn_pte(pfn, PAGE_KERNEL), 0))
				BUG();
	} else
		clear_bit(PG_pinned, &pte->flags);

	ClearPageForeign(pte);
	init_page_count(pte);

	__free_page(pte);
}

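/* Slab constructor: a freshly allocated pmd starts out empty. */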
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}

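/*
 * Slab constructor for pgds: pre-install the kernel mappings where
 * possible (with an unshared PAE kernel pmd they are set up later, in
 * pgd_alloc()). Non-PAE pgds also go on pgd_list so pageattr.c can
 * reach them.
 */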
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD > 1) {
		if (HAVE_SHARED_KERNEL_PMD)
			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
					swapper_pg_dir + USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	} else {
		spin_lock_irqsave(&pgd_lock, flags);
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);

	pgd_test_and_unpin(pgd);
}

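/*
 * Allocate a pgd and, in the PAE case, populate its pmds. Without a
 * shared kernel pmd the kernel pmds are copied and write-protected
 * here, and the pgd itself is kept below 4GB unless the hypervisor
 * tolerates PAE pgdirs above 4GB.
 */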
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	pmd_t **pmd;
	unsigned long flags;

	/* Check for failure before unpinning: virt_to_page(NULL) is junk. */
	if (!pgd)
		return NULL;

	pgd_test_and_unpin(pgd);

	if (PTRS_PER_PMD == 1)
		return pgd;

	if (HAVE_SHARED_KERNEL_PMD) {
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
			if (!pmd)
				goto out_oom;
			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
		}
		return pgd;
	}

	/*
	 * We can race save/restore (if we sleep during a GFP_KERNEL memory
	 * allocation). We therefore store virtual addresses of pmds as they
	 * do not change across save/restore, and poke the machine addresses
	 * into the pgdir under the pgd_lock.
	 */
	pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
	if (!pmd) {
		kmem_cache_free(pgd_cache, pgd);
		return NULL;
	}

	/* Allocate pmds, remember virtual addresses. */
	for (i = 0; i < PTRS_PER_PGD; ++i) {
		pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd[i])
			goto out_oom;
	}

	spin_lock_irqsave(&pgd_lock, flags);

	/* Protect against save/restore: move below 4GB under pgd_lock. */
	if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
		int rc = xen_create_contiguous_region(
			(unsigned long)pgd, 0, 32);
		if (rc) {
			spin_unlock_irqrestore(&pgd_lock, flags);
			goto out_oom;
		}
	}

	/* Copy kernel pmd contents and write-protect the new pmds. */
	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
		unsigned long v = (unsigned long)i << PGDIR_SHIFT;
		pgd_t *kpgd = pgd_offset_k(v);
		pud_t *kpud = pud_offset(kpgd, v);
		pmd_t *kpmd = pmd_offset(kpud, v);
		memcpy(pmd[i], kpmd, PAGE_SIZE);
		make_lowmem_page_readonly(
			pmd[i], XENFEAT_writable_page_tables);
	}

	/* It is safe to poke machine addresses of pmds under the pgd_lock. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));

	/* Ensure this pgd gets picked up and pinned on save/restore. */
	pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);

	kfree(pmd);

	return pgd;

out_oom:
	if (HAVE_SHARED_KERNEL_PMD) {
		for (i--; i >= 0; i--)
			kmem_cache_free(pmd_cache,
					(void *)__va(pgd_val(pgd[i])-1));
	} else {
		for (i--; i >= 0; i--)
			kmem_cache_free(pmd_cache, pmd[i]);
		kfree(pmd);
	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}

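/*
 * Free a pgd and, in the PAE case, its pmds. The pgd is unpinned first
 * so the pmd pages are writable again before they are torn down.
 */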
void pgd_free(pgd_t *pgd)
{
	int i;

	/*
	 * After this the pgd should not be pinned for the duration of this
	 * function's execution. We should never sleep and thus never race:
	 *  1. User pmds will not become write-protected under our feet due
	 *     to a concurrent mm_pin_all().
	 *  2. The machine addresses in PGD entries will not become invalid
	 *     due to a concurrent save/restore.
	 */
	pgd_test_and_unpin(pgd);

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1) {
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			kmem_cache_free(pmd_cache, pmd);
		}

		if (!HAVE_SHARED_KERNEL_PMD) {
			unsigned long flags;
			spin_lock_irqsave(&pgd_lock, flags);
			pgd_list_del(pgd);
			spin_unlock_irqrestore(&pgd_lock, flags);

			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
				make_lowmem_page_writable(
					pmd, XENFEAT_writable_page_tables);
				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
				kmem_cache_free(pmd_cache, pmd);
			}

			if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
				xen_destroy_contiguous_region(
					(unsigned long)pgd, 0);
		}
	}

	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}

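/*
 * Helpers to toggle the RW bit of a pagetable mapping through the
 * hypervisor. Each takes the XENFEAT_* flag that, when present, makes
 * the change unnecessary.
 */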
void make_lowmem_page_readonly(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_wrprotect(*pte), 0);
	BUG_ON(rc);
}

void make_lowmem_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	BUG_ON(rc);
}

void make_page_readonly(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_wrprotect(*pte), 0);
	if (rc) /* fall back to a direct L1 entry update */
		xen_l1_entry_update(pte, pte_wrprotect(*pte));
	if ((unsigned long)va >= (unsigned long)high_memory) {
		unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
		if (pfn >= highstart_pfn)
			kmap_flush_unused(); /* flush stale writable kmaps */
		else
#endif
			make_lowmem_page_readonly(
				phys_to_virt(pfn << PAGE_SHIFT), feature);
	}
}

void make_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	if (rc) /* fall back to a direct L1 entry update */
		xen_l1_entry_update(pte, pte_mkwrite(*pte));
	if ((unsigned long)va >= (unsigned long)high_memory) {
		unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
		if (pfn < highstart_pfn)
#endif
			make_lowmem_page_writable(
				phys_to_virt(pfn << PAGE_SHIFT), feature);
	}
}

void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
{
	if (xen_feature(feature))
		return;

	while (nr-- != 0) {
		make_page_readonly(va, feature);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}

void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
{
	if (xen_feature(feature))
		return;

	while (nr-- != 0) {
		make_page_writable(va, feature);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}

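/*
 * Mark one pagetable page read-only or writable: lowmem pages get a
 * real mapping update, highmem pages are only flagged (their stale
 * kmaps are flushed by the pinning path).
 */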
static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
{
	unsigned long pfn = page_to_pfn(page);
	int rc;

	if (PageHighMem(page)) {
		if (pgprot_val(flags) & _PAGE_RW)
			clear_bit(PG_pinned, &page->flags);
		else
			set_bit(PG_pinned, &page->flags);
	} else {
		rc = HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			pfn_pte(pfn, flags), 0);
		if (rc)
			BUG();
	}
}

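/*
 * Apply the given protection to every pagetable page reachable from
 * pgd_base (user range only), then to the pgd itself, finishing with a
 * TLB flush.
 */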
static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
{
	pgd_t *pgd = pgd_base;
	pud_t *pud;
	pmd_t *pmd;
	int g, u, m, rc;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		if (PTRS_PER_PUD > 1) /* not folded */
			pgd_walk_set_prot(virt_to_page(pud), flags);
		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
			if (pud_none(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			if (PTRS_PER_PMD > 1) /* not folded */
				pgd_walk_set_prot(virt_to_page(pmd), flags);
			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
				if (pmd_none(*pmd))
					continue;
				pgd_walk_set_prot(pmd_page(*pmd), flags);
			}
		}
	}

	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)pgd_base,
		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
		UVMF_TLB_FLUSH);
	if (rc)
		BUG();
}

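/*
 * Pinning makes every pagetable page read-only and flushes stale
 * writable kmaps before handing the pgd to the hypervisor for
 * validation; unpinning reverses the sequence.
 */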
static void __pgd_pin(pgd_t *pgd)
{
	pgd_walk(pgd, PAGE_KERNEL_RO);
	kmap_flush_unused();
	xen_pgd_pin(__pa(pgd));
	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
}

static void __pgd_unpin(pgd_t *pgd)
{
	xen_pgd_unpin(__pa(pgd));
	pgd_walk(pgd, PAGE_KERNEL);
	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
}

static void pgd_test_and_unpin(pgd_t *pgd)
{
	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
		__pgd_unpin(pgd);
}

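/* Pin or unpin a single address space under its page_table_lock. */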
void mm_pin(struct mm_struct *mm)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;
	spin_lock(&mm->page_table_lock);
	__pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

void mm_unpin(struct mm_struct *mm)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;
	spin_lock(&mm->page_table_lock);
	__pgd_unpin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

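/*
 * Pin every unpinned pgd on pgd_list; used when all pagetables must be
 * pinned at once (e.g. around save/restore).
 */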
void mm_pin_all(void)
{
	struct page *page;
	unsigned long flags;

	if (xen_feature(XENFEAT_writable_page_tables))
		return;

	/*
	 * Allow uninterrupted access to the pgd_list. Also protects
	 * __pgd_pin() by disabling preemption.
	 * All other CPUs must be at a safe point (e.g., in stop_machine
	 * or offlined entirely).
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		if (!test_bit(PG_pinned, &page->flags))
			__pgd_pin((pgd_t *)page_address(page));
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}

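/* Ensure the mm is pinned when an address space is duplicated. */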
void _arch_dup_mmap(struct mm_struct *mm)
{
	if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
		mm_pin(mm);
}

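/*
 * Called during exit_mmap(): drop the defunct mm from cr3 early (see
 * below) and unpin it once nothing else holds a reference.
 */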
void _arch_exit_mmap(struct mm_struct *mm)
{
	struct task_struct *tsk = current;

	task_lock(tsk);

	/*
	 * We aggressively remove defunct pgds from cr3: unmap_vmas() then
	 * executes *much* faster, as avoiding TLB flushes permits bigger
	 * writable-pagetable batches.
	 */
	if (tsk->active_mm == mm) {
		tsk->active_mm = &init_mm;
		atomic_inc(&init_mm.mm_count);

		switch_mm(mm, &init_mm, tsk);

		atomic_dec(&mm->mm_count);
		BUG_ON(atomic_read(&mm->mm_count) == 0);
	}

	task_unlock(tsk);

	if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
	    (atomic_read(&mm->mm_count) == 1) &&
	    !mm->context.has_foreign_mappings)
		mm_unpin(mm);
}
---|