source: trunk/packages/xen-3.1/xen-3.1/linux-2.6-xen-sparse/include/asm-i386/mach-xen/asm/pgtable.h @ 34

Last change on this file: revision 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 17.6 KB
#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <asm/hypervisor.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Is this pagetable pinned? */
#define PG_pinned       PG_arch_1

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t *swapper_pg_dir;
extern kmem_cache_t *pgd_cache;
extern kmem_cache_t *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode', it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE       (1UL << PMD_SHIFT)
# define PMD_MASK       (~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS      0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT    22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
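/*
 * Illustrative arithmetic, assuming the default 3GB/1GB split: with
 * PAGE_OFFSET == 0xC0000000 and the non-PAE PGDIR_SHIFT of 22,
 * USER_PGD_PTRS is 0xC0000000 >> 22 == 768, leaving
 * KERNEL_PGD_PTRS == 1024 - 768 == 256 pgd entries for the kernel mapping.
 */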

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET  (8*1024*1024)
#define VMALLOC_START   (((unsigned long) high_memory + vmalloc_earlyreserve + \
                        2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END    (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
#endif
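/*
 * Worked example of the VMALLOC_START rounding, assuming 128MB of lowmem
 * (high_memory == 0xC8000000) and vmalloc_earlyreserve == 0:
 * 0xC8000000 + 2*0x800000 - 1 == 0xC8FFFFFF, which masked down to an 8MB
 * boundary gives 0xC8800000, i.e. exactly one 8MB guard hole after the
 * direct mapping of physical memory.
 */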

/*
 * _PAGE_PSE set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT       0
#define _PAGE_BIT_RW            1
#define _PAGE_BIT_USER          2
#define _PAGE_BIT_PWT           3
#define _PAGE_BIT_PCD           4
#define _PAGE_BIT_ACCESSED      5
#define _PAGE_BIT_DIRTY         6
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1       9       /* available for programmer */
#define _PAGE_BIT_UNUSED2       10
#define _PAGE_BIT_UNUSED3       11
#define _PAGE_BIT_NX            63

#define _PAGE_PRESENT   0x001
#define _PAGE_RW        0x002
#define _PAGE_USER      0x004
#define _PAGE_PWT       0x008
#define _PAGE_PCD       0x010
#define _PAGE_ACCESSED  0x020
#define _PAGE_DIRTY     0x040
#define _PAGE_PSE       0x080   /* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL    0x100   /* Global TLB entry PPro+ */
#define _PAGE_UNUSED1   0x200   /* available for programmer */
#define _PAGE_UNUSED2   0x400
#define _PAGE_UNUSED3   0x800

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE      0x040   /* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE  0x080   /* if the user mapped it with PROT_NONE;
                                   pte_present gives true */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX        (1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX        0
#endif

#define _PAGE_TABLE     (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
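/*
 * Example of how the flag bits combine, assuming the non-PAE 32-bit pte
 * layout: a pte value of 0x00123067 encodes frame number 0x123
 * (0x00123067 >> PAGE_SHIFT) with _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
 * _PAGE_ACCESSED | _PAGE_DIRTY (0x067) set in the low flag bits.
 */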

#define PAGE_NONE \
        __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
        PAGE_COPY_NOEXEC
#define PAGE_READONLY \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO                (__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE           (__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE             (__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC        (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL             __pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO          __pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC        __pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE     __pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE       __pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC  __pgprot(__PAGE_KERNEL_LARGE_EXEC)

/*
 * The i386 can't enforce a separate execute permission and treats
 * execute the same as read. Also, write permission implies read
 * permission. This is the closest we can get..
 */
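/*
 * (In the __P/__S names below, the three digits are the exec/write/read
 *  bits of the index into protection_map[]; __P010, for example, is a
 *  private write-only mapping and therefore becomes copy-on-write.)
 */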
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)  ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
#define pmd_none(x)     (!(unsigned long)pmd_val(x))
#if CONFIG_XEN_COMPAT <= 0x030002
/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
   page tables (wr.p.t.) can temporarily clear it. */
#define pmd_present(x)  (pmd_val(x))
#else
#define pmd_present(x)  (pmd_val(x) & _PAGE_PRESENT)
#endif
#define pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))


#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)           { return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)           { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)          { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)          { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)          { return (pte).pte_low & _PAGE_RW; }
static inline int pte_huge(pte_t pte)           { return (pte).pte_low & _PAGE_PSE; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)           { return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)    { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)    { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)      { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)        { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)    { (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)       { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)       { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)      { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)      { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)      { (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)       { (pte).pte_low |= _PAGE_PSE; return pte; }

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif

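/*
 * In the two helpers below, HYPERVISOR_update_va_mapping() can only act on
 * the currently loaded page tables, so the pte write-back goes through the
 * hypercall when the vma belongs to the current mm and falls back to
 * storing pte_low directly for a foreign mm (or if the hypercall fails).
 */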
#define ptep_test_and_clear_dirty(vma, addr, ptep)                      \
({                                                                      \
        pte_t __pte = *(ptep);                                          \
        int __ret = pte_dirty(__pte);                                   \
        if (__ret) {                                                    \
                __pte = pte_mkclean(__pte);                             \
                if ((vma)->vm_mm != current->mm ||                      \
                    HYPERVISOR_update_va_mapping(addr, __pte, 0))       \
                        (ptep)->pte_low = __pte.pte_low;                \
        }                                                               \
        __ret;                                                          \
})

#define ptep_test_and_clear_young(vma, addr, ptep)                      \
({                                                                      \
        pte_t __pte = *(ptep);                                          \
        int __ret = pte_young(__pte);                                   \
        if (__ret) {                                                    \
                __pte = pte_mkold(__pte);                               \
                if ((vma)->vm_mm != current->mm ||                      \
                    HYPERVISOR_update_va_mapping(addr, __pte, 0))       \
                        (ptep)->pte_low = __pte.pte_low;                \
        }                                                               \
        __ret;                                                          \
})

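/*
 * When the mm's page tables are pinned, Xen has validated them and
 * write-protects the frames, so the full-teardown clear below goes through
 * an explicit xen_l1_entry_update() hypercall; an unpinned page table is an
 * ordinary page and can still be cleared with a plain store.
 */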
#define ptep_get_and_clear_full(mm, addr, ptep, full)                   \
        ((full) ? ({                                                    \
                pte_t __res = *(ptep);                                  \
                if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
                        xen_l1_entry_update(ptep, __pte(0));            \
                else                                                    \
                        *(ptep) = __pte(0);                             \
                __res;                                                  \
         }) :                                                           \
         ptep_get_and_clear(mm, addr, ptep))

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (pte_write(pte))
                set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - same as dst
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the ranges must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
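/*
 * Typical use, as a sketch: a freshly allocated pgd copies the kernel
 * portion of swapper_pg_dir so that every address space shares the kernel
 * mappings, e.g.
 *
 *      clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *                      swapper_pg_dir + USER_PTRS_PER_PGD,
 *                      KERNEL_PGD_PTRS);
 */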

/*
 * Macro to mark a page protection value as "uncacheable".  On processors which do not support
 * it, this is a no-op.
 */
#define pgprot_noncached(prot)  ((boot_cpu_data.x86 > 3)                                          \
                                 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
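/*
 * Sketch of a typical caller (hypothetical driver code): a device mmap()
 * handler would combine this with io_remap_pfn_range(), defined later in
 * this file, e.g.
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      io_remap_pfn_range(vma, vma->vm_start, mfn,
 *                         vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */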

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)    pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        /*
         * Since this might change the present bit (which controls whether
         * a pte_t object has undergone p2m translation), we must use
         * pte_val() on the input pte and __pte() for the return value.
         */
        paddr_t pteval = pte_val(pte);

        pteval &= _PAGE_CHG_MASK;
        pteval |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
        pteval &= __supported_pte_mask;
#endif
        return __pte(pteval);
}
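/*
 * Sketch of typical use: protection-changing code (mprotect-style paths)
 * applies the vma's new protection while _PAGE_CHG_MASK preserves the
 * frame number and the accessed/dirty bits:
 *
 *      pte = pte_modify(pte, vma->vm_page_prot);
 */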

#define pmd_large(pmd) \
((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
                (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
                (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
        ((pte_t *) pmd_page_kernel(*(dir)) +  pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_kernel(pmd) \
                ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
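/*
 * Putting the index/offset macros together, as a sketch of caller-side
 * code (variable names are illustrative): resolving the pte that maps a
 * kernel virtual address, much as virt_to_ptep() further down does, but
 * without the checks a real caller needs for unmapped or PSE regions:
 *
 *      pgd_t *pgd = pgd_offset_k(address);
 *      pud_t *pud = pud_offset(pgd, address);
 *      pmd_t *pmd = pmd_offset(pud, address);
 *      pte_t *pte = pte_offset_kernel(pmd, address);
 */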

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address);

/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
 extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
 static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
#endif

extern void noexec_setup(const char *str);

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
         pte_index(address))
#define pte_offset_map_nested(dir, address) \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
         pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
        ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
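/*
 * Usage pattern, as a sketch: callers bracket the access with map/unmap so
 * that, with CONFIG_HIGHPTE, the atomic kmap slot is released again:
 *
 *      pte_t *pte = pte_offset_map(pmd, address);
 *      entry = *pte;
 *      pte_unmap(pte);
 */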

#define __HAVE_ARCH_PTEP_ESTABLISH
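/*
 * In ptep_establish() below, the flag word passed to
 * HYPERVISOR_update_va_mapping() combines UVMF_INVLPG | UVMF_MULTI with the
 * address of the mm's cpu_vm_mask bitmap, which (as far as the interface
 * goes) asks Xen to invalidate the address only on the CPUs in that mask;
 * the foreign-mm path instead updates the PTE explicitly and calls
 * flush_tlb_page().
 */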
#define ptep_establish(vma, address, ptep, pteval)                      \
        do {                                                            \
                if ( likely((vma)->vm_mm == current->mm) ) {            \
                        BUG_ON(HYPERVISOR_update_va_mapping(address,    \
                                pteval,                                 \
                                (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
                                        UVMF_INVLPG|UVMF_MULTI));       \
                } else {                                                \
                        xen_l1_entry_update(ptep, pteval);              \
                        flush_tlb_page(vma, address);                   \
                }                                                       \
        } while (0)

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 *
 * Also, we only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(vma, address, ptep, entry, dirty)         \
        do {                                                            \
                if (dirty)                                              \
                        ptep_establish(vma, address, ptep, entry);      \
        } while (0)

#include <xen/features.h>
void make_lowmem_page_readonly(void *va, unsigned int feature);
void make_lowmem_page_writable(void *va, unsigned int feature);
void make_page_readonly(void *va, unsigned int feature);
void make_page_writable(void *va, unsigned int feature);
void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
void make_pages_writable(void *va, unsigned int nr, unsigned int feature);

#define virt_to_ptep(__va)                                              \
({                                                                      \
        pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));             \
        pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));        \
        pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));        \
        pte_offset_kernel(__pmd, (unsigned long)(__va));                \
})

#define arbitrary_virt_to_machine(__va)                                 \
({                                                                      \
        maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
        m | ((unsigned long)(__va) & (PAGE_SIZE-1));                    \
})
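/*
 * Hypothetical usage: code that hands Xen the machine address of a
 * kernel-resident object is typically fed through this macro, e.g.
 *
 *      maddr_t ma = arbitrary_virt_to_machine(&op);
 *
 * where 'op' stands in for some structure being passed to a hypercall.
 */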

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)   (1)
#endif /* CONFIG_FLATMEM */

int direct_remap_pfn_range(struct vm_area_struct *vma,
                           unsigned long address,
                           unsigned long mfn,
                           unsigned long size,
                           pgprot_t prot,
                           domid_t  domid);
int direct_kernel_remap_pfn_range(unsigned long address,
                                  unsigned long mfn,
                                  unsigned long size,
                                  pgprot_t prot,
                                  domid_t  domid);
int create_lookup_pte_addr(struct mm_struct *mm,
                           unsigned long address,
                           uint64_t *ptep);
int touch_pte_range(struct mm_struct *mm,
                    unsigned long address,
                    unsigned long size);

#define io_remap_pfn_range(vma,from,pfn,size,prot) \
direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)

#define MK_IOSPACE_PFN(space, pfn)      (pfn)
#define GET_IOSPACE(pfn)                0
#define GET_PFN(pfn)                    (pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */