#ifndef _X86_64_PAGE_H
#define _X86_64_PAGE_H

/* #include <linux/string.h> */
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/bug.h>
#endif
#include <xen/interface/xen.h>

/*
 * Need to repeat this here in order to not include pgtable.h (which in turn
 * depends on definitions made here), but to be able to use the symbolic
 * name below.  The preprocessor will warn if the two definitions aren't
 * identical.
 */
#define _PAGE_PRESENT	0x001

#define arch_free_page(_page,_order)		\
({	int foreign = PageForeign(_page);	\
	if (foreign)				\
		PageForeignDestructor(_page);	\
	foreign;				\
})
#define HAVE_ARCH_FREE_PAGE
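/*
 * Annotation (not part of the original header): a "foreign" page is one
 * whose backing machine frame was granted by another Xen domain.  For such
 * a page the macro above invokes the destructor registered for it and
 * evaluates to non-zero, which the caller in the page allocator is expected
 * to treat as "skip the normal freeing path"; for ordinary pages it
 * evaluates to 0.
 */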

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#ifdef __ASSEMBLY__
#define PAGE_SIZE	(0x1 << PAGE_SHIFT)
#else
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK	(~(PAGE_SIZE-1))

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT	46
#define __PHYSICAL_MASK		((1UL << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK_SHIFT	48
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

#define PHYSICAL_PAGE_MASK	(~(PAGE_SIZE-1) & __PHYSICAL_MASK)
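/*
 * Worked values (annotation): with PAGE_SHIFT == 12,
 *	PAGE_SIZE          == 0x1000 (4 KiB)
 *	PAGE_MASK          == 0xfffffffffffff000
 *	__PHYSICAL_MASK    == 0x00003fffffffffff	(46 bits)
 *	PHYSICAL_PAGE_MASK == 0x00003ffffffff000
 * i.e. PHYSICAL_PAGE_MASK keeps only the page-frame bits of a physical
 * address.
 */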

#define THREAD_ORDER	1
#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK	(~(THREAD_SIZE-1))

#define EXCEPTION_STACK_ORDER	0
#define EXCEPTION_STKSZ		(PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER	(EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ		(PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER	2
#define IRQSTACKSIZE	(PAGE_SIZE << IRQSTACK_ORDER)
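/*
 * Resulting sizes (annotation): with 4 KiB pages the orders above give
 *	THREAD_SIZE     ==  8 KiB	(order 1)
 *	EXCEPTION_STKSZ ==  4 KiB	(order 0)
 *	DEBUG_STKSZ     ==  8 KiB	(order 1)
 *	IRQSTACKSIZE    == 16 KiB	(order 2)
 */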

#define STACKFAULT_STACK	1
#define DOUBLEFAULT_STACK	2
#define NMI_STACK		3
#define DEBUG_STACK		4
#define MCE_STACK		5
#define N_EXCEPTION_STACKS	5  /* hw limit: 7 */

#define LARGE_PAGE_MASK		(~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE		(1UL << PMD_SHIFT)

#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
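/*
 * Annotation: PMD_SHIFT comes from asm/pgtable.h (which deliberately is not
 * included here) and is 21 on x86-64, so LARGE_PAGE_SIZE and HPAGE_SIZE both
 * evaluate to 2 MiB and HUGETLB_PAGE_ORDER to 9, i.e. one huge page covers
 * 512 base pages.
 */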

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

extern unsigned long end_pfn;

#include <asm/maddr.h>

void clear_page(void *);
void copy_page(void *, void *);

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define alloc_zeroed_user_highpage(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long pgd; } pgd_t;
#define PTE_MASK	PHYSICAL_PAGE_MASK

typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	(((x).pte & _PAGE_PRESENT) ? \
			 pte_machine_to_phys((x).pte) : \
			 (x).pte)
#define pte_val_ma(x)	((x).pte)
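/*
 * Annotation: under Xen a page-table entry stores a machine frame address,
 * not a pseudo-physical one.  pte_val() therefore converts a present entry
 * back to the kernel's pseudo-physical view via pte_machine_to_phys() (from
 * asm/maddr.h, included above), while pte_val_ma() hands back the raw
 * machine form unchanged.  Entries without _PAGE_PRESENT are returned as-is.
 */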

static inline unsigned long pmd_val(pmd_t x)
{
	unsigned long ret = x.pmd;
#if CONFIG_XEN_COMPAT <= 0x030002
	if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
#else
	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
#endif
	return ret;
}

static inline unsigned long pud_val(pud_t x)
{
	unsigned long ret = x.pud;
	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
	return ret;
}

static inline unsigned long pgd_val(pgd_t x)
{
	unsigned long ret = x.pgd;
	if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
	return ret;
}

#define pgprot_val(x)	((x).pgprot)

static inline pte_t __pte(unsigned long x)
{
	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
	return ((pte_t) { (x) });
}

static inline pmd_t __pmd(unsigned long x)
{
	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
	return ((pmd_t) { (x) });
}

static inline pud_t __pud(unsigned long x)
{
	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
	return ((pud_t) { (x) });
}

static inline pgd_t __pgd(unsigned long x)
{
	if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
	return ((pgd_t) { (x) });
}
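/*
 * Annotation: the constructors above are the inverse of the *_val()
 * accessors; a pseudo-physical value with _PAGE_PRESENT set is converted to
 * its machine form with pte_phys_to_machine() before being wrapped, so what
 * ends up in the page tables is a machine address.  A hedged usage sketch
 * (mk_pte-style construction, for illustration only):
 *
 *	pte_t pte = __pte((page_to_pfn(page) << PAGE_SHIFT) | pgprot_val(prot));
 */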

#define __pgprot(x)	((pgprot_t) { (x) } )

#define __PHYSICAL_START	((unsigned long)CONFIG_PHYSICAL_START)
#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map	0xffffffff80000000UL
#define __PAGE_OFFSET		0xffff880000000000UL

#else
#define __PHYSICAL_START	CONFIG_PHYSICAL_START
#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map	0xffffffff80000000
#define __PAGE_OFFSET		0xffff880000000000
#endif /* !__ASSEMBLY__ */

#if CONFIG_XEN_COMPAT <= 0x030002
#undef LOAD_OFFSET
#define LOAD_OFFSET	0
#endif

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
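/*
 * Annotation: e.g. with 4 KiB pages PAGE_ALIGN(0x1234) == 0x2000, while an
 * already aligned address such as 0x2000 is returned unchanged.
 */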

#define KERNEL_TEXT_SIZE	(40UL*1024*1024)
#define KERNEL_TEXT_START	0xffffffff80000000UL

#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

/* Note: __pa(&symbol_visible_to_c) should always be replaced with
   __pa_symbol.  Otherwise you risk miscompilation. */
#define __pa(x)			(((unsigned long)(x) >= __START_KERNEL_map) ? \
				 (unsigned long)(x) - (unsigned long)__START_KERNEL_map : \
				 (unsigned long)(x) - PAGE_OFFSET)
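/*
 * Annotation: __pa() distinguishes the two kernel mappings described in
 * Documentation/x86_64/mm.txt -- addresses in the kernel text mapping
 * (>= __START_KERNEL_map) and addresses in the direct mapping at
 * PAGE_OFFSET.  For example, __pa(0xffff880000001000UL) == 0x1000, since
 * that address lies in the direct map.
 */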
/* __pa_symbol should be used for C visible symbols.
   This seems to be the official gcc-blessed way to do such arithmetic. */
#define __pa_symbol(x)		\
	({unsigned long v;  \
	  asm("" : "=r" (v) : "0" (x)); \
	  __pa(v); })
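/*
 * Hedged usage sketch (annotation; my_sym is a hypothetical linker symbol):
 *
 *	extern char my_sym[];
 *	unsigned long phys = __pa_symbol(my_sym);
 *
 * The empty asm forces the symbol's address through a register so gcc cannot
 * apply its assumptions about symbol addresses to the comparison and
 * subtraction inside __pa() -- the miscompilation risk the note above
 * refers to.
 */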

#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
#define __boot_va(x)		__va(x)
#define __boot_pa(x)		__pa(x)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < end_pfn)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS \
	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define __HAVE_ARCH_GATE_AREA	1

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

#endif /* __KERNEL__ */

#endif /* _X86_64_PAGE_H */