/******************************************************************************
 * arch/ia64/xen/hypervisor.c
 *
 * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

//#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/efi.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/meminit.h>
#include <asm/hypervisor.h>
#include <asm/hypercall.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>

shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)XSI_BASE;
EXPORT_SYMBOL(HYPERVISOR_shared_info);

start_info_t *xen_start_info;
EXPORT_SYMBOL(xen_start_info);

int running_on_xen;
EXPORT_SYMBOL(running_on_xen);

#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
static int p2m_expose_init(void);
#else
#define p2m_expose_init() (-ENOSYS)
#endif

EXPORT_SYMBOL(__hypercall);

//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
//    move those to lib/contiguous_bitmap?
//XXX discontigmem/sparsemem

/*
 * Bitmap is indexed by page number. If bit is set, the page is part of a
 * xen_create_contiguous_region() area of memory.
 */
unsigned long *contiguous_bitmap;

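/*
 * With CONFIG_VIRTUAL_MEM_MAP the bitmap is carved out of the vmalloc
 * area and only the parts that cover actual memory get backing pages;
 * create_contiguous_bitmap() populates the kernel page table for one EFI
 * memory range, allocating pgd/pud/pmd/pte levels on demand.
 */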
#ifdef CONFIG_VIRTUAL_MEM_MAP
/* Following logic is stolen from create_mem_map_table() for virtual memmap */
static int
create_contiguous_bitmap(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	unsigned long bitmap_start, bitmap_end;
	unsigned char *bitmap;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	bitmap_start = (unsigned long)contiguous_bitmap +
		       ((__pa(start) >> PAGE_SHIFT) >> 3);
	bitmap_end = (unsigned long)contiguous_bitmap +
		     (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);

	start_page = bitmap_start & PAGE_MASK;
	end_page = PAGE_ALIGN(bitmap_end);
	node = paddr_to_nid(__pa(start));

	bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
					  end_page - start_page);
	BUG_ON(!bitmap);
	memset(bitmap, 0, end_page - start_page);

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd,
				     alloc_bootmem_pages_node(NODE_DATA(node),
							      PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud,
				     alloc_bootmem_pages_node(NODE_DATA(node),
							      PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd,
					    alloc_bootmem_pages_node(
						NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte,
				pfn_pte(__pa(bitmap + (address - start_page))
					>> PAGE_SHIFT, PAGE_KERNEL));
	}
	return 0;
}
#endif

static void
__contiguous_bitmap_init(unsigned long size)
{
	contiguous_bitmap = alloc_bootmem_pages(size);
	BUG_ON(!contiguous_bitmap);
	memset(contiguous_bitmap, 0, size);
}

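/*
 * One bit per page frame, rounded up to whole bytes; the extra
 * 2 * BITS_PER_LONG bits of slack keep the word-at-a-time set/clear
 * helpers below from running off the end of the allocation.
 */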
void
contiguous_bitmap_init(unsigned long end_pfn)
{
	unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
#ifndef CONFIG_VIRTUAL_MEM_MAP
	__contiguous_bitmap_init(size);
#else
	unsigned long max_gap = 0;

	efi_memmap_walk(find_largest_hole, (u64*)&max_gap);
	if (max_gap < LARGE_GAP) {
		__contiguous_bitmap_init(size);
	} else {
		unsigned long map_size = PAGE_ALIGN(size);
		vmalloc_end -= map_size;
		contiguous_bitmap = (unsigned long*)vmalloc_end;
		efi_memmap_walk(create_contiguous_bitmap, NULL);
	}
#endif
}

#if 0
int
contiguous_bitmap_test(void* p)
{
	return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
}
#endif

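/*
 * Set/clear the bits for [first_page, first_page + nr_pages) one word at
 * a time.  -(1UL << start_off) is a mask of the bits from start_off
 * upward, (1UL << end_off) - 1 a mask of the bits below end_off.  E.g.
 * with BITS_PER_LONG == 64, first_page == 60 and nr_pages == 8, the
 * first word gets bits 60-63 and the next word bits 0-3.
 */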
static void contiguous_bitmap_set(
	unsigned long first_page, unsigned long nr_pages)
{
	unsigned long start_off, end_off, curr_idx, end_idx;

	curr_idx = first_page / BITS_PER_LONG;
	start_off = first_page & (BITS_PER_LONG-1);
	end_idx = (first_page + nr_pages) / BITS_PER_LONG;
	end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);

	if (curr_idx == end_idx) {
		contiguous_bitmap[curr_idx] |=
			((1UL<<end_off)-1) & -(1UL<<start_off);
	} else {
		contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
		while (++curr_idx < end_idx)
			contiguous_bitmap[curr_idx] = ~0UL;
		contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
	}
}

static void contiguous_bitmap_clear(
	unsigned long first_page, unsigned long nr_pages)
{
	unsigned long start_off, end_off, curr_idx, end_idx;

	curr_idx = first_page / BITS_PER_LONG;
	start_off = first_page & (BITS_PER_LONG-1);
	end_idx = (first_page + nr_pages) / BITS_PER_LONG;
	end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);

	if (curr_idx == end_idx) {
		contiguous_bitmap[curr_idx] &=
			-(1UL<<end_off) | ((1UL<<start_off)-1);
	} else {
		contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
		while (++curr_idx != end_idx)
			contiguous_bitmap[curr_idx] = 0;
		contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
	}
}

// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
// are based on i386 xen_create_contiguous_region(),
// xen_destroy_contiguous_region()

/* Protected by balloon_lock. */
#define MAX_CONTIG_ORDER 7
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

/* Ensure multi-page extents are contiguous in machine memory. */
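/*
 * Trade the 2^order pseudo-physical frames backing vstart for a single
 * machine-contiguous extent of the same size via XENMEM_exchange.
 * address_bits, when non-zero, constrains the machine address of the new
 * extent (for devices with limited DMA reach).  out_frame is set to the
 * same start_gpfn, so the guest-physical layout is unchanged and only
 * the p2m mapping is updated by Xen.
 */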
int
__xen_create_contiguous_region(unsigned long vstart,
			       unsigned int order, unsigned int address_bits)
{
	unsigned long error = 0;
	unsigned long gphys = __pa(vstart);
	unsigned long start_gpfn = gphys >> PAGE_SHIFT;
	unsigned long num_gpfn = 1 << order;
	unsigned long i;
	unsigned long flags;

	unsigned long *in_frames = discontig_frames, out_frame;
	int success;
	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents = num_gpfn,
			.extent_order = 0,
			.domid = DOMID_SELF
		},
		.out = {
			.nr_extents = 1,
			.extent_order = order,
			.address_bits = address_bits,
			.domid = DOMID_SELF
		},
		.nr_exchanged = 0
	};

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	set_xen_guest_handle(exchange.in.extent_start, in_frames);
	set_xen_guest_handle(exchange.out.extent_start, &out_frame);

	scrub_pages(vstart, num_gpfn);

	balloon_lock(flags);

	/* Get a new contiguous memory extent. */
	for (i = 0; i < num_gpfn; i++) {
		in_frames[i] = start_gpfn + i;
	}
	out_frame = start_gpfn;
	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == num_gpfn);
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
	BUG_ON(success && (error != 0));
	if (unlikely(error == -ENOSYS)) {
		/* Compatibility when XENMEM_exchange is unsupported. */
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					     &exchange.in);
		BUG_ON(error != num_gpfn);
		error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
					     &exchange.out);
		if (error != 1) {
			/* Couldn't get special memory: fall back to normal. */
			for (i = 0; i < num_gpfn; i++) {
				in_frames[i] = start_gpfn + i;
			}
			error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
						     &exchange.in);
			BUG_ON(error != num_gpfn);
			success = 0;
		} else
			success = 1;
	}
	if (success)
		contiguous_bitmap_set(start_gpfn, num_gpfn);
#if 0
	if (success) {
		unsigned long mfn;
		unsigned long mfn_prev = ~0UL;
		for (i = 0; i < num_gpfn; i++) {
			mfn = pfn_to_mfn_for_dma(start_gpfn + i);
			if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
				xprintk("\n");
				xprintk("%s:%d order %d "
					"start 0x%lx bus 0x%lx "
					"machine 0x%lx\n",
					__func__, __LINE__, order,
					vstart, virt_to_bus((void*)vstart),
					phys_to_machine_for_dma(gphys));
				xprintk("mfn: ");
				for (i = 0; i < num_gpfn; i++) {
					mfn = pfn_to_mfn_for_dma(
						start_gpfn + i);
					xprintk("0x%lx ", mfn);
				}
				xprintk("\n");
				break;
			}
			mfn_prev = mfn;
		}
	}
#endif
	balloon_unlock(flags);
	return success ? 0 : -ENOMEM;
}
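/*
 * Illustrative use only (hypothetical caller; the actual entry points
 * are the xen_create_contiguous_region()/xen_destroy_contiguous_region()
 * wrappers, defined elsewhere):
 *
 *	vstart = __get_free_pages(GFP_KERNEL, order);
 *	if (__xen_create_contiguous_region(vstart, order, 32) == 0)
 *		;	// region is now machine-contiguous below 1UL << 32
 *	...
 *	__xen_destroy_contiguous_region(vstart, order);
 *
 * where "32" is a hypothetical address_bits limit for a 32-bit DMA device.
 */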

void
__xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long flags;
	unsigned long error = 0;
	unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
	unsigned long num_gpfn = 1UL << order;
	unsigned long i;

	unsigned long *out_frames = discontig_frames, in_frame;
	int success;
	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents = 1,
			.extent_order = order,
			.domid = DOMID_SELF
		},
		.out = {
			.nr_extents = num_gpfn,
			.extent_order = 0,
			.address_bits = 0,
			.domid = DOMID_SELF
		},
		.nr_exchanged = 0
	};


	if (!test_bit(start_gpfn, contiguous_bitmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	set_xen_guest_handle(exchange.in.extent_start, &in_frame);
	set_xen_guest_handle(exchange.out.extent_start, out_frames);

	scrub_pages(vstart, num_gpfn);

	balloon_lock(flags);

	contiguous_bitmap_clear(start_gpfn, num_gpfn);

	/* Do the exchange for non-contiguous MFNs. */
	in_frame = start_gpfn;
	for (i = 0; i < num_gpfn; i++) {
		out_frames[i] = start_gpfn + i;
	}
	error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == 1);
	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
	BUG_ON(success && (error != 0));
	if (unlikely(error == -ENOSYS)) {
		/* Compatibility when XENMEM_exchange is unsupported. */
		error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					     &exchange.in);
		BUG_ON(error != 1);

		error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
					     &exchange.out);
		BUG_ON(error != num_gpfn);
	}
	balloon_unlock(flags);
}


///////////////////////////////////////////////////////////////////////////
// grant table hack
// cmd: GNTTABOP_xxx

#include <linux/mm.h>
#include <xen/interface/xen.h>
#include <xen/gnttab.h>

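/*
 * On xen/ia64 the grant-table hypercall arguments are forwarded through
 * xencomm, and only GNTMAP_host_map mappings without
 * GNTMAP_application_map/GNTMAP_contains_pte are supported, so
 * map_grant_ref requests are sanity-checked before being passed to the
 * hypervisor.
 */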
static void
gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
{
	uint32_t flags;

	flags = uop->flags;

	if (flags & GNTMAP_host_map) {
		if (flags & GNTMAP_application_map) {
			xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
			BUG();
		}
		if (flags & GNTMAP_contains_pte) {
			xprintd("GNTMAP_contains_pte is not supported yet: flags 0x%x\n", flags);
			BUG();
		}
	} else if (flags & GNTMAP_device_map) {
		xprintd("GNTMAP_device_map is not supported yet: flags 0x%x\n", flags);
		BUG(); //XXX not yet; this flag is actually not used.
	} else {
		BUG();
	}
}

int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	if (cmd == GNTTABOP_map_grant_ref) {
		unsigned int i;
		for (i = 0; i < count; i++) {
			gnttab_map_grant_ref_pre(
				(struct gnttab_map_grant_ref*)uop + i);
		}
	}
	return xencomm_mini_hypercall_grant_table_op(cmd, uop, count);
}
EXPORT_SYMBOL(HYPERVISOR_grant_table_op);

///////////////////////////////////////////////////////////////////////////
// foreign mapping
#include <linux/efi.h>
#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()

static unsigned long privcmd_resource_min = 0;
// Xen/ia64 currently can handle pseudo-physical address bits up to
// (PAGE_SHIFT * 3)
static unsigned long privcmd_resource_max =
	GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;

static unsigned long
md_end_addr(const efi_memory_desc_t *md)
{
	return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
}

#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE	(1024 * 1024 * 1024UL)
static int
xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
{
	return (start < end &&
		(end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
}

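/*
 * Pick a pseudo-physical address range for privcmd foreign mappings that
 * cannot collide with RAM: preferably everything above the highest
 * address in the EFI memory map, otherwise the largest gap between EFI
 * memory descriptors.  A gap smaller than XEN_IA64_PRIVCMD_LEAST_GAP_SIZE
 * is accepted only with a warning.
 */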
static int __init
xen_ia64_privcmd_init(void)
{
	void *efi_map_start, *efi_map_end, *p;
	u64 efi_desc_size;
	efi_memory_desc_t *md;
	unsigned long tmp_min;
	unsigned long tmp_max;
	unsigned long gap_size;
	unsigned long prev_end;

	if (!is_running_on_xen())
		return -1;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	// First try the range above the highest used address.
	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		// nothing
	}
	md = p - efi_desc_size;
	privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
	if (xen_ia64_privcmd_check_size(privcmd_resource_min,
					privcmd_resource_max)) {
		goto out;
	}

	// The highest used address is too large: find the largest gap instead.
	tmp_min = privcmd_resource_max;
	tmp_max = 0;
	gap_size = 0;
	prev_end = 0;
	for (p = efi_map_start;
	     p < efi_map_end - efi_desc_size;
	     p += efi_desc_size) {
		unsigned long end;
		efi_memory_desc_t* next;
		unsigned long next_start;

		md = p;
		end = md_end_addr(md);
		if (end > privcmd_resource_max) {
			break;
		}
		if (end < prev_end) {
			// Work around: Xen may pass incompletely sorted
			// memory descriptors like
			//	[x, x + length]
			//	[x, x]
			// where the order should be reversed.
			continue;
		}
		next = p + efi_desc_size;
		next_start = next->phys_addr;
		if (next_start > privcmd_resource_max) {
			next_start = privcmd_resource_max;
		}
		if (end < next_start && gap_size < (next_start - end)) {
			tmp_min = end;
			tmp_max = next_start;
			gap_size = tmp_max - tmp_min;
		}
		prev_end = end;
	}

	privcmd_resource_min = GRANULEROUNDUP(tmp_min);
	if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
		privcmd_resource_max = tmp_max;
		goto out;
	}

	privcmd_resource_min = tmp_min;
	privcmd_resource_max = tmp_max;
	if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
					 privcmd_resource_max)) {
		// No large enough gap was found. Go ahead anyway with a
		// warning, hoping that no large region will be requested.
		printk(KERN_WARNING "xen privcmd: no large enough region for privcmd mmap found.\n");
	}

out:
	printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
	       privcmd_resource_min, privcmd_resource_max,
	       (privcmd_resource_max - privcmd_resource_min) >> 20);
	BUG_ON(privcmd_resource_min >= privcmd_resource_max);

	// XXX this should be somewhere appropriate
	(void)p2m_expose_init();

	return 0;
}
late_initcall(xen_ia64_privcmd_init);

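/*
 * A xen_ia64_privcmd_range covers one privcmd mmap() call: it owns the
 * pseudo-physical resource and a per-page entry array, and is
 * refcounted.  A xen_ia64_privcmd_vma is the per-VMA view of (a window
 * into) a range; VMAs created by fork or split share the range and bump
 * its refcount.  Each entry tracks the gpfn assigned to one page and
 * how many VMAs map it.
 */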
struct xen_ia64_privcmd_entry {
	atomic_t map_count;
#define INVALID_GPFN (~0UL)
	unsigned long gpfn;
};

struct xen_ia64_privcmd_range {
	atomic_t ref_count;
	unsigned long pgoff; // in PAGE_SIZE
	struct resource* res;

	unsigned long num_entries;
	struct xen_ia64_privcmd_entry entries[0];
};

struct xen_ia64_privcmd_vma {
	int is_privcmd_mmapped;
	struct xen_ia64_privcmd_range* range;

	unsigned long num_entries;
	struct xen_ia64_privcmd_entry* entries;
};

static void
xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
{
	atomic_set(&entry->map_count, 0);
	entry->gpfn = INVALID_GPFN;
}

static int
xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
			    unsigned long addr,
			    struct xen_ia64_privcmd_range* privcmd_range,
			    int i,
			    unsigned long gmfn,
			    pgprot_t prot,
			    domid_t domid)
{
	int error = 0;
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	unsigned long gpfn;
	unsigned long flags;

	if ((addr & ~PAGE_MASK) != 0 || gmfn == INVALID_MFN) {
		error = -EINVAL;
		goto out;
	}

	if (entry->gpfn != INVALID_GPFN) {
		error = -EBUSY;
		goto out;
	}
	gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;

	flags = ASSIGN_writable;
	if (pgprot_val(prot) == PROT_READ) {
		flags = ASSIGN_readonly;
	}
	error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
	if (error != 0) {
		goto out;
	}

	prot = vma->vm_page_prot;
	error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
	if (error != 0) {
		error = HYPERVISOR_zap_physmap(gpfn, 0);
		if (error) {
			BUG(); //XXX
		}
	} else {
		atomic_inc(&entry->map_count);
		entry->gpfn = gpfn;
	}

out:
	return error;
}

static void
xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
			      int i)
{
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	unsigned long gpfn = entry->gpfn;
	//gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
	//	(vma->vm_pgoff - privcmd_range->pgoff);
	int error;

	error = HYPERVISOR_zap_physmap(gpfn, 0);
	if (error) {
		BUG(); //XXX
	}
	entry->gpfn = INVALID_GPFN;
}

static void
xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
			    int i)
{
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	if (entry->gpfn != INVALID_GPFN) {
		atomic_inc(&entry->map_count);
	} else {
		BUG_ON(atomic_read(&entry->map_count) != 0);
	}
}

static void
xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
			     int i)
{
	struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
	if (entry->gpfn != INVALID_GPFN &&
	    atomic_dec_and_test(&entry->map_count)) {
		xen_ia64_privcmd_entry_munmap(privcmd_range, i);
	}
}

static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);

struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
	.open = &xen_ia64_privcmd_vma_open,
	.close = &xen_ia64_privcmd_vma_close,
};

static void
__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
			    struct xen_ia64_privcmd_vma* privcmd_vma,
			    struct xen_ia64_privcmd_range* privcmd_range)
{
	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
	unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i;

	BUG_ON(entry_offset < 0);
	BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);

	privcmd_vma->range = privcmd_range;
	privcmd_vma->num_entries = num_entries;
	privcmd_vma->entries = &privcmd_range->entries[entry_offset];
	for (i = 0; i < privcmd_vma->num_entries; i++) {
		xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
	}

	vma->vm_private_data = privcmd_vma;
	vma->vm_ops = &xen_ia64_privcmd_vm_ops;
}

static void
xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
{
	struct xen_ia64_privcmd_vma* old_privcmd_vma =
		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_vma* privcmd_vma;
	struct xen_ia64_privcmd_range* privcmd_range = old_privcmd_vma->range;

	atomic_inc(&privcmd_range->ref_count);
	// vm_op->open() can't fail.
	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
	// copy the original value if necessary
	privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;

	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
}

static void
xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
{
	struct xen_ia64_privcmd_vma* privcmd_vma =
		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
	unsigned long i;

	for (i = 0; i < privcmd_vma->num_entries; i++) {
		xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
	}
	vma->vm_private_data = NULL;
	kfree(privcmd_vma);

	if (atomic_dec_and_test(&privcmd_range->ref_count)) {
#if 1
		for (i = 0; i < privcmd_range->num_entries; i++) {
			struct xen_ia64_privcmd_entry* entry =
				&privcmd_range->entries[i];
			BUG_ON(atomic_read(&entry->map_count) != 0);
			BUG_ON(entry->gpfn != INVALID_GPFN);
		}
#endif
		release_resource(privcmd_range->res);
		kfree(privcmd_range->res);
		vfree(privcmd_range);
	}
}

int
privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	struct xen_ia64_privcmd_vma* privcmd_vma =
		(struct xen_ia64_privcmd_vma *)vma->vm_private_data;
	return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
}

int
privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
	int error;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long num_entries = size >> PAGE_SHIFT;
	struct xen_ia64_privcmd_range* privcmd_range = NULL;
	struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
	struct resource* res = NULL;
	unsigned long i;
	BUG_ON(!is_running_on_xen());

	BUG_ON(file->private_data != NULL);

	error = -ENOMEM;
	privcmd_range =
		vmalloc(sizeof(*privcmd_range) +
			sizeof(privcmd_range->entries[0]) * num_entries);
	if (privcmd_range == NULL) {
		goto out_enomem0;
	}
	privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
	if (privcmd_vma == NULL) {
		goto out_enomem1;
	}
	privcmd_vma->is_privcmd_mmapped = 0;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (res == NULL) {
		goto out_enomem1;
	}
	res->name = "Xen privcmd mmap";
	error = allocate_resource(&iomem_resource, res, size,
				  privcmd_resource_min, privcmd_resource_max,
				  privcmd_resource_align, NULL, NULL);
	if (error) {
		goto out_enomem1;
	}
	privcmd_range->res = res;

	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;

	atomic_set(&privcmd_range->ref_count, 1);
	privcmd_range->pgoff = vma->vm_pgoff;
	privcmd_range->num_entries = num_entries;
	for (i = 0; i < privcmd_range->num_entries; i++) {
		xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
	}

	__xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
	return 0;

out_enomem1:
	kfree(res);
	kfree(privcmd_vma);
out_enomem0:
	vfree(privcmd_range);
	return error;
}

int
direct_remap_pfn_range(struct vm_area_struct *vma,
		       unsigned long address,	// process virtual address
		       unsigned long gmfn,	// gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
		       unsigned long size,
		       pgprot_t prot,
		       domid_t domid)		// target domain
{
	struct xen_ia64_privcmd_vma* privcmd_vma =
		(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
	struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
	unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;

	unsigned long i;
	unsigned long offset;
	int error = 0;
	BUG_ON(!is_running_on_xen());

#if 0
	if (prot != vm->vm_page_prot) {
		return -EINVAL;
	}
#endif

	i = (address - vma->vm_start) >> PAGE_SHIFT;
	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		error = xen_ia64_privcmd_entry_mmap(vma,
				(address + offset) & PAGE_MASK,
				privcmd_range, entry_offset + i,
				gmfn, prot, domid);
		if (error != 0) {
			break;
		}

		i++;
		gmfn++;
	}

	return error;
}


/* Called after suspend, to resume time. */
void
time_resume(void)
{
	extern void ia64_cpu_local_tick(void);

	/* Just trigger a tick. */
	ia64_cpu_local_tick();

	/* The time interpolator remembers the last timer status. Forget it. */
	time_interpolator_reset();
}

///////////////////////////////////////////////////////////////////////////
// expose p2m table
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
#include <linux/cpu.h>
#include <asm/uaccess.h>

int p2m_initialized __read_mostly = 0;

unsigned long p2m_min_low_pfn __read_mostly;
unsigned long p2m_max_low_pfn __read_mostly;
unsigned long p2m_convert_min_pfn __read_mostly;
unsigned long p2m_convert_max_pfn __read_mostly;

static struct resource p2m_resource = {
	.name  = "Xen p2m table",
	.flags = IORESOURCE_MEM,
};
static unsigned long p2m_assign_start_pfn __read_mostly;
static unsigned long p2m_assign_end_pfn __read_mostly;
volatile const pte_t* p2m_pte __read_mostly;

#define GRANULE_PFN	PTRS_PER_PTE
static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;

#define ROUNDDOWN(x, y)	((x) & ~((y) - 1))
#define ROUNDUP(x, y)	(((x) + (y) - 1) & ~((y) - 1))

#define P2M_PREFIX	"Xen p2m: "

static int xen_ia64_p2m_expose __read_mostly = 1;
module_param(xen_ia64_p2m_expose, int, 0);
MODULE_PARM_DESC(xen_ia64_p2m_expose,
		 "enable/disable xen/ia64 p2m exposure optimization\n");

#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
		 "use/unuse dtr to map exposed p2m table\n");

static const int p2m_page_shifts[] = {
	_PAGE_SIZE_4K,
	_PAGE_SIZE_8K,
	_PAGE_SIZE_16K,
	_PAGE_SIZE_64K,
	_PAGE_SIZE_256K,
	_PAGE_SIZE_1M,
	_PAGE_SIZE_4M,
	_PAGE_SIZE_16M,
	_PAGE_SIZE_64M,
	_PAGE_SIZE_256M,
};

struct p2m_itr_arg {
	unsigned long vaddr;
	unsigned long pteval;
	unsigned long log_page_size;
};
static struct p2m_itr_arg p2m_itr_arg __read_mostly;

// This should be in asm-ia64/kregs.h
#define IA64_TR_P2M_TABLE	3

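/*
 * Pin the exposed p2m table with a data translation register so p2m
 * lookups never miss in the TLB; this runs on every CPU, including ones
 * brought online later via the hotplug notifier below.
 */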
static void
p2m_itr(void* info)
{
	struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
	ia64_itr(0x2, IA64_TR_P2M_TABLE,
		 arg->vaddr, arg->pteval, arg->log_page_size);
	ia64_srlz_d();
}

static int
p2m_expose_dtr_call(struct notifier_block *self,
		    unsigned long event, void* ptr)
{
	unsigned int cpu = (unsigned int)(long)ptr;
	if (event != CPU_ONLINE)
		return 0;
	if (!(p2m_initialized && xen_ia64_p2m_expose_use_dtr))
		return 0;
	smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg, 1, 1);
	return 0;
}

static struct notifier_block p2m_expose_dtr_hotplug_notifier = {
	.notifier_call = p2m_expose_dtr_call,
	.next          = NULL,
	.priority      = 0
};
#endif

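/*
 * Expose the p2m table to this domain: reserve a pseudo-physical region
 * (carved out of the privcmd range) and ask Xen to map the domain's p2m
 * table there.  pfn-to-mfn conversion then becomes a simple memory read
 * instead of a hypercall.  With the "use_dtr" variant the table must fit
 * one of the supported IA64 page sizes so that a single translation
 * register can map it.
 */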
static int
p2m_expose_init(void)
{
	unsigned long num_pfn;
	unsigned long size = 0;
	unsigned long p2m_size = 0;
	unsigned long align = ~0UL;
	int error = 0;
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
	int i;
	unsigned long page_size;
	unsigned long log_page_size = 0;
#endif

	if (!xen_ia64_p2m_expose)
		return -ENOSYS;
	if (p2m_initialized)
		return 0;

#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
	error = register_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
	if (error < 0)
		return error;
#endif

	lock_cpu_hotplug();
	if (p2m_initialized)
		goto out;

#ifdef CONFIG_DISCONTIGMEM
	p2m_min_low_pfn = min_low_pfn;
	p2m_max_low_pfn = max_low_pfn;
#else
	p2m_min_low_pfn = 0;
	p2m_max_low_pfn = max_pfn;
#endif

#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
	if (xen_ia64_p2m_expose_use_dtr) {
		unsigned long granule_pfn = 0;
		p2m_size = p2m_max_low_pfn - p2m_min_low_pfn;
		for (i = 0;
		     i < sizeof(p2m_page_shifts)/sizeof(p2m_page_shifts[0]);
		     i++) {
			log_page_size = p2m_page_shifts[i];
			page_size = 1UL << log_page_size;
			if (page_size < p2m_size)
				continue;

			granule_pfn = max(page_size >> PAGE_SHIFT,
					  p2m_granule_pfn);
			p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
							granule_pfn);
			p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn,
						      granule_pfn);
			num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
			size = num_pfn << PAGE_SHIFT;
			p2m_size = num_pfn / PTRS_PER_PTE;
			p2m_size = ROUNDUP(p2m_size, granule_pfn << PAGE_SHIFT);
			if (p2m_size == page_size)
				break;
		}
		if (p2m_size != page_size) {
			printk(KERN_ERR "p2m_size != page_size\n");
			error = -EINVAL;
			goto out;
		}
		align = max(privcmd_resource_align, granule_pfn << PAGE_SHIFT);
	} else
#endif
	{
		BUG_ON(p2m_granule_pfn & (p2m_granule_pfn - 1));
		p2m_convert_min_pfn = ROUNDDOWN(p2m_min_low_pfn,
						p2m_granule_pfn);
		p2m_convert_max_pfn = ROUNDUP(p2m_max_low_pfn, p2m_granule_pfn);
		num_pfn = p2m_convert_max_pfn - p2m_convert_min_pfn;
		size = num_pfn << PAGE_SHIFT;
		p2m_size = num_pfn / PTRS_PER_PTE;
		p2m_size = ROUNDUP(p2m_size, p2m_granule_pfn << PAGE_SHIFT);
		align = max(privcmd_resource_align,
			    p2m_granule_pfn << PAGE_SHIFT);
	}

	// use the privcmd region
	error = allocate_resource(&iomem_resource, &p2m_resource, p2m_size,
				  privcmd_resource_min, privcmd_resource_max,
				  align, NULL, NULL);
	if (error) {
		printk(KERN_ERR P2M_PREFIX
		       "can't allocate region for p2m exposure "
		       "[0x%016lx, 0x%016lx) 0x%016lx\n",
		       p2m_convert_min_pfn, p2m_convert_max_pfn, p2m_size);
		goto out;
	}

	p2m_assign_start_pfn = p2m_resource.start >> PAGE_SHIFT;
	p2m_assign_end_pfn = p2m_resource.end >> PAGE_SHIFT;

	error = HYPERVISOR_expose_p2m(p2m_convert_min_pfn,
				      p2m_assign_start_pfn,
				      size, p2m_granule_pfn);
	if (error) {
		printk(KERN_ERR P2M_PREFIX "expose p2m hypercall failed: %d\n",
		       error);
		printk(KERN_ERR P2M_PREFIX "conv 0x%016lx assign 0x%016lx "
		       "size 0x%016lx granule 0x%016lx\n",
		       p2m_convert_min_pfn, p2m_assign_start_pfn,
		       size, p2m_granule_pfn);
		release_resource(&p2m_resource);
		goto out;
	}
	p2m_pte = (volatile const pte_t*)pfn_to_kaddr(p2m_assign_start_pfn);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
	if (xen_ia64_p2m_expose_use_dtr) {
		p2m_itr_arg.vaddr = (unsigned long)__va(p2m_assign_start_pfn
							<< PAGE_SHIFT);
		p2m_itr_arg.pteval = pte_val(pfn_pte(p2m_assign_start_pfn,
						     PAGE_KERNEL));
		p2m_itr_arg.log_page_size = log_page_size;
		smp_mb();
		smp_call_function(&p2m_itr, &p2m_itr_arg, 1, 1);
		p2m_itr(&p2m_itr_arg);
	}
#endif
	smp_mb();
	p2m_initialized = 1;
	printk(P2M_PREFIX "assign p2m table of [0x%016lx, 0x%016lx)\n",
	       p2m_convert_min_pfn << PAGE_SHIFT,
	       p2m_convert_max_pfn << PAGE_SHIFT);
	printk(P2M_PREFIX "to [0x%016lx, 0x%016lx) (%ld KBytes)\n",
	       p2m_assign_start_pfn << PAGE_SHIFT,
	       p2m_assign_end_pfn << PAGE_SHIFT,
	       p2m_size / 1024);
out:
	unlock_cpu_hotplug();
	return error;
}

#ifdef notyet
void
p2m_expose_cleanup(void)
{
	BUG_ON(!p2m_initialized);
#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
	unregister_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
#endif
	release_resource(&p2m_resource);
}
#endif

//XXX inline this?
unsigned long
p2m_phystomach(unsigned long gpfn)
{
	volatile const pte_t* pte;
	unsigned long mfn;
	unsigned long pteval;

	if (!p2m_initialized ||
	    gpfn < p2m_min_low_pfn || gpfn > p2m_max_low_pfn
	    /* || !pfn_valid(gpfn) */)
		return INVALID_MFN;
	pte = p2m_pte + (gpfn - p2m_convert_min_pfn);

	mfn = INVALID_MFN;
	if (likely(__get_user(pteval, (unsigned long __user *)pte) == 0 &&
		   pte_present(__pte(pteval)) &&
		   pte_pfn(__pte(pteval)) != (INVALID_MFN >> PAGE_SHIFT)))
		mfn = (pteval & _PFN_MASK) >> PAGE_SHIFT;

	return mfn;
}
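/*
 * Illustrative use only (hypothetical caller): translate a guest pfn to
 * a machine frame locally, falling back to a hypercall-based helper when
 * the table isn't exposed or the entry is invalid:
 *
 *	mfn = p2m_phystomach(gpfn);
 *	if (mfn == INVALID_MFN)
 *		mfn = pfn_to_mfn_for_dma(gpfn);	// assumed fallback helper
 */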

EXPORT_SYMBOL_GPL(p2m_initialized);
EXPORT_SYMBOL_GPL(p2m_min_low_pfn);
EXPORT_SYMBOL_GPL(p2m_max_low_pfn);
EXPORT_SYMBOL_GPL(p2m_convert_min_pfn);
EXPORT_SYMBOL_GPL(p2m_convert_max_pfn);
EXPORT_SYMBOL_GPL(p2m_pte);
EXPORT_SYMBOL_GPL(p2m_phystomach);
#endif

///////////////////////////////////////////////////////////////////////////
// for xenoprof

struct resource*
xen_ia64_allocate_resource(unsigned long size)
{
	struct resource* res;
	int error;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (res == NULL)
		return ERR_PTR(-ENOMEM);

	res->name = "Xen";
	res->flags = IORESOURCE_MEM;
	error = allocate_resource(&iomem_resource, res, PAGE_ALIGN(size),
				  privcmd_resource_min, privcmd_resource_max,
				  IA64_GRANULE_SIZE, NULL, NULL);
	if (error) {
		kfree(res);
		return ERR_PTR(error);
	}
	return res;
}
EXPORT_SYMBOL_GPL(xen_ia64_allocate_resource);

void
xen_ia64_release_resource(struct resource* res)
{
	release_resource(res);
	kfree(res);
}
EXPORT_SYMBOL_GPL(xen_ia64_release_resource);

void
xen_ia64_unmap_resource(struct resource* res)
{
	unsigned long gpfn = res->start >> PAGE_SHIFT;
	unsigned long nr_pages = (res->end - res->start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		int error = HYPERVISOR_zap_physmap(gpfn + i, 0);
		if (error)
			printk(KERN_ERR
			       "%s:%d zap_physmap failed %d gpfn %lx\n",
			       __func__, __LINE__, error, gpfn + i);
	}
	xen_ia64_release_resource(res);
}
EXPORT_SYMBOL_GPL(xen_ia64_unmap_resource);