/******************************************************************************
 * arch/x86/domain.c
 *
 * x86-specific domain handling (e.g., register setup and context switching).
 */

/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/smp.h>
#include <xen/delay.h>
#include <xen/softirq.h>
#include <xen/grant_table.h>
#include <xen/iocap.h>
#include <xen/kernel.h>
#include <xen/multicall.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <xen/console.h>
#include <xen/percpu.h>
#include <asm/regs.h>
#include <asm/mc146818rtc.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mpspec.h>
#include <asm/ldt.h>
#include <asm/paging.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/msr.h>
#ifdef CONFIG_COMPAT
#include <compat/vcpu.h>
#endif

DEFINE_PER_CPU(struct vcpu *, curr_vcpu);

static void paravirt_ctxt_switch_from(struct vcpu *v);
static void paravirt_ctxt_switch_to(struct vcpu *v);

static void vcpu_destroy_pagetables(struct vcpu *v);

static void continue_idle_domain(struct vcpu *v)
{
    reset_stack_and_jump(idle_loop);
}

static void continue_nonidle_domain(struct vcpu *v)
{
    reset_stack_and_jump(ret_from_intr);
}

static void default_idle(void)
{
    local_irq_disable();
    if ( !softirq_pending(smp_processor_id()) )
        safe_halt();
    else
        local_irq_enable();
}

void idle_loop(void)
{
    for ( ; ; )
    {
        page_scrub_schedule_work();
        default_idle();
        do_softirq();
    }
}

void startup_cpu_idle_loop(void)
{
    struct vcpu *v = current;

    ASSERT(is_idle_vcpu(v));
    cpu_set(smp_processor_id(), v->domain->domain_dirty_cpumask);
    cpu_set(smp_processor_id(), v->vcpu_dirty_cpumask);

    reset_stack_and_jump(idle_loop);
}

void dump_pageframe_info(struct domain *d)
{
    struct page_info *page;

    printk("Memory pages belonging to domain %u:\n", d->domain_id);

    if ( d->tot_pages >= 10 )
    {
        printk("    DomPage list too long to display\n");
    }
    else
    {
        list_for_each_entry ( page, &d->page_list, list )
        {
            printk("    DomPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
                   _p(page_to_maddr(page)), _p(page_to_mfn(page)),
                   page->count_info, page->u.inuse.type_info);
        }
    }

    list_for_each_entry ( page, &d->xenpage_list, list )
    {
        printk("    XenPage %p: mfn=%p, caf=%08x, taf=%" PRtype_info "\n",
               _p(page_to_maddr(page)), _p(page_to_mfn(page)),
               page->count_info, page->u.inuse.type_info);
    }
}

struct vcpu *alloc_vcpu_struct(void)
{
    struct vcpu *v;
    if ( (v = xmalloc(struct vcpu)) != NULL )
        memset(v, 0, sizeof(*v));
    return v;
}

void free_vcpu_struct(struct vcpu *v)
{
    xfree(v);
}

#ifdef CONFIG_COMPAT

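/*
 * Map the compat-mode argument translation area for @v into the L4 table
 * @l4tab. The L3 page is shared by all vcpus of the domain and allocated on
 * first use; the L2, L1 and data pages backing this vcpu's portion of the
 * area are allocated here as needed.
 */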
int setup_arg_xlat_area(struct vcpu *v, l4_pgentry_t *l4tab)
{
    struct domain *d = v->domain;
    unsigned i;
    struct page_info *pg;

    if ( !d->arch.mm_arg_xlat_l3 )
    {
        pg = alloc_domheap_page(NULL);
        if ( !pg )
            return -ENOMEM;
        d->arch.mm_arg_xlat_l3 = clear_page(page_to_virt(pg));
    }

    l4tab[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
        l4e_from_paddr(__pa(d->arch.mm_arg_xlat_l3), __PAGE_HYPERVISOR);

    for ( i = 0; i < COMPAT_ARG_XLAT_PAGES; ++i )
    {
        unsigned long va = COMPAT_ARG_XLAT_VIRT_START(v->vcpu_id) + i * PAGE_SIZE;
        l2_pgentry_t *l2tab;
        l1_pgentry_t *l1tab;

        if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
        {
            pg = alloc_domheap_page(NULL);
            if ( !pg )
                return -ENOMEM;
            clear_page(page_to_virt(pg));
            d->arch.mm_arg_xlat_l3[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
        }
        l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
        if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
        {
            pg = alloc_domheap_page(NULL);
            if ( !pg )
                return -ENOMEM;
            clear_page(page_to_virt(pg));
            l2tab[l2_table_offset(va)] = l2e_from_page(pg, __PAGE_HYPERVISOR);
        }
        l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
        BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
        pg = alloc_domheap_page(NULL);
        if ( !pg )
            return -ENOMEM;
        l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
    }

    return 0;
}

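/*
 * Free the argument translation area: every data page, then the L1 and L2
 * page-table pages, and finally the shared L3 page itself.
 */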
static void release_arg_xlat_area(struct domain *d)
{
    if ( d->arch.mm_arg_xlat_l3 )
    {
        unsigned l3;

        for ( l3 = 0; l3 < L3_PAGETABLE_ENTRIES; ++l3 )
        {
            if ( l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3]) )
            {
                l2_pgentry_t *l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3]);
                unsigned l2;

                for ( l2 = 0; l2 < L2_PAGETABLE_ENTRIES; ++l2 )
                {
                    if ( l2e_get_intpte(l2tab[l2]) )
                    {
                        l1_pgentry_t *l1tab = l2e_to_l1e(l2tab[l2]);
                        unsigned l1;

                        for ( l1 = 0; l1 < L1_PAGETABLE_ENTRIES; ++l1 )
                        {
                            if ( l1e_get_intpte(l1tab[l1]) )
                                free_domheap_page(l1e_get_page(l1tab[l1]));
                        }
                        free_domheap_page(l2e_get_page(l2tab[l2]));
                    }
                }
                free_domheap_page(l3e_get_page(d->arch.mm_arg_xlat_l3[l3]));
            }
        }
        free_domheap_page(virt_to_page(d->arch.mm_arg_xlat_l3));
    }
}

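/*
 * Give a 32-on-64 vcpu its own Xen-private L4 page table: a copy of
 * idle_pg_table with the linear-pagetable slot pointing back at this page,
 * the per-domain slot pointing at the domain's per-domain mappings, and the
 * argument translation area mapped in.
 */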
static int setup_compat_l4(struct vcpu *v)
{
    struct page_info *pg = alloc_domheap_page(NULL);
    l4_pgentry_t *l4tab;
    int rc;

    if ( !pg )
        return -ENOMEM;

    /* This page needs to look like a pagetable so that it can be shadowed */
    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated;

    l4tab = copy_page(page_to_virt(pg), idle_pg_table);
    l4tab[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_page(pg, __PAGE_HYPERVISOR);
    l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
        l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
                       __PAGE_HYPERVISOR);
    v->arch.guest_table = pagetable_from_page(pg);
    v->arch.guest_table_user = v->arch.guest_table;

    if ( (rc = setup_arg_xlat_area(v, l4tab)) < 0 )
    {
        free_domheap_page(pg);
        return rc;
    }

    return 0;
}

static void release_compat_l4(struct vcpu *v)
{
    free_domheap_page(pagetable_get_page(v->arch.guest_table));
    v->arch.guest_table = pagetable_null();
    v->arch.guest_table_user = pagetable_null();
}

static inline int may_switch_mode(struct domain *d)
{
    return (!is_hvm_domain(d) && (d->tot_pages == 0));
}

int switch_native(struct domain *d)
{
    l1_pgentry_t gdt_l1e;
    unsigned int vcpuid;

    if ( d == NULL )
        return -EINVAL;
    if ( !may_switch_mode(d) )
        return -EACCES;
    if ( !is_pv_32on64_domain(d) )
        return 0;

    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
    release_arg_xlat_area(d);

    /* switch gdt */
    gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
    {
        d->arch.mm_perdomain_pt[((vcpuid << GDT_LDT_VCPU_SHIFT) +
                                 FIRST_RESERVED_GDT_PAGE)] = gdt_l1e;
        if (d->vcpu[vcpuid])
            release_compat_l4(d->vcpu[vcpuid]);
    }

    d->arch.physaddr_bitsize = 64;

    return 0;
}

int switch_compat(struct domain *d)
{
    l1_pgentry_t gdt_l1e;
    unsigned int vcpuid;

    if ( d == NULL )
        return -EINVAL;
    if ( compat_disabled )
        return -ENOSYS;
    if ( !may_switch_mode(d) )
        return -EACCES;
    if ( is_pv_32on64_domain(d) )
        return 0;

    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;

    /* switch gdt */
    gdt_l1e = l1e_from_page(virt_to_page(compat_gdt_table), PAGE_HYPERVISOR);
    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
    {
        d->arch.mm_perdomain_pt[((vcpuid << GDT_LDT_VCPU_SHIFT) +
                                 FIRST_RESERVED_GDT_PAGE)] = gdt_l1e;
        if (d->vcpu[vcpuid]
            && setup_compat_l4(d->vcpu[vcpuid]) != 0)
            return -ENOMEM;
    }

    d->arch.physaddr_bitsize =
        fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
        + (PAGE_SIZE - 2);

    return 0;
}

#else
#define release_arg_xlat_area(d) ((void)0)
#define setup_compat_l4(v) 0
#define release_compat_l4(v) ((void)0)
#endif

int vcpu_initialise(struct vcpu *v)
{
    struct domain *d = v->domain;
    int rc;

    v->arch.flags = TF_kernel_mode;

    pae_l3_cache_init(&v->arch.pae_l3_cache);

    paging_vcpu_init(v);

    if ( is_hvm_domain(d) )
    {
        if ( (rc = hvm_vcpu_initialise(v)) != 0 )
            return rc;
    }
    else
    {
        /* PV guests by default have a 100Hz ticker. */
        v->periodic_period = MILLISECS(10);

        /* PV guests get an emulated PIT too for video BIOSes to use. */
        if ( !is_idle_domain(d) && (v->vcpu_id == 0) )
            pit_init(v, cpu_khz);

        v->arch.schedule_tail = continue_nonidle_domain;
        v->arch.ctxt_switch_from = paravirt_ctxt_switch_from;
        v->arch.ctxt_switch_to = paravirt_ctxt_switch_to;

        if ( is_idle_domain(d) )
        {
            v->arch.schedule_tail = continue_idle_domain;
            v->arch.cr3 = __pa(idle_pg_table);
        }
    }

    v->arch.perdomain_ptes =
        d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);

    return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
}

void vcpu_destroy(struct vcpu *v)
{
    if ( is_pv_32on64_vcpu(v) )
        release_compat_l4(v);
}

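/*
 * Allocate the x86-specific state of a new domain: the per-domain mapping
 * table (and, on x86-64, the L2/L3 pages that map it), the shared_info page
 * and I/O-port rangeset for non-idle domains, and paging/HVM state.
 */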
int arch_domain_create(struct domain *d)
{
#ifdef __x86_64__
    struct page_info *pg;
    int i;
#endif
    l1_pgentry_t gdt_l1e;
    int vcpuid, pdpt_order;
    int rc = -ENOMEM;

    pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
    d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
    if ( d->arch.mm_perdomain_pt == NULL )
        goto fail;
    memset(d->arch.mm_perdomain_pt, 0, PAGE_SIZE << pdpt_order);

    /*
     * Map Xen segments into every VCPU's GDT, irrespective of whether every
     * VCPU will actually be used. This avoids an NMI race during context
     * switch: if we take an interrupt after switching CR3 but before switching
     * GDT, and the old VCPU# is invalid in the new domain, we would otherwise
     * try to load CS from an invalid table.
     */
    gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
    for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
        d->arch.mm_perdomain_pt[((vcpuid << GDT_LDT_VCPU_SHIFT) +
                                 FIRST_RESERVED_GDT_PAGE)] = gdt_l1e;

#if defined(__i386__)

    mapcache_init(d);

#else /* __x86_64__ */

    if ( (pg = alloc_domheap_page(NULL)) == NULL )
        goto fail;
    d->arch.mm_perdomain_l2 = clear_page(page_to_virt(pg));
    for ( i = 0; i < (1 << pdpt_order); i++ )
        d->arch.mm_perdomain_l2[l2_table_offset(PERDOMAIN_VIRT_START)+i] =
            l2e_from_page(virt_to_page(d->arch.mm_perdomain_pt)+i,
                          __PAGE_HYPERVISOR);

    if ( (pg = alloc_domheap_page(NULL)) == NULL )
        goto fail;
    d->arch.mm_perdomain_l3 = clear_page(page_to_virt(pg));
    d->arch.mm_perdomain_l3[l3_table_offset(PERDOMAIN_VIRT_START)] =
        l3e_from_page(virt_to_page(d->arch.mm_perdomain_l2),
                      __PAGE_HYPERVISOR);

#endif /* __x86_64__ */

#ifdef CONFIG_COMPAT
    HYPERVISOR_COMPAT_VIRT_START(d) = __HYPERVISOR_COMPAT_VIRT_START;
#endif

    paging_domain_init(d);

    if ( !is_idle_domain(d) )
    {
        d->arch.ioport_caps =
            rangeset_new(d, "I/O Ports", RANGESETF_prettyprint_hex);
        if ( d->arch.ioport_caps == NULL )
            goto fail;

        if ( (d->shared_info = alloc_xenheap_page()) == NULL )
            goto fail;

        memset(d->shared_info, 0, PAGE_SIZE);
        share_xen_page_with_guest(
            virt_to_page(d->shared_info), d, XENSHARE_writable);
    }

    if ( is_hvm_domain(d) )
    {
        if ( (rc = hvm_domain_initialise(d)) != 0 )
            goto fail;
    }
    else
    {
        /* 32-bit PV guest by default only if Xen is not 64-bit. */
        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
            (CONFIG_PAGING_LEVELS != 4);
    }


    return 0;

 fail:
    free_xenheap_page(d->shared_info);
#ifdef __x86_64__
    if ( d->arch.mm_perdomain_l2 )
        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
    if ( d->arch.mm_perdomain_l3 )
        free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
    free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
    return rc;
}

void arch_domain_destroy(struct domain *d)
{
    struct vcpu *v;

    if ( is_hvm_domain(d) )
    {
        for_each_vcpu ( d, v )
            hvm_vcpu_destroy(v);
        hvm_domain_destroy(d);
    }

    paging_final_teardown(d);

    free_xenheap_pages(
        d->arch.mm_perdomain_pt,
        get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t)));

#ifdef __x86_64__
    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
    free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif

    if ( is_pv_32on64_domain(d) )
        release_arg_xlat_area(d);

    free_xenheap_page(d->shared_info);
}

/* This is called by arch_final_setup_guest and do_boot_vcpu */
int arch_set_info_guest(
    struct vcpu *v, vcpu_guest_context_u c)
{
    struct domain *d = v->domain;
    unsigned long cr3_pfn = INVALID_MFN;
    unsigned long flags;
    int i, rc = 0, compat;

    /* The context is a compat-mode one if the target domain is compat-mode;
     * we expect the tools to DTRT even in compat-mode callers. */
    compat = is_pv_32on64_domain(d);

#ifdef CONFIG_COMPAT
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
#else
#define c(fld) (c.nat->fld)
#endif
    flags = c(flags);

    if ( !is_hvm_vcpu(v) )
    {
        if ( !compat )
        {
            fixup_guest_stack_selector(d, c.nat->user_regs.ss);
            fixup_guest_stack_selector(d, c.nat->kernel_ss);
            fixup_guest_code_selector(d, c.nat->user_regs.cs);
#ifdef __i386__
            fixup_guest_code_selector(d, c.nat->event_callback_cs);
            fixup_guest_code_selector(d, c.nat->failsafe_callback_cs);
#endif

            for ( i = 0; i < 256; i++ )
                fixup_guest_code_selector(d, c.nat->trap_ctxt[i].cs);

            /* LDT safety checks. */
            if ( ((c.nat->ldt_base & (PAGE_SIZE-1)) != 0) ||
                 (c.nat->ldt_ents > 8192) ||
                 !array_access_ok(c.nat->ldt_base,
                                  c.nat->ldt_ents,
                                  LDT_ENTRY_SIZE) )
                return -EINVAL;
        }
#ifdef CONFIG_COMPAT
        else
        {
            fixup_guest_stack_selector(d, c.cmp->user_regs.ss);
            fixup_guest_stack_selector(d, c.cmp->kernel_ss);
            fixup_guest_code_selector(d, c.cmp->user_regs.cs);
            fixup_guest_code_selector(d, c.cmp->event_callback_cs);
            fixup_guest_code_selector(d, c.cmp->failsafe_callback_cs);

            for ( i = 0; i < 256; i++ )
                fixup_guest_code_selector(d, c.cmp->trap_ctxt[i].cs);

            /* LDT safety checks. */
            if ( ((c.cmp->ldt_base & (PAGE_SIZE-1)) != 0) ||
                 (c.cmp->ldt_ents > 8192) ||
                 !compat_array_access_ok(c.cmp->ldt_base,
                                         c.cmp->ldt_ents,
                                         LDT_ENTRY_SIZE) )
                return -EINVAL;
        }
#endif
    }

    v->fpu_initialised = !!(flags & VGCF_I387_VALID);

    v->arch.flags &= ~TF_kernel_mode;
    if ( (flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
        v->arch.flags |= TF_kernel_mode;

    if ( !compat )
        memcpy(&v->arch.guest_context, c.nat, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
    else
    {
        XLAT_vcpu_guest_context(&v->arch.guest_context, c.cmp);
    }
#endif

    /* Only CR0.TS is modifiable by guest or admin. */
    v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
    v->arch.guest_context.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;

    init_int80_direct_trap(v);

    if ( !is_hvm_vcpu(v) )
    {
        /* IOPL privileges are virtualised. */
        v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
        v->arch.guest_context.user_regs.eflags &= ~EF_IOPL;

        /* Ensure real hardware interrupts are enabled. */
        v->arch.guest_context.user_regs.eflags |= EF_IE;
    }
    else
    {
        hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
    }

    if ( v->is_initialised )
        goto out;

    memset(v->arch.guest_context.debugreg, 0,
           sizeof(v->arch.guest_context.debugreg));
    for ( i = 0; i < 8; i++ )
        (void)set_debugreg(v, i, c(debugreg[i]));

    if ( v->vcpu_id == 0 )
        d->vm_assist = c(vm_assist);

    if ( !is_hvm_vcpu(v) )
    {
        if ( !compat )
            rc = (int)set_gdt(v, c.nat->gdt_frames, c.nat->gdt_ents);
#ifdef CONFIG_COMPAT
        else
        {
            unsigned long gdt_frames[ARRAY_SIZE(c.cmp->gdt_frames)];
            unsigned int i, n = (c.cmp->gdt_ents + 511) / 512;

            if ( n > ARRAY_SIZE(c.cmp->gdt_frames) )
                return -EINVAL;
            for ( i = 0; i < n; ++i )
                gdt_frames[i] = c.cmp->gdt_frames[i];
            rc = (int)set_gdt(v, gdt_frames, c.cmp->gdt_ents);
        }
#endif
        if ( rc != 0 )
            return rc;

        if ( !compat )
        {
            cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[3]));

            if ( !mfn_valid(cr3_pfn) ||
                 (paging_mode_refcounts(d)
                  ? !get_page(mfn_to_page(cr3_pfn), d)
                  : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                       PGT_base_page_table)) )
            {
                destroy_gdt(v);
                return -EINVAL;
            }

            v->arch.guest_table = pagetable_from_pfn(cr3_pfn);

#ifdef __x86_64__
            if ( c.nat->ctrlreg[1] )
            {
                cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c.nat->ctrlreg[1]));

                if ( !mfn_valid(cr3_pfn) ||
                     (paging_mode_refcounts(d)
                      ? !get_page(mfn_to_page(cr3_pfn), d)
                      : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                           PGT_base_page_table)) )
                {
                    cr3_pfn = pagetable_get_pfn(v->arch.guest_table);
                    v->arch.guest_table = pagetable_null();
                    if ( paging_mode_refcounts(d) )
                        put_page(mfn_to_page(cr3_pfn));
                    else
                        put_page_and_type(mfn_to_page(cr3_pfn));
                    destroy_gdt(v);
                    return -EINVAL;
                }

                v->arch.guest_table_user = pagetable_from_pfn(cr3_pfn);
            }
#endif
        }
#ifdef CONFIG_COMPAT
        else
        {
            l4_pgentry_t *l4tab;

            cr3_pfn = gmfn_to_mfn(d, compat_cr3_to_pfn(c.cmp->ctrlreg[3]));

            if ( !mfn_valid(cr3_pfn) ||
                 (paging_mode_refcounts(d)
                  ? !get_page(mfn_to_page(cr3_pfn), d)
                  : !get_page_and_type(mfn_to_page(cr3_pfn), d,
                                       PGT_l3_page_table)) )
            {
                destroy_gdt(v);
                return -EINVAL;
            }

            l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
            *l4tab = l4e_from_pfn(cr3_pfn, _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
        }
#endif
    }

    if ( v->vcpu_id == 0 )
        update_domain_wallclock_time(d);

    /* Don't redo final setup */
    v->is_initialised = 1;

    if ( paging_mode_enabled(d) )
        paging_update_paging_modes(v);

    update_cr3(v);

 out:
    if ( flags & VGCF_online )
        clear_bit(_VPF_down, &v->pause_flags);
    else
        set_bit(_VPF_down, &v->pause_flags);
    return 0;
#undef c
}

int arch_vcpu_reset(struct vcpu *v)
{
    destroy_gdt(v);
    vcpu_destroy_pagetables(v);
    return 0;
}

long
arch_do_vcpu_op(
    int cmd, struct vcpu *v, XEN_GUEST_HANDLE(void) arg)
{
    long rc = 0;

    switch ( cmd )
    {
    case VCPUOP_register_runstate_memory_area:
    {
        struct vcpu_register_runstate_memory_area area;
        struct vcpu_runstate_info runstate;

        rc = -EFAULT;
        if ( copy_from_guest(&area, arg, 1) )
            break;

        if ( !guest_handle_okay(area.addr.h, 1) )
            break;

        rc = 0;
        runstate_guest(v) = area.addr.h;

        if ( v == current )
        {
            __copy_to_guest(runstate_guest(v), &v->runstate, 1);
        }
        else
        {
            vcpu_runstate_get(v, &runstate);
            __copy_to_guest(runstate_guest(v), &runstate, 1);
        }

        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}

#ifdef __x86_64__

#define loadsegment(seg,value) ({               \
    int __r = 1;                                \
    __asm__ __volatile__ (                      \
        "1: movl %k1,%%" #seg "\n2:\n"          \
        ".section .fixup,\"ax\"\n"              \
        "3: xorl %k0,%k0\n"                     \
        "   movl %k0,%%" #seg "\n"              \
        "   jmp 2b\n"                           \
        ".previous\n"                           \
        ".section __ex_table,\"a\"\n"           \
        "   .align 8\n"                         \
        "   .quad 1b,3b\n"                      \
        ".previous"                             \
        : "=r" (__r) : "r" (value), "0" (__r) );\
    __r; })

/*
 * save_segments() writes a mask of segments which are dirty (non-zero),
 * allowing load_segments() to avoid some expensive segment loads and
 * MSR writes.
 */
static DEFINE_PER_CPU(unsigned int, dirty_segment_mask);
#define DIRTY_DS           0x01
#define DIRTY_ES           0x02
#define DIRTY_FS           0x04
#define DIRTY_GS           0x08
#define DIRTY_FS_BASE      0x10
#define DIRTY_GS_BASE_USER 0x20

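/*
 * Load the data segment selectors (and, for 64-bit guests, the FS/GS base
 * MSRs) of the next vcpu, skipping loads that the dirty-segment mask shows
 * cannot be needed.  If any selector fails to load, a failsafe callback
 * frame is built on the guest's kernel stack instead.
 */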
static void load_segments(struct vcpu *n)
{
    struct vcpu_guest_context *nctxt = &n->arch.guest_context;
    int all_segs_okay = 1;
    unsigned int dirty_segment_mask, cpu = smp_processor_id();

    /* Load and clear the dirty segment mask. */
    dirty_segment_mask = per_cpu(dirty_segment_mask, cpu);
    per_cpu(dirty_segment_mask, cpu) = 0;

    /* Either selector != 0 ==> reload. */
    if ( unlikely((dirty_segment_mask & DIRTY_DS) | nctxt->user_regs.ds) )
        all_segs_okay &= loadsegment(ds, nctxt->user_regs.ds);

    /* Either selector != 0 ==> reload. */
    if ( unlikely((dirty_segment_mask & DIRTY_ES) | nctxt->user_regs.es) )
        all_segs_okay &= loadsegment(es, nctxt->user_regs.es);

    /*
     * Either selector != 0 ==> reload.
     * Also reload to reset FS_BASE if it was non-zero.
     */
    if ( unlikely((dirty_segment_mask & (DIRTY_FS | DIRTY_FS_BASE)) |
                  nctxt->user_regs.fs) )
        all_segs_okay &= loadsegment(fs, nctxt->user_regs.fs);

    /*
     * Either selector != 0 ==> reload.
     * Also reload to reset GS_BASE if it was non-zero.
     */
    if ( unlikely((dirty_segment_mask & (DIRTY_GS | DIRTY_GS_BASE_USER)) |
                  nctxt->user_regs.gs) )
    {
        /* Reset GS_BASE with user %gs? */
        if ( (dirty_segment_mask & DIRTY_GS) || !nctxt->gs_base_user )
            all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
    }

    if ( !is_pv_32on64_domain(n->domain) )
    {
        /* This can only be non-zero if selector is NULL. */
        if ( nctxt->fs_base )
            wrmsr(MSR_FS_BASE,
                  nctxt->fs_base,
                  nctxt->fs_base>>32);

        /* Most kernels have non-zero GS base, so don't bother testing. */
        /* (This is also a serialising instruction, avoiding AMD erratum #88.) */
        wrmsr(MSR_SHADOW_GS_BASE,
              nctxt->gs_base_kernel,
              nctxt->gs_base_kernel>>32);

        /* This can only be non-zero if selector is NULL. */
        if ( nctxt->gs_base_user )
            wrmsr(MSR_GS_BASE,
                  nctxt->gs_base_user,
                  nctxt->gs_base_user>>32);

        /* If in kernel mode then switch the GS bases around. */
        if ( (n->arch.flags & TF_kernel_mode) )
            __asm__ __volatile__ ( "swapgs" );
    }

    if ( unlikely(!all_segs_okay) )
    {
        struct cpu_user_regs *regs = guest_cpu_user_regs();
        unsigned long *rsp =
            (n->arch.flags & TF_kernel_mode) ?
            (unsigned long *)regs->rsp :
            (unsigned long *)nctxt->kernel_sp;
        unsigned long cs_and_mask, rflags;

        if ( is_pv_32on64_domain(n->domain) )
        {
            unsigned int *esp = ring_1(regs) ?
                                (unsigned int *)regs->rsp :
                                (unsigned int *)nctxt->kernel_sp;
            unsigned int cs_and_mask, eflags;
            int ret = 0;

            /* CS longword also contains full evtchn_upcall_mask. */
            cs_and_mask = (unsigned short)regs->cs |
                ((unsigned int)vcpu_info(n, evtchn_upcall_mask) << 16);
            /* Fold upcall mask into RFLAGS.IF. */
            eflags = regs->_eflags & ~X86_EFLAGS_IF;
            eflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;

            if ( !ring_1(regs) )
            {
                ret = put_user(regs->ss, esp-1);
                ret |= put_user(regs->_esp, esp-2);
                esp -= 2;
            }

            if ( ret |
                 put_user(eflags, esp-1) |
                 put_user(cs_and_mask, esp-2) |
                 put_user(regs->_eip, esp-3) |
                 put_user(nctxt->user_regs.gs, esp-4) |
                 put_user(nctxt->user_regs.fs, esp-5) |
                 put_user(nctxt->user_regs.es, esp-6) |
                 put_user(nctxt->user_regs.ds, esp-7) )
            {
                gdprintk(XENLOG_ERR, "Error while creating compat "
                         "failsafe callback frame.\n");
                domain_crash(n->domain);
            }

            if ( test_bit(_VGCF_failsafe_disables_events,
                          &n->arch.guest_context.flags) )
                vcpu_info(n, evtchn_upcall_mask) = 1;

            regs->entry_vector = TRAP_syscall;
            regs->_eflags &= 0xFFFCBEFFUL;
            regs->ss = FLAT_COMPAT_KERNEL_SS;
            regs->_esp = (unsigned long)(esp-7);
            regs->cs = FLAT_COMPAT_KERNEL_CS;
            regs->_eip = nctxt->failsafe_callback_eip;
            return;
        }

        if ( !(n->arch.flags & TF_kernel_mode) )
            toggle_guest_mode(n);
        else
            regs->cs &= ~3;

        /* CS longword also contains full evtchn_upcall_mask. */
        cs_and_mask = (unsigned long)regs->cs |
            ((unsigned long)vcpu_info(n, evtchn_upcall_mask) << 32);

        /* Fold upcall mask into RFLAGS.IF. */
        rflags = regs->rflags & ~X86_EFLAGS_IF;
        rflags |= !vcpu_info(n, evtchn_upcall_mask) << 9;

        if ( put_user(regs->ss, rsp- 1) |
             put_user(regs->rsp, rsp- 2) |
             put_user(rflags, rsp- 3) |
             put_user(cs_and_mask, rsp- 4) |
             put_user(regs->rip, rsp- 5) |
             put_user(nctxt->user_regs.gs, rsp- 6) |
             put_user(nctxt->user_regs.fs, rsp- 7) |
             put_user(nctxt->user_regs.es, rsp- 8) |
             put_user(nctxt->user_regs.ds, rsp- 9) |
             put_user(regs->r11, rsp-10) |
             put_user(regs->rcx, rsp-11) )
        {
            gdprintk(XENLOG_ERR, "Error while creating failsafe "
                     "callback frame.\n");
            domain_crash(n->domain);
        }

        if ( test_bit(_VGCF_failsafe_disables_events,
                      &n->arch.guest_context.flags) )
            vcpu_info(n, evtchn_upcall_mask) = 1;

        regs->entry_vector = TRAP_syscall;
        regs->rflags &= ~(X86_EFLAGS_AC|X86_EFLAGS_VM|X86_EFLAGS_RF|
                          X86_EFLAGS_NT|X86_EFLAGS_TF);
        regs->ss = FLAT_KERNEL_SS;
        regs->rsp = (unsigned long)(rsp-11);
        regs->cs = FLAT_KERNEL_CS;
        regs->rip = nctxt->failsafe_callback_eip;
    }
}

static void save_segments(struct vcpu *v)
{
    struct vcpu_guest_context *ctxt = &v->arch.guest_context;
    struct cpu_user_regs *regs = &ctxt->user_regs;
    unsigned int dirty_segment_mask = 0;

    regs->ds = read_segment_register(ds);
    regs->es = read_segment_register(es);
    regs->fs = read_segment_register(fs);
    regs->gs = read_segment_register(gs);

    if ( regs->ds )
        dirty_segment_mask |= DIRTY_DS;

    if ( regs->es )
        dirty_segment_mask |= DIRTY_ES;

    if ( regs->fs || is_pv_32on64_domain(v->domain) )
    {
        dirty_segment_mask |= DIRTY_FS;
        ctxt->fs_base = 0; /* != 0 selector kills fs_base */
    }
    else if ( ctxt->fs_base )
    {
        dirty_segment_mask |= DIRTY_FS_BASE;
    }

    if ( regs->gs || is_pv_32on64_domain(v->domain) )
    {
        dirty_segment_mask |= DIRTY_GS;
        ctxt->gs_base_user = 0; /* != 0 selector kills gs_base_user */
    }
    else if ( ctxt->gs_base_user )
    {
        dirty_segment_mask |= DIRTY_GS_BASE_USER;
    }

    this_cpu(dirty_segment_mask) = dirty_segment_mask;
}

#define switch_kernel_stack(v) ((void)0)

#elif defined(__i386__)

#define load_segments(n) ((void)0)
#define save_segments(p) ((void)0)

static inline void switch_kernel_stack(struct vcpu *v)
{
    struct tss_struct *tss = &init_tss[smp_processor_id()];
    tss->esp1 = v->arch.guest_context.kernel_sp;
    tss->ss1 = v->arch.guest_context.kernel_ss;
}

#endif /* __i386__ */

static void paravirt_ctxt_switch_from(struct vcpu *v)
{
    save_segments(v);
}

static void paravirt_ctxt_switch_to(struct vcpu *v)
{
    set_int80_direct_trap(v);
    switch_kernel_stack(v);
}

#define loaddebug(_v,_reg) \
    __asm__ __volatile__ ("mov %0,%%db" #_reg : : "r" ((_v)->debugreg[_reg]))

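/*
 * Switch this CPU's full register state from per_cpu(curr_vcpu) to
 * 'current': user register frame, FPU, debug registers, the per-arch
 * context-switch hooks, the page-table base and (when the vcpu id changes)
 * the GDT mapping, updating the dirty cpumasks as we go.
 */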
static void __context_switch(void)
{
    struct cpu_user_regs *stack_regs = guest_cpu_user_regs();
    unsigned int cpu = smp_processor_id();
    struct vcpu *p = per_cpu(curr_vcpu, cpu);
    struct vcpu *n = current;

    ASSERT(p != n);
    ASSERT(cpus_empty(n->vcpu_dirty_cpumask));

    if ( !is_idle_vcpu(p) )
    {
        memcpy(&p->arch.guest_context.user_regs,
               stack_regs,
               CTXT_SWITCH_STACK_BYTES);
        unlazy_fpu(p);
        p->arch.ctxt_switch_from(p);
    }

    if ( !is_idle_vcpu(n) )
    {
        memcpy(stack_regs,
               &n->arch.guest_context.user_regs,
               CTXT_SWITCH_STACK_BYTES);

        /* Maybe switch the debug registers. */
        if ( unlikely(n->arch.guest_context.debugreg[7]) )
        {
            loaddebug(&n->arch.guest_context, 0);
            loaddebug(&n->arch.guest_context, 1);
            loaddebug(&n->arch.guest_context, 2);
            loaddebug(&n->arch.guest_context, 3);
            /* no 4 and 5 */
            loaddebug(&n->arch.guest_context, 6);
            loaddebug(&n->arch.guest_context, 7);
        }
        n->arch.ctxt_switch_to(n);
    }

    if ( p->domain != n->domain )
        cpu_set(cpu, n->domain->domain_dirty_cpumask);
    cpu_set(cpu, n->vcpu_dirty_cpumask);

    write_ptbase(n);

    if ( p->vcpu_id != n->vcpu_id )
    {
        char gdt_load[10];
        *(unsigned short *)(&gdt_load[0]) = LAST_RESERVED_GDT_BYTE;
        *(unsigned long *)(&gdt_load[2]) = GDT_VIRT_START(n);
        __asm__ __volatile__ ( "lgdt %0" : "=m" (gdt_load) );
    }

    if ( p->domain != n->domain )
        cpu_clear(cpu, p->domain->domain_dirty_cpumask);
    cpu_clear(cpu, p->vcpu_dirty_cpumask);

    per_cpu(curr_vcpu, cpu) = n;
}


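/*
 * Scheduler entry point for switching this CPU from 'prev' to 'next'.  The
 * heavyweight __context_switch() is skipped when the next vcpu's state is
 * already loaded or when switching to the idle vcpu (lazy state switching);
 * otherwise it is performed with interrupts disabled, after which the
 * guest's LDT and segment state are reloaded and its registered runstate
 * area is updated.  Control then passes to schedule_tail() and never
 * returns here.
 */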
void context_switch(struct vcpu *prev, struct vcpu *next)
{
    unsigned int cpu = smp_processor_id();
    cpumask_t dirty_mask = next->vcpu_dirty_cpumask;

    ASSERT(local_irq_is_enabled());

    /* Allow at most one CPU at a time to be dirty. */
    ASSERT(cpus_weight(dirty_mask) <= 1);
    if ( unlikely(!cpu_isset(cpu, dirty_mask) && !cpus_empty(dirty_mask)) )
    {
        /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
        if ( !cpus_empty(next->vcpu_dirty_cpumask) )
            flush_tlb_mask(next->vcpu_dirty_cpumask);
    }

    local_irq_disable();

    if ( is_hvm_vcpu(prev) && !list_empty(&prev->arch.hvm_vcpu.tm_list) )
        pt_freeze_time(prev);

    set_current(next);

    if ( (per_cpu(curr_vcpu, cpu) == next) || is_idle_vcpu(next) )
    {
        local_irq_enable();
    }
    else
    {
        __context_switch();

#ifdef CONFIG_COMPAT
        if ( is_idle_vcpu(prev) ||
             (is_pv_32on64_domain(prev->domain) !=
              is_pv_32on64_domain(next->domain)) )
        {
            uint32_t efer_lo, efer_hi;

            local_flush_tlb_one(GDT_VIRT_START(next) +
                                FIRST_RESERVED_GDT_BYTE);

            rdmsr(MSR_EFER, efer_lo, efer_hi);
            if ( !is_pv_32on64_domain(next->domain) == !(efer_lo & EFER_SCE) )
            {
                efer_lo ^= EFER_SCE;
                wrmsr(MSR_EFER, efer_lo, efer_hi);
            }
        }
#endif

        /* Re-enable interrupts before restoring state which may fault. */
        local_irq_enable();

        if ( !is_hvm_vcpu(next) )
        {
            load_LDT(next);
            load_segments(next);
        }
    }

    context_saved(prev);

    /* Update per-VCPU guest runstate shared memory area (if registered). */
    if ( !guest_handle_is_null(runstate_guest(next)) )
    {
        if ( !is_pv_32on64_domain(next->domain) )
            __copy_to_guest(runstate_guest(next), &next->runstate, 1);
#ifdef CONFIG_COMPAT
        else
        {
            struct compat_vcpu_runstate_info info;

            XLAT_vcpu_runstate_info(&info, &next->runstate);
            __copy_to_guest(next->runstate_guest.compat, &info, 1);
        }
#endif
    }

    schedule_tail(next);
    BUG();
}

void continue_running(struct vcpu *same)
{
    schedule_tail(same);
    BUG();
}

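/*
 * If this CPU is still lazily holding the register state of a descheduled
 * vcpu (i.e. curr_vcpu differs from the idle vcpu now running), complete the
 * deferred context switch.  Returns non-zero if a switch was performed.
 */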
int __sync_lazy_execstate(void)
{
    unsigned long flags;
    int switch_required;

    local_irq_save(flags);

    switch_required = (this_cpu(curr_vcpu) != current);

    if ( switch_required )
    {
        ASSERT(current == idle_vcpu[smp_processor_id()]);
        __context_switch();
    }

    local_irq_restore(flags);

    return switch_required;
}

void sync_vcpu_execstate(struct vcpu *v)
{
    if ( cpu_isset(smp_processor_id(), v->vcpu_dirty_cpumask) )
        (void)__sync_lazy_execstate();

    /* Other cpus call __sync_lazy_execstate from flush ipi handler. */
    flush_tlb_mask(v->vcpu_dirty_cpumask);
}

#define next_arg(fmt, args) ({                                              \
    unsigned long __arg;                                                    \
    switch ( *(fmt)++ )                                                     \
    {                                                                       \
    case 'i': __arg = (unsigned long)va_arg(args, unsigned int); break;     \
    case 'l': __arg = (unsigned long)va_arg(args, unsigned long); break;    \
    case 'h': __arg = (unsigned long)va_arg(args, void *); break;           \
    default: __arg = 0; BUG();                                              \
    }                                                                       \
    __arg;                                                                  \
})

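/*
 * Arrange for the current hypercall to be restarted: within a multicall the
 * current call is marked preempted and its (possibly updated) arguments are
 * stashed; otherwise the guest instruction pointer is rewound past the
 * hypercall instruction and the arguments are written back into the calling
 * registers.  Returns 'op' for the caller to propagate as its return value.
 */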
unsigned long hypercall_create_continuation(
    unsigned int op, const char *format, ...)
{
    struct mc_state *mcs = &this_cpu(mc_state);
    struct cpu_user_regs *regs;
    const char *p = format;
    unsigned long arg;
    unsigned int i;
    va_list args;

    va_start(args, format);

    if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
    {
        __set_bit(_MCSF_call_preempted, &mcs->flags);

        for ( i = 0; *p != '\0'; i++ )
            mcs->call.args[i] = next_arg(p, args);
        if ( is_pv_32on64_domain(current->domain) )
        {
            for ( ; i < 6; i++ )
                mcs->call.args[i] = 0;
        }
    }
    else
    {
        regs = guest_cpu_user_regs();
        regs->eax = op;
        regs->eip -= 2; /* re-execute 'syscall' / 'int 0x82' */

#ifdef __x86_64__
        if ( !is_pv_32on64_domain(current->domain) )
        {
            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);
                switch ( i )
                {
                case 0: regs->rdi = arg; break;
                case 1: regs->rsi = arg; break;
                case 2: regs->rdx = arg; break;
                case 3: regs->r10 = arg; break;
                case 4: regs->r8 = arg; break;
                case 5: regs->r9 = arg; break;
                }
            }
        }
        else
#endif
        {
            if ( supervisor_mode_kernel )
                regs->eip &= ~31; /* re-execute entire hypercall entry stub */

            for ( i = 0; *p != '\0'; i++ )
            {
                arg = next_arg(p, args);
                switch ( i )
                {
                case 0: regs->ebx = arg; break;
                case 1: regs->ecx = arg; break;
                case 2: regs->edx = arg; break;
                case 3: regs->esi = arg; break;
                case 4: regs->edi = arg; break;
                case 5: regs->ebp = arg; break;
                }
            }
        }
    }

    va_end(args);

    return op;
}

#ifdef CONFIG_COMPAT
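/*
 * Translate the arguments of a preempted hypercall for a compat guest: for
 * each argument slot selected by @mask the caller supplies a (native,
 * compat) value pair, and wherever the saved continuation argument matches
 * the native value it is replaced by the compat one (in the multicall state
 * or in the guest registers).  If @id names an unselected slot, that slot's
 * current value is reported back through *id.  Returns the number of
 * arguments that were replaced.
 */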
int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
{
    int rc = 0;
    struct mc_state *mcs = &this_cpu(mc_state);
    struct cpu_user_regs *regs;
    unsigned int i, cval = 0;
    unsigned long nval = 0;
    va_list args;

    BUG_ON(*id > 5);
    BUG_ON(mask & (1U << *id));

    va_start(args, mask);

    if ( test_bit(_MCSF_in_multicall, &mcs->flags) )
    {
        if ( !test_bit(_MCSF_call_preempted, &mcs->flags) )
            return 0;
        for ( i = 0; i < 6; ++i, mask >>= 1 )
        {
            if ( mask & 1 )
            {
                nval = va_arg(args, unsigned long);
                cval = va_arg(args, unsigned int);
                if ( cval == nval )
                    mask &= ~1U;
                else
                    BUG_ON(nval == (unsigned int)nval);
            }
            else if ( id && *id == i )
            {
                *id = mcs->call.args[i];
                id = NULL;
            }
            if ( (mask & 1) && mcs->call.args[i] == nval )
                ++rc;
            else
            {
                cval = mcs->call.args[i];
                BUG_ON(mcs->call.args[i] != cval);
            }
            mcs->compat_call.args[i] = cval;
        }
    }
    else
    {
        regs = guest_cpu_user_regs();
        for ( i = 0; i < 6; ++i, mask >>= 1 )
        {
            unsigned long *reg;

            switch ( i )
            {
            case 0: reg = &regs->ebx; break;
            case 1: reg = &regs->ecx; break;
            case 2: reg = &regs->edx; break;
            case 3: reg = &regs->esi; break;
            case 4: reg = &regs->edi; break;
            case 5: reg = &regs->ebp; break;
            default: BUG(); reg = NULL; break;
            }
            if ( (mask & 1) )
            {
                nval = va_arg(args, unsigned long);
                cval = va_arg(args, unsigned int);
                if ( cval == nval )
                    mask &= ~1U;
                else
                    BUG_ON(nval == (unsigned int)nval);
            }
            else if ( id && *id == i )
            {
                *id = *reg;
                id = NULL;
            }
            if ( (mask & 1) && *reg == nval )
            {
                *reg = cval;
                ++rc;
            }
            else
                BUG_ON(*reg != (unsigned int)*reg);
        }
    }

    va_end(args);

    return rc;
}
#endif

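/*
 * Drop the allocation and pin references on every page in @list so the pages
 * can be freed, forcibly invalidating still-validated page tables of @type
 * to break circular 'linear page table' references (see below).
 */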
static void relinquish_memory(struct domain *d, struct list_head *list,
                              unsigned long type)
{
    struct list_head *ent;
    struct page_info *page;
    unsigned long x, y;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    ent = list->next;
    while ( ent != list )
    {
        page = list_entry(ent, struct page_info, list);

        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
        {
            /* Couldn't get a reference -- someone is freeing this page. */
            ent = ent->next;
            continue;
        }

        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
            put_page_and_type(page);

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        /*
         * Forcibly invalidate top-most, still valid page tables at this point
         * to break circular 'linear page table' references. This is okay
         * because MMU structures are not shared across domains and this domain
         * is now dead. Thus top-most valid tables are not in use so a non-zero
         * count means circular reference.
         */
        y = page->u.inuse.type_info;
        for ( ; ; )
        {
            x = y;
            if ( likely((x & (PGT_type_mask|PGT_validated)) !=
                        (type|PGT_validated)) )
                break;

            y = cmpxchg(&page->u.inuse.type_info, x, x & ~PGT_validated);
            if ( likely(y == x) )
            {
                free_page_type(page, type);
                break;
            }
        }

        /* Follow the list chain and /then/ potentially free the page. */
        ent = ent->next;
        put_page(page);
    }

    spin_unlock_recursive(&d->page_alloc_lock);
}

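/*
 * Drop the references this vcpu holds on its top-level page tables
 * (guest_table and, on x86-64, guest_table_user), handling the private L4
 * wrapper used by 32-on-64 vcpus, and clear v->arch.cr3.
 */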
static void vcpu_destroy_pagetables(struct vcpu *v)
{
    struct domain *d = v->domain;
    unsigned long pfn;

#ifdef __x86_64__
    if ( is_pv_32on64_vcpu(v) )
    {
        pfn = l4e_get_pfn(*(l4_pgentry_t *)
                          __va(pagetable_get_paddr(v->arch.guest_table)));

        if ( pfn != 0 )
        {
            if ( paging_mode_refcounts(d) )
                put_page(mfn_to_page(pfn));
            else
                put_page_and_type(mfn_to_page(pfn));
        }

        l4e_write(
            (l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)),
            l4e_empty());

        v->arch.cr3 = 0;
        return;
    }
#endif

    pfn = pagetable_get_pfn(v->arch.guest_table);
    if ( pfn != 0 )
    {
        if ( paging_mode_refcounts(d) )
            put_page(mfn_to_page(pfn));
        else
            put_page_and_type(mfn_to_page(pfn));
#ifdef __x86_64__
        if ( pfn == pagetable_get_pfn(v->arch.guest_table_user) )
            v->arch.guest_table_user = pagetable_null();
#endif
        v->arch.guest_table = pagetable_null();
    }

#ifdef __x86_64__
    /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
    pfn = pagetable_get_pfn(v->arch.guest_table_user);
    if ( pfn != 0 )
    {
        if ( paging_mode_refcounts(d) )
            put_page(mfn_to_page(pfn));
        else
            put_page_and_type(mfn_to_page(pfn));
        v->arch.guest_table_user = pagetable_null();
    }
#endif

    v->arch.cr3 = 0;
}

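/*
 * Called during domain destruction to give back everything the guest still
 * holds: page-table base references, paging-assistance state, GDT mappings,
 * every page on the domain's page lists, and oprofile/HVM resources.
 */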
void domain_relinquish_resources(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!cpus_empty(d->domain_dirty_cpumask));

    /* Drop the in-use references to page-table bases. */
    for_each_vcpu ( d, v )
        vcpu_destroy_pagetables(v);

    /* Tear down paging-assistance stuff. */
    paging_teardown(d);

    /*
     * Relinquish GDT mappings. No need for explicit unmapping of the LDT as
     * it automatically gets squashed when the guest's mappings go away.
     */
    for_each_vcpu(d, v)
        destroy_gdt(v);

    /* Relinquish every page of memory. */
#if CONFIG_PAGING_LEVELS >= 4
    relinquish_memory(d, &d->xenpage_list, PGT_l4_page_table);
    relinquish_memory(d, &d->page_list, PGT_l4_page_table);
#endif
#if CONFIG_PAGING_LEVELS >= 3
    relinquish_memory(d, &d->xenpage_list, PGT_l3_page_table);
    relinquish_memory(d, &d->page_list, PGT_l3_page_table);
#endif
    relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
    relinquish_memory(d, &d->page_list, PGT_l2_page_table);

    /* Free page used by xen oprofile buffer. */
    free_xenoprof_pages(d);

    if ( is_hvm_domain(d) )
        hvm_domain_relinquish_resources(d);
}

void arch_dump_domain_info(struct domain *d)
{
    paging_dump_domain_info(d);
}

void arch_dump_vcpu_info(struct vcpu *v)
{
    paging_dump_vcpu_info(v);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */