/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <acm/acm_hooks.h>

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;
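
/*
 * Two structures index every non-idle domain:
 *  - domain_list: a singly-linked list kept sorted by domain_id (see
 *    domain_create()).
 *  - domain_hash[]: buckets keyed by DOMAIN_HASH(domid), used for the
 *    by-id lookups below.
 * Writers serialise on domlist_update_lock and publish with
 * rcu_assign_pointer(); readers traverse either structure under
 * rcu_read_lock(&domlist_read_lock) using rcu_dereference().
 */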
---|

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}

struct domain *alloc_domain(domid_t domid)
{
    struct domain *d;

    if ( (d = xmalloc(struct domain)) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;
    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    return d;
}

void free_domain(struct domain *d)
{
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
    }

    sched_destroy_domain(d);
    xfree(d);
}

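/*
 * Shutdown handshake (see also domain_shutdown() and the deferral helpers
 * further down): domain_shutdown() sets d->is_shutting_down and pauses
 * every vcpu that has not set v->defer_shutdown; a deferring vcpu pauses
 * itself later via vcpu_check_shutdown().  Once every vcpu is
 * paused_for_shutdown, __domain_finalise_shutdown() marks the domain
 * is_shut_down, puts the vcpus to sleep and notifies the tools via
 * VIRQ_DOM_EXC.
 */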
---|
static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            atomic_inc(&v->pause_count);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}

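/*
 * Allocate and initialise vcpu @vcpu_id of domain @d, initially placed on
 * physical cpu @cpu_id.  Construction order matters: scheduler and
 * architecture state are set up before the vcpu is linked into d->vcpu[]
 * and the next_in_list chain, and vcpu_check_shutdown() runs only once the
 * vcpu is visible to for_each_vcpu(), so a concurrent domain_shutdown()
 * cannot miss it.
 */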
---|
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    if ( !is_idle_domain(d) )
    {
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}

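/*
 * Lazily allocate the idle vcpu for physical cpu @cpu_id.  Idle vcpus are
 * grouped into idle domains of up to MAX_VIRT_CPUS each: the first cpu of a
 * group creates a domain with IDLE_DOMAIN_ID, and later cpus in the group
 * attach to it via idle_vcpu[cpu_id - vcpu_id].
 */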
---|
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}

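/*
 * Create and register a new domain.  DOMCRF_hvm in @domcr_flags marks the
 * domain as HVM.  Non-idle domains are created paused, with
 * is_paused_by_controller set and one pause reference held, so the
 * toolstack must unpause them once construction is complete.  Illustrative
 * call sequence (an assumed caller, not part of this file):
 *
 *     struct domain *d = domain_create(domid, 0, ssidref);
 *     if ( d == NULL )
 *         return -ENOMEM;
 *     ... populate memory, create vcpus, etc. ...
 *     domain_unpause_by_systemcontroller(d);
 */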
---|
struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;

    if ( (d = alloc_domain(domid)) == NULL )
        return NULL;

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    rangeset_domain_initialise(d);

    if ( !is_idle_domain(d) )
    {
        d->is_paused_by_controller = 1;
        atomic_inc(&d->pause_count);

        if ( evtchn_init(d) != 0 )
            goto fail1;

        if ( grant_table_create(d) != 0 )
            goto fail2;

        if ( acm_domain_create(d, ssidref) != 0 )
            goto fail3;
    }

    if ( arch_domain_create(d) != 0 )
        goto fail4;

    d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex);
    d->irq_caps = rangeset_new(d, "Interrupts", 0);
    if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) )
        goto fail5;

    if ( sched_init_domain(d) != 0 )
        goto fail5;

    if ( !is_idle_domain(d) )
    {
        spin_lock(&domlist_update_lock);
        pd = &domain_list; /* NB. domain_list maintained in order of domid. */
        for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list )
            if ( (*pd)->domain_id > d->domain_id )
                break;
        d->next_in_list = *pd;
        d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)];
---|
        /*
         * The two RCU assignments below are not atomic, so readers may see
         * an inconsistent domain_list and hash table.  That is OK as long
         * as each RCU reader-side critical section uses only one of them.
         */
---|
        rcu_assign_pointer(*pd, d);
        rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d);
        spin_unlock(&domlist_update_lock);
    }

    return d;

 fail5:
    arch_domain_destroy(d);
 fail4:
    if ( !is_idle_domain(d) )
        acm_domain_destroy(d);
 fail3:
    if ( !is_idle_domain(d) )
        grant_table_destroy(d);
 fail2:
    if ( !is_idle_domain(d) )
        evtchn_destroy(d);
 fail1:
    rangeset_domain_destroy(d);
    free_domain(d);
    return NULL;
}


struct domain *get_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
        {
            if ( unlikely(!get_domain(d)) )
                d = NULL;
            break;
        }
    }

    rcu_read_unlock(&domlist_read_lock);

    return d;
}


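/*
 * rcu_lock_domain_by_id() is the lighter-weight counterpart of
 * get_domain_by_id() above: rather than taking a reference that the caller
 * must later drop with put_domain(), it returns with the RCU read lock
 * still held, and the caller is responsible for releasing it
 * (rcu_read_unlock(&domlist_read_lock), usually via an rcu_unlock_domain()
 * style wrapper).  On lookup failure the lock is already dropped and NULL
 * is returned.
 */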
---|
struct domain *rcu_lock_domain_by_id(domid_t dom)
{
    struct domain *d;

    rcu_read_lock(&domlist_read_lock);

    for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]);
          d != NULL;
          d = rcu_dereference(d->next_in_hashbucket) )
    {
        if ( d->domain_id == dom )
            return d;
    }

    rcu_read_unlock(&domlist_read_lock);

    return NULL;
}


void domain_kill(struct domain *d)
{
    domain_pause(d);

    /* Already dying? Then bail. */
    if ( test_and_set_bool(d->is_dying) )
    {
        domain_unpause(d);
        return;
    }

    gnttab_release_mappings(d);
    domain_relinquish_resources(d);
    put_domain(d);

    /* Kick page scrubbing after domain_relinquish_resources(). */
    page_scrub_kick();

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}


void __domain_crash(struct domain *d)
{
    if ( d->is_shutting_down )
    {
        /* Print nothing: the domain is already shutting down. */
    }
    else if ( d == current->domain )
    {
        printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n",
               d->domain_id, current->vcpu_id, smp_processor_id());
        show_execution_state(guest_cpu_user_regs());
    }
    else
    {
        printk("Domain %d reported crashed by domain %d on cpu#%d:\n",
               d->domain_id, current->domain->domain_id, smp_processor_id());
    }

    domain_shutdown(d, SHUTDOWN_crash);
}


void __domain_crash_synchronous(void)
{
    __domain_crash(current->domain);

    /*
     * Flush multicall state before dying if a multicall is in progress.
     * This shouldn't be necessary, but some architectures are calling
     * domain_crash_synchronous() when they really shouldn't (i.e., from
     * within hypercall context).
     */
    if ( this_cpu(mc_state).flags != 0 )
    {
        dprintk(XENLOG_ERR,
                "FIXME: synchronous domain crash during a multicall!\n");
        this_cpu(mc_state).flags = 0;
    }

    for ( ; ; )
        do_softirq();
}


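/*
 * Initiate an orderly shutdown of @d with one of the SHUTDOWN_* reason
 * codes from public/sched.h (e.g. SHUTDOWN_crash, as used by
 * __domain_crash() above).  Domain 0 is special-cased: its request is first
 * handed to dom0_shutdown(), which acts at the host level.
 */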
---|
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
        dom0_shutdown(reason);

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        spin_unlock(&d->shutdown_lock);
        return;
    }

    d->is_shutting_down = 1;
    d->shutdown_code = reason;

    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */

    for_each_vcpu ( d, v )
    {
        if ( v->defer_shutdown )
            continue;
        atomic_inc(&v->pause_count);
        v->paused_for_shutdown = 1;
    }

    __domain_finalise_shutdown(d);

    spin_unlock(&d->shutdown_lock);
}

void domain_resume(struct domain *d)
{
    struct vcpu *v;

    /*
     * Some code paths assume that shutdown status does not get reset under
     * their feet (e.g., some assertions make this assumption).
     */
    domain_pause(d);

    spin_lock(&d->shutdown_lock);

    d->is_shutting_down = d->is_shut_down = 0;

    for_each_vcpu ( d, v )
    {
        if ( v->paused_for_shutdown )
            vcpu_unpause(v);
        v->paused_for_shutdown = 0;
    }

    spin_unlock(&d->shutdown_lock);

    domain_unpause(d);
}

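/*
 * Shutdown deferral interface.  A vcpu about to enter a critical section
 * that must not race with being paused for shutdown calls
 * vcpu_start_shutdown_deferral(); a zero return value means shutdown is
 * already in progress and the vcpu has been claimed by it.  Illustrative
 * pattern (an assumed caller, not taken from this file):
 *
 *     if ( !vcpu_start_shutdown_deferral(current) )
 *         return -EAGAIN;              (shutdown already under way)
 *     ... work that must complete before this vcpu can be paused ...
 *     vcpu_end_shutdown_deferral(current);
 */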
---|
int vcpu_start_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 1;
    smp_mb(); /* set deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
    return v->defer_shutdown;
}

void vcpu_end_shutdown_deferral(struct vcpu *v)
{
    v->defer_shutdown = 0;
    smp_mb(); /* clear deferral status /then/ check for shutdown */
    if ( unlikely(v->domain->is_shutting_down) )
        vcpu_check_shutdown(v);
}

void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}

---|
/* Complete domain destroy once RCU readers no longer hold old references. */
---|
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);

    acm_domain_destroy(d);

    rangeset_domain_destroy(d);

    evtchn_destroy(d);
    grant_table_destroy(d);

    arch_domain_destroy(d);

    free_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

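/*
 * Final teardown path.  alloc_domain() creates a domain with one reference;
 * domain_kill() drops that reference with put_domain() once resources have
 * been relinquished, and domain_destroy() is reached when the count falls
 * to zero (typically from the reference-drop path).  The compare-and-swap
 * below flips the refcount to DOMAIN_DESTROYED so a racing get_domain()
 * can no longer resurrect the domain, and the actual freeing is deferred to
 * an RCU callback so lockless readers of domain_list/domain_hash never see
 * a freed domain.
 */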
---|
/* Release resources belonging to domain @d. */
---|
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}

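/*
 * Pause/unpause helpers.  Pauses nest: every vcpu_pause()/domain_pause()
 * must eventually be matched by a vcpu_unpause()/domain_unpause().  The
 * plain variants wait synchronously (vcpu_sleep_sync()) for the target to
 * stop running and therefore must never be applied to the calling vcpu or
 * domain; vcpu_pause_nosync() merely raises the pause count and prods the
 * scheduler.
 */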
---|
void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

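/*
 * Load a guest-supplied context into a vcpu that has not yet been
 * initialised.  Called from VCPUOP_initialise below with the domain's big
 * lock held.
 */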
---|
int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

int vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;
    int rc;

    domain_pause(d);
    LOCK_BIGLOCK(d);

    rc = arch_vcpu_reset(v);
    if ( rc != 0 )
        goto out;

    set_bit(_VPF_down, &v->pause_flags);

    v->fpu_initialised = 0;
    v->fpu_dirtied = 0;
    v->is_polling = 0;
    v->is_initialised = 0;
    v->nmi_pending = 0;
    v->nmi_masked = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);

 out:
    UNLOCK_BIGLOCK(v->domain);
    domain_unpause(d);

    return rc;
}


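/*
 * VCPUOP_* hypercall dispatcher.  It operates only on vcpus of the calling
 * domain; commands not handled here fall through to arch_do_vcpu_op().
 */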
---|
long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        LOCK_BIGLOCK(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        UNLOCK_BIGLOCK(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;

        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);

        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
    {
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;
    }

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
    {
        if ( v != current )
            return -EINVAL;

        stop_timer(&v->singleshot_timer);
        break;
    }

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}

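/*
 * Enable or disable one of the VMASST_TYPE_* assistance bits in
 * @p->vm_assist, on behalf of the vm_assist hypercall path; @type is
 * bounds-checked against MAX_VMASST_TYPE.
 */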
---|
long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
---|