/******************************************************************************
 * event_channel.c
 *
 * Event notifications from VIRQs, PIRQs, and other domains.
 *
 * Copyright (c) 2003-2006, K A Fraser.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/event.h>
#include <xen/irq.h>
#include <xen/iocap.h>
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <asm/current.h>

#include <public/xen.h>
#include <public/event_channel.h>
#include <acm/acm_hooks.h>

#define bucket_from_port(d,p) \
    ((d)->evtchn[(p)/EVTCHNS_PER_BUCKET])
#define port_is_valid(d,p)    \
    (((p) >= 0) && ((p) < MAX_EVTCHNS(d)) && \
     (bucket_from_port(d,p) != NULL))
#define evtchn_from_port(d,p) \
    (&(bucket_from_port(d,p))[(p)&(EVTCHNS_PER_BUCKET-1)])

#define ERROR_EXIT(_errno)                                           \
    do {                                                             \
        gdprintk(XENLOG_WARNING,                                     \
                 "EVTCHNOP failure: domain %d, error %d, line %d\n", \
                 current->domain->domain_id, (_errno), __LINE__);    \
        rc = (_errno);                                               \
        goto out;                                                    \
    } while ( 0 )


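/*
 * Returns nonzero if the VIRQ is domain-global. Global VIRQs are initially
 * bound on VCPU0 and may later be re-targeted with EVTCHNOP_bind_vcpu;
 * per-VCPU VIRQs (e.g. VIRQ_TIMER, VIRQ_DEBUG) stay on the VCPU that bound
 * them.
 */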
static int virq_is_global(int virq)
{
    int rc;

    ASSERT((virq >= 0) && (virq < NR_VIRQS));

    switch ( virq )
    {
    case VIRQ_TIMER:
    case VIRQ_DEBUG:
    case VIRQ_XENOPROF:
        rc = 0;
        break;
    case VIRQ_ARCH_0 ... VIRQ_ARCH_7:
        rc = arch_virq_is_global(virq);
        break;
    default:
        rc = 1;
        break;
    }

    return rc;
}


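/*
 * Return the lowest-numbered free port in domain @d, allocating and zeroing
 * a fresh bucket of evtchn structures if all existing buckets are in use.
 * Returns -ENOSPC when the port space is exhausted, or -ENOMEM on allocation
 * failure. Callers serialise on d->evtchn_lock (domain creation aside).
 */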
static int get_free_port(struct domain *d)
{
    struct evtchn *chn;
    int            port;

    for ( port = 0; port_is_valid(d, port); port++ )
        if ( evtchn_from_port(d, port)->state == ECS_FREE )
            return port;

    if ( port == MAX_EVTCHNS(d) )
        return -ENOSPC;

    chn = xmalloc_array(struct evtchn, EVTCHNS_PER_BUCKET);
    if ( unlikely(chn == NULL) )
        return -ENOMEM;
    memset(chn, 0, EVTCHNS_PER_BUCKET * sizeof(*chn));
    bucket_from_port(d, port) = chn;

    return port;
}


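/*
 * EVTCHNOP_alloc_unbound: allocate a port in <dom> that only the nominated
 * remote domain may subsequently bind. Allocating on behalf of another
 * domain requires privilege.
 */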
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
    struct evtchn *chn;
    struct domain *d;
    int            port;
    domid_t        dom = alloc->dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_unbound(dom, alloc->remote_dom)) != 0 )
        return rc;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF )
        chn->u.unbound.remote_domid = current->domain->domain_id;

    alloc->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    rcu_unlock_domain(d);

    return rc;
}


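/*
 * EVTCHNOP_bind_interdomain: connect a new local port to an ECS_UNBOUND
 * remote port that was advertised to this domain. Both ends become
 * ECS_INTERDOMAIN, and a notification is conservatively set on the local
 * port in case events were sent while the remote end was still unbound.
 */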
static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            lport, rport = bind->remote_port;
    domid_t        rdom = bind->remote_dom;
    long           rc;

    if ( (rc = acm_pre_eventchannel_interdomain(rdom)) != 0 )
        return rc;

    if ( rdom == DOMID_SELF )
        rdom = current->domain->domain_id;

    if ( (rd = rcu_lock_domain_by_id(rdom)) == NULL )
        return -ESRCH;

    /* Avoid deadlock by acquiring the two locks in a fixed (address) order. */
    if ( ld < rd )
    {
        spin_lock(&ld->evtchn_lock);
        spin_lock(&rd->evtchn_lock);
    }
    else
    {
        if ( ld != rd )
            spin_lock(&rd->evtchn_lock);
        spin_lock(&ld->evtchn_lock);
    }

    if ( (lport = get_free_port(ld)) < 0 )
        ERROR_EXIT(lport);
    lchn = evtchn_from_port(ld, lport);

    if ( !port_is_valid(rd, rport) )
        ERROR_EXIT(-EINVAL);
    rchn = evtchn_from_port(rd, rport);
    if ( (rchn->state != ECS_UNBOUND) ||
         (rchn->u.unbound.remote_domid != ld->domain_id) )
        ERROR_EXIT(-EINVAL);

    lchn->u.interdomain.remote_dom  = rd;
    lchn->u.interdomain.remote_port = (u16)rport;
    lchn->state                     = ECS_INTERDOMAIN;

    rchn->u.interdomain.remote_dom  = ld;
    rchn->u.interdomain.remote_port = (u16)lport;
    rchn->state                     = ECS_INTERDOMAIN;

    /*
     * We may have lost notifications on the remote unbound port. Fix that up
     * here by conservatively always setting a notification on the local port.
     */
    evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);

    bind->local_port = lport;

 out:
    spin_unlock(&ld->evtchn_lock);
    if ( ld != rd )
        spin_unlock(&rd->evtchn_lock);

    rcu_unlock_domain(rd);

    return rc;
}


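/*
 * EVTCHNOP_bind_virq: bind a VIRQ to a new port delivered to the given
 * VCPU. Global VIRQs may only be bound via VCPU0, and each VIRQ may be
 * bound at most once per VCPU.
 */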
static long evtchn_bind_virq(evtchn_bind_virq_t *bind)
{
    struct evtchn *chn;
    struct vcpu   *v;
    struct domain *d = current->domain;
    int            port, virq = bind->virq, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (virq < 0) || (virq >= ARRAY_SIZE(v->virq_to_evtchn)) )
        return -EINVAL;

    if ( virq_is_global(virq) && (vcpu != 0) )
        return -EINVAL;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         ((v = d->vcpu[vcpu]) == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( v->virq_to_evtchn[virq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_VIRQ;
    chn->notify_vcpu_id = vcpu;
    chn->u.virq         = virq;

    v->virq_to_evtchn[virq] = bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}


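/*
 * EVTCHNOP_bind_ipi: allocate a port for intra-domain notifications,
 * delivered to the nominated VCPU.
 */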
static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, vcpu = bind->vcpu;
    long           rc = 0;

    if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
         (d->vcpu[vcpu] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);
    chn->state          = ECS_IPI;
    chn->notify_vcpu_id = vcpu;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}


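/*
 * EVTCHNOP_bind_pirq: bind a physical IRQ, to which the domain must have
 * been granted access, to a new port. BIND_PIRQ__WILL_SHARE indicates
 * whether the guest is willing to share the interrupt line.
 */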
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= ARRAY_SIZE(d->pirq_to_evtchn)) )
        return -EINVAL;

    if ( !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->evtchn_lock);

    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    d->pirq_to_evtchn[pirq] = port;
    rc = pirq_guest_bind(d->vcpu[0], pirq,
                         !!(bind->flags & BIND_PIRQ__WILL_SHARE));
    if ( rc != 0 )
    {
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state  = ECS_PIRQ;
    chn->u.pirq = pirq;

    bind->port = port;

 out:
    spin_unlock(&d->evtchn_lock);

    return rc;
}


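/*
 * Close <port1> in <d1>, undoing whatever the channel was bound to: PIRQs
 * are unbound from the guest, VIRQ mappings are cleared, and the remote end
 * of an interdomain channel reverts to ECS_UNBOUND. Both domains' evtchn
 * locks are taken, in a deadlock-free order, when a remote peer is involved.
 */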
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->evtchn_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(chn1->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ:
        if ( (rc = pirq_guest_unbind(d1, chn1->u.pirq)) == 0 )
            d1->pirq_to_evtchn[chn1->u.pirq] = 0;
        break;

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
            if ( v->virq_to_evtchn[chn1->u.virq] == port1 )
                v->virq_to_evtchn[chn1->u.virq] = 0;
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
            {
                /*
                 * Failed to obtain a reference. No matter: d2 must be dying
                 * and so will close this event channel for us.
                 */
                d2 = NULL;
                goto out;
            }

            if ( d1 < d2 )
            {
                spin_lock(&d2->evtchn_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->evtchn_lock);
                spin_lock(&d2->evtchn_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            BUG_ON(d1 != current->domain);
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->evtchn_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->evtchn_lock);

    return rc;
}


static long evtchn_close(evtchn_close_t *close)
{
    return __evtchn_close(current->domain, close->port);
}


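/*
 * EVTCHNOP_send: notify the remote end of an interdomain channel, or the
 * target VCPU of an IPI channel. Sends on an unbound port are silently
 * dropped, and a guest may not send via a port whose consumer is Xen.
 */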
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

    spin_unlock(&ld->evtchn_lock);

    return ret;
}


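/*
 * Mark <port> pending for <v>: set the port's bit in the shared pending
 * bitmap and, unless the port is masked, propagate it into the VCPU's
 * pending selector and event-upcall flag. Any VCPUs polling for events in
 * this domain are also woken.
 */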
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(d->is_polling) )
    {
        d->is_polling = 0;
        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
        for_each_vcpu ( d, v )
        {
            if ( !v->is_polling )
                continue;
            v->is_polling = 0;
            vcpu_unblock(v);
        }
    }
}


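/*
 * VIRQ/PIRQ delivery helpers. A per-VCPU VIRQ goes to the given VCPU's
 * bound port; a global VIRQ is looked up via VCPU0's binding and delivered
 * to whichever VCPU the port notifies. Unbound VIRQs are dropped, whereas
 * PIRQ delivery asserts that a port is bound.
 */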
void send_guest_vcpu_virq(struct vcpu *v, int virq)
{
    int port;

    ASSERT(!virq_is_global(virq));

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    evtchn_set_pending(v, port);
}

void send_guest_global_virq(struct domain *d, int virq)
{
    int port;
    struct vcpu *v;
    struct evtchn *chn;

    ASSERT(virq_is_global(virq));

    if ( unlikely(d == NULL) )
        return;

    v = d->vcpu[0];
    if ( unlikely(v == NULL) )
        return;

    port = v->virq_to_evtchn[virq];
    if ( unlikely(port == 0) )
        return;

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}


void send_guest_pirq(struct domain *d, int pirq)
{
    int port = d->pirq_to_evtchn[pirq];
    struct evtchn *chn;

    ASSERT(port != 0);

    chn = evtchn_from_port(d, port);
    evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
}


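/*
 * EVTCHNOP_status: report the current binding of a port. Querying another
 * domain's ports requires privilege.
 */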
static long evtchn_status(evtchn_status_t *status)
{
    struct domain *d;
    domid_t        dom = status->dom;
    int            port = status->port;
    struct evtchn *chn;
    long           rc = 0;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);
    switch ( chn->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        status->status = EVTCHNSTAT_closed;
        break;
    case ECS_UNBOUND:
        status->status = EVTCHNSTAT_unbound;
        status->u.unbound.dom = chn->u.unbound.remote_domid;
        break;
    case ECS_INTERDOMAIN:
        status->status = EVTCHNSTAT_interdomain;
        status->u.interdomain.dom  =
            chn->u.interdomain.remote_dom->domain_id;
        status->u.interdomain.port = chn->u.interdomain.remote_port;
        break;
    case ECS_PIRQ:
        status->status = EVTCHNSTAT_pirq;
        status->u.pirq = chn->u.pirq;
        break;
    case ECS_VIRQ:
        status->status = EVTCHNSTAT_virq;
        status->u.virq = chn->u.virq;
        break;
    case ECS_IPI:
        status->status = EVTCHNSTAT_ipi;
        break;
    default:
        BUG();
    }

    status->vcpu = chn->notify_vcpu_id;

 out:
    spin_unlock(&d->evtchn_lock);
    rcu_unlock_domain(d);
    return rc;
}


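/*
 * EVTCHNOP_bind_vcpu: redirect an existing port's notifications to another
 * VCPU. Only unbound, interdomain, PIRQ and global-VIRQ channels may be
 * re-targeted.
 */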
long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
{
    struct domain *d = current->domain;
    struct evtchn *chn;
    long           rc = 0;

    if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
        return -ENOENT;

    spin_lock(&d->evtchn_lock);

    if ( !port_is_valid(d, port) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn = evtchn_from_port(d, port);

    /* Guest cannot re-bind a Xen-attached event channel. */
    if ( unlikely(chn->consumer_is_xen) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn->state )
    {
    case ECS_VIRQ:
        if ( virq_is_global(chn->u.virq) )
            chn->notify_vcpu_id = vcpu_id;
        else
            rc = -EINVAL;
        break;
    case ECS_UNBOUND:
    case ECS_INTERDOMAIN:
    case ECS_PIRQ:
        chn->notify_vcpu_id = vcpu_id;
        break;
    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock(&d->evtchn_lock);
    return rc;
}


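/*
 * EVTCHNOP_unmask: clear a port's mask bit and, if an event arrived while
 * the port was masked, deliver it now to the notify VCPU.
 */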
static long evtchn_unmask(evtchn_unmask_t *unmask)
{
    struct domain *d = current->domain;
    shared_info_t *s = d->shared_info;
    int            port = unmask->port;
    struct vcpu   *v;

    spin_lock(&d->evtchn_lock);

    if ( unlikely(!port_is_valid(d, port)) )
    {
        spin_unlock(&d->evtchn_lock);
        return -EINVAL;
    }

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, __shared_info_addr(d, s, evtchn_mask)) &&
         test_bit          (port, __shared_info_addr(d, s, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_GUEST_LONG(d),
                            vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    spin_unlock(&d->evtchn_lock);

    return 0;
}


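/*
 * EVTCHNOP_reset: close every valid port of the target domain, returning
 * its event-channel space to a freshly-initialised state.
 */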
static long evtchn_reset(evtchn_reset_t *r)
{
    domid_t dom = r->dom;
    struct domain *d;
    int i;

    if ( dom == DOMID_SELF )
        dom = current->domain->domain_id;
    else if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (d = rcu_lock_domain_by_id(dom)) == NULL )
        return -ESRCH;

    for ( i = 0; port_is_valid(d, i); i++ )
        (void)__evtchn_close(d, i);

    rcu_unlock_domain(d);

    return 0;
}


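/*
 * Top-level EVTCHNOP hypercall dispatcher: copy the guest's argument
 * structure in, invoke the handler for <cmd>, and copy results back for
 * the operations that return data.
 */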
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE(void) arg)
{
    long rc;

    switch ( cmd )
    {
    case EVTCHNOP_alloc_unbound: {
        struct evtchn_alloc_unbound alloc_unbound;
        if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_alloc_unbound(&alloc_unbound);
        if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_interdomain: {
        struct evtchn_bind_interdomain bind_interdomain;
        if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_interdomain(&bind_interdomain);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_virq: {
        struct evtchn_bind_virq bind_virq;
        if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_virq(&bind_virq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_ipi: {
        struct evtchn_bind_ipi bind_ipi;
        if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_ipi(&bind_ipi);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_bind_pirq: {
        struct evtchn_bind_pirq bind_pirq;
        if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_pirq(&bind_pirq);
        if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
            rc = -EFAULT; /* Cleaning up here would be a mess! */
        break;
    }

    case EVTCHNOP_close: {
        struct evtchn_close close;
        if ( copy_from_guest(&close, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_close(&close);
        break;
    }

    case EVTCHNOP_send: {
        struct evtchn_send send;
        if ( copy_from_guest(&send, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_send(send.port);
        break;
    }

    case EVTCHNOP_status: {
        struct evtchn_status status;
        if ( copy_from_guest(&status, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_status(&status);
        if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
            rc = -EFAULT;
        break;
    }

    case EVTCHNOP_bind_vcpu: {
        struct evtchn_bind_vcpu bind_vcpu;
        if ( copy_from_guest(&bind_vcpu, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_bind_vcpu(bind_vcpu.port, bind_vcpu.vcpu);
        break;
    }

    case EVTCHNOP_unmask: {
        struct evtchn_unmask unmask;
        if ( copy_from_guest(&unmask, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_unmask(&unmask);
        break;
    }

    case EVTCHNOP_reset: {
        struct evtchn_reset reset;
        if ( copy_from_guest(&reset, arg, 1) != 0 )
            return -EFAULT;
        rc = evtchn_reset(&reset);
        break;
    }

    default:
        rc = -ENOSYS;
        break;
    }

    return rc;
}


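/*
 * Xen-attached event channels: ports whose consumer is Xen itself
 * (consumer_is_xen). The guest cannot close, re-bind or send on such a
 * port; Xen signals the remote end directly via
 * notify_via_xen_event_channel().
 */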
int alloc_unbound_xen_event_channel(
    struct vcpu *local_vcpu, domid_t remote_domid)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;
    int            port;

    spin_lock(&d->evtchn_lock);

    if ( (port = get_free_port(d)) < 0 )
        goto out;
    chn = evtchn_from_port(d, port);

    chn->state = ECS_UNBOUND;
    chn->consumer_is_xen = 1;
    chn->notify_vcpu_id = local_vcpu->vcpu_id;
    chn->u.unbound.remote_domid = remote_domid;

 out:
    spin_unlock(&d->evtchn_lock);

    return port;
}


void free_xen_event_channel(
    struct vcpu *local_vcpu, int port)
{
    struct evtchn *chn;
    struct domain *d = local_vcpu->domain;

    spin_lock(&d->evtchn_lock);
    chn = evtchn_from_port(d, port);
    BUG_ON(!chn->consumer_is_xen);
    chn->consumer_is_xen = 0;
    spin_unlock(&d->evtchn_lock);

    (void)__evtchn_close(d, port);
}


void notify_via_xen_event_channel(int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    int            rport;

    spin_lock(&ld->evtchn_lock);

    ASSERT(port_is_valid(ld, lport));
    lchn = evtchn_from_port(ld, lport);
    ASSERT(lchn->consumer_is_xen);

    if ( likely(lchn->state == ECS_INTERDOMAIN) )
    {
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
    }

    spin_unlock(&ld->evtchn_lock);
}


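/*
 * Domain lifecycle: evtchn_init() reserves port 0 at domain creation;
 * evtchn_destroy() closes all ports and frees the evtchn buckets at
 * domain teardown.
 */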
int evtchn_init(struct domain *d)
{
    spin_lock_init(&d->evtchn_lock);
    if ( get_free_port(d) != 0 )
        return -EINVAL;
    evtchn_from_port(d, 0)->state = ECS_RESERVED;
    return 0;
}


void evtchn_destroy(struct domain *d)
{
    int i;

    for ( i = 0; port_is_valid(d, i); i++ )
    {
        evtchn_from_port(d, i)->consumer_is_xen = 0;
        (void)__evtchn_close(d, i);
    }

    for ( i = 0; i < NR_EVTCHN_BUCKETS; i++ )
        xfree(d->evtchn[i]);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */