/*
 * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
 *
 * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
 * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
 *
 * Based on linux/drivers/video/q40fb.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive for
 * more details.
 */

/*
 * TODO:
 *
 * Switch to grant tables when they become capable of dealing with the
 * frame buffer.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/hypervisor.h>
#include <xen/evtchn.h>
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>

struct xenfb_mapping
{
	struct list_head link;
	struct vm_area_struct *vma;
	atomic_t map_refs;
	int faults;
	struct xenfb_info *info;
};

struct xenfb_info
{
	struct task_struct *kthread;
	wait_queue_head_t wq;

	unsigned char *fb;
	struct fb_info *fb_info;
	struct timer_list refresh;
	int dirty;
	int x1, y1, x2, y2;	/* dirty rectangle,
				   protected by dirty_lock */
	spinlock_t dirty_lock;
	struct mutex mm_lock;
	int nr_pages;
	struct page **pages;
	struct list_head mappings; /* protected by mm_lock */

	int irq;
	struct xenfb_page *page;
	unsigned long *mfns;
	int update_wanted; /* XENFB_TYPE_UPDATE wanted */

	struct xenbus_device *xbdev;
};

/*
 * How the locks work together
 *
 * There are two locks: spinlock dirty_lock protecting the dirty
 * rectangle, and mutex mm_lock protecting mappings.
 *
 * The problem is that the dirty rectangle and the mappings aren't
 * independent: the dirty rectangle must cover all faulted pages in
 * mappings.  We need to prove that our locking maintains this
 * invariant.
 *
 * There are several kinds of critical regions:
 *
 * 1. Holding only dirty_lock: xenfb_refresh().  May run in
 *    interrupt context.  Extends the dirty rectangle.  Trivially
 *    preserves the invariant.
 *
 * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close().  Touch
 *    only mappings.  The former creates unfaulted pages.  Preserves
 *    the invariant.  The latter removes pages.  Preserves the
 *    invariant.
 *
 * 3. Holding both locks: xenfb_vm_nopage().  Extends the dirty
 *    rectangle and updates mappings consistently.  Preserves the
 *    invariant.
 *
 * 4. The ugliest one: xenfb_update_screen().  Clear the dirty
 *    rectangle and update mappings consistently.
 *
 *    We can't simply hold both locks, because zap_page_range() cannot
 *    be called with a spinlock held.
 *
 *    Therefore, we first clear the dirty rectangle with both locks
 *    held.  Then we unlock dirty_lock and update the mappings.
 *    Critical regions that hold only dirty_lock may interfere with
 *    that.  This can only be region 1: xenfb_refresh().  But that
 *    just extends the dirty rectangle, which can't harm the
 *    invariant.
 *
 * But FIXME: the invariant is too weak.  It misses that the fault
 * record in mappings must be consistent with the mapping of pages in
 * the associated address space!  do_no_page() updates the PTE after
 * xenfb_vm_nopage() returns, i.e. outside the critical region.  This
 * allows the following race:
 *
 * X writes to some address in the Xen frame buffer
 * Fault - call do_no_page()
 *     call xenfb_vm_nopage()
 *         grab mm_lock
 *         map->faults++;
 *         release mm_lock
 *     return back to do_no_page()
 *     (preempted, or SMP)
 * Xen worker thread runs.
 *     grab mm_lock
 *     look at mappings
 *     find this mapping, zaps its pages (but page not in pte yet)
 *     clear map->faults
 *     releases mm_lock
 * (back to X process)
 *     put page in X's pte
 *
 * Oh well, we won't be sending updates for writes to this page
 * anytime soon.
 */

static int xenfb_fps = 20;
static unsigned long xenfb_mem_len = XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8;

static int xenfb_remove(struct xenbus_device *);
static void xenfb_init_shared_page(struct xenfb_info *);
static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
static void xenfb_disconnect_backend(struct xenfb_info *);

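/*
 * Queue an XENFB_TYPE_UPDATE event for the rectangle (x, y, w, h) on
 * the shared out ring and notify the backend.  The caller must have
 * checked that the ring is not full (see xenfb_queue_full()).
 */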
static void xenfb_do_update(struct xenfb_info *info,
			    int x, int y, int w, int h)
{
	union xenfb_out_event event;
	__u32 prod;

	event.type = XENFB_TYPE_UPDATE;
	event.update.x = x;
	event.update.y = y;
	event.update.width = w;
	event.update.height = h;

	prod = info->page->out_prod;
	/* caller ensures !xenfb_queue_full() */
	mb();			/* ensure ring space available */
	XENFB_OUT_RING_REF(info->page, prod) = event;
	wmb();			/* ensure ring contents visible */
	info->page->out_prod = prod + 1;

	notify_remote_via_irq(info->irq);
}

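/* Return true if there is no free slot left on the out event ring. */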
static int xenfb_queue_full(struct xenfb_info *info)
{
	__u32 cons, prod;

	prod = info->page->out_prod;
	cons = info->page->out_cons;
	return prod - cons == XENFB_OUT_RING_LEN;
}

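/*
 * Flush the dirty rectangle to the backend: read and reset it under
 * dirty_lock, zap the mappings of every area that has faulted so that
 * future writes fault again (and re-dirty the rectangle), then send a
 * single UPDATE event covering the old rectangle.
 */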
static void xenfb_update_screen(struct xenfb_info *info)
{
	unsigned long flags;
	int y1, y2, x1, x2;
	struct xenfb_mapping *map;

	if (!info->update_wanted)
		return;
	if (xenfb_queue_full(info))
		return;

	mutex_lock(&info->mm_lock);

	spin_lock_irqsave(&info->dirty_lock, flags);
	y1 = info->y1;
	y2 = info->y2;
	x1 = info->x1;
	x2 = info->x2;
	info->x1 = info->y1 = INT_MAX;
	info->x2 = info->y2 = 0;
	spin_unlock_irqrestore(&info->dirty_lock, flags);

	list_for_each_entry(map, &info->mappings, link) {
		if (!map->faults)
			continue;
		zap_page_range(map->vma, map->vma->vm_start,
			       map->vma->vm_end - map->vma->vm_start, NULL);
		map->faults = 0;
	}

	mutex_unlock(&info->mm_lock);

	xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
}

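/*
 * Refresh thread: woken by xenfb_timer() whenever the frame buffer is
 * dirty, it pushes the accumulated dirty rectangle to the backend,
 * throttled to roughly xenfb_fps updates per second by the refresh
 * timer.
 */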
static int xenfb_thread(void *data)
{
	struct xenfb_info *info = data;

	while (!kthread_should_stop()) {
		if (info->dirty) {
			info->dirty = 0;
			xenfb_update_screen(info);
		}
		wait_event_interruptible(info->wq,
			kthread_should_stop() || info->dirty);
		try_to_freeze();
	}
	return 0;
}

static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			   unsigned blue, unsigned transp,
			   struct fb_info *info)
{
	u32 v;

	if (regno > info->cmap.len)
		return 1;

	red >>= (16 - info->var.red.length);
	green >>= (16 - info->var.green.length);
	blue >>= (16 - info->var.blue.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset);

	/* FIXME is this sane?  check against xxxfb_setcolreg()! */
	switch (info->var.bits_per_pixel) {
	case 16:
	case 24:
	case 32:
		((u32 *)info->pseudo_palette)[regno] = v;
		break;
	}

	return 0;
}

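/*
 * Refresh timer callback: mark the frame buffer dirty and wake the
 * refresh thread.
 */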
static void xenfb_timer(unsigned long data)
{
	struct xenfb_info *info = (struct xenfb_info *)data;
	info->dirty = 1;
	wake_up(&info->wq);
}

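/*
 * Extend the dirty rectangle to cover (x1, y1, w, h) and arm the
 * refresh timer unless it is already pending.  Caller must hold
 * dirty_lock; xenfb_refresh() below is the locked wrapper.
 */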
static void __xenfb_refresh(struct xenfb_info *info,
			    int x1, int y1, int w, int h)
{
	int y2, x2;

	y2 = y1 + h;
	x2 = x1 + w;

	if (info->y1 > y1)
		info->y1 = y1;
	if (info->y2 < y2)
		info->y2 = y2;
	if (info->x1 > x1)
		info->x1 = x1;
	if (info->x2 < x2)
		info->x2 = x2;

	if (timer_pending(&info->refresh))
		return;

	mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
}

static void xenfb_refresh(struct xenfb_info *info,
			  int x1, int y1, int w, int h)
{
	unsigned long flags;

	spin_lock_irqsave(&info->dirty_lock, flags);
	__xenfb_refresh(info, x1, y1, w, h);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
}

static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
{
	struct xenfb_info *info = p->par;

	cfb_fillrect(p, rect);
	xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
}

static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
{
	struct xenfb_info *info = p->par;

	cfb_imageblit(p, image);
	xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
}

static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
	struct xenfb_info *info = p->par;

	cfb_copyarea(p, area);
	xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
}

static void xenfb_vm_open(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	atomic_inc(&map->map_refs);
}

static void xenfb_vm_close(struct vm_area_struct *vma)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;

	mutex_lock(&info->mm_lock);
	if (atomic_dec_and_test(&map->map_refs)) {
		list_del(&map->link);
		kfree(map);
	}
	mutex_unlock(&info->mm_lock);
}

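/*
 * Fault handler for user mappings of the frame buffer: hand back the
 * vmalloc'ed page, record the fault in the mapping, and dirty the
 * scanlines covered by that page so the backend repaints them.
 */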
static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
				    unsigned long vaddr, int *type)
{
	struct xenfb_mapping *map = vma->vm_private_data;
	struct xenfb_info *info = map->info;
	int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	if (pgnr >= info->nr_pages)
		return NOPAGE_SIGBUS;

	mutex_lock(&info->mm_lock);
	spin_lock_irqsave(&info->dirty_lock, flags);
	page = info->pages[pgnr];
	get_page(page);
	map->faults++;

	y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
	y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
	if (y2 > info->fb_info->var.yres)
		y2 = info->fb_info->var.yres;
	__xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
	spin_unlock_irqrestore(&info->dirty_lock, flags);
	mutex_unlock(&info->mm_lock);

	if (type)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct xenfb_vm_ops = {
	.open	= xenfb_vm_open,
	.close	= xenfb_vm_close,
	.nopage	= xenfb_vm_nopage,
};

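/*
 * Map the frame buffer into user space.  Pages are not installed here;
 * they are faulted in lazily through xenfb_vm_nopage() so the driver
 * can track which mappings have been written to.
 */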
static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
{
	struct xenfb_info *info = fb_info->par;
	struct xenfb_mapping *map;
	int map_pages;

	if (!(vma->vm_flags & VM_WRITE))
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
	if (map_pages > info->nr_pages)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return -ENOMEM;

	map->vma = vma;
	map->faults = 0;
	map->info = info;
	atomic_set(&map->map_refs, 1);

	mutex_lock(&info->mm_lock);
	list_add(&map->link, &info->mappings);
	mutex_unlock(&info->mm_lock);

	vma->vm_ops = &xenfb_vm_ops;
	vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
	vma->vm_private_data = map;

	return 0;
}

static struct fb_ops xenfb_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_setcolreg	= xenfb_setcolreg,
	.fb_fillrect	= xenfb_fillrect,
	.fb_copyarea	= xenfb_copyarea,
	.fb_imageblit	= xenfb_imageblit,
	.fb_mmap	= xenfb_mmap,
};

static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
				       struct pt_regs *regs)
{
	/*
	 * No in events recognized, simply ignore them all.
	 * If you need to recognize some, see xenkbd's input_handler()
	 * for how to do that.
	 */
	struct xenfb_info *info = dev_id;
	struct xenfb_page *page = info->page;

	if (page->in_cons != page->in_prod) {
		info->page->in_cons = info->page->in_prod;
		notify_remote_via_irq(info->irq);
	}
	return IRQ_HANDLED;
}

static unsigned long vmalloc_to_mfn(void *address)
{
	return pfn_to_mfn(vmalloc_to_pfn(address));
}

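/*
 * Probe: allocate the frame buffer, the MFN list and the shared page,
 * register the frame buffer device, start the refresh thread and
 * connect to the backend via xenbus.
 */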
static int __devinit xenfb_probe(struct xenbus_device *dev,
				 const struct xenbus_device_id *id)
{
	struct xenfb_info *info;
	struct fb_info *fb_info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}
	dev->dev.driver_data = info;
	info->xbdev = dev;
	info->irq = -1;
	info->x1 = info->y1 = INT_MAX;
	spin_lock_init(&info->dirty_lock);
	mutex_init(&info->mm_lock);
	init_waitqueue_head(&info->wq);
	init_timer(&info->refresh);
	info->refresh.function = xenfb_timer;
	info->refresh.data = (unsigned long)info;
	INIT_LIST_HEAD(&info->mappings);

	info->fb = vmalloc(xenfb_mem_len);
	if (info->fb == NULL)
		goto error_nomem;
	memset(info->fb, 0, xenfb_mem_len);

	info->nr_pages = (xenfb_mem_len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
			      GFP_KERNEL);
	if (info->pages == NULL)
		goto error_nomem;

	info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
	if (!info->mfns)
		goto error_nomem;

	/* set up shared page */
	info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!info->page)
		goto error_nomem;

	xenfb_init_shared_page(info);

	fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
				/* see fishy hackery below */
	if (fb_info == NULL)
		goto error_nomem;

	/* FIXME fishy hackery */
	fb_info->pseudo_palette = fb_info->par;
	fb_info->par = info;
	/* /FIXME */
	fb_info->screen_base = info->fb;

	fb_info->fbops = &xenfb_fb_ops;
	fb_info->var.xres_virtual = fb_info->var.xres = info->page->width;
	fb_info->var.yres_virtual = fb_info->var.yres = info->page->height;
	fb_info->var.bits_per_pixel = info->page->depth;

	fb_info->var.red = (struct fb_bitfield){16, 8, 0};
	fb_info->var.green = (struct fb_bitfield){8, 8, 0};
	fb_info->var.blue = (struct fb_bitfield){0, 8, 0};

	fb_info->var.activate = FB_ACTIVATE_NOW;
	fb_info->var.height = -1;
	fb_info->var.width = -1;
	fb_info->var.vmode = FB_VMODE_NONINTERLACED;

	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
	fb_info->fix.line_length = info->page->line_length;
	fb_info->fix.smem_start = 0;
	fb_info->fix.smem_len = xenfb_mem_len;
	strcpy(fb_info->fix.id, "xen");
	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
	fb_info->fix.accel = FB_ACCEL_NONE;

	fb_info->flags = FBINFO_FLAG_DEFAULT;

	ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
	if (ret < 0) {
		framebuffer_release(fb_info);
		xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
		goto error;
	}

	ret = register_framebuffer(fb_info);
	if (ret) {
		fb_dealloc_cmap(&fb_info->cmap);
		framebuffer_release(fb_info);
		xenbus_dev_fatal(dev, ret, "register_framebuffer");
		goto error;
	}
	info->fb_info = fb_info;

	/* FIXME should this be delayed until backend XenbusStateConnected? */
	info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
	if (IS_ERR(info->kthread)) {
		ret = PTR_ERR(info->kthread);
		info->kthread = NULL;
		xenbus_dev_fatal(dev, ret, "running xenfb thread");
		goto error;
	}

	ret = xenfb_connect_backend(dev, info);
	if (ret < 0)
		goto error;

	return 0;

 error_nomem:
	ret = -ENOMEM;
	xenbus_dev_fatal(dev, ret, "allocating device memory");
 error:
	xenfb_remove(dev);
	return ret;
}

static int xenfb_resume(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	xenfb_disconnect_backend(info);
	xenfb_init_shared_page(info);
	return xenfb_connect_backend(dev, info);
}

static int xenfb_remove(struct xenbus_device *dev)
{
	struct xenfb_info *info = dev->dev.driver_data;

	del_timer(&info->refresh);
	if (info->kthread)
		kthread_stop(info->kthread);
	xenfb_disconnect_backend(info);
	if (info->fb_info) {
		unregister_framebuffer(info->fb_info);
		fb_dealloc_cmap(&info->fb_info->cmap);
		framebuffer_release(info->fb_info);
	}
	free_page((unsigned long)info->page);
	vfree(info->mfns);
	kfree(info->pages);
	vfree(info->fb);
	kfree(info);

	return 0;
}

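/*
 * Fill in the shared page: record the MFN of each frame buffer page in
 * the mfns[] array and point the first page directory entry at that
 * array, then describe the video mode and reset both event rings.
 */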
static void xenfb_init_shared_page(struct xenfb_info *info)
{
	int i;

	for (i = 0; i < info->nr_pages; i++)
		info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);

	for (i = 0; i < info->nr_pages; i++)
		info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);

	info->page->pd[0] = vmalloc_to_mfn(info->mfns);
	info->page->pd[1] = 0;
	info->page->width = XENFB_WIDTH;
	info->page->height = XENFB_HEIGHT;
	info->page->depth = XENFB_DEPTH;
	info->page->line_length = (info->page->depth / 8) * info->page->width;
	info->page->mem_length = xenfb_mem_len;
	info->page->in_cons = info->page->in_prod = 0;
	info->page->out_cons = info->page->out_prod = 0;
}

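/*
 * Connect to the backend: bind an event channel to the backend domain
 * and publish the shared page reference, the event channel, the native
 * protocol ABI and "feature-update" in a single xenstore transaction,
 * then switch to XenbusStateInitialised.
 */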
static int xenfb_connect_backend(struct xenbus_device *dev,
				 struct xenfb_info *info)
{
	int ret;
	struct xenbus_transaction xbt;

	ret = bind_listening_port_to_irqhandler(
		dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
	if (ret < 0) {
		xenbus_dev_fatal(dev, ret,
				 "bind_listening_port_to_irqhandler");
		return ret;
	}
	info->irq = ret;

 again:
	ret = xenbus_transaction_start(&xbt);
	if (ret) {
		xenbus_dev_fatal(dev, ret, "starting transaction");
		return ret;
	}
	ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
			    virt_to_mfn(info->page));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    irq_to_evtchn_port(info->irq));
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (ret)
		goto error_xenbus;
	ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
	if (ret)
		goto error_xenbus;
	ret = xenbus_transaction_end(xbt, 0);
	if (ret) {
		if (ret == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, ret, "completing transaction");
		return ret;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);
	return 0;

 error_xenbus:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, ret, "writing xenstore");
	return ret;
}

static void xenfb_disconnect_backend(struct xenfb_info *info)
{
	if (info->irq >= 0)
		unbind_from_irqhandler(info->irq, info);
	info->irq = -1;
}

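/*
 * React to backend state changes: switch to Connected once the backend
 * reaches InitWait (or Connected, see the race note below), read the
 * backend's "request-update" flag when it connects, and close when the
 * backend starts closing.
 */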
static void xenfb_backend_changed(struct xenbus_device *dev,
				  enum xenbus_state backend_state)
{
	struct xenfb_info *info = dev->dev.driver_data;
	int val;

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
	InitWait:
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		/*
		 * Work around xenbus race condition: If backend goes
		 * through InitWait to Connected fast enough, we can
		 * get Connected twice here.
		 */
		if (dev->state != XenbusStateConnected)
			goto InitWait; /* no InitWait seen yet, fudge it */

		if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				 "request-update", "%d", &val) < 0)
			val = 0;
		if (val)
			info->update_wanted = 1;
		break;

	case XenbusStateClosing:
		/* FIXME is this safe in any dev->state? */
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct xenbus_device_id xenfb_ids[] = {
	{ "vfb" },
	{ "" }
};

static struct xenbus_driver xenfb = {
	.name = "vfb",
	.owner = THIS_MODULE,
	.ids = xenfb_ids,
	.probe = xenfb_probe,
	.remove = xenfb_remove,
	.resume = xenfb_resume,
	.otherend_changed = xenfb_backend_changed,
};

static int __init xenfb_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	/* Nothing to do if running in dom0. */
	if (is_initial_xendomain())
		return -ENODEV;

	return xenbus_register_frontend(&xenfb);
}

static void __exit xenfb_cleanup(void)
{
	return xenbus_unregister_driver(&xenfb);
}

module_init(xenfb_init);
module_exit(xenfb_cleanup);

MODULE_LICENSE("GPL");
---|