Diffstat (limited to 'drivers/xen/gntdev.c')
-rw-r--r-- | drivers/xen/gntdev.c | 765
1 file changed, 765 insertions, 0 deletions
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
new file mode 100644
index 000000000000..f914b26cf0c2
--- /dev/null
+++ b/drivers/xen/gntdev.c
@@ -0,0 +1,765 @@
1 | /****************************************************************************** | ||
2 | * gntdev.c | ||
3 | * | ||
4 | * Device for accessing (in user-space) pages that have been granted by other | ||
5 | * domains. | ||
6 | * | ||
7 | * Copyright (c) 2006-2007, D G Murray. | ||
8 | * (c) 2009 Gerd Hoffmann <kraxel@redhat.com> | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
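The flow this header comment implies is driven entirely from user space: a process opens the character device, hands the driver a (domid, grant reference) pair with IOCTL_GNTDEV_MAP_GRANT_REF, and then mmap()s the offset the ioctl returns. Below is a minimal sketch of that flow, not part of this patch, assuming the ioctl structures installed by the kernel's xen/gntdev.h uapi header, a 4 KiB page size, and a (domid, ref) pair obtained out of band (e.g. over xenstore); the values are placeholders and error handling is trimmed.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>	/* ioctl structures; assumed installed with the kernel headers */

int main(void)
{
	uint32_t domid = 1, ref = 42;	/* placeholders: supplied by the granting domain */
	struct ioctl_gntdev_map_grant_ref map;
	struct ioctl_gntdev_unmap_grant_ref unmap;
	void *addr;
	int fd;

	fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Describe one grant; mapping N grants needs a buffer sized for refs[N]. */
	memset(&map, 0, sizeof(map));
	map.count = 1;
	map.refs[0].domid = domid;
	map.refs[0].ref = ref;
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map) < 0) {
		perror("IOCTL_GNTDEV_MAP_GRANT_REF");
		return 1;
	}

	/* map.index is the mmap offset the driver assigned; one page per grant. */
	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.index);
	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ... read or write the shared page through addr ... */

	munmap(addr, 4096);
	memset(&unmap, 0, sizeof(unmap));
	unmap.index = map.index;
	unmap.count = 1;
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	close(fd);
	return 0;
}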
19 | |||
20 | #undef DEBUG | ||
21 | |||
22 | #include <linux/module.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/mm.h> | ||
28 | #include <linux/mman.h> | ||
29 | #include <linux/mmu_notifier.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/highmem.h> | ||
36 | |||
37 | #include <xen/xen.h> | ||
38 | #include <xen/grant_table.h> | ||
39 | #include <xen/balloon.h> | ||
40 | #include <xen/gntdev.h> | ||
41 | #include <xen/events.h> | ||
42 | #include <asm/xen/hypervisor.h> | ||
43 | #include <asm/xen/hypercall.h> | ||
44 | #include <asm/xen/page.h> | ||
45 | |||
46 | MODULE_LICENSE("GPL"); | ||
47 | MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, " | ||
48 | "Gerd Hoffmann <kraxel@redhat.com>"); | ||
49 | MODULE_DESCRIPTION("User-space granted page access driver"); | ||
50 | |||
51 | static int limit = 1024*1024; | ||
52 | module_param(limit, int, 0644); | ||
53 | MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by " | ||
54 | "the gntdev device"); | ||
55 | |||
56 | static atomic_t pages_mapped = ATOMIC_INIT(0); | ||
57 | |||
58 | static int use_ptemod; | ||
59 | |||
60 | struct gntdev_priv { | ||
61 | struct list_head maps; | ||
62 | /* lock protects maps from concurrent changes */ | ||
63 | spinlock_t lock; | ||
64 | struct mm_struct *mm; | ||
65 | struct mmu_notifier mn; | ||
66 | }; | ||
67 | |||
68 | struct unmap_notify { | ||
69 | int flags; | ||
70 | /* Address relative to the start of the grant_map */ | ||
71 | int addr; | ||
72 | int event; | ||
73 | }; | ||
74 | |||
75 | struct grant_map { | ||
76 | struct list_head next; | ||
77 | struct vm_area_struct *vma; | ||
78 | int index; | ||
79 | int count; | ||
80 | int flags; | ||
81 | atomic_t users; | ||
82 | struct unmap_notify notify; | ||
83 | struct ioctl_gntdev_grant_ref *grants; | ||
84 | struct gnttab_map_grant_ref *map_ops; | ||
85 | struct gnttab_unmap_grant_ref *unmap_ops; | ||
86 | struct page **pages; | ||
87 | }; | ||
88 | |||
89 | static int unmap_grant_pages(struct grant_map *map, int offset, int pages); | ||
90 | |||
91 | /* ------------------------------------------------------------------ */ | ||
92 | |||
93 | static void gntdev_print_maps(struct gntdev_priv *priv, | ||
94 | char *text, int text_index) | ||
95 | { | ||
96 | #ifdef DEBUG | ||
97 | struct grant_map *map; | ||
98 | |||
99 | pr_debug("%s: maps list (priv %p)\n", __func__, priv); | ||
100 | list_for_each_entry(map, &priv->maps, next) | ||
101 | pr_debug(" index %2d, count %2d %s\n", | ||
102 | map->index, map->count, | ||
103 | map->index == text_index && text ? text : ""); | ||
104 | #endif | ||
105 | } | ||
106 | |||
107 | static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) | ||
108 | { | ||
109 | struct grant_map *add; | ||
110 | int i; | ||
111 | |||
112 | add = kzalloc(sizeof(struct grant_map), GFP_KERNEL); | ||
113 | if (NULL == add) | ||
114 | return NULL; | ||
115 | |||
116 | add->grants = kzalloc(sizeof(add->grants[0]) * count, GFP_KERNEL); | ||
117 | add->map_ops = kzalloc(sizeof(add->map_ops[0]) * count, GFP_KERNEL); | ||
118 | add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL); | ||
119 | add->pages = kzalloc(sizeof(add->pages[0]) * count, GFP_KERNEL); | ||
120 | if (NULL == add->grants || | ||
121 | NULL == add->map_ops || | ||
122 | NULL == add->unmap_ops || | ||
123 | NULL == add->pages) | ||
124 | goto err; | ||
125 | |||
126 | if (alloc_xenballooned_pages(count, add->pages)) | ||
127 | goto err; | ||
128 | |||
129 | for (i = 0; i < count; i++) { | ||
130 | add->map_ops[i].handle = -1; | ||
131 | add->unmap_ops[i].handle = -1; | ||
132 | } | ||
133 | |||
134 | add->index = 0; | ||
135 | add->count = count; | ||
136 | atomic_set(&add->users, 1); | ||
137 | |||
138 | return add; | ||
139 | |||
140 | err: | ||
141 | kfree(add->pages); | ||
142 | kfree(add->grants); | ||
143 | kfree(add->map_ops); | ||
144 | kfree(add->unmap_ops); | ||
145 | kfree(add); | ||
146 | return NULL; | ||
147 | } | ||
148 | |||
149 | static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add) | ||
150 | { | ||
151 | struct grant_map *map; | ||
152 | |||
153 | list_for_each_entry(map, &priv->maps, next) { | ||
154 | if (add->index + add->count < map->index) { | ||
155 | list_add_tail(&add->next, &map->next); | ||
156 | goto done; | ||
157 | } | ||
158 | add->index = map->index + map->count; | ||
159 | } | ||
160 | list_add_tail(&add->next, &priv->maps); | ||
161 | |||
162 | done: | ||
163 | gntdev_print_maps(priv, "[new]", add->index); | ||
164 | } | ||
165 | |||
166 | static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, | ||
167 | int index, int count) | ||
168 | { | ||
169 | struct grant_map *map; | ||
170 | |||
171 | list_for_each_entry(map, &priv->maps, next) { | ||
172 | if (map->index != index) | ||
173 | continue; | ||
174 | if (count && map->count != count) | ||
175 | continue; | ||
176 | return map; | ||
177 | } | ||
178 | return NULL; | ||
179 | } | ||
180 | |||
181 | static void gntdev_put_map(struct grant_map *map) | ||
182 | { | ||
183 | if (!map) | ||
184 | return; | ||
185 | |||
186 | if (!atomic_dec_and_test(&map->users)) | ||
187 | return; | ||
188 | |||
189 | atomic_sub(map->count, &pages_mapped); | ||
190 | |||
191 | if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { | ||
192 | notify_remote_via_evtchn(map->notify.event); | ||
193 | } | ||
194 | |||
195 | if (map->pages) { | ||
196 | if (!use_ptemod) | ||
197 | unmap_grant_pages(map, 0, map->count); | ||
198 | |||
199 | free_xenballooned_pages(map->count, map->pages); | ||
200 | } | ||
201 | kfree(map->pages); | ||
202 | kfree(map->grants); | ||
203 | kfree(map->map_ops); | ||
204 | kfree(map->unmap_ops); | ||
205 | kfree(map); | ||
206 | } | ||
207 | |||
208 | /* ------------------------------------------------------------------ */ | ||
209 | |||
210 | static int find_grant_ptes(pte_t *pte, pgtable_t token, | ||
211 | unsigned long addr, void *data) | ||
212 | { | ||
213 | struct grant_map *map = data; | ||
214 | unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; | ||
215 | int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; | ||
216 | u64 pte_maddr; | ||
217 | |||
218 | BUG_ON(pgnr >= map->count); | ||
219 | pte_maddr = arbitrary_virt_to_machine(pte).maddr; | ||
220 | |||
221 | gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, | ||
222 | map->grants[pgnr].ref, | ||
223 | map->grants[pgnr].domid); | ||
224 | gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, | ||
225 | -1 /* handle */); | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static int map_grant_pages(struct grant_map *map) | ||
230 | { | ||
231 | int i, err = 0; | ||
232 | |||
233 | if (!use_ptemod) { | ||
234 | /* Note: it could already be mapped */ | ||
235 | if (map->map_ops[0].handle != -1) | ||
236 | return 0; | ||
237 | for (i = 0; i < map->count; i++) { | ||
238 | unsigned long addr = (unsigned long) | ||
239 | pfn_to_kaddr(page_to_pfn(map->pages[i])); | ||
240 | gnttab_set_map_op(&map->map_ops[i], addr, map->flags, | ||
241 | map->grants[i].ref, | ||
242 | map->grants[i].domid); | ||
243 | gnttab_set_unmap_op(&map->unmap_ops[i], addr, | ||
244 | map->flags, -1 /* handle */); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | pr_debug("map %d+%d\n", map->index, map->count); | ||
249 | err = gnttab_map_refs(map->map_ops, map->pages, map->count); | ||
250 | if (err) | ||
251 | return err; | ||
252 | |||
253 | for (i = 0; i < map->count; i++) { | ||
254 | if (map->map_ops[i].status) | ||
255 | err = -EINVAL; | ||
256 | else { | ||
257 | BUG_ON(map->map_ops[i].handle == -1); | ||
258 | map->unmap_ops[i].handle = map->map_ops[i].handle; | ||
259 | pr_debug("map handle=%d\n", map->map_ops[i].handle); | ||
260 | } | ||
261 | } | ||
262 | return err; | ||
263 | } | ||
264 | |||
265 | static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | ||
266 | { | ||
267 | int i, err = 0; | ||
268 | |||
269 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { | ||
270 | int pgno = (map->notify.addr >> PAGE_SHIFT); | ||
271 | if (pgno >= offset && pgno < offset + pages && use_ptemod) { | ||
272 | void __user *tmp = (void __user *) | ||
273 | map->vma->vm_start + map->notify.addr; | ||
274 | err = copy_to_user(tmp, &err, 1); | ||
275 | if (err) | ||
276 | return -EFAULT; | ||
277 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | ||
278 | } else if (pgno >= offset && pgno < offset + pages) { | ||
279 | uint8_t *tmp = kmap(map->pages[pgno]); | ||
280 | tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; | ||
281 | kunmap(map->pages[pgno]); | ||
282 | map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages); | ||
287 | if (err) | ||
288 | return err; | ||
289 | |||
290 | for (i = 0; i < pages; i++) { | ||
291 | if (map->unmap_ops[offset+i].status) | ||
292 | err = -EINVAL; | ||
293 | pr_debug("unmap handle=%d st=%d\n", | ||
294 | map->unmap_ops[offset+i].handle, | ||
295 | map->unmap_ops[offset+i].status); | ||
296 | map->unmap_ops[offset+i].handle = -1; | ||
297 | } | ||
298 | return err; | ||
299 | } | ||
300 | |||
301 | static int unmap_grant_pages(struct grant_map *map, int offset, int pages) | ||
302 | { | ||
303 | int range, err = 0; | ||
304 | |||
305 | pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); | ||
306 | |||
307 | /* It is possible the requested range will have a "hole" where we | ||
308 | * already unmapped some of the grants. Only unmap valid ranges. | ||
309 | */ | ||
310 | while (pages && !err) { | ||
311 | while (pages && map->unmap_ops[offset].handle == -1) { | ||
312 | offset++; | ||
313 | pages--; | ||
314 | } | ||
315 | range = 0; | ||
316 | while (range < pages) { | ||
317 | if (map->unmap_ops[offset+range].handle == -1) { | ||
318 | range--; | ||
319 | break; | ||
320 | } | ||
321 | range++; | ||
322 | } | ||
323 | err = __unmap_grant_pages(map, offset, range); | ||
324 | offset += range; | ||
325 | pages -= range; | ||
326 | } | ||
327 | |||
328 | return err; | ||
329 | } | ||
330 | |||
331 | /* ------------------------------------------------------------------ */ | ||
332 | |||
333 | static void gntdev_vma_open(struct vm_area_struct *vma) | ||
334 | { | ||
335 | struct grant_map *map = vma->vm_private_data; | ||
336 | |||
337 | pr_debug("gntdev_vma_open %p\n", vma); | ||
338 | atomic_inc(&map->users); | ||
339 | } | ||
340 | |||
341 | static void gntdev_vma_close(struct vm_area_struct *vma) | ||
342 | { | ||
343 | struct grant_map *map = vma->vm_private_data; | ||
344 | |||
345 | pr_debug("gntdev_vma_close %p\n", vma); | ||
346 | map->vma = NULL; | ||
347 | vma->vm_private_data = NULL; | ||
348 | gntdev_put_map(map); | ||
349 | } | ||
350 | |||
351 | static struct vm_operations_struct gntdev_vmops = { | ||
352 | .open = gntdev_vma_open, | ||
353 | .close = gntdev_vma_close, | ||
354 | }; | ||
355 | |||
356 | /* ------------------------------------------------------------------ */ | ||
357 | |||
358 | static void mn_invl_range_start(struct mmu_notifier *mn, | ||
359 | struct mm_struct *mm, | ||
360 | unsigned long start, unsigned long end) | ||
361 | { | ||
362 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); | ||
363 | struct grant_map *map; | ||
364 | unsigned long mstart, mend; | ||
365 | int err; | ||
366 | |||
367 | spin_lock(&priv->lock); | ||
368 | list_for_each_entry(map, &priv->maps, next) { | ||
369 | if (!map->vma) | ||
370 | continue; | ||
371 | if (map->vma->vm_start >= end) | ||
372 | continue; | ||
373 | if (map->vma->vm_end <= start) | ||
374 | continue; | ||
375 | mstart = max(start, map->vma->vm_start); | ||
376 | mend = min(end, map->vma->vm_end); | ||
377 | pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n", | ||
378 | map->index, map->count, | ||
379 | map->vma->vm_start, map->vma->vm_end, | ||
380 | start, end, mstart, mend); | ||
381 | err = unmap_grant_pages(map, | ||
382 | (mstart - map->vma->vm_start) >> PAGE_SHIFT, | ||
383 | (mend - mstart) >> PAGE_SHIFT); | ||
384 | WARN_ON(err); | ||
385 | } | ||
386 | spin_unlock(&priv->lock); | ||
387 | } | ||
388 | |||
389 | static void mn_invl_page(struct mmu_notifier *mn, | ||
390 | struct mm_struct *mm, | ||
391 | unsigned long address) | ||
392 | { | ||
393 | mn_invl_range_start(mn, mm, address, address + PAGE_SIZE); | ||
394 | } | ||
395 | |||
396 | static void mn_release(struct mmu_notifier *mn, | ||
397 | struct mm_struct *mm) | ||
398 | { | ||
399 | struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn); | ||
400 | struct grant_map *map; | ||
401 | int err; | ||
402 | |||
403 | spin_lock(&priv->lock); | ||
404 | list_for_each_entry(map, &priv->maps, next) { | ||
405 | if (!map->vma) | ||
406 | continue; | ||
407 | pr_debug("map %d+%d (%lx %lx)\n", | ||
408 | map->index, map->count, | ||
409 | map->vma->vm_start, map->vma->vm_end); | ||
410 | err = unmap_grant_pages(map, /* offset */ 0, map->count); | ||
411 | WARN_ON(err); | ||
412 | } | ||
413 | spin_unlock(&priv->lock); | ||
414 | } | ||
415 | |||
416 | struct mmu_notifier_ops gntdev_mmu_ops = { | ||
417 | .release = mn_release, | ||
418 | .invalidate_page = mn_invl_page, | ||
419 | .invalidate_range_start = mn_invl_range_start, | ||
420 | }; | ||
421 | |||
422 | /* ------------------------------------------------------------------ */ | ||
423 | |||
424 | static int gntdev_open(struct inode *inode, struct file *flip) | ||
425 | { | ||
426 | struct gntdev_priv *priv; | ||
427 | int ret = 0; | ||
428 | |||
429 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | ||
430 | if (!priv) | ||
431 | return -ENOMEM; | ||
432 | |||
433 | INIT_LIST_HEAD(&priv->maps); | ||
434 | spin_lock_init(&priv->lock); | ||
435 | |||
436 | if (use_ptemod) { | ||
437 | priv->mm = get_task_mm(current); | ||
438 | if (!priv->mm) { | ||
439 | kfree(priv); | ||
440 | return -ENOMEM; | ||
441 | } | ||
442 | priv->mn.ops = &gntdev_mmu_ops; | ||
443 | ret = mmu_notifier_register(&priv->mn, priv->mm); | ||
444 | mmput(priv->mm); | ||
445 | } | ||
446 | |||
447 | if (ret) { | ||
448 | kfree(priv); | ||
449 | return ret; | ||
450 | } | ||
451 | |||
452 | flip->private_data = priv; | ||
453 | pr_debug("priv %p\n", priv); | ||
454 | |||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static int gntdev_release(struct inode *inode, struct file *flip) | ||
459 | { | ||
460 | struct gntdev_priv *priv = flip->private_data; | ||
461 | struct grant_map *map; | ||
462 | |||
463 | pr_debug("priv %p\n", priv); | ||
464 | |||
465 | spin_lock(&priv->lock); | ||
466 | while (!list_empty(&priv->maps)) { | ||
467 | map = list_entry(priv->maps.next, struct grant_map, next); | ||
468 | list_del(&map->next); | ||
469 | gntdev_put_map(map); | ||
470 | } | ||
471 | spin_unlock(&priv->lock); | ||
472 | |||
473 | if (use_ptemod) | ||
474 | mmu_notifier_unregister(&priv->mn, priv->mm); | ||
475 | kfree(priv); | ||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, | ||
480 | struct ioctl_gntdev_map_grant_ref __user *u) | ||
481 | { | ||
482 | struct ioctl_gntdev_map_grant_ref op; | ||
483 | struct grant_map *map; | ||
484 | int err; | ||
485 | |||
486 | if (copy_from_user(&op, u, sizeof(op)) != 0) | ||
487 | return -EFAULT; | ||
488 | pr_debug("priv %p, add %d\n", priv, op.count); | ||
489 | if (unlikely(op.count <= 0)) | ||
490 | return -EINVAL; | ||
491 | |||
492 | err = -ENOMEM; | ||
493 | map = gntdev_alloc_map(priv, op.count); | ||
494 | if (!map) | ||
495 | return err; | ||
496 | |||
497 | if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { | ||
498 | pr_debug("can't map: over limit\n"); | ||
499 | gntdev_put_map(map); | ||
500 | return err; | ||
501 | } | ||
502 | |||
503 | if (copy_from_user(map->grants, &u->refs, | ||
504 | sizeof(map->grants[0]) * op.count) != 0) { | ||
505 | gntdev_put_map(map); | ||
506 | return err; | ||
507 | } | ||
508 | |||
509 | spin_lock(&priv->lock); | ||
510 | gntdev_add_map(priv, map); | ||
511 | op.index = map->index << PAGE_SHIFT; | ||
512 | spin_unlock(&priv->lock); | ||
513 | |||
514 | if (copy_to_user(u, &op, sizeof(op)) != 0) | ||
515 | return -EFAULT; | ||
516 | |||
517 | return 0; | ||
518 | } | ||
519 | |||
520 | static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, | ||
521 | struct ioctl_gntdev_unmap_grant_ref __user *u) | ||
522 | { | ||
523 | struct ioctl_gntdev_unmap_grant_ref op; | ||
524 | struct grant_map *map; | ||
525 | int err = -ENOENT; | ||
526 | |||
527 | if (copy_from_user(&op, u, sizeof(op)) != 0) | ||
528 | return -EFAULT; | ||
529 | pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count); | ||
530 | |||
531 | spin_lock(&priv->lock); | ||
532 | map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); | ||
533 | if (map) { | ||
534 | list_del(&map->next); | ||
535 | gntdev_put_map(map); | ||
536 | err = 0; | ||
537 | } | ||
538 | spin_unlock(&priv->lock); | ||
539 | return err; | ||
540 | } | ||
541 | |||
542 | static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, | ||
543 | struct ioctl_gntdev_get_offset_for_vaddr __user *u) | ||
544 | { | ||
545 | struct ioctl_gntdev_get_offset_for_vaddr op; | ||
546 | struct vm_area_struct *vma; | ||
547 | struct grant_map *map; | ||
548 | |||
549 | if (copy_from_user(&op, u, sizeof(op)) != 0) | ||
550 | return -EFAULT; | ||
551 | pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); | ||
552 | |||
553 | vma = find_vma(current->mm, op.vaddr); | ||
554 | if (!vma || vma->vm_ops != &gntdev_vmops) | ||
555 | return -EINVAL; | ||
556 | |||
557 | map = vma->vm_private_data; | ||
558 | if (!map) | ||
559 | return -EINVAL; | ||
560 | |||
561 | op.offset = map->index << PAGE_SHIFT; | ||
562 | op.count = map->count; | ||
563 | |||
564 | if (copy_to_user(u, &op, sizeof(op)) != 0) | ||
565 | return -EFAULT; | ||
566 | return 0; | ||
567 | } | ||
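The handler above backs IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, which lets user space recover the device offset and page count behind a live gntdev VMA, for example to unmap it by index later. A small sketch, reusing the includes and the hypothetical fd/addr from the earlier example and the same assumed xen/gntdev.h uapi layout:

/* Query which offset/count a live gntdev mapping at 'addr' was created with. */
static int print_backing_offset(int fd, void *addr)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;

	memset(&op, 0, sizeof(op));
	op.vaddr = (uint64_t)(uintptr_t)addr;
	if (ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &op) < 0)
		return -1;
	printf("offset %llu, %u page(s)\n",
	       (unsigned long long)op.offset, op.count);
	return 0;
}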
568 | |||
569 | static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) | ||
570 | { | ||
571 | struct ioctl_gntdev_unmap_notify op; | ||
572 | struct grant_map *map; | ||
573 | int rc; | ||
574 | |||
575 | if (copy_from_user(&op, u, sizeof(op))) | ||
576 | return -EFAULT; | ||
577 | |||
578 | if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) | ||
579 | return -EINVAL; | ||
580 | |||
581 | spin_lock(&priv->lock); | ||
582 | |||
583 | list_for_each_entry(map, &priv->maps, next) { | ||
584 | uint64_t begin = map->index << PAGE_SHIFT; | ||
585 | uint64_t end = (map->index + map->count) << PAGE_SHIFT; | ||
586 | if (op.index >= begin && op.index < end) | ||
587 | goto found; | ||
588 | } | ||
589 | rc = -ENOENT; | ||
590 | goto unlock_out; | ||
591 | |||
592 | found: | ||
593 | if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) && | ||
594 | (map->flags & GNTMAP_readonly)) { | ||
595 | rc = -EINVAL; | ||
596 | goto unlock_out; | ||
597 | } | ||
598 | |||
599 | map->notify.flags = op.action; | ||
600 | map->notify.addr = op.index - (map->index << PAGE_SHIFT); | ||
601 | map->notify.event = op.event_channel_port; | ||
602 | rc = 0; | ||
603 | unlock_out: | ||
604 | spin_unlock(&priv->lock); | ||
605 | return rc; | ||
606 | } | ||
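User space arms this notification before tearing a mapping down, so the granting domain can tell when the pages are actually released: the driver clears a byte in the mapping and/or signals an event channel on the final unmap. A sketch of a hypothetical helper, again assuming the xen/gntdev.h uapi layout, where 'map_index' is the offset returned by IOCTL_GNTDEV_MAP_GRANT_REF and 'port' is an event channel port the caller already owns:

/* Clear byte 0 of the first mapped page and signal 'port' on final unmap. */
static int arm_unmap_notify(int fd, uint64_t map_index, uint32_t port)
{
	struct ioctl_gntdev_unmap_notify notify;

	memset(&notify, 0, sizeof(notify));
	notify.index = map_index;	/* byte offset in the device's pseudo address space */
	notify.action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT;
	notify.event_channel_port = port;
	return ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
}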
607 | |||
608 | static long gntdev_ioctl(struct file *flip, | ||
609 | unsigned int cmd, unsigned long arg) | ||
610 | { | ||
611 | struct gntdev_priv *priv = flip->private_data; | ||
612 | void __user *ptr = (void __user *)arg; | ||
613 | |||
614 | switch (cmd) { | ||
615 | case IOCTL_GNTDEV_MAP_GRANT_REF: | ||
616 | return gntdev_ioctl_map_grant_ref(priv, ptr); | ||
617 | |||
618 | case IOCTL_GNTDEV_UNMAP_GRANT_REF: | ||
619 | return gntdev_ioctl_unmap_grant_ref(priv, ptr); | ||
620 | |||
621 | case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: | ||
622 | return gntdev_ioctl_get_offset_for_vaddr(priv, ptr); | ||
623 | |||
624 | case IOCTL_GNTDEV_SET_UNMAP_NOTIFY: | ||
625 | return gntdev_ioctl_notify(priv, ptr); | ||
626 | |||
627 | default: | ||
628 | pr_debug("priv %p, unknown cmd %x\n", priv, cmd); | ||
629 | return -ENOIOCTLCMD; | ||
630 | } | ||
631 | |||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | ||
636 | { | ||
637 | struct gntdev_priv *priv = flip->private_data; | ||
638 | int index = vma->vm_pgoff; | ||
639 | int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | ||
640 | struct grant_map *map; | ||
641 | int i, err = -EINVAL; | ||
642 | |||
643 | if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) | ||
644 | return -EINVAL; | ||
645 | |||
646 | pr_debug("map %d+%d at %lx (pgoff %lx)\n", | ||
647 | index, count, vma->vm_start, vma->vm_pgoff); | ||
648 | |||
649 | spin_lock(&priv->lock); | ||
650 | map = gntdev_find_map_index(priv, index, count); | ||
651 | if (!map) | ||
652 | goto unlock_out; | ||
653 | if (use_ptemod && map->vma) | ||
654 | goto unlock_out; | ||
655 | if (use_ptemod && priv->mm != vma->vm_mm) { | ||
656 | printk(KERN_WARNING "Huh? Other mm?\n"); | ||
657 | goto unlock_out; | ||
658 | } | ||
659 | |||
660 | atomic_inc(&map->users); | ||
661 | |||
662 | vma->vm_ops = &gntdev_vmops; | ||
663 | |||
664 | vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; | ||
665 | |||
666 | if (use_ptemod) | ||
667 | vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; | ||
668 | |||
669 | vma->vm_private_data = map; | ||
670 | |||
671 | if (use_ptemod) | ||
672 | map->vma = vma; | ||
673 | |||
674 | if (map->flags) { | ||
675 | if ((vma->vm_flags & VM_WRITE) && | ||
676 | (map->flags & GNTMAP_readonly)) | ||
677 | goto out_unlock_put; | ||
678 | } else { | ||
679 | map->flags = GNTMAP_host_map; | ||
680 | if (!(vma->vm_flags & VM_WRITE)) | ||
681 | map->flags |= GNTMAP_readonly; | ||
682 | } | ||
683 | |||
684 | spin_unlock(&priv->lock); | ||
685 | |||
686 | if (use_ptemod) { | ||
687 | err = apply_to_page_range(vma->vm_mm, vma->vm_start, | ||
688 | vma->vm_end - vma->vm_start, | ||
689 | find_grant_ptes, map); | ||
690 | if (err) { | ||
691 | printk(KERN_WARNING "find_grant_ptes() failure.\n"); | ||
692 | goto out_put_map; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | err = map_grant_pages(map); | ||
697 | if (err) | ||
698 | goto out_put_map; | ||
699 | |||
700 | if (!use_ptemod) { | ||
701 | for (i = 0; i < count; i++) { | ||
702 | err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE, | ||
703 | map->pages[i]); | ||
704 | if (err) | ||
705 | goto out_put_map; | ||
706 | } | ||
707 | } | ||
708 | |||
709 | return 0; | ||
710 | |||
711 | unlock_out: | ||
712 | spin_unlock(&priv->lock); | ||
713 | return err; | ||
714 | |||
715 | out_unlock_put: | ||
716 | spin_unlock(&priv->lock); | ||
717 | out_put_map: | ||
718 | if (use_ptemod) | ||
719 | map->vma = NULL; | ||
720 | gntdev_put_map(map); | ||
721 | return err; | ||
722 | } | ||
723 | |||
724 | static const struct file_operations gntdev_fops = { | ||
725 | .owner = THIS_MODULE, | ||
726 | .open = gntdev_open, | ||
727 | .release = gntdev_release, | ||
728 | .mmap = gntdev_mmap, | ||
729 | .unlocked_ioctl = gntdev_ioctl | ||
730 | }; | ||
731 | |||
732 | static struct miscdevice gntdev_miscdev = { | ||
733 | .minor = MISC_DYNAMIC_MINOR, | ||
734 | .name = "xen/gntdev", | ||
735 | .fops = &gntdev_fops, | ||
736 | }; | ||
737 | |||
738 | /* ------------------------------------------------------------------ */ | ||
739 | |||
740 | static int __init gntdev_init(void) | ||
741 | { | ||
742 | int err; | ||
743 | |||
744 | if (!xen_domain()) | ||
745 | return -ENODEV; | ||
746 | |||
747 | use_ptemod = xen_pv_domain(); | ||
748 | |||
749 | err = misc_register(&gntdev_miscdev); | ||
750 | if (err != 0) { | ||
751 | printk(KERN_ERR "Could not register gntdev device\n"); | ||
752 | return err; | ||
753 | } | ||
754 | return 0; | ||
755 | } | ||
756 | |||
757 | static void __exit gntdev_exit(void) | ||
758 | { | ||
759 | misc_deregister(&gntdev_miscdev); | ||
760 | } | ||
761 | |||
762 | module_init(gntdev_init); | ||
763 | module_exit(gntdev_exit); | ||
764 | |||
765 | /* ------------------------------------------------------------------ */ | ||