Diffstat (limited to 'drivers/gpu/ion/ion.c')
 -rw-r--r--  drivers/gpu/ion/ion.c | 1152
 1 files changed, 1152 insertions, 0 deletions
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
new file mode 100644
index 00000000000..512ebc5cc8e
--- /dev/null
+++ b/drivers/gpu/ion/ion.c
@@ -0,0 +1,1152 @@
/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s():%d: " fmt, __func__, __LINE__

#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>

#include "ion_priv.h"
#define DEBUG

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("buffer already found.");
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	int ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}
	buffer->dev = dev;
	buffer->size = len;
	mutex_init(&buffer->lock);
	ion_buffer_add(dev, buffer);
	return buffer;
}

static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}

struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	rb_init_node(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	/* XXX Can a handle be destroyed while its map count is non-zero?:
	   if (handle->map_cnt) unmap
	 */
	ion_buffer_put(handle->buffer);
	mutex_lock(&handle->client->lock);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &handle->client->handles);
	mutex_unlock(&handle->client->lock);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	WARN(1, "invalid handle passed h=%p, comm=%s\n", handle,
	     current->group_leader->comm);
	return false;
}

void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: handle already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap type */
		if (!((1 << heap->id) & flags))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (IS_ERR_OR_NULL(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	if (IS_ERR_OR_NULL(handle))
		goto end;

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	mutex_lock(&client->lock);
	ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	return handle;

end:
	ion_buffer_put(buffer);
	return handle;
}
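/*
 * Usage note (illustrative sketch, not part of this driver): despite the
 * comment in the loop above, the 'flags' argument is matched against heap
 * *ids* while the client's heap_mask filters by heap *type*.  Assuming a
 * platform that registered a system heap with id 0, an allocation might
 * look like:
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, 1 << 0);
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENOMEM;
 *
 * The same 'flags' word is also forwarded to the heap's allocate() op, so
 * any heap-specific bits ride along with the id mask.
 */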

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}

static bool _ion_map(int *buffer_cnt, int *handle_cnt)
{
	bool map;

	BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);

	if (*buffer_cnt)
		map = false;
	else
		map = true;
	if (*handle_cnt == 0)
		(*buffer_cnt)++;
	(*handle_cnt)++;
	return map;
}

static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
{
	BUG_ON(*handle_cnt == 0);
	(*handle_cnt)--;
	if (*handle_cnt != 0)
		return false;
	BUG_ON(*buffer_cnt == 0);
	(*buffer_cnt)--;
	if (*buffer_cnt == 0)
		return true;
	return false;
}
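/*
 * Note on _ion_map()/_ion_unmap(): each handle keeps its own map count and
 * the buffer keeps a count of how many handles currently map it.  _ion_map()
 * returns true only on the transition from "no handle maps this buffer" to
 * "one does", i.e. exactly when the heap's map op must be called, and
 * _ion_unmap() returns true on the reverse transition, when the heap's
 * unmap op must be called.  For example, two ion_map_kernel() calls on the
 * same handle invoke the heap's map_kernel() only once, and only the second
 * ion_unmap_kernel() triggers unmap_kernel().
 */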

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("ion_phys is not implemented by this heap.\n");
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "invalid handle passed to map_kernel.\n");
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("map_kernel is not implemented by this heap.\n");
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(vaddr))
			_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
		buffer->vaddr = vaddr;
	} else {
		vaddr = buffer->vaddr;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

struct scatterlist *ion_map_dma(struct ion_client *client,
				struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct scatterlist *sglist;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "invalid handle passed to map_dma.\n");
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	if (!handle->buffer->heap->ops->map_dma) {
		pr_err("map_dma is not implemented by this heap.\n");
		mutex_unlock(&buffer->lock);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}
	if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		sglist = buffer->heap->ops->map_dma(buffer->heap, buffer);
		if (IS_ERR_OR_NULL(sglist))
			_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
		buffer->sglist = sglist;
	} else {
		sglist = buffer->sglist;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return sglist;
}

struct scatterlist *iommu_heap_remap_dma(struct ion_heap *heap,
					 struct ion_buffer *buf,
					 unsigned long addr);
int ion_remap_dma(struct ion_client *client,
		  struct ion_handle *handle,
		  unsigned long addr)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("invalid handle passed to remap_dma.\n");
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);

	ret = iommu_heap_remap_dma(buffer->heap, buffer, addr);

	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return ret;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}

void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
		buffer->sglist = NULL;
	}
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
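/*
 * Illustrative sketch (an assumption of this comment, not driver code): a
 * kernel user that needs a CPU view of a buffer it allocated would pair the
 * calls above roughly like this:
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return -ENOMEM;
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 *
 * ion_map_dma()/ion_unmap_dma() follow the same pattern but hand back the
 * heap's scatterlist instead of a kernel virtual address.
 */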


struct ion_buffer *ion_share(struct ion_client *client,
			     struct ion_handle *handle)
{
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* do not take an extra reference here, the burden is on the caller
	 * to make sure the buffer doesn't go away while it's passing it
	 * to another client -- ion_free should not be called on this handle
	 * until the buffer has been imported into the other client
	 */
	return handle->buffer;
}

struct ion_handle *ion_import(struct ion_client *client,
			      struct ion_buffer *buffer)
{
	struct ion_handle *handle = NULL;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		pr_err("error during handle create\n");
		goto end;
	}
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	return handle;
}

static const struct file_operations ion_share_fops;

struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
{
	struct file *file = fget(fd);
	struct ion_handle *handle;

	if (!file) {
		pr_err("imported fd not found in file table.\n");
		return ERR_PTR(-EINVAL);
	}
	if (file->f_op != &ion_share_fops) {
		pr_err("imported file is not a shared ion file.\n");
		handle = ERR_PTR(-EINVAL);
		goto end;
	}
	handle = ion_import(client, file->private_data);
end:
	fput(file);
	return handle;
}
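/*
 * Sharing flow, summarised from the functions above: the exporting client
 * turns a handle into a file via the share ioctl (see ion_ioctl_share()
 * below), passes the resulting fd to another process, and the importer
 * calls ion_import_fd(), which resolves the fd back to the underlying
 * ion_buffer and creates (or reuses) a handle for it.  The shared file
 * holds its own reference on the buffer, so the buffer survives even if
 * the exporter frees its handle while the fd is in flight.
 */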

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu %d\n", names[i], sizes[i],
			   atomic_read(&client->ref.refcount));
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct ion_client *ion_client_lookup(struct ion_device *dev,
					    struct task_struct *task)
{
	struct rb_node *n = dev->user_clients.rb_node;
	struct ion_client *client;

	mutex_lock(&dev->lock);
	while (n) {
		client = rb_entry(n, struct ion_client, node);
		if (task == client->task) {
			ion_client_get(client);
			mutex_unlock(&dev->lock);
			return client;
		} else if (task < client->task) {
			n = n->rb_left;
		} else if (task > client->task) {
			n = n->rb_right;
		}
	}
	mutex_unlock(&dev->lock);
	return NULL;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	/* if this isn't a kernel thread, see if a client already
	   exists */
	if (task) {
		client = ion_client_lookup(dev, task);
		if (!IS_ERR_OR_NULL(client)) {
			put_task_struct(current->group_leader);
			return client;
		}
	}

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;
	kref_init(&client->ref);

	mutex_lock(&dev->lock);
	if (task) {
		p = &dev->user_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (task < entry->task)
				p = &(*p)->rb_left;
			else if (task > entry->task)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->user_clients);
	} else {
		p = &dev->kernel_clients.rb_node;
		while (*p) {
			parent = *p;
			entry = rb_entry(parent, struct ion_client, node);

			if (client < entry)
				p = &(*p)->rb_left;
			else if (client > entry)
				p = &(*p)->rb_right;
		}
		rb_link_node(&client->node, parent, p);
		rb_insert_color(&client->node, &dev->kernel_clients);
	}

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

static void _ion_client_destroy(struct kref *kref)
{
	struct ion_client *client = container_of(kref, struct ion_client, ref);
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("\n");
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task) {
		rb_erase(&client->node, &dev->user_clients);
		put_task_struct(client->task);
	} else {
		rb_erase(&client->node, &dev->kernel_clients);
	}
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}

void ion_client_get(struct ion_client *client)
{
	kref_get(&client->ref);
}

int ion_client_put(struct ion_client *client)
{
	return kref_put(&client->ref, _ion_client_destroy);
}

void ion_client_destroy(struct ion_client *client)
{
	ion_client_put(client);
}
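/*
 * Putting the kernel-side client API together -- a minimal sketch; the
 * device pointer 'idev' and the heap id 0 are assumptions of the example:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM, "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return -ENOMEM;
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << 0);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return -ENOMEM;
 *	}
 *	...
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */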

static int ion_share_release(struct inode *inode, struct file *file)
{
	struct ion_buffer *buffer = file->private_data;

	pr_debug("\n");
	/* drop the reference to the buffer -- this prevents the
	   buffer from going away because the client holding it exited
	   while it was being passed */
	ion_buffer_put(buffer);
	return 0;
}

static void ion_vma_open(struct vm_area_struct *vma)
{

	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_client *client;

	pr_debug("\n");
	/* check that the client still exists and take a reference so
	   it can't go away until this vma is closed */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		vma->vm_private_data = NULL;
		return;
	}
	ion_buffer_get(buffer);
	ion_handle_get(handle);
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static void ion_vma_close(struct vm_area_struct *vma)
{
	struct ion_handle *handle = vma->vm_private_data;
	struct ion_buffer *buffer = vma->vm_file->private_data;
	struct ion_client *client;

	pr_debug("\n");
	/* this indicates the client is gone, nothing to do here */
	if (!handle)
		return;
	client = handle->client;
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	ion_handle_put(handle);
	ion_client_put(client);
	ion_buffer_put(buffer);
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
}

static struct vm_operations_struct ion_vm_ops = {
	.open = ion_vma_open,
	.close = ion_vma_close,
};

static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = file->private_data;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct ion_client *client;
	struct ion_handle *handle;
	int ret;

	pr_debug("\n");
	/* make sure the client still exists, it's possible for the client to
	   have gone away but the map/share fd still to be around, take
	   a reference to it so it can't go away while this mapping exists */
	client = ion_client_lookup(buffer->dev, current->group_leader);
	if (IS_ERR_OR_NULL(client)) {
		WARN(1, "trying to mmap an ion handle in a process with no "
		     "ion client\n");
		return -EINVAL;
	}

	if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
				      buffer->size)) {
		WARN(1, "trying to map larger area than handle has available"
		     "\n");
		ret = -EINVAL;
		goto err;
	}

	/* find the handle and take a reference to it */
	handle = ion_import(client, buffer);
	if (IS_ERR_OR_NULL(handle)) {
		ret = -EINVAL;
		goto err;
	}
	ion_buffer_get(buffer);

	if (!handle->buffer->heap->ops->map_user) {
		pr_err("this heap does not define a method for mapping "
		       "to userspace\n");
		ret = -EINVAL;
		goto err1;
	}

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);
	if (ret) {
		pr_err("failure mapping buffer to userspace\n");
		goto err1;
	}

	vma->vm_ops = &ion_vm_ops;
	/* move the handle into the vm_private_data so we can access it from
	   vma_open/close */
	vma->vm_private_data = handle;
	pr_debug("client_cnt %d handle_cnt %d alloc_cnt %d\n",
		 atomic_read(&client->ref.refcount),
		 atomic_read(&handle->ref.refcount),
		 atomic_read(&buffer->ref.refcount));
	return 0;

err1:
	/* drop the reference to the handle */
	ion_handle_put(handle);
err:
	/* drop the reference to the client */
	ion_client_put(client);
	return ret;
}

static const struct file_operations ion_share_fops = {
	.owner = THIS_MODULE,
	.release = ion_share_release,
	.mmap = ion_share_mmap,
};

static int ion_ioctl_share(struct file *parent, struct ion_client *client,
			   struct ion_handle *handle)
{
	int fd = get_unused_fd();
	struct file *file;

	if (fd < 0)
		return -ENFILE;

	file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
				  handle->buffer, O_RDWR);
	if (IS_ERR_OR_NULL(file))
		goto err;
	ion_buffer_get(handle->buffer);
	fd_install(fd, file);

	return fd;

err:
	put_unused_fd(fd);
	return -ENFILE;
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.flags);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		if (!ion_handle_validate(client, data.handle)) {
			WARN(1, "invalid handle passed to share ioctl.\n");
			mutex_unlock(&client->lock);
			return -EINVAL;
		}
		data.fd = ion_ioctl_share(filp, client, data.handle);
		mutex_unlock(&client->lock);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;

		data.handle = ion_import_fd(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
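/*
 * From userspace the same sequence goes through the ioctls above; a rough
 * sketch with error handling elided (the heap id 0 and the open /dev/ion
 * fd 'ion_fd' are assumptions of the example):
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096, .align = 4096, .flags = 1 << 0,
 *	};
 *	struct ion_fd_data share;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &share);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 *
 * The fd returned by ION_IOC_SHARE is serviced by ion_share_mmap(), and it
 * can also be sent to another process and handed to ION_IOC_IMPORT.
 */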

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("\n");
	ion_client_put(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("\n");
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		char task_comm[TASK_COMM_LEN];
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;

		get_task_comm(task_comm, client->task);
		seq_printf(s, "%16s %16u %16zu\n", task_comm, client->pid,
			   size);
	}

	for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		seq_printf(s, "%16s %16u %16zu\n", client->name, client->pid,
			   size);
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("can not insert multiple heaps with "
			       "id %d\n", heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->user_clients = RB_ROOT;
	idev->kernel_clients = RB_ROOT;
	return idev;
}
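/*
 * Platform wiring, sketched (the ion_platform_data fields and a per-heap
 * constructor such as ion_heap_create() are assumptions of this example;
 * they live outside this file):
 *
 *	idev = ion_device_create(NULL);
 *	for (i = 0; i < pdata->nr; i++)
 *		ion_device_add_heap(idev, ion_heap_create(&pdata->heaps[i]));
 *
 * Heaps are kept in an rbtree ordered by id, and ion_alloc() walks that
 * tree from rb_first() onward, so lower ids are effectively tried first.
 */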

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

struct ion_client *ion_client_get_file(int fd)
{
	struct ion_client *client = ERR_PTR(-EFAULT);
	struct file *f = fget(fd);
	if (!f)
		return ERR_PTR(-EINVAL);

	if (f->f_op == &ion_fops) {
		client = f->private_data;
		ion_client_get(client);
	}

	fput(f);
	return client;
}