Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_ioctl.c')
 -rw-r--r--  drivers/video/tegra/nvmap/nvmap_ioctl.c  749
 1 files changed, 749 insertions, 0 deletions

diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
new file mode 100644
index 00000000000..58bc71d5046
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -0,0 +1,749 @@
/*
 * drivers/video/tegra/nvmap/nvmap_ioctl.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/tlbflush.h>

#include <mach/iovmm.h>
#include <mach/nvmap.h>

#include "nvmap_ioctl.h"
#include "nvmap.h"
#include "nvmap_common.h"

static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count);

static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
		       unsigned long start, unsigned long end, unsigned int op);

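/*
 * Pin or unpin one or more handles on behalf of the calling client.  For a
 * single handle the id is passed directly in op.handles; for multiple
 * handles op.handles points to a user-space array of ids.  On a successful
 * pin, the physical (or IOVMM) address of each handle is written back to
 * user space.
 *
 * Rough user-space usage (a sketch only; NVMAP_IOC_PIN_MULT and the device
 * fd come from <mach/nvmap.h> and the caller's setup, not from this file):
 *
 *	struct nvmap_pin_handle op;
 *	op.handles = ids;	(array of op.count handle ids)
 *	op.addr = addrs;	(receives the pinned addresses)
 *	op.count = n;
 *	err = ioctl(nvmap_fd, NVMAP_IOC_PIN_MULT, &op);
 */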
int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg)
{
	struct nvmap_pin_handle op;
	struct nvmap_handle *h;
	unsigned long on_stack[16];
	unsigned long *refs;
	unsigned long __user *output;
	unsigned int i;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.count)
		return -EINVAL;

	if (op.count > 1) {
		size_t bytes = op.count * sizeof(*refs);

		if (op.count > ARRAY_SIZE(on_stack))
			refs = kmalloc(bytes, GFP_KERNEL);
		else
			refs = on_stack;

		if (!refs)
			return -ENOMEM;

		if (copy_from_user(refs, (void *)op.handles, bytes)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		refs = on_stack;
		on_stack[0] = (unsigned long)op.handles;
	}

	if (is_pin)
		err = nvmap_pin_ids(filp->private_data, op.count, refs);
	else
		nvmap_unpin_ids(filp->private_data, op.count, refs);

	/* skip the output stage on unpin */
	if (err || !is_pin)
		goto out;

	/* if nvmap_pin_ids() returned 0, all of the handle_ref objects are
	 * guaranteed to be valid, so dereferencing directly here is safe */
	if (op.count > 1)
		output = (unsigned long __user *)op.addr;
	else {
		struct nvmap_pin_handle __user *tmp = arg;
		output = (unsigned long __user *)&(tmp->addr);
	}

	if (!output)
		goto out;

	for (i = 0; i < op.count && !err; i++) {
		unsigned long addr;

		h = (struct nvmap_handle *)refs[i];

		if (h->heap_pgalloc && h->pgalloc.contig)
			addr = page_to_phys(h->pgalloc.pages[0]);
		else if (h->heap_pgalloc)
			addr = h->pgalloc.area->iovm_start;
		else
			addr = h->carveout->base;

		err = put_user(addr, &output[i]);
	}

	if (err)
		nvmap_unpin_ids(filp->private_data, op.count, refs);

out:
	if (refs != on_stack)
		kfree(refs);

	return err;
}

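/*
 * Return a global id for an existing handle (the id is the handle pointer
 * itself), which other clients may pass to NVMAP_IOC_FROM_ID to duplicate
 * it.  When the caller owns the handle it is marked global, i.e. shareable.
 */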
int nvmap_ioctl_getid(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_create_handle op;
	struct nvmap_handle *h = NULL;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);

	if (!h)
		return -EPERM;

	op.id = (__u32)h;
	if (client == h->owner)
		h->global = true;

	nvmap_handle_put(h);

	return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
}

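/*
 * Back an existing handle with memory from one of the heaps in op.heap_mask.
 * The requested alignment must be a power of two and is raised to at least
 * PAGE_SIZE, so one client's data never shares a page with another's.
 */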
int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
{
	struct nvmap_alloc_handle op;
	struct nvmap_client *client = filp->private_data;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	if (op.align & (op.align - 1))
		return -EINVAL;

	/* user-space handles are aligned to page boundaries, to prevent
	 * data leakage. */
	op.align = max_t(size_t, op.align, PAGE_SIZE);

	return nvmap_alloc_handle_id(client, op.handle, op.heap_mask,
				     op.align, op.flags);
}

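/*
 * Create a new handle (NVMAP_IOC_CREATE) or duplicate one from a global id
 * (NVMAP_IOC_FROM_ID) and return the resulting handle id to user space.  A
 * freshly created handle has no backing memory until it is allocated.
 *
 * Typical user-space sequence (a sketch; the request numbers are defined in
 * <mach/nvmap.h>, not here):
 *
 *	struct nvmap_create_handle op;
 *	op.size = len;
 *	ioctl(nvmap_fd, NVMAP_IOC_CREATE, &op);
 *	then allocate backing memory via the NVMAP_IOC_ALLOC path using
 *	op.handle, and mmap()/pin as needed.
 */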
int nvmap_ioctl_create(struct file *filp, unsigned int cmd, void __user *arg)
{
	struct nvmap_create_handle op;
	struct nvmap_handle_ref *ref = NULL;
	struct nvmap_client *client = filp->private_data;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!client)
		return -ENODEV;

	if (cmd == NVMAP_IOC_CREATE) {
		ref = nvmap_create_handle(client, PAGE_ALIGN(op.size));
		if (!IS_ERR(ref))
			ref->handle->orig_size = op.size;
	} else if (cmd == NVMAP_IOC_FROM_ID) {
		ref = nvmap_duplicate_handle_id(client, op.id);
	} else {
		return -EINVAL;
	}

	if (IS_ERR(ref))
		return PTR_ERR(ref);

	op.handle = nvmap_ref_to_id(ref);
	if (copy_to_user(arg, &op, sizeof(op))) {
		err = -EFAULT;
		nvmap_free_handle_id(client, op.handle);
	}

	return err;
}

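/*
 * Bind a handle to a VMA previously created by mmap() on the nvmap device.
 * The VMA must exactly cover [op.addr, op.addr + op.length) and must not
 * already be bound to a handle.  Cache flags in op.flags may upgrade an
 * uncached or write-combined buffer to (inner-)cacheable, provided the
 * buffer size is a whole number of pages.
 */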
int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_map_caller op;
	struct nvmap_vma_priv *vpriv;
	struct vm_area_struct *vma;
	struct nvmap_handle *h = NULL;
	unsigned int cache_flags;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);

	if (!h)
		return -EPERM;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, op.addr);
	if (!vma || !vma->vm_private_data) {
		err = -ENOMEM;
		goto out;
	}

	if (op.offset & ~PAGE_MASK) {
		err = -EFAULT;
		goto out;
	}

	if ((op.offset + op.length) > h->size) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = vma->vm_private_data;
	BUG_ON(!vpriv);

	/* the VMA must exactly match the requested mapping operation, and
	 * the VMA that is targeted must have been created by this driver */
	if ((vma->vm_start != op.addr) || !is_nvmap_vma(vma) ||
	    (vma->vm_end - vma->vm_start != op.length)) {
		err = -EPERM;
		goto out;
	}

	/* verify that each mmap() system call creates a unique VMA */
	if (vpriv->handle && (h == vpriv->handle)) {
		goto out;
	} else if (vpriv->handle) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	nvmap_usecount_inc(h);

	if (!h->heap_pgalloc && (h->carveout->base & ~PAGE_MASK)) {
		nvmap_usecount_dec(h);
		err = -EFAULT;
		goto out;
	}

	vpriv->handle = h;
	vpriv->offs = op.offset;

	cache_flags = op.flags & NVMAP_HANDLE_CACHE_FLAG;
	if ((cache_flags == NVMAP_HANDLE_INNER_CACHEABLE ||
	     cache_flags == NVMAP_HANDLE_CACHEABLE) &&
	    (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	     h->flags == NVMAP_HANDLE_WRITE_COMBINE)) {
		if (h->size & ~PAGE_MASK) {
			pr_err("%s: request to convert a uc/wc buffer to wb "
			       "ignored: size is not a multiple of the page "
			       "size\n", __func__);
		} else {
			unsigned int nr_page = h->size >> PAGE_SHIFT;
			wmb();
			/* override allocation-time cache coherency
			 * attributes */
			h->flags &= ~NVMAP_HANDLE_CACHE_FLAG;
			h->flags |= cache_flags;

			/* update page attributes, if the memory is allocated
			 * from system heap pages */
			if (cache_flags == NVMAP_HANDLE_INNER_CACHEABLE &&
			    h->heap_pgalloc)
				set_pages_array_iwb(h->pgalloc.pages, nr_page);
			else if (h->heap_pgalloc)
				set_pages_array_wb(h->pgalloc.pages, nr_page);
		}
	}
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);

out:
	up_read(&current->mm->mmap_sem);

	if (err)
		nvmap_handle_put(h);
	return err;
}

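/*
 * Report one property of a handle back to user space: its original size,
 * its alignment, its (pinned) base address, or the heap it was allocated
 * from.  Querying the base of an unpinned or unallocated handle yields -1.
 */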
int nvmap_ioctl_get_param(struct file *filp, void __user *arg)
{
	struct nvmap_handle_param op;
	struct nvmap_client *client = filp->private_data;
	struct nvmap_handle *h;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EINVAL;

	switch (op.param) {
	case NVMAP_HANDLE_PARAM_SIZE:
		op.result = h->orig_size;
		break;
	case NVMAP_HANDLE_PARAM_ALIGNMENT:
		mutex_lock(&h->lock);
		if (!h->alloc)
			op.result = 0;
		else if (h->heap_pgalloc)
			op.result = PAGE_SIZE;
		else if (h->carveout->base)
			op.result = (h->carveout->base & -h->carveout->base);
		else
			op.result = SZ_4M;
		mutex_unlock(&h->lock);
		break;
	case NVMAP_HANDLE_PARAM_BASE:
		if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
			op.result = -1ul;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			op.result = h->carveout->base;
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			op.result = page_to_phys(h->pgalloc.pages[0]);
		else if (h->pgalloc.area)
			op.result = h->pgalloc.area->iovm_start;
		else
			op.result = -1ul;
		break;
	case NVMAP_HANDLE_PARAM_HEAP:
		if (!h->alloc)
			op.result = 0;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			op.result = nvmap_carveout_usage(client, h->carveout);
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			op.result = NVMAP_HEAP_SYSMEM;
		else
			op.result = NVMAP_HEAP_IOVMM;
		break;
	default:
		err = -EINVAL;
		break;
	}

	if (!err && copy_to_user(arg, &op, sizeof(op)))
		err = -EFAULT;

	nvmap_handle_put(h);
	return err;
}

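/*
 * Copy data between a handle and user memory (the NVMAP_IOC_READ and
 * NVMAP_IOC_WRITE request numbers from <mach/nvmap.h> are assumed to route
 * here).  The copy is strided on both sides: op.count elements of
 * op.elem_size bytes each, advancing by op.hmem_stride through the handle
 * and op.user_stride through user memory.  The number of bytes actually
 * copied is written back to op.count.
 */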
int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_rw_handle __user *uarg = arg;
	struct nvmap_rw_handle op;
	struct nvmap_handle *h;
	ssize_t copied;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || !op.count || !op.elem_size)
		return -EINVAL;

	h = nvmap_get_handle_id(client, op.handle);
	if (!h)
		return -EPERM;

	nvmap_usecount_inc(h);

	copied = rw_handle(client, h, is_read, op.offset,
			   (unsigned long)op.addr, op.hmem_stride,
			   op.user_stride, op.elem_size, op.count);

	if (copied < 0) {
		err = copied;
		copied = 0;
	} else if (copied < (op.count * op.elem_size))
		err = -EINTR;

	__put_user(copied, &uarg->count);

	nvmap_usecount_dec(h);

	nvmap_handle_put(h);

	return err;
}

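/*
 * Perform cache maintenance (writeback, invalidate, or both) on a range of
 * a handle the caller has mapped, e.g. after CPU writes and before device
 * reads.  op.addr must lie inside an nvmap VMA bound to op.handle, and
 * op.op must be one of NVMAP_CACHE_OP_WB, NVMAP_CACHE_OP_INV or
 * NVMAP_CACHE_OP_WB_INV.
 */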
int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
{
	struct nvmap_client *client = filp->private_data;
	struct nvmap_cache_op op;
	struct vm_area_struct *vma;
	struct nvmap_vma_priv *vpriv;
	unsigned long start;
	unsigned long end;
	int err = 0;

	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;

	if (!op.handle || !op.addr || op.op < NVMAP_CACHE_OP_WB ||
	    op.op > NVMAP_CACHE_OP_WB_INV)
		return -EINVAL;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, (unsigned long)op.addr);
	if (!vma || !is_nvmap_vma(vma) ||
	    (unsigned long)op.addr + op.len > vma->vm_end) {
		err = -EADDRNOTAVAIL;
		goto out;
	}

	vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;

	if ((unsigned long)vpriv->handle != op.handle) {
		err = -EFAULT;
		goto out;
	}

	start = (unsigned long)op.addr - vma->vm_start;
	end = start + op.len;

	err = cache_maint(client, vpriv->handle, start, end, op.op);
out:
	up_read(&current->mm->mmap_sem);
	return err;
}

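/* Release the calling client's reference on a handle; id 0 is a no-op. */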
int nvmap_ioctl_free(struct file *filp, unsigned long arg)
{
	struct nvmap_client *client = filp->private_data;

	if (!arg)
		return 0;

	nvmap_free_handle_id(client, arg);
	return 0;
}

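/*
 * Low-level maintenance primitives.  Inner (L1) operations work on kernel
 * virtual addresses via the ARM dmac_* helpers; outer (L2) operations work
 * on physical addresses.  Writeback maps to a clean, invalidate to an
 * invalidate, and writeback-invalidate to a flush.
 */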
static void inner_cache_maint(unsigned int op, void *vaddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		dmac_flush_range(vaddr, vaddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		dmac_map_area(vaddr, size, DMA_FROM_DEVICE);
	else
		dmac_map_area(vaddr, size, DMA_TO_DEVICE);
}

static void outer_cache_maint(unsigned int op, unsigned long paddr, size_t size)
{
	if (op == NVMAP_CACHE_OP_WB_INV)
		outer_flush_range(paddr, paddr + size);
	else if (op == NVMAP_CACHE_OP_INV)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

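/*
 * Walk a page-allocated handle one page at a time.  When inner maintenance
 * is requested, each page is temporarily mapped at the caller-supplied
 * scratch kernel address (kaddr/pte) before the dmac_* operation; outer
 * maintenance uses the page's physical address directly.
 */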
static void heap_page_cache_maint(struct nvmap_client *client,
	struct nvmap_handle *h, unsigned long start, unsigned long end,
	unsigned int op, bool inner, bool outer, pte_t **pte,
	unsigned long kaddr, pgprot_t prot)
{
	struct page *page;
	unsigned long paddr;
	unsigned long next;
	unsigned long off;
	size_t size;

	while (start < end) {
		page = h->pgalloc.pages[start >> PAGE_SHIFT];
		next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
		off = start & ~PAGE_MASK;
		size = next - start;
		paddr = page_to_phys(page) + off;

		if (inner) {
			void *vaddr = (void *)kaddr + off;
			BUG_ON(!pte);
			BUG_ON(!kaddr);
			set_pte_at(&init_mm, kaddr, *pte,
				   pfn_pte(__phys_to_pfn(paddr), prot));
			flush_tlb_kernel_page(kaddr);
			inner_cache_maint(op, vaddr, size);
		}

		if (outer)
			outer_cache_maint(op, paddr, size);
		start = next;
	}
}

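/*
 * For large clean/flush requests it is cheaper to clean or flush the whole
 * inner cache by set/way than to walk the range line by line.  Invalidate
 * cannot take this path (it would discard unrelated dirty lines), nor can
 * ranges below FLUSH_CLEAN_BY_SET_WAY_THRESHOLD.  Outer maintenance must
 * still be done by physical range.  Returns true if the request was handled.
 */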
static bool fast_cache_maint(struct nvmap_client *client,
	struct nvmap_handle *h,
	unsigned long start, unsigned long end, unsigned int op)
{
	bool ret = false;

	if ((op == NVMAP_CACHE_OP_INV) ||
	    ((end - start) < FLUSH_CLEAN_BY_SET_WAY_THRESHOLD))
		goto out;

	if (op == NVMAP_CACHE_OP_WB_INV)
		inner_flush_cache_all();
	else if (op == NVMAP_CACHE_OP_WB)
		inner_clean_cache_all();

	if (h->heap_pgalloc && (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)) {
		heap_page_cache_maint(client, h, start, end, op,
				      false, true, NULL, 0, 0);
	} else if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE) {
		start += h->carveout->base;
		end += h->carveout->base;
		outer_cache_maint(op, start, end - start);
	}
	ret = true;
out:
	return ret;
}

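/*
 * Top-level cache maintenance on a byte range [start, end) within a handle.
 * Uncached and write-combined handles need no work.  Otherwise try the
 * whole-cache fast path; failing that, map the handle through a scratch PTE
 * one page at a time and operate on each piece, with outer maintenance on
 * the physical range where required.
 */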
static int cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
		       unsigned long start, unsigned long end, unsigned int op)
{
	pgprot_t prot;
	pte_t **pte = NULL;
	unsigned long kaddr;
	unsigned long loop;
	int err = 0;

	h = nvmap_handle_get(h);
	if (!h)
		return -EFAULT;

	if (!h->alloc) {
		err = -EFAULT;
		goto out;
	}

	wmb();
	if (h->flags == NVMAP_HANDLE_UNCACHEABLE ||
	    h->flags == NVMAP_HANDLE_WRITE_COMBINE || start == end)
		goto out;

	if (fast_cache_maint(client, h, start, end, op))
		goto out;

	prot = nvmap_pgprot(h, pgprot_kernel);
	pte = nvmap_alloc_pte(client->dev, (void **)&kaddr);
	if (IS_ERR(pte)) {
		err = PTR_ERR(pte);
		pte = NULL;
		goto out;
	}

	if (h->heap_pgalloc) {
		heap_page_cache_maint(client, h, start, end, op, true,
				      h->flags != NVMAP_HANDLE_INNER_CACHEABLE,
				      pte, kaddr, prot);
		goto out;
	}

	if (start > h->size || end > h->size) {
		nvmap_warn(client, "cache maintenance outside handle\n");
		err = -EINVAL;
		goto out;
	}

	/* lock carveout from relocation by mapcount */
	nvmap_usecount_inc(h);

	start += h->carveout->base;
	end += h->carveout->base;

	loop = start;

	while (loop < end) {
		unsigned long next = (loop + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (loop & ~PAGE_MASK);
		next = min(next, end);

		set_pte_at(&init_mm, kaddr, *pte,
			   pfn_pte(__phys_to_pfn(loop), prot));
		flush_tlb_kernel_page(kaddr);

		inner_cache_maint(op, base, next - loop);
		loop = next;
	}

	if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_cache_maint(op, start, end - start);

	/* unlock carveout */
	nvmap_usecount_dec(h);

out:
	if (pte)
		nvmap_free_pte(client->dev, pte);
	nvmap_handle_put(h);
	return err;
}

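/*
 * Copy one element between user memory and the handle, remapping the
 * scratch kernel address onto each physical page of the handle in turn and
 * holding a page reference while it is mapped.
 */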
static int rw_handle_page(struct nvmap_handle *h, int is_read,
			  phys_addr_t start, unsigned long rw_addr,
			  unsigned long bytes, unsigned long kaddr, pte_t *pte)
{
	pgprot_t prot = nvmap_pgprot(h, pgprot_kernel);
	unsigned long end = start + bytes;
	int err = 0;

	while (!err && start < end) {
		struct page *page = NULL;
		phys_addr_t phys;
		size_t count;
		void *src;

		if (!h->heap_pgalloc) {
			phys = h->carveout->base + start;
		} else {
			page = h->pgalloc.pages[start >> PAGE_SHIFT];
			BUG_ON(!page);
			get_page(page);
			phys = page_to_phys(page) + (start & ~PAGE_MASK);
		}

		set_pte_at(&init_mm, kaddr, pte,
			   pfn_pte(__phys_to_pfn(phys), prot));
		flush_tlb_kernel_page(kaddr);

		src = (void *)kaddr + (phys & ~PAGE_MASK);
		/* copy at most to the end of the currently mapped page */
		count = min_t(size_t, end - start,
			      PAGE_SIZE - (phys & ~PAGE_MASK));

		if (is_read)
			err = copy_to_user((void *)rw_addr, src, count);
		else
			err = copy_from_user(src, (void *)rw_addr, count);

		if (err)
			err = -EFAULT;

		rw_addr += count;
		start += count;

		if (page)
			put_page(page);
	}

	return err;
}

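/*
 * Strided copy driver for the read/write ioctls.  A fully packed request
 * (element size equal to both strides) collapses into a single large
 * element.  Reads invalidate the handle range first so the CPU sees current
 * data; writes clean it afterwards so devices do.  Returns the number of
 * bytes copied, or a negative errno on failure.
 */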
static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 int is_read, unsigned long h_offs,
			 unsigned long sys_addr, unsigned long h_stride,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count)
{
	ssize_t copied = 0;
	pte_t **pte;
	void *addr;
	int ret = 0;

	if (!elem_size)
		return -EINVAL;

	if (!h->alloc)
		return -EFAULT;

	/* a fully packed transfer can be performed as one large element */
	if (elem_size == h_stride && elem_size == sys_stride) {
		elem_size *= count;
		h_stride = elem_size;
		sys_stride = elem_size;
		count = 1;
	}

	pte = nvmap_alloc_pte(client->dev, &addr);
	if (IS_ERR(pte))
		return PTR_ERR(pte);

	while (count--) {
		if (h_offs + elem_size > h->size) {
			nvmap_warn(client, "read/write outside of handle\n");
			ret = -EFAULT;
			break;
		}

		if (is_read)
			cache_maint(client, h, h_offs,
				    h_offs + elem_size, NVMAP_CACHE_OP_INV);

		ret = rw_handle_page(h, is_read, h_offs, sys_addr,
				     elem_size, (unsigned long)addr, *pte);

		if (ret)
			break;

		if (!is_read)
			cache_maint(client, h, h_offs,
				    h_offs + elem_size, NVMAP_CACHE_OP_WB);

		copied += elem_size;
		sys_addr += sys_stride;
		h_offs += h_stride;
	}

	nvmap_free_pte(client->dev, pte);
	return ret ?: copied;
}