Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap.c')
-rw-r--r-- | drivers/video/tegra/nvmap/nvmap.c | 871
1 file changed, 871 insertions, 0 deletions
diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
new file mode 100644
index 00000000000..b4b6241618d
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -0,0 +1,871 @@
1 | /* | ||
2 | * drivers/video/tegra/nvmap/nvmap.c | ||
3 | * | ||
4 | * Memory manager for Tegra GPU | ||
5 | * | ||
6 | * Copyright (c) 2009-2011, NVIDIA Corporation. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
16 | * more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License along | ||
19 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/err.h> | ||
24 | #include <linux/highmem.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/rbtree.h> | ||
27 | #include <linux/vmalloc.h> | ||
28 | #include <linux/wait.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include <asm/pgtable.h> | ||
32 | #include <asm/tlbflush.h> | ||
33 | |||
34 | #include <mach/iovmm.h> | ||
35 | #include <mach/nvmap.h> | ||
36 | |||
37 | #include "nvmap.h" | ||
38 | #include "nvmap_mru.h" | ||
39 | |||
40 | /* private nvmap_handle flag for pinning duplicate detection */ | ||
41 | #define NVMAP_HANDLE_VISITED (0x1ul << 31) | ||
42 | |||
43 | /* map the backing pages for a heap_pgalloc handle into its IOVMM area */ | ||
44 | static void map_iovmm_area(struct nvmap_handle *h) | ||
45 | { | ||
46 | tegra_iovmm_addr_t va; | ||
47 | unsigned long i; | ||
48 | |||
49 | BUG_ON(!h->heap_pgalloc || !h->pgalloc.area); | ||
50 | BUG_ON(h->size & ~PAGE_MASK); | ||
51 | WARN_ON(!h->pgalloc.dirty); | ||
52 | |||
53 | for (va = h->pgalloc.area->iovm_start, i = 0; | ||
54 | va < (h->pgalloc.area->iovm_start + h->size); | ||
55 | i++, va += PAGE_SIZE) { | ||
56 | BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i]))); | ||
57 | tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va, | ||
58 | page_to_pfn(h->pgalloc.pages[i])); | ||
59 | } | ||
60 | h->pgalloc.dirty = false; | ||
61 | } | ||
62 | |||
63 | /* must be called inside nvmap_pin_lock, to ensure that an entire stream | ||
64 | * of pins will complete without racing with a second stream. handle should | ||
65 | * have nvmap_handle_get (or nvmap_validate_get) called before calling | ||
66 | * this function. */ | ||
67 | static int pin_locked(struct nvmap_client *client, struct nvmap_handle *h) | ||
68 | { | ||
69 | struct tegra_iovmm_area *area; | ||
70 | BUG_ON(!h->alloc); | ||
71 | |||
72 | nvmap_mru_lock(client->share); | ||
73 | if (atomic_inc_return(&h->pin) == 1) { | ||
74 | if (h->heap_pgalloc && !h->pgalloc.contig) { | ||
75 | area = nvmap_handle_iovmm_locked(client, h); | ||
76 | if (!area) { | ||
77 | /* no race here, inside the pin mutex */ | ||
78 | atomic_dec(&h->pin); | ||
79 | nvmap_mru_unlock(client->share); | ||
80 | return -ENOMEM; | ||
81 | } | ||
82 | if (area != h->pgalloc.area) | ||
83 | h->pgalloc.dirty = true; | ||
84 | h->pgalloc.area = area; | ||
85 | } | ||
86 | } | ||
87 | nvmap_mru_unlock(client->share); | ||
88 | return 0; | ||
89 | } | ||
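pin_locked() must be entered with the client's share->pin_lock held and with a reference already taken on the handle. A minimal sketch of that calling convention (the wrapper name is hypothetical; the lock-and-call pattern mirrors the nvmap_alloc_iovm() path later in this file):

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_pin_one(struct nvmap_client *client, struct nvmap_handle *h)
{
	int err;

	/* serialize whole streams of pins against each other */
	err = mutex_lock_interruptible(&client->share->pin_lock);
	if (err)
		return err;

	/* h must already have been nvmap_handle_get()ed by the caller */
	err = pin_locked(client, h);
	mutex_unlock(&client->share->pin_lock);
	return err;
}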
90 | |||
91 | /* doesn't need to be called inside nvmap_pin_lock, since this will only | ||
92 | * expand the available VM area */ | ||
93 | static int handle_unpin(struct nvmap_client *client, | ||
94 | struct nvmap_handle *h, int free_vm) | ||
95 | { | ||
96 | int ret = 0; | ||
97 | nvmap_mru_lock(client->share); | ||
98 | |||
99 | if (atomic_read(&h->pin) == 0) { | ||
100 | nvmap_err(client, "%s unpinning unpinned handle %p\n", | ||
101 | current->group_leader->comm, h); | ||
102 | nvmap_mru_unlock(client->share); | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | BUG_ON(!h->alloc); | ||
107 | |||
108 | if (!atomic_dec_return(&h->pin)) { | ||
109 | if (h->heap_pgalloc && h->pgalloc.area) { | ||
110 | /* if a secure handle is clean (i.e., mapped into | ||
111 | * IOVMM), it needs to be zapped on unpin. */ | ||
112 | if (h->secure && !h->pgalloc.dirty) { | ||
113 | tegra_iovmm_zap_vm(h->pgalloc.area); | ||
114 | h->pgalloc.dirty = true; | ||
115 | } | ||
116 | if (free_vm) { | ||
117 | tegra_iovmm_free_vm(h->pgalloc.area); | ||
118 | h->pgalloc.area = NULL; | ||
119 | } else | ||
120 | nvmap_mru_insert_locked(client->share, h); | ||
121 | ret = 1; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | nvmap_mru_unlock(client->share); | ||
126 | nvmap_handle_put(h); | ||
127 | return ret; | ||
128 | } | ||
129 | |||
130 | static int pin_array_locked(struct nvmap_client *client, | ||
131 | struct nvmap_handle **h, int count) | ||
132 | { | ||
133 | int pinned; | ||
134 | int i; | ||
135 | int err = 0; | ||
136 | |||
137 | for (pinned = 0; pinned < count; pinned++) { | ||
138 | err = pin_locked(client, h[pinned]); | ||
139 | if (err) | ||
140 | break; | ||
141 | } | ||
142 | |||
143 | if (err) { | ||
144 | /* unpin pinned handles */ | ||
145 | for (i = 0; i < pinned; i++) { | ||
146 | /* inc ref counter, because | ||
147 | * handle_unpin decrements it */ | ||
148 | nvmap_handle_get(h[i]); | ||
149 | /* unpin handles and free vm */ | ||
150 | handle_unpin(client, h[i], true); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | if (err && tegra_iovmm_get_max_free(client->share->iovmm) >= | ||
155 | client->iovm_limit) { | ||
156 | /* The first attempt to pin in an empty iovmm | ||
157 | * may still fail because of fragmentation caused by | ||
158 | * placing handles in MRU areas. After such a failure | ||
159 | * the whole MRU is cleaned and its iovm space is freed. | ||
160 | * | ||
161 | * We have to retry the pinning here, since there may be | ||
162 | * no more incoming pin_wait wakeup calls from unpin | ||
163 | * operations. */ | ||
164 | for (pinned = 0; pinned < count; pinned++) { | ||
165 | err = pin_locked(client, h[pinned]); | ||
166 | if (err) | ||
167 | break; | ||
168 | } | ||
169 | if (err) { | ||
170 | pr_err("Pinning in empty iovmm failed!!!\n"); | ||
171 | BUG_ON(1); | ||
172 | } | ||
173 | } | ||
174 | return err; | ||
175 | } | ||
176 | |||
177 | static int wait_pin_array_locked(struct nvmap_client *client, | ||
178 | struct nvmap_handle **h, int count) | ||
179 | { | ||
180 | int ret = 0; | ||
181 | |||
182 | ret = pin_array_locked(client, h, count); | ||
183 | |||
184 | if (ret) { | ||
185 | ret = wait_event_interruptible(client->share->pin_wait, | ||
186 | !pin_array_locked(client, h, count)); | ||
187 | } | ||
188 | return ret ? -EINTR : 0; | ||
189 | } | ||
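The pin and unpin paths form a simple wait/wake protocol: a pin that cannot get IOVM space sleeps on share->pin_wait in wait_pin_array_locked(), and any unpin that may have released IOVM space (handle_unpin() returning nonzero) wakes the sleepers. A hedged sketch of the waker side (the wrapper is hypothetical; nvmap_unpin() later in this file does exactly this):

/* Illustrative sketch only -- not part of nvmap.c. */
static void example_unpin_and_wake(struct nvmap_client *client,
				   struct nvmap_handle *h)
{
	/* handle_unpin() returns nonzero when IOVM space may have been
	 * freed, so waiters in wait_pin_array_locked() are woken to retry */
	if (handle_unpin(client, h, false))
		wake_up(&client->share->pin_wait);
}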
190 | |||
191 | static int handle_unpin_noref(struct nvmap_client *client, unsigned long id) | ||
192 | { | ||
193 | struct nvmap_handle *h; | ||
194 | int w; | ||
195 | |||
196 | h = nvmap_validate_get(client, id); | ||
197 | if (unlikely(!h)) { | ||
198 | nvmap_err(client, "%s attempting to unpin invalid handle %p\n", | ||
199 | current->group_leader->comm, (void *)id); | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | nvmap_err(client, "%s unpinning unreferenced handle %p\n", | ||
204 | current->group_leader->comm, h); | ||
205 | WARN_ON(1); | ||
206 | |||
207 | w = handle_unpin(client, h, false); | ||
208 | nvmap_handle_put(h); | ||
209 | return w; | ||
210 | } | ||
211 | |||
212 | void nvmap_unpin_ids(struct nvmap_client *client, | ||
213 | unsigned int nr, const unsigned long *ids) | ||
214 | { | ||
215 | unsigned int i; | ||
216 | int do_wake = 0; | ||
217 | |||
218 | for (i = 0; i < nr; i++) { | ||
219 | struct nvmap_handle_ref *ref; | ||
220 | |||
221 | if (!ids[i]) | ||
222 | continue; | ||
223 | |||
224 | nvmap_ref_lock(client); | ||
225 | ref = _nvmap_validate_id_locked(client, ids[i]); | ||
226 | if (ref) { | ||
227 | struct nvmap_handle *h = ref->handle; | ||
228 | int e = atomic_add_unless(&ref->pin, -1, 0); | ||
229 | |||
230 | nvmap_ref_unlock(client); | ||
231 | |||
232 | if (!e) { | ||
233 | nvmap_err(client, "%s unpinning unpinned " | ||
234 | "handle %08lx\n", | ||
235 | current->group_leader->comm, ids[i]); | ||
236 | } else { | ||
237 | do_wake |= handle_unpin(client, h, false); | ||
238 | } | ||
239 | } else { | ||
240 | nvmap_ref_unlock(client); | ||
241 | if (client->super) | ||
242 | do_wake |= handle_unpin_noref(client, ids[i]); | ||
243 | else | ||
244 | nvmap_err(client, "%s unpinning invalid " | ||
245 | "handle %08lx\n", | ||
246 | current->group_leader->comm, ids[i]); | ||
247 | } | ||
248 | } | ||
249 | |||
250 | if (do_wake) | ||
251 | wake_up(&client->share->pin_wait); | ||
252 | } | ||
253 | |||
254 | /* pins a list of handle_ref objects; same conditions apply as to | ||
255 | * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */ | ||
256 | int nvmap_pin_ids(struct nvmap_client *client, | ||
257 | unsigned int nr, const unsigned long *ids) | ||
258 | { | ||
259 | int ret = 0; | ||
260 | unsigned int i; | ||
261 | struct nvmap_handle **h = (struct nvmap_handle **)ids; | ||
262 | struct nvmap_handle_ref *ref; | ||
263 | |||
264 | /* to optimize for the common case (client provided valid handle | ||
265 | * references and the pin succeeds), increment the handle_ref pin | ||
266 | * count during validation. in error cases, the tree will need to | ||
267 | * be re-walked, since the handle_ref pointers are not stored (which | ||
268 | * would require an extra allocation). if a handle_ref is not found, | ||
269 | * locally validate that the caller has permission to pin the handle; | ||
270 | * handle_refs are not created in this case, so it is possible that | ||
271 | * if the caller crashes after pinning a global handle, the handle | ||
272 | * will be permanently leaked. */ | ||
273 | nvmap_ref_lock(client); | ||
274 | for (i = 0; i < nr && !ret; i++) { | ||
275 | ref = _nvmap_validate_id_locked(client, ids[i]); | ||
276 | if (ref) { | ||
277 | atomic_inc(&ref->pin); | ||
278 | nvmap_handle_get(h[i]); | ||
279 | } else { | ||
280 | struct nvmap_handle *verify; | ||
281 | nvmap_ref_unlock(client); | ||
282 | verify = nvmap_validate_get(client, ids[i]); | ||
283 | if (verify) | ||
284 | nvmap_warn(client, "%s pinning unreferenced " | ||
285 | "handle %p\n", | ||
286 | current->group_leader->comm, h[i]); | ||
287 | else | ||
288 | ret = -EPERM; | ||
289 | nvmap_ref_lock(client); | ||
290 | } | ||
291 | } | ||
292 | nvmap_ref_unlock(client); | ||
293 | |||
294 | nr = i; | ||
295 | |||
296 | if (ret) | ||
297 | goto out; | ||
298 | |||
299 | ret = mutex_lock_interruptible(&client->share->pin_lock); | ||
300 | if (WARN_ON(ret)) | ||
301 | goto out; | ||
302 | |||
303 | ret = wait_pin_array_locked(client, h, nr); | ||
304 | |||
305 | mutex_unlock(&client->share->pin_lock); | ||
306 | |||
307 | if (ret) { | ||
308 | ret = -EINTR; | ||
309 | } else { | ||
310 | for (i = 0; i < nr; i++) { | ||
311 | if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty) | ||
312 | map_iovmm_area(h[i]); | ||
313 | } | ||
314 | } | ||
315 | |||
316 | out: | ||
317 | if (ret) { | ||
318 | nvmap_ref_lock(client); | ||
319 | for (i = 0; i < nr; i++) { | ||
320 | ref = _nvmap_validate_id_locked(client, ids[i]); | ||
321 | if (!ref) { | ||
322 | nvmap_warn(client, "%s freed handle %p " | ||
323 | "during pinning\n", | ||
324 | current->group_leader->comm, | ||
325 | (void *)ids[i]); | ||
326 | continue; | ||
327 | } | ||
328 | atomic_dec(&ref->pin); | ||
329 | } | ||
330 | nvmap_ref_unlock(client); | ||
331 | |||
332 | for (i = 0; i < nr; i++) | ||
333 | nvmap_handle_put(h[i]); | ||
334 | } | ||
335 | |||
336 | return ret; | ||
337 | } | ||
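A hedged usage sketch of the ID-based pin/unpin pair; the wrapper, ID array, and hardware step are placeholders, only nvmap_pin_ids()/nvmap_unpin_ids() and their error behaviour come from this file:

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_pin_by_id(struct nvmap_client *client,
			     const unsigned long *ids, unsigned int nr)
{
	int err;

	err = nvmap_pin_ids(client, nr, ids);
	if (err)
		return err;	/* -EPERM or -EINTR; nothing is left pinned */

	/* ... program the hardware with the pinned buffers ... */

	nvmap_unpin_ids(client, nr, ids);
	return 0;
}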
338 | |||
339 | static phys_addr_t handle_phys(struct nvmap_handle *h) | ||
340 | { | ||
341 | phys_addr_t addr; | ||
342 | |||
343 | if (h->heap_pgalloc && h->pgalloc.contig) { | ||
344 | addr = page_to_phys(h->pgalloc.pages[0]); | ||
345 | } else if (h->heap_pgalloc) { | ||
346 | BUG_ON(!h->pgalloc.area); | ||
347 | addr = h->pgalloc.area->iovm_start; | ||
348 | } else { | ||
349 | addr = h->carveout->base; | ||
350 | } | ||
351 | |||
352 | return addr; | ||
353 | } | ||
354 | |||
355 | /* stores the physical address (+offset) of each handle relocation entry | ||
356 | * into its output location. see nvmap_pin_array for more details. | ||
357 | * | ||
358 | * each entry in arr (i.e., each relocation request) specifies two handles: | ||
359 | * the handle to pin (pin), and the handle where the address of pin should be | ||
360 | * written (patch). in pseudocode, this loop basically looks like: | ||
361 | * | ||
362 | * for (i = 0; i < nr; i++) { | ||
363 | * (pin, pin_offset, patch, patch_offset) = arr[i]; | ||
364 | * patch[patch_offset] = address_of(pin) + pin_offset; | ||
365 | * } | ||
366 | */ | ||
367 | static int nvmap_reloc_pin_array(struct nvmap_client *client, | ||
368 | const struct nvmap_pinarray_elem *arr, | ||
369 | int nr, struct nvmap_handle *gather) | ||
370 | { | ||
371 | struct nvmap_handle *last_patch = NULL; | ||
372 | unsigned int last_pfn = 0; | ||
373 | pte_t **pte; | ||
374 | void *addr; | ||
375 | int i; | ||
376 | |||
377 | pte = nvmap_alloc_pte(client->dev, &addr); | ||
378 | if (IS_ERR(pte)) | ||
379 | return PTR_ERR(pte); | ||
380 | |||
381 | for (i = 0; i < nr; i++) { | ||
382 | struct nvmap_handle *patch; | ||
383 | struct nvmap_handle *pin; | ||
384 | phys_addr_t reloc_addr; | ||
385 | phys_addr_t phys; | ||
386 | unsigned int pfn; | ||
387 | |||
388 | /* all of the handles are validated and get'ted prior to | ||
389 | * calling this function, so casting is safe here */ | ||
390 | pin = (struct nvmap_handle *)arr[i].pin_mem; | ||
391 | |||
392 | if (arr[i].patch_mem == (unsigned long)last_patch) { | ||
393 | patch = last_patch; | ||
394 | } else if (arr[i].patch_mem == (unsigned long)gather) { | ||
395 | patch = gather; | ||
396 | } else { | ||
397 | if (last_patch) | ||
398 | nvmap_handle_put(last_patch); | ||
399 | |||
400 | patch = nvmap_get_handle_id(client, arr[i].patch_mem); | ||
401 | if (!patch) { | ||
402 | nvmap_free_pte(client->dev, pte); | ||
403 | return -EPERM; | ||
404 | } | ||
405 | last_patch = patch; | ||
406 | } | ||
407 | |||
408 | if (patch->heap_pgalloc) { | ||
409 | unsigned int page = arr[i].patch_offset >> PAGE_SHIFT; | ||
410 | phys = page_to_phys(patch->pgalloc.pages[page]); | ||
411 | phys += (arr[i].patch_offset & ~PAGE_MASK); | ||
412 | } else { | ||
413 | phys = patch->carveout->base + arr[i].patch_offset; | ||
414 | } | ||
415 | |||
416 | pfn = __phys_to_pfn(phys); | ||
417 | if (pfn != last_pfn) { | ||
418 | pgprot_t prot = nvmap_pgprot(patch, pgprot_kernel); | ||
419 | unsigned long kaddr = (unsigned long)addr; | ||
420 | set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot)); | ||
421 | flush_tlb_kernel_page(kaddr); | ||
422 | last_pfn = pfn; | ||
423 | } | ||
424 | |||
425 | reloc_addr = handle_phys(pin) + arr[i].pin_offset; | ||
426 | reloc_addr >>= arr[i].reloc_shift; | ||
427 | __raw_writel(reloc_addr, addr + (phys & ~PAGE_MASK)); | ||
428 | } | ||
429 | |||
430 | nvmap_free_pte(client->dev, pte); | ||
431 | |||
432 | if (last_patch) | ||
433 | nvmap_handle_put(last_patch); | ||
434 | |||
435 | wmb(); | ||
436 | |||
437 | return 0; | ||
438 | } | ||
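To make the pseudocode above concrete, here is a hedged sketch of filling one relocation entry; the field names are taken from the nvmap_pinarray_elem accesses in this function, while the IDs and offsets are arbitrary placeholders:

/* Illustrative sketch only -- not part of nvmap.c.
 * Encodes: "write (address_of(texture) + 0x100) >> 0 into cmdbuf at 0x40". */
static void example_fill_reloc(struct nvmap_pinarray_elem *elem,
			       unsigned long cmdbuf_id,
			       unsigned long texture_id)
{
	elem->patch_mem    = cmdbuf_id;		/* handle that gets patched */
	elem->patch_offset = 0x40;		/* byte offset of the word to patch */
	elem->pin_mem      = texture_id;	/* handle whose address is written */
	elem->pin_offset   = 0x100;		/* offset added to the pinned address */
	elem->reloc_shift  = 0;			/* right shift applied to the result */
}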
439 | |||
440 | static int nvmap_validate_get_pin_array(struct nvmap_client *client, | ||
441 | const struct nvmap_pinarray_elem *arr, | ||
442 | int nr, struct nvmap_handle **h) | ||
443 | { | ||
444 | int i; | ||
445 | int ret = 0; | ||
446 | int count = 0; | ||
447 | |||
448 | nvmap_ref_lock(client); | ||
449 | |||
450 | for (i = 0; i < nr; i++) { | ||
451 | struct nvmap_handle_ref *ref; | ||
452 | |||
453 | if (need_resched()) { | ||
454 | nvmap_ref_unlock(client); | ||
455 | schedule(); | ||
456 | nvmap_ref_lock(client); | ||
457 | } | ||
458 | |||
459 | ref = _nvmap_validate_id_locked(client, arr[i].pin_mem); | ||
460 | |||
461 | if (!ref) | ||
462 | nvmap_warn(client, "failed to validate id\n"); | ||
463 | else if (!ref->handle) | ||
464 | nvmap_warn(client, "id had no associated handle\n"); | ||
465 | else if (!ref->handle->alloc) | ||
466 | nvmap_warn(client, "handle had no allocation\n"); | ||
467 | |||
468 | if (!ref || !ref->handle || !ref->handle->alloc) { | ||
469 | ret = -EPERM; | ||
470 | break; | ||
471 | } | ||
472 | |||
473 | /* a handle may be referenced multiple times in arr, but | ||
474 | * it will only be pinned once; this ensures that the | ||
475 | * minimum number of sync-queue slots in the host driver | ||
476 | * is dedicated to storing unpin lists, which allows | ||
477 | * for greater parallelism between the CPU and graphics | ||
478 | * processor */ | ||
479 | if (ref->handle->flags & NVMAP_HANDLE_VISITED) | ||
480 | continue; | ||
481 | |||
482 | ref->handle->flags |= NVMAP_HANDLE_VISITED; | ||
483 | |||
484 | h[count] = nvmap_handle_get(ref->handle); | ||
485 | BUG_ON(!h[count]); | ||
486 | count++; | ||
487 | } | ||
488 | |||
489 | nvmap_ref_unlock(client); | ||
490 | |||
491 | if (ret) { | ||
492 | for (i = 0; i < count; i++) { | ||
493 | h[i]->flags &= ~NVMAP_HANDLE_VISITED; | ||
494 | nvmap_handle_put(h[i]); | ||
495 | } | ||
496 | } | ||
497 | |||
498 | return ret ?: count; | ||
499 | } | ||
500 | |||
501 | /* a typical mechanism host1x clients use to drive the Tegra graphics | ||
502 | * processor is to build a command buffer which contains relocatable | ||
503 | * memory handle commands, and to rely on the kernel to convert these | ||
504 | * in-place to addresses which are understood by the GPU hardware. | ||
505 | * | ||
506 | * this is implemented by having clients provide a sideband array | ||
507 | * of relocatable handles (+ offsets) and the location in the command | ||
508 | * buffer handle to patch with the GPU address when the client submits | ||
509 | * its command buffer to the host1x driver. | ||
510 | * | ||
511 | * the host driver also uses this relocation mechanism internally to | ||
512 | * relocate the client's (unpinned) command buffers into host-addressable | ||
513 | * memory. | ||
514 | * | ||
515 | * @client: nvmap_client which should be used for validation; should be | ||
516 | * owned by the process which is submitting command buffers | ||
517 | * @gather: special handle for relocated command buffer outputs used | ||
518 | * internally by the host driver. if this handle is encountered | ||
519 | * as an output handle in the relocation array, it is assumed | ||
520 | * to be a known-good output and is not validated. | ||
521 | * @arr: array of ((relocatable handle, offset), (output handle, offset)) | ||
522 | * tuples. | ||
523 | * @nr: number of entries in arr | ||
524 | * @unique_arr: list of nvmap_handle objects which were pinned by | ||
525 | * nvmap_pin_array. must be unpinned by the caller after the | ||
526 | * command buffers referenced in gather have completed. | ||
527 | */ | ||
528 | int nvmap_pin_array(struct nvmap_client *client, struct nvmap_handle *gather, | ||
529 | const struct nvmap_pinarray_elem *arr, int nr, | ||
530 | struct nvmap_handle **unique_arr) | ||
531 | { | ||
532 | int count = 0; | ||
533 | int ret = 0; | ||
534 | int i; | ||
535 | |||
536 | if (mutex_lock_interruptible(&client->share->pin_lock)) { | ||
537 | nvmap_warn(client, "%s interrupted when acquiring pin lock\n", | ||
538 | current->group_leader->comm); | ||
539 | return -EINTR; | ||
540 | } | ||
541 | |||
542 | count = nvmap_validate_get_pin_array(client, arr, nr, unique_arr); | ||
543 | if (count < 0) { | ||
544 | mutex_unlock(&client->share->pin_lock); | ||
545 | nvmap_warn(client, "failed to validate pin array\n"); | ||
546 | return count; | ||
547 | } | ||
548 | |||
549 | for (i = 0; i < count; i++) | ||
550 | unique_arr[i]->flags &= ~NVMAP_HANDLE_VISITED; | ||
551 | |||
552 | ret = wait_pin_array_locked(client, unique_arr, count); | ||
553 | |||
554 | mutex_unlock(&client->share->pin_lock); | ||
555 | |||
556 | if (!ret) | ||
557 | ret = nvmap_reloc_pin_array(client, arr, nr, gather); | ||
558 | |||
559 | if (WARN_ON(ret)) { | ||
560 | for (i = 0; i < count; i++) | ||
561 | nvmap_handle_put(unique_arr[i]); | ||
562 | return ret; | ||
563 | } else { | ||
564 | for (i = 0; i < count; i++) { | ||
565 | if (unique_arr[i]->heap_pgalloc && | ||
566 | unique_arr[i]->pgalloc.dirty) | ||
567 | map_iovmm_area(unique_arr[i]); | ||
568 | } | ||
569 | } | ||
570 | |||
571 | return count; | ||
572 | } | ||
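A hedged end-to-end sketch of a submit path built on nvmap_pin_array(); the gather handle, relocation array, and completion step are placeholders, and real callers keep the handles pinned until the hardware signals completion rather than unpinning inline:

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_submit(struct nvmap_client *client,
			  struct nvmap_handle *gather,
			  const struct nvmap_pinarray_elem *relocs, int nr)
{
	struct nvmap_handle **unique;
	int count;

	/* worst case: every relocation names a distinct handle */
	unique = kcalloc(nr, sizeof(*unique), GFP_KERNEL);
	if (!unique)
		return -ENOMEM;

	count = nvmap_pin_array(client, gather, relocs, nr, unique);
	if (count < 0) {
		kfree(unique);
		return count;
	}

	/* ... submit the gather to the hardware and wait for it ... */

	nvmap_unpin_handles(client, unique, count);
	kfree(unique);
	return 0;
}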
573 | |||
574 | phys_addr_t nvmap_pin(struct nvmap_client *client, | ||
575 | struct nvmap_handle_ref *ref) | ||
576 | { | ||
577 | struct nvmap_handle *h; | ||
578 | phys_addr_t phys; | ||
579 | int ret = 0; | ||
580 | |||
581 | h = nvmap_handle_get(ref->handle); | ||
582 | if (WARN_ON(!h)) | ||
583 | return -EINVAL; | ||
584 | |||
585 | atomic_inc(&ref->pin); | ||
586 | |||
587 | if (WARN_ON(mutex_lock_interruptible(&client->share->pin_lock))) { | ||
588 | ret = -EINTR; | ||
589 | } else { | ||
590 | ret = wait_pin_array_locked(client, &h, 1); | ||
591 | mutex_unlock(&client->share->pin_lock); | ||
592 | } | ||
593 | |||
594 | if (ret) { | ||
595 | atomic_dec(&ref->pin); | ||
596 | nvmap_handle_put(h); | ||
597 | } else { | ||
598 | if (h->heap_pgalloc && h->pgalloc.dirty) | ||
599 | map_iovmm_area(h); | ||
600 | phys = handle_phys(h); | ||
601 | } | ||
602 | |||
603 | return ret ?: phys; | ||
604 | } | ||
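nvmap_pin() folds its error code into the returned phys_addr_t (via return ret ?: phys), so callers have to treat small negative values as errors; a hedged sketch, assuming the error encoding fits in phys_addr_t just as nvmap_pin() itself assumes:

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_pin_ref(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref)
{
	phys_addr_t addr = nvmap_pin(client, ref);

	if (IS_ERR_VALUE(addr))
		return (int)addr;	/* -EINVAL or -EINTR */

	/* ... hand addr to the hardware ... */

	nvmap_unpin(client, ref);
	return 0;
}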
605 | |||
606 | phys_addr_t nvmap_handle_address(struct nvmap_client *c, unsigned long id) | ||
607 | { | ||
608 | struct nvmap_handle *h; | ||
609 | phys_addr_t phys; | ||
610 | |||
611 | h = nvmap_get_handle_id(c, id); | ||
612 | if (!h) | ||
613 | return -EPERM; | ||
614 | mutex_lock(&h->lock); | ||
615 | phys = handle_phys(h); | ||
616 | mutex_unlock(&h->lock); | ||
617 | nvmap_handle_put(h); | ||
618 | |||
619 | return phys; | ||
620 | } | ||
621 | |||
622 | void nvmap_unpin(struct nvmap_client *client, struct nvmap_handle_ref *ref) | ||
623 | { | ||
624 | if (!ref) | ||
625 | return; | ||
626 | |||
627 | atomic_dec(&ref->pin); | ||
628 | if (handle_unpin(client, ref->handle, false)) | ||
629 | wake_up(&client->share->pin_wait); | ||
630 | } | ||
631 | |||
632 | void nvmap_unpin_handles(struct nvmap_client *client, | ||
633 | struct nvmap_handle **h, int nr) | ||
634 | { | ||
635 | int i; | ||
636 | int do_wake = 0; | ||
637 | |||
638 | for (i = 0; i < nr; i++) { | ||
639 | if (WARN_ON(!h[i])) | ||
640 | continue; | ||
641 | do_wake |= handle_unpin(client, h[i], false); | ||
642 | } | ||
643 | |||
644 | if (do_wake) | ||
645 | wake_up(&client->share->pin_wait); | ||
646 | } | ||
647 | |||
648 | void *nvmap_mmap(struct nvmap_handle_ref *ref) | ||
649 | { | ||
650 | struct nvmap_handle *h; | ||
651 | pgprot_t prot; | ||
652 | unsigned long adj_size; | ||
653 | unsigned long offs; | ||
654 | struct vm_struct *v; | ||
655 | void *p; | ||
656 | |||
657 | h = nvmap_handle_get(ref->handle); | ||
658 | if (!h) | ||
659 | return NULL; | ||
660 | |||
661 | prot = nvmap_pgprot(h, pgprot_kernel); | ||
662 | |||
663 | if (h->heap_pgalloc) | ||
664 | return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, | ||
665 | -1, prot); | ||
666 | |||
667 | /* carveout - explicitly map the pfns into a vmalloc area */ | ||
668 | |||
669 | nvmap_usecount_inc(h); | ||
670 | |||
671 | adj_size = h->carveout->base & ~PAGE_MASK; | ||
672 | adj_size += h->size; | ||
673 | adj_size = PAGE_ALIGN(adj_size); | ||
674 | |||
675 | v = alloc_vm_area(adj_size); | ||
676 | if (!v) { | ||
677 | nvmap_usecount_dec(h); | ||
678 | nvmap_handle_put(h); | ||
679 | return NULL; | ||
680 | } | ||
681 | |||
682 | p = v->addr + (h->carveout->base & ~PAGE_MASK); | ||
683 | |||
684 | for (offs = 0; offs < adj_size; offs += PAGE_SIZE) { | ||
685 | unsigned long addr = (unsigned long) v->addr + offs; | ||
686 | unsigned int pfn; | ||
687 | pgd_t *pgd; | ||
688 | pud_t *pud; | ||
689 | pmd_t *pmd; | ||
690 | pte_t *pte; | ||
691 | |||
692 | pfn = __phys_to_pfn(h->carveout->base + offs); | ||
693 | pgd = pgd_offset_k(addr); | ||
694 | pud = pud_alloc(&init_mm, pgd, addr); | ||
695 | if (!pud) | ||
696 | break; | ||
697 | pmd = pmd_alloc(&init_mm, pud, addr); | ||
698 | if (!pmd) | ||
699 | break; | ||
700 | pte = pte_alloc_kernel(pmd, addr); | ||
701 | if (!pte) | ||
702 | break; | ||
703 | set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot)); | ||
704 | flush_tlb_kernel_page(addr); | ||
705 | } | ||
706 | |||
707 | if (offs != adj_size) { | ||
708 | free_vm_area(v); | ||
709 | nvmap_usecount_dec(h); | ||
710 | nvmap_handle_put(h); | ||
711 | return NULL; | ||
712 | } | ||
713 | |||
714 | /* leave the handle ref count incremented by 1, so that | ||
715 | * the handle will not be freed while the kernel mapping exists. | ||
716 | * nvmap_handle_put will be called by unmapping this address */ | ||
717 | return p; | ||
718 | } | ||
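A hedged sketch of the kernel-mapping pair; nvmap_mmap()/nvmap_munmap() come from this file, while the memset stands in for whatever CPU access the caller actually needs:

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_cpu_clear(struct nvmap_handle_ref *ref, size_t size)
{
	void *va = nvmap_mmap(ref);	/* takes an extra handle reference */

	if (!va)
		return -ENOMEM;

	memset(va, 0, size);		/* any CPU access to the buffer */

	nvmap_munmap(ref, va);		/* unmaps and drops the reference */
	return 0;
}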
719 | |||
720 | void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr) | ||
721 | { | ||
722 | struct nvmap_handle *h; | ||
723 | |||
724 | if (!ref) | ||
725 | return; | ||
726 | |||
727 | h = ref->handle; | ||
728 | |||
729 | if (h->heap_pgalloc) { | ||
730 | vm_unmap_ram(addr, h->size >> PAGE_SHIFT); | ||
731 | } else { | ||
732 | struct vm_struct *vm; | ||
733 | addr -= (h->carveout->base & ~PAGE_MASK); | ||
734 | vm = remove_vm_area(addr); | ||
735 | BUG_ON(!vm); | ||
736 | kfree(vm); | ||
737 | nvmap_usecount_dec(h); | ||
738 | } | ||
739 | nvmap_handle_put(h); | ||
740 | } | ||
741 | |||
742 | struct nvmap_handle_ref *nvmap_alloc(struct nvmap_client *client, size_t size, | ||
743 | size_t align, unsigned int flags, | ||
744 | unsigned int heap_mask) | ||
745 | { | ||
746 | const unsigned int default_heap = (NVMAP_HEAP_SYSMEM | | ||
747 | NVMAP_HEAP_CARVEOUT_GENERIC); | ||
748 | struct nvmap_handle_ref *r = NULL; | ||
749 | int err; | ||
750 | |||
751 | if (heap_mask == 0) | ||
752 | heap_mask = default_heap; | ||
753 | |||
754 | r = nvmap_create_handle(client, size); | ||
755 | if (IS_ERR(r)) | ||
756 | return r; | ||
757 | |||
758 | err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r), | ||
759 | heap_mask, align, flags); | ||
760 | |||
761 | if (err) { | ||
762 | nvmap_free_handle_id(client, nvmap_ref_to_id(r)); | ||
763 | return ERR_PTR(err); | ||
764 | } | ||
765 | |||
766 | return r; | ||
767 | } | ||
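A hedged allocation sketch; the size, alignment, and zero flags value are placeholders, and a heap_mask of 0 falls back to the default SYSMEM/carveout mask defined above:

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_alloc(struct nvmap_client *client)
{
	struct nvmap_handle_ref *r;

	r = nvmap_alloc(client, 4 * PAGE_SIZE, PAGE_SIZE,
			0 /* flags */, 0 /* heap_mask: use default_heap */);
	if (IS_ERR(r))
		return PTR_ERR(r);

	/* ... use the buffer via nvmap_pin(), nvmap_mmap(), etc. ... */

	nvmap_free(client, r);
	return 0;
}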
768 | |||
769 | /* allocates memory with the specified iovm_start address. */ | ||
770 | struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client, | ||
771 | size_t size, size_t align, unsigned int flags, unsigned int iovm_start) | ||
772 | { | ||
773 | int err; | ||
774 | struct nvmap_handle *h; | ||
775 | struct nvmap_handle_ref *r; | ||
776 | const unsigned int default_heap = NVMAP_HEAP_IOVMM; | ||
777 | |||
778 | /* size needs to be more than one page, | ||
779 | * otherwise the heap preference would change to the system heap. | ||
780 | */ | ||
781 | if (size <= PAGE_SIZE) | ||
782 | size = PAGE_SIZE << 1; | ||
783 | r = nvmap_create_handle(client, size); | ||
784 | if (IS_ERR_OR_NULL(r)) | ||
785 | return r; | ||
786 | |||
787 | h = r->handle; | ||
788 | h->pgalloc.iovm_addr = iovm_start; | ||
789 | err = nvmap_alloc_handle_id(client, nvmap_ref_to_id(r), | ||
790 | default_heap, align, flags); | ||
791 | if (err) | ||
792 | goto fail; | ||
793 | |||
794 | err = mutex_lock_interruptible(&client->share->pin_lock); | ||
795 | if (WARN_ON(err)) | ||
796 | goto fail; | ||
797 | err = pin_locked(client, h); | ||
798 | mutex_unlock(&client->share->pin_lock); | ||
799 | if (err) | ||
800 | goto fail; | ||
801 | return r; | ||
802 | |||
803 | fail: | ||
804 | nvmap_free_handle_id(client, nvmap_ref_to_id(r)); | ||
805 | return ERR_PTR(err); | ||
806 | } | ||
807 | |||
808 | void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
809 | { | ||
810 | unsigned long ref_id = nvmap_ref_to_id(r); | ||
811 | |||
812 | nvmap_unpin_ids(client, 1, &ref_id); | ||
813 | nvmap_free_handle_id(client, ref_id); | ||
814 | } | ||
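The IOVM-specific pair above is used like this (hedged sketch; the fixed IOVM start address is a placeholder that must match what the device expects, and the handle comes back already pinned there):

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_alloc_at(struct nvmap_client *client,
			    unsigned int iovm_start)
{
	struct nvmap_handle_ref *r;

	r = nvmap_alloc_iovm(client, 2 * PAGE_SIZE, PAGE_SIZE,
			     0 /* flags */, iovm_start);
	if (IS_ERR_OR_NULL(r))
		return r ? PTR_ERR(r) : -ENOMEM;

	/* ... the device accesses the buffer at iovm_start ... */

	nvmap_free_iovm(client, r);	/* unpins and frees in one call */
	return 0;
}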
815 | |||
816 | void nvmap_free(struct nvmap_client *client, struct nvmap_handle_ref *r) | ||
817 | { | ||
818 | if (!r) | ||
819 | return; | ||
820 | |||
821 | nvmap_free_handle_id(client, nvmap_ref_to_id(r)); | ||
822 | } | ||
823 | |||
824 | /* | ||
825 | * create a mapping to the user's buffer and write it | ||
826 | * (uses logic similar to nvmap_reloc_pin_array to map the cmdbuf) | ||
827 | */ | ||
828 | int nvmap_patch_word(struct nvmap_client *client, | ||
829 | struct nvmap_handle *patch, | ||
830 | u32 patch_offset, u32 patch_value) | ||
831 | { | ||
832 | phys_addr_t phys; | ||
833 | unsigned long kaddr; | ||
834 | unsigned int pfn; | ||
835 | void *addr; | ||
836 | pte_t **pte; | ||
837 | pgprot_t prot; | ||
838 | |||
839 | if (patch_offset >= patch->size) { | ||
840 | nvmap_warn(client, "read/write outside of handle\n"); | ||
841 | return -EFAULT; | ||
842 | } | ||
843 | |||
844 | pte = nvmap_alloc_pte(client->dev, &addr); | ||
845 | if (IS_ERR(pte)) | ||
846 | return PTR_ERR(pte); | ||
847 | |||
848 | /* derive physaddr of cmdbuf WAIT to patch */ | ||
849 | if (patch->heap_pgalloc) { | ||
850 | unsigned int page = patch_offset >> PAGE_SHIFT; | ||
851 | phys = page_to_phys(patch->pgalloc.pages[page]); | ||
852 | phys += (patch_offset & ~PAGE_MASK); | ||
853 | } else { | ||
854 | phys = patch->carveout->base + patch_offset; | ||
855 | } | ||
856 | |||
857 | pfn = __phys_to_pfn(phys); | ||
858 | prot = nvmap_pgprot(patch, pgprot_kernel); | ||
859 | kaddr = (unsigned long)addr; | ||
860 | |||
861 | /* write PTE, so addr points to cmdbuf PFN */ | ||
862 | set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, prot)); | ||
863 | flush_tlb_kernel_page(kaddr); | ||
864 | |||
865 | /* write patch_value to addr + page offset */ | ||
866 | __raw_writel(patch_value, addr + (phys & ~PAGE_MASK)); | ||
867 | |||
868 | nvmap_free_pte(client->dev, pte); | ||
869 | wmb(); | ||
870 | return 0; | ||
871 | } | ||
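Finally, a hedged sketch of nvmap_patch_word(); the offset and value are placeholders, and the handle is assumed to have been validated and referenced by the caller, as the function itself expects:

/* Illustrative sketch only -- not part of nvmap.c. */
static int example_patch_wait(struct nvmap_client *client,
			      struct nvmap_handle *cmdbuf)
{
	/* rewrite the 32-bit word at byte offset 0x80, e.g. a WAIT argument */
	return nvmap_patch_word(client, cmdbuf, 0x80, 0x1234);
}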