author    Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (HEAD, master)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/arm/mach-tegra/iovmm.c
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12

Added missing tegra files.
Diffstat (limited to 'arch/arm/mach-tegra/iovmm.c')
-rw-r--r--	arch/arm/mach-tegra/iovmm.c	943
1 file changed, 943 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mach-tegra/iovmm.c b/arch/arm/mach-tegra/iovmm.c
new file mode 100644
index 00000000000..6112128cb74
--- /dev/null
+++ b/arch/arm/mach-tegra/iovmm.c
@@ -0,0 +1,943 @@
/*
 * arch/arm/mach-tegra/iovmm.c
 *
 * Tegra I/O VM manager
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <mach/iovmm.h>

/*
 * after the best-fit block is located, the remaining pages not needed
 * for the allocation will be split into a new free block if the
 * number of remaining pages is >= MIN_SPLIT_PAGE.
 */
#define MIN_SPLIT_PAGE		4
#define MIN_SPLIT_BYTES(_d)	(MIN_SPLIT_PAGE << (_d)->dev->pgsize_bits)
#define NO_SPLIT(m)		((m) < MIN_SPLIT_BYTES(domain))
#define DO_SPLIT(m)		((m) >= MIN_SPLIT_BYTES(domain))
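/*
 * Example (hypothetical page size): with pgsize_bits == 12 (4 KiB pages),
 * MIN_SPLIT_BYTES is 16 KiB, so a 20 KiB remainder becomes a new free
 * block while a 12 KiB remainder stays attached to the allocation.
 * Note that NO_SPLIT()/DO_SPLIT() expand a variable named 'domain' from
 * the caller's scope.
 */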

#define iovmm_start(_b)		((_b)->vm_area.iovm_start)
#define iovmm_length(_b)	((_b)->vm_area.iovm_length)
#define iovmm_end(_b)		(iovmm_start(_b) + iovmm_length(_b))

/* flags for the block */
#define BK_FREE		0 /* indicates free mappings */
#define BK_MAP_DIRTY	1 /* used by demand-loaded mappings */

/* flags for the client */
#define CL_LOCKED	0

/* flags for the domain */
#define DM_MAP_DIRTY	0

struct tegra_iovmm_block {
	struct tegra_iovmm_area vm_area;
	tegra_iovmm_addr_t start;
	size_t length;
	atomic_t ref;
	unsigned long flags;
	unsigned long poison;
	struct rb_node free_node;
	struct rb_node all_node;
};

struct iovmm_share_group {
	const char *name;
	struct tegra_iovmm_domain *domain;
	struct list_head client_list;
	struct list_head group_list;
	spinlock_t lock; /* for client_list */
};

static LIST_HEAD(iovmm_devices);
static LIST_HEAD(iovmm_groups);
static DEFINE_MUTEX(iovmm_group_list_lock);
static struct kmem_cache *iovmm_cache;

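/*
 * SIMALIGN(b, a): number of bytes to skip from the start of block b so
 * that the resulting address is aligned to a (0 if already aligned).
 */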
#define SIMALIGN(b, a) (((b)->start % (a)) ? ((a) - ((b)->start % (a))) : 0)

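/* return the length of the largest free block in the client's domain */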
size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_domain *domain = client->domain;
	tegra_iovmm_addr_t max_free = 0;

	spin_lock(&domain->block_lock);
	n = rb_first(&domain->all_blocks);
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		n = rb_next(n);
		if (test_bit(BK_FREE, &b->flags)) {
			max_free = max_t(tegra_iovmm_addr_t,
				max_free, iovmm_length(b));
		}
	}
	spin_unlock(&domain->block_lock);
	return max_free;
}

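/*
 * gather per-domain block statistics; all counters are returned through
 * the out parameters, and block_lock is taken internally.
 */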
static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
	unsigned int *num_blocks, unsigned int *num_free,
	size_t *total, size_t *total_free, size_t *max_free)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b;

	*num_blocks = 0;
	*num_free = 0;
	*total = 0;
	*total_free = 0;
	*max_free = 0;

	spin_lock(&domain->block_lock);
	n = rb_first(&domain->all_blocks);
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		n = rb_next(n);
		(*num_blocks)++;
		*total += b->length;
		if (test_bit(BK_FREE, &b->flags)) {
			(*num_free)++;
			*total_free += b->length;
			*max_free = max_t(size_t, *max_free, b->length);
		}
	}
	spin_unlock(&domain->block_lock);
}

static int tegra_iovmm_read_proc(char *page, char **start, off_t off,
	int count, int *eof, void *data)
{
	struct iovmm_share_group *grp;
	size_t max_free, total_free, total;
	unsigned int num, num_free;

	int len = 0;

	mutex_lock(&iovmm_group_list_lock);
	len += snprintf(page + len, count - len, "\ngroups\n");
	if (list_empty(&iovmm_groups))
		len += snprintf(page + len, count - len, "\t<empty>\n");
	else {
		list_for_each_entry(grp, &iovmm_groups, group_list) {
			len += snprintf(page + len, count - len,
				"\t%s (device: %s)\n",
				grp->name ? grp->name : "<unnamed>",
				grp->domain->dev->name);
			tegra_iovmm_block_stats(grp->domain, &num,
				&num_free, &total, &total_free, &max_free);
			total >>= 10;
			total_free >>= 10;
			max_free >>= 10;
			len += snprintf(page + len, count - len,
				"\t\tsize: %zuKiB free: %zuKiB "
				"largest: %zuKiB (%u free / %u total blocks)\n",
				total, total_free, max_free, num_free, num);
		}
	}
	mutex_unlock(&iovmm_group_list_lock);

	*eof = 1;
	return len;
}

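/* drop a reference to a block; the final put poisons and frees it */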
static void iovmm_block_put(struct tegra_iovmm_block *b)
{
	BUG_ON(b->poison);
	BUG_ON(atomic_read(&b->ref) == 0);
	if (!atomic_dec_return(&b->ref)) {
		b->poison = 0xa5a5a5a5;
		kmem_cache_free(iovmm_cache, b);
	}
}

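/*
 * return a block to the domain: merge it with free address-order
 * neighbors, then reinsert the result into the length-sorted free tree.
 */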
static void iovmm_free_block(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_block *block)
{
	struct tegra_iovmm_block *pred = NULL; /* address-order predecessor */
	struct tegra_iovmm_block *succ = NULL; /* address-order successor */
	struct rb_node **p;
	struct rb_node *parent = NULL, *temp;
	int pred_free = 0, succ_free = 0;

	iovmm_block_put(block);

	spin_lock(&domain->block_lock);
	temp = rb_prev(&block->all_node);
	if (temp)
		pred = rb_entry(temp, struct tegra_iovmm_block, all_node);
	temp = rb_next(&block->all_node);
	if (temp)
		succ = rb_entry(temp, struct tegra_iovmm_block, all_node);

	if (pred)
		pred_free = test_bit(BK_FREE, &pred->flags);
	if (succ)
		succ_free = test_bit(BK_FREE, &succ->flags);

	if (pred_free && succ_free) {
		pred->length += block->length;
		pred->length += succ->length;
		rb_erase(&block->all_node, &domain->all_blocks);
		rb_erase(&succ->all_node, &domain->all_blocks);
		rb_erase(&succ->free_node, &domain->free_blocks);
		rb_erase(&pred->free_node, &domain->free_blocks);
		iovmm_block_put(block);
		iovmm_block_put(succ);
		block = pred;
	} else if (pred_free) {
		pred->length += block->length;
		rb_erase(&block->all_node, &domain->all_blocks);
		rb_erase(&pred->free_node, &domain->free_blocks);
		iovmm_block_put(block);
		block = pred;
	} else if (succ_free) {
		block->length += succ->length;
		rb_erase(&succ->all_node, &domain->all_blocks);
		rb_erase(&succ->free_node, &domain->free_blocks);
		iovmm_block_put(succ);
	}

	p = &domain->free_blocks.rb_node;
	while (*p) {
		struct tegra_iovmm_block *b;

		parent = *p;
		b = rb_entry(parent, struct tegra_iovmm_block, free_node);
		if (block->length >= b->length)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&block->free_node, parent, p);
	rb_insert_color(&block->free_node, &domain->free_blocks);
	set_bit(BK_FREE, &block->flags);
	spin_unlock(&domain->block_lock);
}

/*
 * if the best-fit block is larger than the requested size, a remainder
 * block will be created and inserted into the free list in its place.
 * since all free blocks are stored in two trees the new block needs to be
 * linked into both.
 */
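/*
 * locking: callers set iovmm_block_splitting and drop block_lock (so the
 * allocation below may sleep); this function retakes block_lock and
 * returns with it held, even on failure.
 */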
static struct tegra_iovmm_block *iovmm_split_free_block(
	struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_block *block, unsigned long size)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct tegra_iovmm_block *rem;
	struct tegra_iovmm_block *b;

	rem = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
	/* retake the lock even on failure so that callers always unlock */
	spin_lock(&domain->block_lock);
	if (!rem)
		return NULL;

	p = &domain->free_blocks.rb_node;

	rem->start = block->start + size;
	rem->length = block->length - size;
	atomic_set(&rem->ref, 1);
	block->length = size;

	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct tegra_iovmm_block, free_node);
		if (rem->length >= b->length)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	set_bit(BK_FREE, &rem->flags);
	rb_link_node(&rem->free_node, parent, p);
	rb_insert_color(&rem->free_node, &domain->free_blocks);

	p = &domain->all_blocks.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct tegra_iovmm_block, all_node);
		if (rem->start >= b->start)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&rem->all_node, parent, p);
	rb_insert_color(&rem->all_node, &domain->all_blocks);

	return rem;
}

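/*
 * set while a free block is being split with block_lock temporarily
 * dropped; other allocators spin (via schedule()) until it clears.
 */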
static int iovmm_block_splitting;
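
/*
 * best-fit allocator: walk the length-sorted free tree for the smallest
 * block that can hold 'size' bytes at 'align' alignment, splitting off
 * the misalignment and any large remainder as new free blocks.
 */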
static struct tegra_iovmm_block *iovmm_alloc_block(
	struct tegra_iovmm_domain *domain, size_t size, size_t align)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b, *best;
	size_t simalign;
	unsigned long page_size = 1 << domain->dev->pgsize_bits;

	BUG_ON(!size);

	size = round_up(size, page_size);
	align = round_up(align, page_size);
	for (;;) {
		spin_lock(&domain->block_lock);
		if (!iovmm_block_splitting)
			break;
		spin_unlock(&domain->block_lock);
		schedule();
	}
	n = domain->free_blocks.rb_node;
	best = NULL;
	while (n) {
		tegra_iovmm_addr_t aligned_start, block_ceil;

		b = rb_entry(n, struct tegra_iovmm_block, free_node);
		simalign = SIMALIGN(b, align);
		aligned_start = b->start + simalign;
		block_ceil = b->start + b->length;

		if (block_ceil >= aligned_start + size) {
			/* Block has enough size */
			best = b;
			if (NO_SPLIT(simalign) &&
				NO_SPLIT(block_ceil - (aligned_start + size)))
				break;
			n = n->rb_left;
		} else {
			n = n->rb_right;
		}
	}
	if (!best) {
		spin_unlock(&domain->block_lock);
		return NULL;
	}

	simalign = SIMALIGN(best, align);
	if (DO_SPLIT(simalign)) {
		iovmm_block_splitting = 1;
		spin_unlock(&domain->block_lock);

		/* Split off misalignment */
		b = best;
		best = iovmm_split_free_block(domain, b, simalign);
		if (best)
			simalign = 0;
		else
			best = b;
	}

	/* Unfree the chosen block */
	rb_erase(&best->free_node, &domain->free_blocks);
	clear_bit(BK_FREE, &best->flags);
	atomic_inc(&best->ref);

	iovmm_start(best) = best->start + simalign;
	iovmm_length(best) = size;

	if (DO_SPLIT((best->start + best->length) - iovmm_end(best))) {
		iovmm_block_splitting = 1;
		spin_unlock(&domain->block_lock);

		/* Split off excess */
		(void)iovmm_split_free_block(domain, best, size + simalign);
	}

	iovmm_block_splitting = 0;
	spin_unlock(&domain->block_lock);

	return best;
}

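/*
 * allocate a fixed range [iovm_start, iovm_start + size) if it lies
 * entirely inside one free block; used when the caller needs a
 * specific I/O virtual address.
 */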
static struct tegra_iovmm_block *iovmm_allocate_vm(
	struct tegra_iovmm_domain *domain, size_t size,
	size_t align, unsigned long iovm_start)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b, *best;
	unsigned long page_size = 1 << domain->dev->pgsize_bits;

	BUG_ON(iovm_start % align);
	BUG_ON(!size);

	size = round_up(size, page_size);
	for (;;) {
		spin_lock(&domain->block_lock);
		if (!iovmm_block_splitting)
			break;
		spin_unlock(&domain->block_lock);
		schedule();
	}

	n = rb_first(&domain->free_blocks);
	best = NULL;
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, free_node);
		if ((b->start <= iovm_start) &&
			(b->start + b->length) >= (iovm_start + size)) {
			best = b;
			break;
		}
		n = rb_next(n);
	}

	if (!best)
		goto fail;

	/* split the mem before iovm_start. */
	if (DO_SPLIT(iovm_start - best->start)) {
		iovmm_block_splitting = 1;
		spin_unlock(&domain->block_lock);
		best = iovmm_split_free_block(domain, best,
			(iovm_start - best->start));
	}
	if (!best)
		goto fail;

	/* remove the desired block from free list. */
	rb_erase(&best->free_node, &domain->free_blocks);
	clear_bit(BK_FREE, &best->flags);
	atomic_inc(&best->ref);

	iovmm_start(best) = iovm_start;
	iovmm_length(best) = size;

	BUG_ON(best->start > iovmm_start(best));
	BUG_ON((best->start + best->length) < iovmm_end(best));
	/* split the mem after iovm_start+size. */
	if (DO_SPLIT(best->start + best->length - iovmm_end(best))) {
		iovmm_block_splitting = 1;
		spin_unlock(&domain->block_lock);
		(void)iovmm_split_free_block(domain, best,
			(iovmm_start(best) - best->start + size));
	}
fail:
	iovmm_block_splitting = 0;
	spin_unlock(&domain->block_lock);
	return best;
}

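/*
 * initialize a domain covering [start, end): the whole range (rounded
 * inward to the device page size) starts life as a single free block.
 */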
int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
	struct tegra_iovmm_device *dev, tegra_iovmm_addr_t start,
	tegra_iovmm_addr_t end)
{
	struct tegra_iovmm_block *b;
	unsigned long page_size = 1 << dev->pgsize_bits;

	b = kmem_cache_zalloc(iovmm_cache, GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	domain->dev = dev;

	atomic_set(&domain->clients, 0);
	atomic_set(&domain->locks, 0);
	atomic_set(&b->ref, 1);
	spin_lock_init(&domain->block_lock);
	init_rwsem(&domain->map_lock);
	init_waitqueue_head(&domain->delay_lock);

	b->start = round_up(start, page_size);
	b->length = round_down(end, page_size) - b->start;

	set_bit(BK_FREE, &b->flags);
	rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
	rb_insert_color(&b->free_node, &domain->free_blocks);
	rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
	rb_insert_color(&b->all_node, &domain->all_blocks);

	return 0;
}

/*
 * If iovm_start != 0, tries to allocate the specified iova block if it
 * is free; if it is not free, the allocation fails.
 */
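/*
 * Minimal usage sketch (hypothetical names, no error handling; assumes
 * an IOVMM device has already been registered):
 *
 *	struct tegra_iovmm_client *c =
 *		tegra_iovmm_alloc_client("demo", NULL, NULL);
 *	struct tegra_iovmm_area *vm =
 *		tegra_iovmm_create_vm(c, NULL, SZ_1M, SZ_4K,
 *			pgprot_noncached(PAGE_KERNEL), 0);
 *
 *	with ops == NULL the caller wires pages in explicitly:
 *	tegra_iovmm_vm_insert_pfn(vm, vm->iovm_start, page_to_pfn(page));
 *	...
 *	tegra_iovmm_free_vm(vm);
 *	tegra_iovmm_free_client(c);
 */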
struct tegra_iovmm_area *tegra_iovmm_create_vm(
	struct tegra_iovmm_client *client, struct tegra_iovmm_area_ops *ops,
	size_t size, size_t align, pgprot_t pgprot, unsigned long iovm_start)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_domain *domain;

	if (!client)
		return NULL;

	domain = client->domain;

	if (iovm_start)
		b = iovmm_allocate_vm(domain, size, align, iovm_start);
	else
		b = iovmm_alloc_block(domain, size, align);
	if (!b)
		return NULL;

	b->vm_area.domain = domain;
	b->vm_area.pgprot = pgprot;
	b->vm_area.ops = ops;

	down_read(&b->vm_area.domain->map_lock);
	if (ops && !test_bit(CL_LOCKED, &client->flags)) {
		set_bit(BK_MAP_DIRTY, &b->flags);
		set_bit(DM_MAP_DIRTY, &client->domain->flags);
	} else if (ops) {
		if (domain->dev->ops->map(domain, &b->vm_area))
			pr_err("%s failed to map locked domain\n", __func__);
	}
	up_read(&b->vm_area.domain->map_lock);

	return &b->vm_area;
}

void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *vm,
	tegra_iovmm_addr_t vaddr, unsigned long pfn)
{
	struct tegra_iovmm_domain *domain = vm->domain;

	BUG_ON(vaddr & ((1 << domain->dev->pgsize_bits) - 1));
	BUG_ON(vaddr >= vm->iovm_start + vm->iovm_length);
	BUG_ON(vaddr < vm->iovm_start);
	BUG_ON(vm->ops);

	domain->dev->ops->map_pfn(domain, vm, vaddr, pfn);
}

void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_domain *domain;

	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	domain = vm->domain;
	/*
	 * if the vm area mapping was deferred, don't unmap it since
	 * the memory for the page tables it uses may not be allocated
	 */
	down_read(&domain->map_lock);
	if (!test_and_clear_bit(BK_MAP_DIRTY, &b->flags))
		domain->dev->ops->unmap(domain, vm, false);
	up_read(&domain->map_lock);
}

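/*
 * re-establish a mapping torn down by tegra_iovmm_zap_vm(); deferred
 * (marked dirty) when the domain is not currently locked.
 */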
void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_domain *domain;

	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	domain = vm->domain;
	if (!vm->ops)
		return;

	down_read(&domain->map_lock);
	if (vm->ops) {
		if (atomic_read(&domain->locks))
			domain->dev->ops->map(domain, vm);
		else {
			set_bit(BK_MAP_DIRTY, &b->flags);
			set_bit(DM_MAP_DIRTY, &domain->flags);
		}
	}
	up_read(&domain->map_lock);
}

void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	struct tegra_iovmm_domain *domain;

	if (!vm)
		return;

	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	domain = vm->domain;
	down_read(&domain->map_lock);
	if (!test_and_clear_bit(BK_MAP_DIRTY, &b->flags))
		domain->dev->ops->unmap(domain, vm, true);
	iovmm_free_block(domain, b);
	up_read(&domain->map_lock);
}

struct tegra_iovmm_area *tegra_iovmm_area_get(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;

	BUG_ON(!vm);
	b = container_of(vm, struct tegra_iovmm_block, vm_area);

	atomic_inc(&b->ref);
	return &b->vm_area;
}

void tegra_iovmm_area_put(struct tegra_iovmm_area *vm)
{
	struct tegra_iovmm_block *b;
	BUG_ON(!vm);
	b = container_of(vm, struct tegra_iovmm_block, vm_area);
	iovmm_block_put(b);
}

struct tegra_iovmm_area *tegra_iovmm_find_area_get(
	struct tegra_iovmm_client *client, tegra_iovmm_addr_t addr)
{
	struct rb_node *n;
	struct tegra_iovmm_block *b = NULL;

	if (!client)
		return NULL;

	spin_lock(&client->domain->block_lock);
	n = client->domain->all_blocks.rb_node;

	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		if (iovmm_start(b) <= addr && addr <= iovmm_end(b)) {
			if (test_bit(BK_FREE, &b->flags))
				b = NULL;
			break;
		}
		if (addr > iovmm_start(b))
			n = n->rb_right;
		else
			n = n->rb_left;
		b = NULL;
	}
	if (b)
		atomic_inc(&b->ref);
	spin_unlock(&client->domain->block_lock);
	if (!b)
		return NULL;
	return &b->vm_area;
}

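/*
 * lock a client's domain: the first lock may call the device's
 * lock_domain op, and any mappings deferred while the domain was
 * unlocked are flushed out here.
 */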
static int _iovmm_client_lock(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_device *dev;
	struct tegra_iovmm_domain *domain;
	int v;

	if (unlikely(!client))
		return -ENODEV;

	if (unlikely(test_bit(CL_LOCKED, &client->flags))) {
		pr_err("attempting to relock client %s\n", client->name);
		return 0;
	}

	domain = client->domain;
	dev = domain->dev;
	down_write(&domain->map_lock);
	v = atomic_inc_return(&domain->locks);
	/*
	 * if the device doesn't export the lock_domain function, the
	 * device must guarantee that any valid domain will be locked.
	 */
	if (v == 1 && dev->ops->lock_domain) {
		if (dev->ops->lock_domain(domain, client)) {
			atomic_dec(&domain->locks);
			up_write(&domain->map_lock);
			return -EAGAIN;
		}
	}
	if (test_and_clear_bit(DM_MAP_DIRTY, &domain->flags)) {
		struct rb_node *n;
		struct tegra_iovmm_block *b;

		spin_lock(&domain->block_lock);
		n = rb_first(&domain->all_blocks);
		while (n) {
			b = rb_entry(n, struct tegra_iovmm_block, all_node);
			n = rb_next(n);
			if (test_bit(BK_FREE, &b->flags))
				continue;

			if (test_and_clear_bit(BK_MAP_DIRTY, &b->flags)) {
				if (!b->vm_area.ops) {
					pr_err("%s: vm_area ops must exist for lazy maps\n",
						__func__);
					continue;
				}
				dev->ops->map(domain, &b->vm_area);
			}
		}
		spin_unlock(&domain->block_lock);
	}
	set_bit(CL_LOCKED, &client->flags);
	up_write(&domain->map_lock);
	return 0;
}

int tegra_iovmm_client_trylock(struct tegra_iovmm_client *client)
{
	return _iovmm_client_lock(client);
}

int tegra_iovmm_client_lock(struct tegra_iovmm_client *client)
{
	int ret;

	if (!client)
		return -ENODEV;

	ret = wait_event_interruptible(client->domain->delay_lock,
		_iovmm_client_lock(client) != -EAGAIN);

	if (ret == -ERESTARTSYS)
		return -EINTR;

	return ret;
}

void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_device *dev;
	struct tegra_iovmm_domain *domain;
	int do_wake = 0;

	if (!client)
		return;

	if (!test_and_clear_bit(CL_LOCKED, &client->flags)) {
		pr_err("unlocking unlocked client %s\n", client->name);
		return;
	}

	domain = client->domain;
	dev = domain->dev;
	down_write(&domain->map_lock);
	if (!atomic_dec_return(&domain->locks)) {
		if (dev->ops->unlock_domain)
			dev->ops->unlock_domain(domain, client);
		do_wake = 1;
	}
	up_write(&domain->map_lock);
	if (do_wake)
		wake_up(&domain->delay_lock);
}

size_t tegra_iovmm_get_vm_size(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_domain *domain;
	struct rb_node *n;
	struct tegra_iovmm_block *b;
	size_t size = 0;

	if (!client)
		return 0;

	domain = client->domain;

	spin_lock(&domain->block_lock);
	n = rb_first(&domain->all_blocks);
	while (n) {
		b = rb_entry(n, struct tegra_iovmm_block, all_node);
		n = rb_next(n);
		size += b->length;
	}
	spin_unlock(&domain->block_lock);

	return size;
}

void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
{
	struct tegra_iovmm_device *dev;
	struct tegra_iovmm_domain *domain;

	if (!client)
		return;

	BUG_ON(!client->domain || !client->domain->dev);

	domain = client->domain;
	dev = domain->dev;

	if (test_and_clear_bit(CL_LOCKED, &client->flags)) {
		pr_err("freeing locked client %s\n", client->name);
		if (!atomic_dec_return(&domain->locks)) {
			down_write(&domain->map_lock);
			if (dev->ops->unlock_domain)
				dev->ops->unlock_domain(domain, client);
			up_write(&domain->map_lock);
			wake_up(&domain->delay_lock);
		}
	}
	mutex_lock(&iovmm_group_list_lock);
	if (!atomic_dec_return(&domain->clients))
		if (dev->ops->free_domain)
			dev->ops->free_domain(domain, client);
	list_del(&client->list);
	if (list_empty(&client->group->client_list)) {
		list_del(&client->group->group_list);
		kfree(client->group->name);
		kfree(client->group);
	}
	kfree(client->name);
	kfree(client);
	mutex_unlock(&iovmm_group_list_lock);
}

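/*
 * create a client; clients created with the same non-NULL share_group
 * name share one domain, while a NULL share_group gets a private,
 * unnamed group.
 */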
struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
	const char *share_group, struct miscdevice *misc_dev)
{
	struct tegra_iovmm_client *c = kzalloc(sizeof(*c), GFP_KERNEL);
	struct iovmm_share_group *grp = NULL;
	struct iovmm_share_group *tmp;
	struct tegra_iovmm_device *dev;

	if (!c)
		return NULL;
	c->name = kstrdup(name, GFP_KERNEL);
	if (!c->name)
		goto fail;
	c->misc_dev = misc_dev;

	mutex_lock(&iovmm_group_list_lock);
	if (share_group) {
		list_for_each_entry(tmp, &iovmm_groups, group_list) {
			if (tmp->name && !strcmp(tmp->name, share_group)) {
				grp = tmp;
				break;
			}
		}
	}
	/* grp is only non-NULL on an exact name match */
	if (!grp) {
		grp = kzalloc(sizeof(*grp), GFP_KERNEL);
		if (!grp)
			goto fail_lock;
		grp->name =
			share_group ? kstrdup(share_group, GFP_KERNEL) : NULL;
		if (share_group && !grp->name) {
			kfree(grp);
			goto fail_lock;
		}
		list_for_each_entry(dev, &iovmm_devices, list) {
			grp->domain = dev->ops->alloc_domain(dev, c);
			if (grp->domain)
				break;
		}
		if (!grp->domain) {
			pr_err("%s: alloc_domain failed for %s\n",
				__func__, c->name);
			dump_stack();
			kfree(grp->name);
			kfree(grp);
			grp = NULL;
			goto fail_lock;
		}
		spin_lock_init(&grp->lock);
		INIT_LIST_HEAD(&grp->client_list);
		list_add_tail(&grp->group_list, &iovmm_groups);
	}

	atomic_inc(&grp->domain->clients);
	c->group = grp;
	c->domain = grp->domain;
	spin_lock(&grp->lock);
	list_add_tail(&c->list, &grp->client_list);
	spin_unlock(&grp->lock);
	mutex_unlock(&iovmm_group_list_lock);
	return c;

fail_lock:
	mutex_unlock(&iovmm_group_list_lock);
fail:
	if (c)
		kfree(c->name);
	kfree(c);
	return NULL;
}

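/*
 * register an IOVMM device; the first registration creates the shared
 * block cache and the /proc/iovmminfo entry.
 */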
int tegra_iovmm_register(struct tegra_iovmm_device *dev)
{
	BUG_ON(!dev);
	mutex_lock(&iovmm_group_list_lock);
	if (list_empty(&iovmm_devices)) {
		iovmm_cache = KMEM_CACHE(tegra_iovmm_block, 0);
		if (!iovmm_cache) {
			pr_err("%s: failed to make kmem cache\n", __func__);
			mutex_unlock(&iovmm_group_list_lock);
			return -ENOMEM;
		}
		create_proc_read_entry("iovmminfo", S_IRUGO, NULL,
			tegra_iovmm_read_proc, NULL);
	}
	list_add_tail(&dev->list, &iovmm_devices);
	mutex_unlock(&iovmm_group_list_lock);
	pr_info("%s: added %s\n", __func__, dev->name);
	return 0;
}

int tegra_iovmm_unregister(struct tegra_iovmm_device *dev)
{
	mutex_lock(&iovmm_group_list_lock);
	list_del(&dev->list);
	mutex_unlock(&iovmm_group_list_lock);
	return 0;
}

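/*
 * syscore suspend/resume: forward to each registered device so that
 * hardware translation state can be saved and restored across system
 * suspend.
 */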
static int tegra_iovmm_suspend(void)
{
	int rc = 0;
	struct tegra_iovmm_device *dev;

	list_for_each_entry(dev, &iovmm_devices, list) {
		if (!dev->ops->suspend)
			continue;

		rc = dev->ops->suspend(dev);
		if (rc) {
			pr_err("%s: %s suspend returned %d\n",
				__func__, dev->name, rc);
			return rc;
		}
	}
	return 0;
}

static void tegra_iovmm_resume(void)
{
	struct tegra_iovmm_device *dev;

	list_for_each_entry(dev, &iovmm_devices, list) {
		if (dev->ops->resume)
			dev->ops->resume(dev);
	}
}

static struct syscore_ops tegra_iovmm_syscore_ops = {
	.suspend = tegra_iovmm_suspend,
	.resume = tegra_iovmm_resume,
};

static __init int tegra_iovmm_syscore_init(void)
{
	register_syscore_ops(&tegra_iovmm_syscore_ops);
	return 0;
}
subsys_initcall(tegra_iovmm_syscore_init);