author:    Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
committer: Jonathan Herman <hermanjl@cs.unc.edu>    2013-01-22 10:38:37 -0500
commit:    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree:      a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/video/tegra/nvmap/nvmap_mru.c
parent:    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)

Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/video/tegra/nvmap/nvmap_mru.c')
-rw-r--r--  drivers/video/tegra/nvmap/nvmap_mru.c  | 187
1 file changed, 187 insertions, 0 deletions
diff --git a/drivers/video/tegra/nvmap/nvmap_mru.c b/drivers/video/tegra/nvmap/nvmap_mru.c
new file mode 100644
index 00000000000..f54d44923eb
--- /dev/null
+++ b/drivers/video/tegra/nvmap/nvmap_mru.c
@@ -0,0 +1,187 @@
/*
 * drivers/video/tegra/nvmap/nvmap_mru.c
 *
 * IOVMM virtualization support for nvmap
 *
 * Copyright (c) 2009-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <asm/pgtable.h>

#include <mach/iovmm.h>

#include "nvmap.h"
#include "nvmap_mru.h"

/* If IOVMM reclamation is enabled (CONFIG_NVMAP_RECLAIM_UNPINNED_VM),
 * unpinned handles are placed onto a most-recently-used eviction list;
 * multiple lists are maintained, segmented by size (the sizes were chosen
 * to roughly correspond with common sizes for graphics surfaces).
 *
 * If a handle is on an MRU list, the code below may steal its IOVMM area
 * at any time to satisfy a pin operation when no free IOVMM space is
 * available.
 */

static const size_t mru_cutoff[] = {
        262144, 393216, 786432, 1048576, 1572864
};

static inline struct list_head *mru_list(struct nvmap_share *share, size_t size)
{
        unsigned int i;

        BUG_ON(!share->mru_lists);
        for (i = 0; i < ARRAY_SIZE(mru_cutoff); i++)
                if (size <= mru_cutoff[i])
                        break;

        return &share->mru_lists[i];
}
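
/* Worked example (illustrative, not from the original source): a 300 KiB
 * surface (307200 bytes) exceeds the 256 KiB cutoff but fits under 384 KiB,
 * so it maps to mru_lists[1]; anything larger than 1.5 MiB falls off the
 * end of the loop and lands in the catch-all list at index
 * ARRAY_SIZE(mru_cutoff). */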

size_t nvmap_mru_vm_size(struct tegra_iovmm_client *iovmm)
{
        size_t vm_size = tegra_iovmm_get_vm_size(iovmm);
        return (vm_size >> 2) * 3;
}
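
/* Illustrative arithmetic: the expression above reserves three quarters of
 * the client's IOVMM arena (computed with truncating division), so a
 * 128 MiB arena, for example, yields a 96 MiB MRU-managed size. */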

/* The caller must hold nvmap_mru_lock before calling this. */
void nvmap_mru_insert_locked(struct nvmap_share *share, struct nvmap_handle *h)
{
        size_t len = h->pgalloc.area->iovm_length;

        list_add(&h->pgalloc.mru_list, mru_list(share, len));
}

void nvmap_mru_remove(struct nvmap_share *s, struct nvmap_handle *h)
{
        nvmap_mru_lock(s);
        if (!list_empty(&h->pgalloc.mru_list))
                list_del(&h->pgalloc.mru_list);
        nvmap_mru_unlock(s);
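        /* Re-initialize the node so later list_empty() checks see a
         * cleanly detached entry rather than stale list pointers. */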
        INIT_LIST_HEAD(&h->pgalloc.mru_list);
}

/* Returns a tegra_iovmm_area for a handle. If the handle already has
 * an iovmm_area allocated, the handle is simply removed from its MRU
 * list and the existing iovmm_area is returned.
 *
 * If no allocation exists, try to allocate a new IOVMM area.
 *
 * If a new area cannot be allocated, try to re-use the most-recently-
 * unpinned handle's allocation.
 *
 * If that fails, iteratively evict handles from the MRU lists and free
 * their allocations until the new allocation succeeds.
 */
struct tegra_iovmm_area *nvmap_handle_iovmm_locked(struct nvmap_client *c,
                                                   struct nvmap_handle *h)
{
        struct list_head *mru;
        struct nvmap_handle *evict = NULL;
        struct tegra_iovmm_area *vm = NULL;
        unsigned int i, idx;
        pgprot_t prot;

        BUG_ON(!h || !c || !c->share);

        prot = nvmap_pgprot(h, pgprot_kernel);
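
        /* Fast path: the handle still owns an IOVMM area from an earlier
         * pin; take it off the MRU list and hand it straight back. */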
        if (h->pgalloc.area) {
                BUG_ON(list_empty(&h->pgalloc.mru_list));
                list_del(&h->pgalloc.mru_list);
                INIT_LIST_HEAD(&h->pgalloc.mru_list);
                return h->pgalloc.area;
        }
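
        /* Otherwise, attempt a fresh allocation from free IOVMM space. */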
        vm = tegra_iovmm_create_vm(c->share->iovmm, NULL,
                                   h->size, h->align, prot,
                                   h->pgalloc.iovm_addr);

        if (vm) {
                INIT_LIST_HEAD(&h->pgalloc.mru_list);
                return vm;
        }

        /* If the client asked for a specific iovm address, fail here
         * rather than evicting other handles. */
        if ((vm == NULL) && (h->pgalloc.iovm_addr != 0))
                return NULL;

        /* Attempt to re-use the most-recently-unpinned IOVMM area in the
         * same size bin as the current handle. If that fails, iteratively
         * evict handles (starting from the current bin) until an
         * allocation succeeds or no more areas can be evicted. */
        mru = mru_list(c->share, h->size);
        if (!list_empty(mru))
                evict = list_first_entry(mru, struct nvmap_handle,
                                         pgalloc.mru_list);

        if (evict && evict->pgalloc.area->iovm_length >= h->size) {
                list_del(&evict->pgalloc.mru_list);
                vm = evict->pgalloc.area;
                evict->pgalloc.area = NULL;
                INIT_LIST_HEAD(&evict->pgalloc.mru_list);
                return vm;
        }
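
        /* Last resort: walk every size bin, starting with this handle's
         * bin, evicting unpinned handles one at a time and retrying the
         * allocation after each area is freed. */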
        idx = mru - c->share->mru_lists;

        for (i = 0; i < c->share->nr_mru && !vm; i++, idx++) {
                if (idx >= c->share->nr_mru)
                        idx = 0;
                mru = &c->share->mru_lists[idx];
                while (!list_empty(mru) && !vm) {
                        evict = list_first_entry(mru, struct nvmap_handle,
                                                 pgalloc.mru_list);

                        BUG_ON(atomic_read(&evict->pin) != 0);
                        BUG_ON(!evict->pgalloc.area);
                        list_del(&evict->pgalloc.mru_list);
                        INIT_LIST_HEAD(&evict->pgalloc.mru_list);
                        tegra_iovmm_free_vm(evict->pgalloc.area);
                        evict->pgalloc.area = NULL;
                        vm = tegra_iovmm_create_vm(c->share->iovmm,
                                                   NULL, h->size, h->align,
                                                   prot, h->pgalloc.iovm_addr);
                }
        }
        return vm;
}
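
/* Hypothetical usage sketch (not part of the original file): a pin path
 * could invoke the function above under the MRU lock, e.g.
 *
 *      nvmap_mru_lock(client->share);
 *      h->pgalloc.area = nvmap_handle_iovmm_locked(client, h);
 *      nvmap_mru_unlock(client->share);
 *      if (!h->pgalloc.area)
 *              return -ENOMEM;
 *
 * The "_locked" suffix signals that the caller, not this file, holds the
 * MRU lock for the duration of the call. */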

int nvmap_mru_init(struct nvmap_share *share)
{
        int i;

        mutex_init(&share->mru_lock);
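
        /* One list per size cutoff plus a catch-all bin for anything
         * larger than the last cutoff: six lists in total here. */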
        share->nr_mru = ARRAY_SIZE(mru_cutoff) + 1;

        share->mru_lists = kzalloc(sizeof(struct list_head) * share->nr_mru,
                                   GFP_KERNEL);

        if (!share->mru_lists)
                return -ENOMEM;

        for (i = 0; i < share->nr_mru; i++)
                INIT_LIST_HEAD(&share->mru_lists[i]);

        return 0;
}

void nvmap_mru_destroy(struct nvmap_share *share)
{
        kfree(share->mru_lists);
        share->mru_lists = NULL;
}
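
/* Pairing note (an assumption drawn from the alloc/free symmetry above,
 * not stated in the file): nvmap_mru_init() is expected to run once when
 * a share is created and nvmap_mru_destroy() once at teardown; destroy
 * does not walk the lists, so all handles are presumed gone by then. */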