diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/page_allocator.c | 44 |
1 file changed, 9 insertions, 35 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c index 14d66efe..1b6a2092 100644 --- a/drivers/gpu/nvgpu/common/mm/page_allocator.c +++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c | |||
@@ -151,28 +151,10 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a, | |||
151 | static int __insert_page_alloc(struct nvgpu_page_allocator *a, | 151 | static int __insert_page_alloc(struct nvgpu_page_allocator *a, |
152 | struct nvgpu_page_alloc *alloc) | 152 | struct nvgpu_page_alloc *alloc) |
153 | { | 153 | { |
154 | struct rb_node **new = &a->allocs.rb_node; | 154 | alloc->tree_entry.key_start = alloc->base; |
155 | struct rb_node *parent = NULL; | 155 | alloc->tree_entry.key_end = alloc->base + alloc->length; |
156 | |||
157 | while (*new) { | ||
158 | struct nvgpu_page_alloc *tmp = | ||
159 | container_of(*new, struct nvgpu_page_alloc, | ||
160 | tree_entry); | ||
161 | |||
162 | parent = *new; | ||
163 | if (alloc->base < tmp->base) { | ||
164 | new = &((*new)->rb_left); | ||
165 | } else if (alloc->base > tmp->base) { | ||
166 | new = &((*new)->rb_right); | ||
167 | } else { | ||
168 | WARN(1, "Duplicate entries in allocated list!\n"); | ||
169 | return 0; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | rb_link_node(&alloc->tree_entry, parent, new); | ||
174 | rb_insert_color(&alloc->tree_entry, &a->allocs); | ||
175 | 156 | ||
157 | nvgpu_rbtree_insert(&alloc->tree_entry, &a->allocs); | ||
176 | return 0; | 158 | return 0; |
177 | } | 159 | } |
178 | 160 | ||
@@ -180,24 +162,16 @@ static struct nvgpu_page_alloc *__find_page_alloc( | |||
180 | struct nvgpu_page_allocator *a, | 162 | struct nvgpu_page_allocator *a, |
181 | u64 addr) | 163 | u64 addr) |
182 | { | 164 | { |
183 | struct rb_node *node = a->allocs.rb_node; | ||
184 | struct nvgpu_page_alloc *alloc; | 165 | struct nvgpu_page_alloc *alloc; |
166 | struct nvgpu_rbtree_node *node = NULL; | ||
185 | 167 | ||
186 | while (node) { | 168 | nvgpu_rbtree_search(addr, &node, a->allocs); |
187 | alloc = container_of(node, struct nvgpu_page_alloc, tree_entry); | ||
188 | |||
189 | if (addr < alloc->base) | ||
190 | node = node->rb_left; | ||
191 | else if (addr > alloc->base) | ||
192 | node = node->rb_right; | ||
193 | else | ||
194 | break; | ||
195 | } | ||
196 | |||
197 | if (!node) | 169 | if (!node) |
198 | return NULL; | 170 | return NULL; |
199 | 171 | ||
200 | rb_erase(node, &a->allocs); | 172 | alloc = nvgpu_page_alloc_from_rbtree_node(node); |
173 | |||
174 | nvgpu_rbtree_unlink(node, &a->allocs); | ||
201 | 175 | ||
202 | return alloc; | 176 | return alloc; |
203 | } | 177 | } |
@@ -906,7 +880,7 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, | |||
906 | a->length = length; | 880 | a->length = length; |
907 | a->page_size = blk_size; | 881 | a->page_size = blk_size; |
908 | a->page_shift = __ffs(blk_size); | 882 | a->page_shift = __ffs(blk_size); |
909 | a->allocs = RB_ROOT; | 883 | a->allocs = NULL; |
910 | a->owner = __a; | 884 | a->owner = __a; |
911 | a->flags = flags; | 885 | a->flags = flags; |
912 | 886 | ||