Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/page_allocator.c')
 drivers/gpu/nvgpu/common/mm/page_allocator.c | 73 +++++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 35 deletions(-)
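The change below converts the page allocator's metadata bookkeeping from three driver-global kmem caches (lazily created under a global mutex) to per-allocator caches owned by each nvgpu_page_allocator instance. For orientation, here is the shape of the nvgpu kmem cache API as inferred purely from the call sites in this diff; treat it as a sketch, since the authoritative declarations live in the nvgpu kmem headers, which are not part of this change:

	/* Sketch inferred from call sites below; not the authoritative header. */
	struct gk20a;
	struct nvgpu_kmem_cache;

	/* Create/destroy a cache of fixed-size objects for GPU instance g. */
	struct nvgpu_kmem_cache *nvgpu_kmem_cache_create(struct gk20a *g,
							 size_t size);
	void nvgpu_kmem_cache_destroy(struct nvgpu_kmem_cache *cache);

	/* Allocate/return one object; a free must target the owning cache. */
	void *nvgpu_kmem_cache_alloc(struct nvgpu_kmem_cache *cache);
	void nvgpu_kmem_cache_free(struct nvgpu_kmem_cache *cache, void *ptr);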
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 056c0c8f..193decc9 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -27,11 +27,6 @@
 #define palloc_dbg(a, fmt, arg...) \
 	alloc_dbg(palloc_owner(a), fmt, ##arg)
 
-static struct kmem_cache *page_alloc_cache;
-static struct kmem_cache *page_alloc_chunk_cache;
-static struct kmem_cache *page_alloc_slab_page_cache;
-static DEFINE_MUTEX(meta_data_cache_lock);
-
 /*
  * Handle the book-keeping for these operations.
  */
@@ -147,10 +142,10 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 
 		if (free_buddy_alloc)
 			nvgpu_free(&a->source_allocator, chunk->base);
-		kfree(chunk);
+		nvgpu_kmem_cache_free(a->chunk_cache, chunk);
 	}
 
-	kmem_cache_free(page_alloc_cache, alloc);
+	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 }
 
 static int __insert_page_alloc(struct nvgpu_page_allocator *a,
@@ -213,7 +208,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 {
 	struct page_alloc_slab_page *slab_page;
 
-	slab_page = kmem_cache_alloc(page_alloc_slab_page_cache, GFP_KERNEL);
+	slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache);
 	if (!slab_page) {
 		palloc_dbg(a, "OOM: unable to alloc slab_page struct!\n");
 		return ERR_PTR(-ENOMEM);
@@ -223,7 +218,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
 
 	slab_page->page_addr = nvgpu_alloc(&a->source_allocator, a->page_size);
 	if (!slab_page->page_addr) {
-		kfree(slab_page);
+		nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
 		palloc_dbg(a, "OOM: vidmem is full!\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -255,7 +250,7 @@ static void free_slab_page(struct nvgpu_page_allocator *a,
 	nvgpu_free(&a->source_allocator, slab_page->page_addr);
 	a->pages_freed++;
 
-	kmem_cache_free(page_alloc_slab_page_cache, slab_page);
+	nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
 }
 
 /*
@@ -352,12 +347,12 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 	slab_nr = (int)ilog2(PAGE_ALIGN(len) >> 12);
 	slab = &a->slabs[slab_nr];
 
-	alloc = kmem_cache_alloc(page_alloc_cache, GFP_KERNEL);
+	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
 	if (!alloc) {
 		palloc_dbg(a, "OOM: could not alloc page_alloc struct!\n");
 		goto fail;
 	}
-	chunk = kmem_cache_alloc(page_alloc_chunk_cache, GFP_KERNEL);
+	chunk = nvgpu_kmem_cache_alloc(a->chunk_cache);
 	if (!chunk) {
 		palloc_dbg(a, "OOM: could not alloc alloc_chunk struct!\n");
 		goto fail;
@@ -377,8 +372,10 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
 	return alloc;
 
 fail:
-	kfree(alloc);
-	kfree(chunk);
+	if (alloc)
+		nvgpu_kmem_cache_free(a->alloc_cache, alloc);
+	if (chunk)
+		nvgpu_kmem_cache_free(a->chunk_cache, chunk);
 	return NULL;
 }
 
@@ -444,7 +441,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 	u64 max_chunk_len = pages << a->page_shift;
 	int i = 0;
 
-	alloc = kmem_cache_alloc(page_alloc_cache, GFP_KERNEL);
+	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
 	if (!alloc)
 		goto fail;
 
@@ -496,7 +493,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 			goto fail_cleanup;
 		}
 
-		c = kmem_cache_alloc(page_alloc_chunk_cache, GFP_KERNEL);
+		c = nvgpu_kmem_cache_alloc(a->chunk_cache);
 		if (!c) {
 			nvgpu_free(&a->source_allocator, chunk_addr);
 			goto fail_cleanup;
@@ -524,9 +521,9 @@ fail_cleanup:
 				struct page_alloc_chunk, list_entry);
 		list_del(&c->list_entry);
 		nvgpu_free(&a->source_allocator, c->base);
-		kfree(c);
+		nvgpu_kmem_cache_free(a->chunk_cache, c);
 	}
-	kfree(alloc);
+	nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 fail:
 	return ERR_PTR(-ENOMEM);
 }
@@ -653,8 +650,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 	struct nvgpu_page_alloc *alloc;
 	struct page_alloc_chunk *c;
 
-	alloc = kmem_cache_alloc(page_alloc_cache, GFP_KERNEL);
-	c = kmem_cache_alloc(page_alloc_chunk_cache, GFP_KERNEL);
+	alloc = nvgpu_kmem_cache_alloc(a->alloc_cache);
+	c = nvgpu_kmem_cache_alloc(a->chunk_cache);
 	if (!alloc || !c)
 		goto fail;
 
@@ -675,8 +672,10 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 	return alloc;
 
 fail:
-	kfree(c);
-	kfree(alloc);
+	if (c)
+		nvgpu_kmem_cache_free(a->chunk_cache, c);
+	if (alloc)
+		nvgpu_kmem_cache_free(a->alloc_cache, alloc);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -879,19 +878,6 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	char buddy_name[sizeof(__a->name)];
 	int err;
 
-	mutex_lock(&meta_data_cache_lock);
-	if (!page_alloc_cache)
-		page_alloc_cache = KMEM_CACHE(nvgpu_page_alloc, 0);
-	if (!page_alloc_chunk_cache)
-		page_alloc_chunk_cache = KMEM_CACHE(page_alloc_chunk, 0);
-	if (!page_alloc_slab_page_cache)
-		page_alloc_slab_page_cache =
-			KMEM_CACHE(page_alloc_slab_page, 0);
-	mutex_unlock(&meta_data_cache_lock);
-
-	if (!page_alloc_cache || !page_alloc_chunk_cache)
-		return -ENOMEM;
-
 	if (blk_size < SZ_4K)
 		return -EINVAL;
 
@@ -903,6 +889,17 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	if (err)
 		goto fail;
 
+	a->alloc_cache = nvgpu_kmem_cache_create(g,
+			sizeof(struct nvgpu_page_alloc));
+	a->chunk_cache = nvgpu_kmem_cache_create(g,
+			sizeof(struct page_alloc_chunk));
+	a->slab_page_cache = nvgpu_kmem_cache_create(g,
+			sizeof(struct page_alloc_slab_page));
+	if (!a->alloc_cache || !a->chunk_cache || !a->slab_page_cache) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
 	a->base = base;
 	a->length = length;
 	a->page_size = blk_size;
@@ -935,6 +932,12 @@ int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a,
 	return 0;
 
 fail:
+	if (a->alloc_cache)
+		nvgpu_kmem_cache_destroy(a->alloc_cache);
+	if (a->chunk_cache)
+		nvgpu_kmem_cache_destroy(a->chunk_cache);
+	if (a->slab_page_cache)
+		nvgpu_kmem_cache_destroy(a->slab_page_cache);
 	kfree(a);
 	return err;
 }
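For this diff to compile, struct nvgpu_page_allocator must gain one cache handle per metadata type. A sketch of the assumed field additions, using the names and sizeof() expressions from the diff (the actual struct is declared in the page allocator header, which is outside this change):

	struct nvgpu_page_allocator {
		/* ... existing members (source_allocator, slabs, stats, ...) ... */
		struct nvgpu_kmem_cache *alloc_cache;		/* struct nvgpu_page_alloc */
		struct nvgpu_kmem_cache *chunk_cache;		/* struct page_alloc_chunk */
		struct nvgpu_kmem_cache *slab_page_cache;	/* struct page_alloc_slab_page */
	};

With the caches owned by the allocator, their lifetime is tied to the success or failure of nvgpu_page_allocator_init() rather than to module-global state, which is why the DEFINE_MUTEX(meta_data_cache_lock) and the lazy KMEM_CACHE() creation could be deleted outright.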