summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/page_allocator.c
diff options
context:
space:
mode:
author: Deepak Nibade <dnibade@nvidia.com>, 2017-02-10 07:05:58 -0500
committer: mobile promotions <svcmobile_promotions@nvidia.com>, 2017-04-03 11:55:19 -0400
commit: 0d8830394adf94fee8f69bc034293d1880e9d8db (patch)
tree: c708df6bc3bdd65fccbf4a6bea6e80a128318380 /drivers/gpu/nvgpu/common/mm/page_allocator.c
parent: 1e355ca52e2b3ac5f4e433e1bb115f6fd8499954 (diff)
gpu: nvgpu: use nvgpu list for page chunks
Use nvgpu list APIs instead of linux list APIs to store chunks of the page allocator.

Jira NVGPU-13
Change-Id: I63375fc2df683e018c48a90b76eca368438cc32f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1326814
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/page_allocator.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c | 42
1 file changed, 22 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 7d2cedc9..d74db3e3 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -135,11 +135,11 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
135{ 135{
136 struct page_alloc_chunk *chunk; 136 struct page_alloc_chunk *chunk;
137 137
138 while (!list_empty(&alloc->alloc_chunks)) { 138 while (!nvgpu_list_empty(&alloc->alloc_chunks)) {
139 chunk = list_first_entry(&alloc->alloc_chunks, 139 chunk = nvgpu_list_first_entry(&alloc->alloc_chunks,
140 struct page_alloc_chunk, 140 page_alloc_chunk,
141 list_entry); 141 list_entry);
142 list_del(&chunk->list_entry); 142 nvgpu_list_del(&chunk->list_entry);
143 143
144 if (free_buddy_alloc) 144 if (free_buddy_alloc)
145 nvgpu_free(&a->source_allocator, chunk->base); 145 nvgpu_free(&a->source_allocator, chunk->base);
@@ -322,8 +322,8 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
322 alloc->length = slab_page->slab_size; 322 alloc->length = slab_page->slab_size;
323 alloc->base = slab_page->page_addr + (offs * slab_page->slab_size); 323 alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
324 324
325 chunk = list_first_entry(&alloc->alloc_chunks, 325 chunk = nvgpu_list_first_entry(&alloc->alloc_chunks,
326 struct page_alloc_chunk, list_entry); 326 page_alloc_chunk, list_entry);
327 chunk->base = alloc->base; 327 chunk->base = alloc->base;
328 chunk->length = alloc->length; 328 chunk->length = alloc->length;
329 329
@@ -359,8 +359,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
359 goto fail; 359 goto fail;
360 } 360 }
361 361
362 INIT_LIST_HEAD(&alloc->alloc_chunks); 362 nvgpu_init_list_node(&alloc->alloc_chunks);
363 list_add(&chunk->list_entry, &alloc->alloc_chunks); 363 nvgpu_list_add(&chunk->list_entry, &alloc->alloc_chunks);
364 364
365 err = __do_slab_alloc(a, slab, alloc); 365 err = __do_slab_alloc(a, slab, alloc);
366 if (err) 366 if (err)
@@ -448,7 +448,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
448 448
449 memset(alloc, 0, sizeof(*alloc)); 449 memset(alloc, 0, sizeof(*alloc));
450 450
451 INIT_LIST_HEAD(&alloc->alloc_chunks); 451 nvgpu_init_list_node(&alloc->alloc_chunks);
452 alloc->length = pages << a->page_shift; 452 alloc->length = pages << a->page_shift;
453 453
454 while (pages) { 454 while (pages) {
@@ -504,23 +504,23 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
504 504
505 c->base = chunk_addr; 505 c->base = chunk_addr;
506 c->length = chunk_len; 506 c->length = chunk_len;
507 list_add(&c->list_entry, &alloc->alloc_chunks); 507 nvgpu_list_add(&c->list_entry, &alloc->alloc_chunks);
508 508
509 i++; 509 i++;
510 } 510 }
511 511
512 alloc->nr_chunks = i; 512 alloc->nr_chunks = i;
513 c = list_first_entry(&alloc->alloc_chunks, 513 c = nvgpu_list_first_entry(&alloc->alloc_chunks,
514 struct page_alloc_chunk, list_entry); 514 page_alloc_chunk, list_entry);
515 alloc->base = c->base; 515 alloc->base = c->base;
516 516
517 return alloc; 517 return alloc;
518 518
519fail_cleanup: 519fail_cleanup:
520 while (!list_empty(&alloc->alloc_chunks)) { 520 while (!nvgpu_list_empty(&alloc->alloc_chunks)) {
521 c = list_first_entry(&alloc->alloc_chunks, 521 c = nvgpu_list_first_entry(&alloc->alloc_chunks,
522 struct page_alloc_chunk, list_entry); 522 page_alloc_chunk, list_entry);
523 list_del(&c->list_entry); 523 nvgpu_list_del(&c->list_entry);
524 nvgpu_free(&a->source_allocator, c->base); 524 nvgpu_free(&a->source_allocator, c->base);
525 nvgpu_kmem_cache_free(a->chunk_cache, c); 525 nvgpu_kmem_cache_free(a->chunk_cache, c);
526 } 526 }
@@ -548,7 +548,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
548 548
549 palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx\n", 549 palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx\n",
550 pages << a->page_shift, pages, alloc->base); 550 pages << a->page_shift, pages, alloc->base);
551 list_for_each_entry(c, &alloc->alloc_chunks, list_entry) { 551 nvgpu_list_for_each_entry(c, &alloc->alloc_chunks,
552 page_alloc_chunk, list_entry) {
552 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n", 553 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n",
553 i++, c->base, c->length); 554 i++, c->base, c->length);
554 } 555 }
@@ -664,11 +665,11 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
664 665
665 alloc->nr_chunks = 1; 666 alloc->nr_chunks = 1;
666 alloc->length = length; 667 alloc->length = length;
667 INIT_LIST_HEAD(&alloc->alloc_chunks); 668 nvgpu_init_list_node(&alloc->alloc_chunks);
668 669
669 c->base = alloc->base; 670 c->base = alloc->base;
670 c->length = length; 671 c->length = length;
671 list_add(&c->list_entry, &alloc->alloc_chunks); 672 nvgpu_list_add(&c->list_entry, &alloc->alloc_chunks);
672 673
673 return alloc; 674 return alloc;
674 675
@@ -708,7 +709,8 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
708 709
709 palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)\n", 710 palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)\n",
710 alloc->base, aligned_len, pages); 711 alloc->base, aligned_len, pages);
711 list_for_each_entry(c, &alloc->alloc_chunks, list_entry) { 712 nvgpu_list_for_each_entry(c, &alloc->alloc_chunks,
713 page_alloc_chunk, list_entry) {
712 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n", 714 palloc_dbg(a, " Chunk %2d: 0x%010llx + 0x%llx\n",
713 i++, c->base, c->length); 715 i++, c->base, c->length);
714 } 716 }