author     Deepak Nibade <dnibade@nvidia.com>                   2017-02-10 07:05:58 -0500
committer  mobile promotions <svcmobile_promotions@nvidia.com> 2017-04-03 11:55:19 -0400
commit     0d8830394adf94fee8f69bc034293d1880e9d8db (patch)
tree       c708df6bc3bdd65fccbf4a6bea6e80a128318380
parent     1e355ca52e2b3ac5f4e433e1bb115f6fd8499954 (diff)
gpu: nvgpu: use nvgpu list for page chunks
Use nvgpu list APIs instead of linux list APIs to store chunks of
the page allocator.

Jira NVGPU-13

Change-Id: I63375fc2df683e018c48a90b76eca368438cc32f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1326814
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
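For context, the nvgpu list API is an intrusive doubly linked list like the kernel's, but its entry macros take a bare type name (page_alloc_chunk, not struct page_alloc_chunk) so the macro can token-paste it onto a per-type <type>_from_list_entry() helper, which this patch adds to page_allocator.h. The following is a minimal, self-contained sketch of that pattern; the nvgpu_list_first_entry() expansion and the nvgpu_list_add() insertion order shown here are assumptions inferred from the call sites in this patch, not the actual contents of <nvgpu/list.h>.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct nvgpu_list_node; illustrative only. */
struct nvgpu_list_node {
        struct nvgpu_list_node *prev;
        struct nvgpu_list_node *next;
};

static inline void nvgpu_init_list_node(struct nvgpu_list_node *node)
{
        /* An empty list is a node pointing at itself. */
        node->prev = node;
        node->next = node;
}

static inline void nvgpu_list_add(struct nvgpu_list_node *node,
                                  struct nvgpu_list_node *head)
{
        /* Insert at the front, directly after the list head. */
        node->next = head->next;
        node->prev = head;
        head->next->prev = node;
        head->next = node;
}

/*
 * Assumed expansion: the bare type name is pasted onto the
 * <type>_from_list_entry() helper that each user type defines.
 */
#define nvgpu_list_first_entry(head, type, member) \
        type##_from_list_entry((head)->next)

struct page_alloc_chunk {
        struct nvgpu_list_node list_entry;
        unsigned long long base;
        unsigned long long length;
};

/* Same shape as the helper this patch adds to page_allocator.h. */
static inline struct page_alloc_chunk *
page_alloc_chunk_from_list_entry(struct nvgpu_list_node *node)
{
        return (struct page_alloc_chunk *)
                ((uintptr_t)node - offsetof(struct page_alloc_chunk, list_entry));
}

int main(void)
{
        struct nvgpu_list_node chunks;
        struct page_alloc_chunk c = { .base = 0x1000, .length = 0x2000 };

        nvgpu_init_list_node(&chunks);
        nvgpu_list_add(&c.list_entry, &chunks);

        printf("first chunk: base=0x%llx len=0x%llx\n",
               nvgpu_list_first_entry(&chunks, page_alloc_chunk, list_entry)->base,
               nvgpu_list_first_entry(&chunks, page_alloc_chunk, list_entry)->length);
        return 0;
}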
-rw-r--r--  drivers/gpu/nvgpu/common/mm/page_allocator.c      42
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c                  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c                11
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/page_allocator.h  12
4 files changed, 43 insertions, 28 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 7d2cedc9..d74db3e3 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -135,11 +135,11 @@ static void __nvgpu_free_pages(struct nvgpu_page_allocator *a,
 {
         struct page_alloc_chunk *chunk;
 
-        while (!list_empty(&alloc->alloc_chunks)) {
-                chunk = list_first_entry(&alloc->alloc_chunks,
-                                         struct page_alloc_chunk,
+        while (!nvgpu_list_empty(&alloc->alloc_chunks)) {
+                chunk = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                         page_alloc_chunk,
                                          list_entry);
-                list_del(&chunk->list_entry);
+                nvgpu_list_del(&chunk->list_entry);
 
                 if (free_buddy_alloc)
                         nvgpu_free(&a->source_allocator, chunk->base);
@@ -322,8 +322,8 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
         alloc->length = slab_page->slab_size;
         alloc->base = slab_page->page_addr + (offs * slab_page->slab_size);
 
-        chunk = list_first_entry(&alloc->alloc_chunks,
-                                 struct page_alloc_chunk, list_entry);
+        chunk = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                 page_alloc_chunk, list_entry);
         chunk->base = alloc->base;
         chunk->length = alloc->length;
 
@@ -359,8 +359,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_slab(
                 goto fail;
         }
 
-        INIT_LIST_HEAD(&alloc->alloc_chunks);
-        list_add(&chunk->list_entry, &alloc->alloc_chunks);
+        nvgpu_init_list_node(&alloc->alloc_chunks);
+        nvgpu_list_add(&chunk->list_entry, &alloc->alloc_chunks);
 
         err = __do_slab_alloc(a, slab, alloc);
         if (err)
@@ -448,7 +448,7 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 
         memset(alloc, 0, sizeof(*alloc));
 
-        INIT_LIST_HEAD(&alloc->alloc_chunks);
+        nvgpu_init_list_node(&alloc->alloc_chunks);
         alloc->length = pages << a->page_shift;
 
         while (pages) {
@@ -504,23 +504,23 @@ static struct nvgpu_page_alloc *__do_nvgpu_alloc_pages(
 
                 c->base = chunk_addr;
                 c->length = chunk_len;
-                list_add(&c->list_entry, &alloc->alloc_chunks);
+                nvgpu_list_add(&c->list_entry, &alloc->alloc_chunks);
 
                 i++;
         }
 
         alloc->nr_chunks = i;
-        c = list_first_entry(&alloc->alloc_chunks,
-                             struct page_alloc_chunk, list_entry);
+        c = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                             page_alloc_chunk, list_entry);
         alloc->base = c->base;
 
         return alloc;
 
 fail_cleanup:
-        while (!list_empty(&alloc->alloc_chunks)) {
-                c = list_first_entry(&alloc->alloc_chunks,
-                                     struct page_alloc_chunk, list_entry);
-                list_del(&c->list_entry);
+        while (!nvgpu_list_empty(&alloc->alloc_chunks)) {
+                c = nvgpu_list_first_entry(&alloc->alloc_chunks,
+                                     page_alloc_chunk, list_entry);
+                nvgpu_list_del(&c->list_entry);
                 nvgpu_free(&a->source_allocator, c->base);
                 nvgpu_kmem_cache_free(a->chunk_cache, c);
         }
@@ -548,7 +548,8 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
 
         palloc_dbg(a, "Alloc 0x%llx (%llu) id=0x%010llx\n",
                    pages << a->page_shift, pages, alloc->base);
-        list_for_each_entry(c, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(c, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 palloc_dbg(a, "  Chunk %2d: 0x%010llx + 0x%llx\n",
                            i++, c->base, c->length);
         }
@@ -664,11 +665,11 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages_fixed(
 
         alloc->nr_chunks = 1;
         alloc->length = length;
-        INIT_LIST_HEAD(&alloc->alloc_chunks);
+        nvgpu_init_list_node(&alloc->alloc_chunks);
 
         c->base = alloc->base;
         c->length = length;
-        list_add(&c->list_entry, &alloc->alloc_chunks);
+        nvgpu_list_add(&c->list_entry, &alloc->alloc_chunks);
 
         return alloc;
 
@@ -708,7 +709,8 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
 
         palloc_dbg(a, "Alloc [fixed] @ 0x%010llx + 0x%llx (%llu)\n",
                    alloc->base, aligned_len, pages);
-        list_for_each_entry(c, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(c, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 palloc_dbg(a, "  Chunk %2d: 0x%010llx + 0x%llx\n",
                            i++, c->base, c->length);
         }
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index b9216309..aa732368 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -88,7 +88,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
         u32 byteoff, start_reg, until_end, n;
 
         alloc = get_vidmem_page_alloc(mem->sgt->sgl);
-        list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 if (offset >= chunk->length)
                         offset -= chunk->length;
                 else
@@ -113,7 +114,8 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
                 size -= n;
 
                 if (n == (chunk->length - offset)) {
-                        chunk = list_next_entry(chunk, list_entry);
+                        chunk = nvgpu_list_next_entry(chunk, page_alloc_chunk,
+                                                      list_entry);
                         offset = 0;
                 } else {
                         offset += n / sizeof(u32);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 94d31273..3cda3034 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -33,6 +33,7 @@
 #include <nvgpu/kmem.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/pramin.h>
+#include <nvgpu/list.h>
 #include <nvgpu/allocator.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/page_allocator.h>
@@ -2174,7 +2175,8 @@ static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
         struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
         struct page_alloc_chunk *chunk = NULL;
 
-        list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 chunk_align = 1ULL << __ffs(chunk->base | chunk->length);
 
                 if (align)
@@ -2875,7 +2877,8 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 
         alloc = get_vidmem_page_alloc(mem->sgt->sgl);
 
-        list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
+        nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                                  page_alloc_chunk, list_entry) {
                 if (gk20a_last_fence)
                         gk20a_fence_put(gk20a_last_fence);
 
@@ -3732,8 +3735,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
         if (sgt) {
                 alloc = get_vidmem_page_alloc(sgt->sgl);
 
-                list_for_each_entry(chunk, &alloc->alloc_chunks,
-                                    list_entry) {
+                nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
+                                          page_alloc_chunk, list_entry) {
                         if (space_to_skip &&
                             space_to_skip > chunk->length) {
                                 space_to_skip -= chunk->length;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
index fa586dba..92f48ac5 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
@@ -22,6 +22,7 @@
 
 #include <nvgpu/allocator.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/list.h>
 
 struct nvgpu_allocator;
 
@@ -78,19 +79,26 @@ struct page_alloc_slab_page {
 };
 
 struct page_alloc_chunk {
-        struct list_head list_entry;
+        struct nvgpu_list_node list_entry;
 
         u64 base;
         u64 length;
 };
 
+static inline struct page_alloc_chunk *
+page_alloc_chunk_from_list_entry(struct nvgpu_list_node *node)
+{
+        return (struct page_alloc_chunk *)
+                ((uintptr_t)node - offsetof(struct page_alloc_chunk, list_entry));
+};
+
 /*
  * Struct to handle internal management of page allocation. It holds a list
  * of the chunks of pages that make up the overall allocation - much like a
  * scatter gather table.
  */
 struct nvgpu_page_alloc {
-        struct list_head alloc_chunks;
+        struct nvgpu_list_node alloc_chunks;
 
         int nr_chunks;
         u64 length;
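The extra type argument gained by every converted loop above follows from the same idiom: nvgpu_list_for_each_entry() and nvgpu_list_next_entry() need the bare type name to reach the <type>_from_list_entry() helper. Extending the sketch after the commit message, a hypothetical expansion consistent with the call sites in this patch (the real definitions live in <nvgpu/list.h> and may differ in detail):

/* Hypothetical expansions; inferred from usage, not from <nvgpu/list.h>. */
#define nvgpu_list_next_entry(pos, type, member) \
        type##_from_list_entry((pos)->member.next)

#define nvgpu_list_for_each_entry(pos, head, type, member)    \
        for ((pos) = type##_from_list_entry((head)->next);    \
             &(pos)->member != (head);                        \
             (pos) = nvgpu_list_next_entry(pos, type, member))

/* Usage in the shape of the converted loops in this patch. */
static unsigned long long total_length(struct nvgpu_list_node *alloc_chunks)
{
        struct page_alloc_chunk *chunk;
        unsigned long long total = 0;

        nvgpu_list_for_each_entry(chunk, alloc_chunks,
                                  page_alloc_chunk, list_entry)
                total += chunk->length;

        return total;
}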