summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-04-28 15:11:11 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-05-14 03:05:12 -0400
commit6070d99a502e71b7290a444556370d3956f1263b (patch)
tree4df6378dc6d96035b2d3019ab6fe3cdb01cfd841 /drivers/gpu/nvgpu/common
parent648f43fe1e474b7232204da7dd68140a197e41c3 (diff)
gpu: nvgpu: Remove <linux/mm.h> from the page allocator
Remove the <linux/mm.h> include from the VIDMEM page allocator. To do this PAGE_SIZE needed to be defined for VIDMEM. Technically using the Linux PAGE_SIZE macro for VIDMEM was a bug since PAGE_SIZE need not be 4K on Linux so this patch also fixes a theoretical bug. Also usage of ERR_PTR(), PTR_ERR() and IS_ERR() was removed. These are Linux specific error handling macros. Change-Id: Iadaf5b8e0154b0c3adf593449023005afffad90d Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: http://git-master/r/1472371 Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--drivers/gpu/nvgpu/common/mm/page_allocator.c34
1 file changed, 24 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 2ffff63d..14b5da3c 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -14,8 +14,6 @@
14 * along with this program. If not, see <http://www.gnu.org/licenses/>. 14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */ 15 */
16 16
17#include <linux/mm.h>
18
19#include <nvgpu/bitops.h> 17#include <nvgpu/bitops.h>
20#include <nvgpu/allocator.h> 18#include <nvgpu/allocator.h>
21#include <nvgpu/page_allocator.h> 19#include <nvgpu/page_allocator.h>
@@ -29,6 +27,22 @@
29 alloc_dbg(palloc_owner(a), fmt, ##arg) 27 alloc_dbg(palloc_owner(a), fmt, ##arg)
30 28
31/* 29/*
30 * Since some Linux headers are still leaked into common code this is necessary
31 * for some builds.
32 */
33#ifdef PAGE_SIZE
34#undef PAGE_SIZE
35#undef PAGE_ALIGN
36#endif
37
38/*
39 * VIDMEM page size is 4k.
40 */
41#define PAGE_SIZE 0x1000
42#define PAGE_ALIGN(addr) ((addr + (PAGE_SIZE - 1)) & \
43 ((typeof(addr)) ~(PAGE_SIZE - 1)))
44
45/*
32 * Handle the book-keeping for these operations. 46 * Handle the book-keeping for these operations.
33 */ 47 */
34static inline void add_slab_page_to_empty(struct page_alloc_slab *slab, 48static inline void add_slab_page_to_empty(struct page_alloc_slab *slab,
@@ -186,7 +200,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
186 slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache); 200 slab_page = nvgpu_kmem_cache_alloc(a->slab_page_cache);
187 if (!slab_page) { 201 if (!slab_page) {
188 palloc_dbg(a, "OOM: unable to alloc slab_page struct!\n"); 202 palloc_dbg(a, "OOM: unable to alloc slab_page struct!\n");
189 return ERR_PTR(-ENOMEM); 203 return NULL;
190 } 204 }
191 205
192 memset(slab_page, 0, sizeof(*slab_page)); 206 memset(slab_page, 0, sizeof(*slab_page));
@@ -195,7 +209,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
195 if (!slab_page->page_addr) { 209 if (!slab_page->page_addr) {
196 nvgpu_kmem_cache_free(a->slab_page_cache, slab_page); 210 nvgpu_kmem_cache_free(a->slab_page_cache, slab_page);
197 palloc_dbg(a, "OOM: vidmem is full!\n"); 211 palloc_dbg(a, "OOM: vidmem is full!\n");
198 return ERR_PTR(-ENOMEM); 212 return NULL;
199 } 213 }
200 214
201 nvgpu_init_list_node(&slab_page->list_entry); 215 nvgpu_init_list_node(&slab_page->list_entry);
@@ -259,8 +273,8 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
259 273
260 if (!slab_page) { 274 if (!slab_page) {
261 slab_page = alloc_slab_page(a, slab); 275 slab_page = alloc_slab_page(a, slab);
262 if (IS_ERR(slab_page)) 276 if (!slab_page)
263 return PTR_ERR(slab_page); 277 return -ENOMEM;
264 } 278 }
265 279
266 /* 280 /*
@@ -500,7 +514,7 @@ fail_cleanup:
500 } 514 }
501 nvgpu_kmem_cache_free(a->alloc_cache, alloc); 515 nvgpu_kmem_cache_free(a->alloc_cache, alloc);
502fail: 516fail:
503 return ERR_PTR(-ENOMEM); 517 return NULL;
504} 518}
505 519
506static struct nvgpu_page_alloc *__nvgpu_alloc_pages( 520static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
@@ -514,7 +528,7 @@ static struct nvgpu_page_alloc *__nvgpu_alloc_pages(
514 pages = ALIGN(len, a->page_size) >> a->page_shift; 528 pages = ALIGN(len, a->page_size) >> a->page_shift;
515 529
516 alloc = __do_nvgpu_alloc_pages(a, pages); 530 alloc = __do_nvgpu_alloc_pages(a, pages);
517 if (IS_ERR(alloc)) { 531 if (!alloc) {
518 palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)\n", 532 palloc_dbg(a, "Alloc 0x%llx (%llu) (failed)\n",
519 pages << a->page_shift, pages); 533 pages << a->page_shift, pages);
520 return NULL; 534 return NULL;
@@ -652,7 +666,7 @@ fail:
652 nvgpu_kmem_cache_free(a->chunk_cache, c); 666 nvgpu_kmem_cache_free(a->chunk_cache, c);
653 if (alloc) 667 if (alloc)
654 nvgpu_kmem_cache_free(a->alloc_cache, alloc); 668 nvgpu_kmem_cache_free(a->alloc_cache, alloc);
655 return ERR_PTR(-ENOMEM); 669 return NULL;
656} 670}
657 671
658/* 672/*
@@ -673,7 +687,7 @@ static u64 nvgpu_page_alloc_fixed(struct nvgpu_allocator *__a,
673 alloc_lock(__a); 687 alloc_lock(__a);
674 688
675 alloc = __nvgpu_alloc_pages_fixed(a, base, aligned_len, 0); 689 alloc = __nvgpu_alloc_pages_fixed(a, base, aligned_len, 0);
676 if (IS_ERR(alloc)) { 690 if (!alloc) {
677 alloc_unlock(__a); 691 alloc_unlock(__a);
678 return 0; 692 return 0;
679 } 693 }