diff options
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/page_allocator.c     | 34 |
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/page_allocator.h | 17 |
2 files changed, 28 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/page_allocator.c b/drivers/gpu/nvgpu/common/mm/page_allocator.c
index 1b6a2092..11d4ca73 100644
--- a/drivers/gpu/nvgpu/common/mm/page_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/page_allocator.c
@@ -34,7 +34,7 @@ static inline void add_slab_page_to_empty(struct page_alloc_slab *slab,
34 | struct page_alloc_slab_page *page) | 34 | struct page_alloc_slab_page *page) |
35 | { | 35 | { |
36 | BUG_ON(page->state != SP_NONE); | 36 | BUG_ON(page->state != SP_NONE); |
37 | list_add(&page->list_entry, &slab->empty); | 37 | nvgpu_list_add(&page->list_entry, &slab->empty); |
38 | slab->nr_empty++; | 38 | slab->nr_empty++; |
39 | page->state = SP_EMPTY; | 39 | page->state = SP_EMPTY; |
40 | } | 40 | } |
@@ -42,7 +42,7 @@ static inline void add_slab_page_to_partial(struct page_alloc_slab *slab,
42 | struct page_alloc_slab_page *page) | 42 | struct page_alloc_slab_page *page) |
43 | { | 43 | { |
44 | BUG_ON(page->state != SP_NONE); | 44 | BUG_ON(page->state != SP_NONE); |
45 | list_add(&page->list_entry, &slab->partial); | 45 | nvgpu_list_add(&page->list_entry, &slab->partial); |
46 | slab->nr_partial++; | 46 | slab->nr_partial++; |
47 | page->state = SP_PARTIAL; | 47 | page->state = SP_PARTIAL; |
48 | } | 48 | } |
@@ -50,7 +50,7 @@ static inline void add_slab_page_to_full(struct page_alloc_slab *slab,
50 | struct page_alloc_slab_page *page) | 50 | struct page_alloc_slab_page *page) |
51 | { | 51 | { |
52 | BUG_ON(page->state != SP_NONE); | 52 | BUG_ON(page->state != SP_NONE); |
53 | list_add(&page->list_entry, &slab->full); | 53 | nvgpu_list_add(&page->list_entry, &slab->full); |
54 | slab->nr_full++; | 54 | slab->nr_full++; |
55 | page->state = SP_FULL; | 55 | page->state = SP_FULL; |
56 | } | 56 | } |
@@ -58,21 +58,21 @@ static inline void add_slab_page_to_full(struct page_alloc_slab *slab,
58 | static inline void del_slab_page_from_empty(struct page_alloc_slab *slab, | 58 | static inline void del_slab_page_from_empty(struct page_alloc_slab *slab, |
59 | struct page_alloc_slab_page *page) | 59 | struct page_alloc_slab_page *page) |
60 | { | 60 | { |
61 | list_del_init(&page->list_entry); | 61 | nvgpu_list_del(&page->list_entry); |
62 | slab->nr_empty--; | 62 | slab->nr_empty--; |
63 | page->state = SP_NONE; | 63 | page->state = SP_NONE; |
64 | } | 64 | } |
65 | static inline void del_slab_page_from_partial(struct page_alloc_slab *slab, | 65 | static inline void del_slab_page_from_partial(struct page_alloc_slab *slab, |
66 | struct page_alloc_slab_page *page) | 66 | struct page_alloc_slab_page *page) |
67 | { | 67 | { |
68 | list_del_init(&page->list_entry); | 68 | nvgpu_list_del(&page->list_entry); |
69 | slab->nr_partial--; | 69 | slab->nr_partial--; |
70 | page->state = SP_NONE; | 70 | page->state = SP_NONE; |
71 | } | 71 | } |
72 | static inline void del_slab_page_from_full(struct page_alloc_slab *slab, | 72 | static inline void del_slab_page_from_full(struct page_alloc_slab *slab, |
73 | struct page_alloc_slab_page *page) | 73 | struct page_alloc_slab_page *page) |
74 | { | 74 | { |
75 | list_del_init(&page->list_entry); | 75 | nvgpu_list_del(&page->list_entry); |
76 | slab->nr_full--; | 76 | slab->nr_full--; |
77 | page->state = SP_NONE; | 77 | page->state = SP_NONE; |
78 | } | 78 | } |
@@ -197,7 +197,7 @@ static struct page_alloc_slab_page *alloc_slab_page(
197 | return ERR_PTR(-ENOMEM); | 197 | return ERR_PTR(-ENOMEM); |
198 | } | 198 | } |
199 | 199 | ||
200 | INIT_LIST_HEAD(&slab_page->list_entry); | 200 | nvgpu_init_list_node(&slab_page->list_entry); |
201 | slab_page->slab_size = slab->slab_size; | 201 | slab_page->slab_size = slab->slab_size; |
202 | slab_page->nr_objects = (u32)a->page_size / slab->slab_size; | 202 | slab_page->nr_objects = (u32)a->page_size / slab->slab_size; |
203 | slab_page->nr_objects_alloced = 0; | 203 | slab_page->nr_objects_alloced = 0; |
@@ -244,14 +244,14 @@ static int __do_slab_alloc(struct nvgpu_page_allocator *a,
244 | * readily available. Take the slab_page out of what ever list it | 244 | * readily available. Take the slab_page out of what ever list it |
245 | * was in since it may be put back into a different list later. | 245 | * was in since it may be put back into a different list later. |
246 | */ | 246 | */ |
247 | if (!list_empty(&slab->partial)) { | 247 | if (!nvgpu_list_empty(&slab->partial)) { |
248 | slab_page = list_first_entry(&slab->partial, | 248 | slab_page = nvgpu_list_first_entry(&slab->partial, |
249 | struct page_alloc_slab_page, | 249 | page_alloc_slab_page, |
250 | list_entry); | 250 | list_entry); |
251 | del_slab_page_from_partial(slab, slab_page); | 251 | del_slab_page_from_partial(slab, slab_page); |
252 | } else if (!list_empty(&slab->empty)) { | 252 | } else if (!nvgpu_list_empty(&slab->empty)) { |
253 | slab_page = list_first_entry(&slab->empty, | 253 | slab_page = nvgpu_list_first_entry(&slab->empty, |
254 | struct page_alloc_slab_page, | 254 | page_alloc_slab_page, |
255 | list_entry); | 255 | list_entry); |
256 | del_slab_page_from_empty(slab, slab_page); | 256 | del_slab_page_from_empty(slab, slab_page); |
257 | } | 257 | } |
@@ -383,7 +383,7 @@ static void __nvgpu_free_slab(struct nvgpu_page_allocator *a,
383 | 383 | ||
384 | /* And add. */ | 384 | /* And add. */ |
385 | if (new_state == SP_EMPTY) { | 385 | if (new_state == SP_EMPTY) { |
386 | if (list_empty(&slab->empty)) | 386 | if (nvgpu_list_empty(&slab->empty)) |
387 | add_slab_page_to_empty(slab, slab_page); | 387 | add_slab_page_to_empty(slab, slab_page); |
388 | else | 388 | else |
389 | free_slab_page(a, slab_page); | 389 | free_slab_page(a, slab_page); |
@@ -835,9 +835,9 @@ static int nvgpu_page_alloc_init_slabs(struct nvgpu_page_allocator *a)
835 | struct page_alloc_slab *slab = &a->slabs[i]; | 835 | struct page_alloc_slab *slab = &a->slabs[i]; |
836 | 836 | ||
837 | slab->slab_size = SZ_4K * (1 << i); | 837 | slab->slab_size = SZ_4K * (1 << i); |
838 | INIT_LIST_HEAD(&slab->empty); | 838 | nvgpu_init_list_node(&slab->empty); |
839 | INIT_LIST_HEAD(&slab->partial); | 839 | nvgpu_init_list_node(&slab->partial); |
840 | INIT_LIST_HEAD(&slab->full); | 840 | nvgpu_init_list_node(&slab->full); |
841 | slab->nr_empty = 0; | 841 | slab->nr_empty = 0; |
842 | slab->nr_partial = 0; | 842 | slab->nr_partial = 0; |
843 | slab->nr_full = 0; | 843 | slab->nr_full = 0; |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
index 70ed81c3..9a5ef8d3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/page_allocator.h
@@ -17,8 +17,6 @@
17 | #ifndef PAGE_ALLOCATOR_PRIV_H | 17 | #ifndef PAGE_ALLOCATOR_PRIV_H |
18 | #define PAGE_ALLOCATOR_PRIV_H | 18 | #define PAGE_ALLOCATOR_PRIV_H |
19 | 19 | ||
20 | #include <linux/list.h> | ||
21 | |||
22 | #include <nvgpu/allocator.h> | 20 | #include <nvgpu/allocator.h> |
23 | #include <nvgpu/kmem.h> | 21 | #include <nvgpu/kmem.h> |
24 | #include <nvgpu/list.h> | 22 | #include <nvgpu/list.h> |
@@ -46,9 +46,9 @@ struct nvgpu_allocator;
46 | * assumed to be 64k) the allocation is satisfied by one of the buckets. | 44 | * assumed to be 64k) the allocation is satisfied by one of the buckets. |
47 | */ | 45 | */ |
48 | struct page_alloc_slab { | 46 | struct page_alloc_slab { |
49 | struct list_head empty; | 47 | struct nvgpu_list_node empty; |
50 | struct list_head partial; | 48 | struct nvgpu_list_node partial; |
51 | struct list_head full; | 49 | struct nvgpu_list_node full; |
52 | 50 | ||
53 | int nr_empty; | 51 | int nr_empty; |
54 | int nr_partial; | 52 | int nr_partial; |
@@ -75,7 +73,14 @@ struct page_alloc_slab_page {
75 | enum slab_page_state state; | 73 | enum slab_page_state state; |
76 | 74 | ||
77 | struct page_alloc_slab *owner; | 75 | struct page_alloc_slab *owner; |
78 | struct list_head list_entry; | 76 | struct nvgpu_list_node list_entry; |
77 | }; | ||
78 | |||
79 | static inline struct page_alloc_slab_page * | ||
80 | page_alloc_slab_page_from_list_entry(struct nvgpu_list_node *node) | ||
81 | { | ||
82 | return (struct page_alloc_slab_page *) | ||
83 | ((uintptr_t)node - offsetof(struct page_alloc_slab_page, list_entry)); | ||
79 | }; | 84 | }; |
80 | 85 | ||
81 | struct page_alloc_chunk { | 86 | struct page_alloc_chunk { |