summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h')
-rw-r--r--drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h222
1 files changed, 222 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
new file mode 100644
index 00000000..c9e332a5
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/mm/buddy_allocator_priv.h
@@ -0,0 +1,222 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef BUDDY_ALLOCATOR_PRIV_H
24#define BUDDY_ALLOCATOR_PRIV_H
25
26#include <nvgpu/rbtree.h>
27#include <nvgpu/list.h>
28
29struct nvgpu_kmem_cache;
30struct nvgpu_allocator;
31struct vm_gk20a;
32
/*
 * Each buddy is an element in a binary tree.
 *
 * A buddy covers the address range [start, end) at a given order. The
 * parent/left/right/buddy links form the tree; buddy_entry and
 * alloced_entry let the same node sit on a free list or in the RB tree
 * of outstanding allocations, with the BALLOC_BUDDY_* bits in @flags
 * recording which state the node is currently in.
 */
struct nvgpu_buddy {
	struct nvgpu_buddy *parent;	/* Parent node. */
	struct nvgpu_buddy *buddy;	/* This node's buddy. */
	struct nvgpu_buddy *left;	/* Lower address sub-node. */
	struct nvgpu_buddy *right;	/* Higher address sub-node. */

	struct nvgpu_list_node buddy_entry;	/* List entry for various lists. */
	struct nvgpu_rbtree_node alloced_entry;	/* RB tree of allocations. */

	u64 start;			/* Start address of this buddy. */
	u64 end;			/* End address of this buddy. */
	u64 order;			/* Buddy order. */

	/* Independent state bits for @flags below. */
#define BALLOC_BUDDY_ALLOCED	0x1
#define BALLOC_BUDDY_SPLIT	0x2
#define BALLOC_BUDDY_IN_LIST	0x4
	int flags;			/* List of associated flags. */

	/*
	 * Size of the PDE this buddy is using. This allows for grouping like
	 * sized allocations into the same PDE. This uses the gmmu_pgsz_gk20a
	 * enum except for the BALLOC_PTE_SIZE_ANY specifier.
	 */
#define BALLOC_PTE_SIZE_ANY	-1
	int pte_size;
};
62
63static inline struct nvgpu_buddy *
64nvgpu_buddy_from_buddy_entry(struct nvgpu_list_node *node)
65{
66 return (struct nvgpu_buddy *)
67 ((uintptr_t)node - offsetof(struct nvgpu_buddy, buddy_entry));
68};
69
70static inline struct nvgpu_buddy *
71nvgpu_buddy_from_rbtree_node(struct nvgpu_rbtree_node *node)
72{
73 return (struct nvgpu_buddy *)
74 ((uintptr_t)node - offsetof(struct nvgpu_buddy, alloced_entry));
75};
76
/*
 * Generate the three accessors for one BALLOC_BUDDY_* flag bit:
 *
 *   buddy_is_<flag>()  - non-zero when the bit is set in b->flags,
 *   buddy_set_<flag>() - set the bit,
 *   buddy_clr_<flag>() - clear the bit.
 *
 * @flag is the lower-case accessor suffix, @flag_up the upper-case
 * suffix of the matching BALLOC_BUDDY_* constant.
 */
#define __buddy_flag_ops(flag, flag_up)					\
	static inline int buddy_is_ ## flag(struct nvgpu_buddy *b)	\
	{								\
		return b->flags & BALLOC_BUDDY_ ## flag_up;		\
	}								\
	static inline void buddy_set_ ## flag(struct nvgpu_buddy *b)	\
	{								\
		b->flags |= BALLOC_BUDDY_ ## flag_up;			\
	}								\
	static inline void buddy_clr_ ## flag(struct nvgpu_buddy *b)	\
	{								\
		b->flags &= ~BALLOC_BUDDY_ ## flag_up;			\
	}

/*
 * Functions generated by the instantiations below:
 *
 * int buddy_is_alloced(struct nvgpu_buddy *b);
 * void buddy_set_alloced(struct nvgpu_buddy *b);
 * void buddy_clr_alloced(struct nvgpu_buddy *b);
 *
 * int buddy_is_split(struct nvgpu_buddy *b);
 * void buddy_set_split(struct nvgpu_buddy *b);
 * void buddy_clr_split(struct nvgpu_buddy *b);
 *
 * int buddy_is_in_list(struct nvgpu_buddy *b);
 * void buddy_set_in_list(struct nvgpu_buddy *b);
 * void buddy_clr_in_list(struct nvgpu_buddy *b);
 */
__buddy_flag_ops(alloced, ALLOCED);
__buddy_flag_ops(split, SPLIT);
__buddy_flag_ops(in_list, IN_LIST);
107
/*
 * Keeps info for a fixed allocation.
 *
 * A fixed allocation pins an explicit address range [start, end) and
 * owns the list of buddies that were carved out to back that range.
 */
struct nvgpu_fixed_alloc {
	struct nvgpu_list_node buddies;		/* List of buddies. */
	struct nvgpu_rbtree_node alloced_entry;	/* RB tree of fixed allocations. */

	u64 start;			/* Start of fixed block. */
	u64 end;			/* End address. */
};
118
119static inline struct nvgpu_fixed_alloc *
120nvgpu_fixed_alloc_from_rbtree_node(struct nvgpu_rbtree_node *node)
121{
122 return (struct nvgpu_fixed_alloc *)
123 ((uintptr_t)node - offsetof(struct nvgpu_fixed_alloc, alloced_entry));
124};
125
/*
 * GPU buddy allocator for the various GPU address spaces. Each addressable unit
 * doesn't have to correspond to a byte. In some cases each unit is a more
 * complex object such as a comp_tag line or the like.
 *
 * The max order is computed based on the size of the minimum order and the size
 * of the address space.
 *
 * order_size is the size of an order 0 buddy.
 */
struct nvgpu_buddy_allocator {
	struct nvgpu_allocator *owner;	/* Owner of this buddy allocator. */
	struct vm_gk20a *vm;		/* Parent VM - can be NULL. */

	u64 base;			/* Base address of the space. */
	u64 length;			/* Length of the space. */
	u64 blk_size;			/* Size of order 0 allocation. */
	u64 blk_shift;			/* Shift to divide by blk_size. */

	/* Internal stuff. */
	u64 start;			/* Real start (aligned to blk_size). */
	u64 end;			/* Real end, trimmed if needed. */
	u64 count;			/* Count of objects in space. */
	u64 blks;			/* Count of blks in the space. */
	u64 max_order;			/* Specific maximum order. */

	struct nvgpu_rbtree_node *alloced_buddies;	/* Outstanding allocations. */
	struct nvgpu_rbtree_node *fixed_allocs;	/* Outstanding fixed allocations. */

	struct nvgpu_list_node co_list;

	/* Cache used to allocate struct nvgpu_buddy nodes. */
	struct nvgpu_kmem_cache *buddy_cache;

	/*
	 * Impose an upper bound on the maximum order.
	 * NOTE(review): GPU_BALLOC_MAX_ORDER is defined outside this
	 * header.
	 */
#define GPU_BALLOC_ORDER_LIST_LEN	(GPU_BALLOC_MAX_ORDER + 1)

	/* One free list plus bookkeeping counters per order. */
	struct nvgpu_list_node buddy_list[GPU_BALLOC_ORDER_LIST_LEN];
	u64 buddy_list_len[GPU_BALLOC_ORDER_LIST_LEN];
	u64 buddy_list_split[GPU_BALLOC_ORDER_LIST_LEN];
	u64 buddy_list_alloced[GPU_BALLOC_ORDER_LIST_LEN];

	/*
	 * This is for when the allocator is managing a GVA space (the
	 * GPU_ALLOC_GVA_SPACE bit is set in @flags). This requires
	 * that we group like sized allocations into PDE blocks.
	 */
	u64 pte_blk_order;

	int initialized;
	int alloc_made;			/* True after the first alloc. */

	u64 flags;

	/* Allocation statistics. */
	u64 bytes_alloced;
	u64 bytes_alloced_real;
	u64 bytes_freed;
};
185
186static inline struct nvgpu_buddy_allocator *buddy_allocator(
187 struct nvgpu_allocator *a)
188{
189 return (struct nvgpu_buddy_allocator *)(a)->priv;
190}
191
192static inline struct nvgpu_list_node *balloc_get_order_list(
193 struct nvgpu_buddy_allocator *a, int order)
194{
195 return &a->buddy_list[order];
196}
197
198static inline u64 balloc_order_to_len(struct nvgpu_buddy_allocator *a,
199 int order)
200{
201 return (1 << order) * a->blk_size;
202}
203
204static inline u64 balloc_base_shift(struct nvgpu_buddy_allocator *a,
205 u64 base)
206{
207 return base - a->start;
208}
209
210static inline u64 balloc_base_unshift(struct nvgpu_buddy_allocator *a,
211 u64 base)
212{
213 return base + a->start;
214}
215
216static inline struct nvgpu_allocator *balloc_owner(
217 struct nvgpu_buddy_allocator *a)
218{
219 return a->owner;
220}
221
222#endif