Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mm.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mm.c	182
1 file changed, 41 insertions(+), 141 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index cdbb11eb701b..8844b50c3e54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
 
 	b->offset = a->offset;
 	b->length = size;
-	b->free   = a->free;
 	b->type   = a->type;
 	a->offset += size;
 	a->length -= size;
 	list_add_tail(&b->nl_entry, &a->nl_entry);
-	if (b->free)
+	if (b->type == 0)
 		list_add_tail(&b->fl_entry, &a->fl_entry);
 	return b;
 }
 
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
-	struct nouveau_mm_node *prev, *next;
-
-	/* try to merge with free adjacent entries of same type */
-	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.prev != &rmm->nodes) {
-		if (prev->free && prev->type == this->type) {
-			prev->length += this->length;
-			region_put(rmm, this);
-			this = prev;
-		}
-	}
-
-	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-	if (this->nl_entry.next != &rmm->nodes) {
-		if (next->free && next->type == this->type) {
-			next->offset  = this->offset;
-			next->length += this->length;
-			region_put(rmm, this);
-			this = next;
-		}
-	}
-
-	return this;
-}
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
 {
-	u32 block_s, block_l;
+	struct nouveau_mm_node *prev = node(this, prev);
+	struct nouveau_mm_node *next = node(this, next);
 
-	this->free = true;
 	list_add(&this->fl_entry, &rmm->free);
-	this = nouveau_mm_merge(rmm, this);
-
-	/* any entirely free blocks now?  we'll want to remove typing
-	 * on them now so they can be use for any memory allocation
-	 */
-	block_s = roundup(this->offset, rmm->block_size);
-	if (block_s + rmm->block_size > this->offset + this->length)
-		return;
+	this->type = 0;
 
-	/* split off any still-typed region at the start */
-	if (block_s != this->offset) {
-		if (!region_split(rmm, this, block_s - this->offset))
-			return;
+	if (prev && prev->type == 0) {
+		prev->length += this->length;
+		region_put(rmm, this);
+		this = prev;
 	}
 
-	/* split off the soon-to-be-untyped block(s) */
-	block_l = rounddown(this->length, rmm->block_size);
-	if (block_l != this->length) {
-		this = region_split(rmm, this, block_l);
-		if (!this)
-			return;
+	if (next && next->type == 0) {
+		next->offset  = this->offset;
+		next->length += this->length;
+		region_put(rmm, this);
 	}
-
-	/* mark as having no type, and retry merge with any adjacent
-	 * untyped blocks
-	 */
-	this->type = 0;
-	nouveau_mm_merge(rmm, this);
 }
 
 int
 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
-	struct nouveau_mm_node *this, *tmp, *next;
-	u32 splitoff, avail, alloc;
-
-	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
-		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-		if (this->nl_entry.next == &rmm->nodes)
-			next = NULL;
-
-		/* skip wrongly typed blocks */
-		if (this->type && this->type != type)
+	struct nouveau_mm_node *prev, *this, *next;
+	u32 min = size_nc ? size_nc : size;
+	u32 align_mask = align - 1;
+	u32 splitoff;
+	u32 s, e;
+
+	list_for_each_entry(this, &rmm->free, fl_entry) {
+		e = this->offset + this->length;
+		s = this->offset;
+
+		prev = node(this, prev);
+		if (prev && prev->type != type)
+			s = roundup(s, rmm->block_size);
+
+		next = node(this, next);
+		if (next && next->type != type)
+			e = rounddown(e, rmm->block_size);
+
+		s  = (s + align_mask) & ~align_mask;
+		e &= ~align_mask;
+		if (s > e || e - s < min)
 			continue;
 
-		/* account for alignment */
-		splitoff = this->offset & (align - 1);
-		if (splitoff)
-			splitoff = align - splitoff;
-
-		if (this->length <= splitoff)
-			continue;
-
-		/* determine total memory available from this, and
-		 * the next block (if appropriate)
-		 */
-		avail = this->length;
-		if (next && next->free && (!next->type || next->type == type))
-			avail += next->length;
-
-		avail -= splitoff;
-
-		/* determine allocation size */
-		if (size_nc) {
-			alloc = min(avail, size);
-			alloc = rounddown(alloc, size_nc);
-			if (alloc == 0)
-				continue;
-		} else {
-			alloc = size;
-			if (avail < alloc)
-				continue;
-		}
-
-		/* untyped block, split off a chunk that's a multiple
-		 * of block_size and type it
-		 */
-		if (!this->type) {
-			u32 block = roundup(alloc + splitoff, rmm->block_size);
-			if (this->length < block)
-				continue;
-
-			this = region_split(rmm, this, block);
-			if (!this)
-				return -ENOMEM;
-
-			this->type = type;
-		}
-
-		/* stealing memory from adjacent block */
-		if (alloc > this->length) {
-			u32 amount = alloc - (this->length - splitoff);
-
-			if (!next->type) {
-				amount = roundup(amount, rmm->block_size);
-
-				next = region_split(rmm, next, amount);
-				if (!next)
-					return -ENOMEM;
-
-				next->type = type;
-			}
-
-			this->length += amount;
-			next->offset += amount;
-			next->length -= amount;
-			if (!next->length) {
-				list_del(&next->nl_entry);
-				list_del(&next->fl_entry);
-				kfree(next);
-			}
-		}
-
-		if (splitoff) {
-			if (!region_split(rmm, this, splitoff))
-				return -ENOMEM;
-		}
+		splitoff = s - this->offset;
+		if (splitoff && !region_split(rmm, this, splitoff))
+			return -ENOMEM;
 
-		this = region_split(rmm, this, alloc);
-		if (this == NULL)
+		this = region_split(rmm, this, min(size, e - s));
+		if (!this)
 			return -ENOMEM;
 
-		this->free = false;
+		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
 		return 0;
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return -ENOMEM;
-	heap->free   = true;
 	heap->offset = roundup(offset, block);
 	heap->length = rounddown(offset + length, block) - heap->offset;
 
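
Note: the new node() macro replaces the neighbour lookups that nouveau_mm_merge() used to open-code. A minimal sketch of what node(this, prev) evaluates to after expansion; the macro quietly relies on a variable named rmm being in scope at the call site:

/* equivalent open-coded form of: prev = node(this, prev);
 * yields NULL when the neighbour slot is the list head
 * (&rmm->nodes), i.e. "this" has no previous node
 */
struct nouveau_mm_node *prev;
if (this->nl_entry.prev == &rmm->nodes)
	prev = NULL;
else
	prev = list_entry(this->nl_entry.prev,
			  struct nouveau_mm_node, nl_entry);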
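In the rewritten nouveau_mm_get(), the pair of mask operations shrinks the candidate free range [s, e) to its aligned interior before the size check. The trick assumes align is a power of two; the standalone example below uses made-up values purely to show the rounding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t align = 8, align_mask = align - 1;
	uint32_t s = 13, e = 42;

	s = (s + align_mask) & ~align_mask;	/* round up:   13 -> 16 */
	e &= ~align_mask;			/* round down: 42 -> 40 */

	printf("aligned range [%u, %u), %u units usable\n", s, e, e - s);
	return 0;
}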
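For reference, a minimal usage sketch of the allocator API as it stands after this patch, based only on the signatures visible above; the heap bounds, memory type, and sizes are hypothetical, and error handling is trimmed:

struct nouveau_mm *mm;
struct nouveau_mm_node *node;
int ret;

/* hypothetical heap covering [0, 0x100000), typed in 0x1000-unit blocks */
ret = nouveau_mm_init(&mm, 0, 0x100000, 0x1000);
if (ret)
	return ret;

/* 0x8000 units of (hypothetical) type 1, 0x1000-aligned; size_nc == 0
 * requests one contiguous region, since nouveau_mm_get() takes
 * "size_nc ? size_nc : size" as the minimum acceptable piece
 */
ret = nouveau_mm_get(mm, 1, 0x8000, 0, 0x1000, &node);
if (ret == 0)
	nouveau_mm_put(mm, node);	/* free, untype, merge with neighbours */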