author     Ben Skeggs <bskeggs@redhat.com>    2010-06-01 01:32:24 -0400
committer  Ben Skeggs <bskeggs@redhat.com>    2010-07-12 20:12:51 -0400
commit     b833ac26f1f1c8e8d9149d83dbdd91432f2807d5 (patch)
tree       2899ec81e2c41c6942d75d9039748c0a882ce321
parent     d17f395cdcec39033a481f96d75e8b3d3c41d43a (diff)

drm/nouveau: use drm_mm in preference to custom code doing the same thing

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
---
 drivers/gpu/drm/nouveau/nouveau_drv.h      |  24
 drivers/gpu/drm/nouveau/nouveau_mem.c      | 176
 drivers/gpu/drm/nouveau/nouveau_notifier.c |  29
 drivers/gpu/drm/nouveau/nouveau_object.c   |  28
 drivers/gpu/drm/nouveau/nv04_instmem.c     |  12
 drivers/gpu/drm/nouveau/nv50_display.c     |   4
 drivers/gpu/drm/nouveau/nv50_instmem.c     |  12
 7 files changed, 45 insertions(+), 240 deletions(-)
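
Reading aid (not part of the patch): the conversion below is mostly mechanical, with every caller of the removed nouveau_mem_* heap helpers switching to the equivalent drm_mm call. The sketch that follows shows the two-step allocation pattern the converted callers use, assuming the 2010-era drm_mm interface (declarations from the DRM headers); example_pramin_alloc() is a hypothetical helper name, not a function from this patch.

/*
 * Sketch only -- not part of the patch.  Rough mapping from the removed
 * helpers to drm_mm, as used by the hunks below:
 *   nouveau_mem_init_heap()   -> drm_mm_init()
 *   nouveau_mem_alloc_block() -> drm_mm_search_free() + drm_mm_get_block()
 *   nouveau_mem_free_block()  -> drm_mm_put_block()
 *   nouveau_mem_takedown()    -> drm_mm_takedown()
 */
static struct drm_mm_node *
example_pramin_alloc(struct drm_mm *heap, unsigned long size, unsigned alignment)
{
	struct drm_mm_node *node;

	/* Find a free hole large enough, then carve the allocation out of it. */
	node = drm_mm_search_free(heap, size, alignment, 0);
	if (node)
		node = drm_mm_get_block(node, size, alignment);

	return node;	/* NULL when the heap is full */
}

Note also that the heaps become embedded struct drm_mm members rather than separately allocated pointers, which is why the old NULL checks turn into list-initialisation checks (ramin_heap.ml_entry.next, ramin_heap.free_stack.next) in nouveau_object.c.
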
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c69719106489..20c54884dcb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,14 +123,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
 	return ioptr;
 }
 
-struct mem_block {
-	struct mem_block *next;
-	struct mem_block *prev;
-	uint64_t start;
-	uint64_t size;
-	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
 enum nouveau_flags {
 	NV_NFORCE  = 0x10000000,
 	NV_NFORCE2 = 0x20000000
@@ -149,7 +141,7 @@ struct nouveau_gpuobj {
 	struct list_head list;
 
 	struct nouveau_channel *im_channel;
-	struct mem_block *im_pramin;
+	struct drm_mm_node *im_pramin;
 	struct nouveau_bo *im_backing;
 	uint32_t im_backing_start;
 	uint32_t *im_backing_suspend;
@@ -206,7 +198,7 @@ struct nouveau_channel {
 
 	/* Notifier memory */
 	struct nouveau_bo *notifier_bo;
-	struct mem_block *notifier_heap;
+	struct drm_mm notifier_heap;
 
 	/* PFIFO context */
 	struct nouveau_gpuobj_ref *ramfc;
@@ -224,7 +216,7 @@ struct nouveau_channel {
 
 	/* Objects */
 	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
-	struct mem_block *ramin_heap; /* Private PRAMIN heap */
+	struct drm_mm ramin_heap; /* Private PRAMIN heap */
 	struct nouveau_gpuobj_ref *ramht; /* Hash table */
 	struct list_head ramht_refs; /* Objects referenced by RAMHT */
 
@@ -595,7 +587,7 @@ struct drm_nouveau_private {
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
 
-	struct mem_block *ramin_heap;
+	struct drm_mm ramin_heap;
 
 	/* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
 	uint32_t ctx_table_size;
@@ -707,15 +699,7 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
 /* nouveau_mem.c */
-extern int  nouveau_mem_init_heap(struct mem_block **, uint64_t start,
-				  uint64_t size);
-extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
-						 uint64_t size, int align2,
-						 struct drm_file *, int tail);
-extern void nouveau_mem_takedown(struct mem_block **heap);
-extern void nouveau_mem_free_block(struct mem_block *);
 extern int  nouveau_mem_detect(struct drm_device *dev);
-extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
 extern int  nouveau_mem_init(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fb6b791506b2..4274281f45ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,162 +35,6 @@
 #include "drm_sarea.h"
 #include "nouveau_drv.h"
 
-static struct mem_block *
-split_block(struct mem_block *p, uint64_t start, uint64_t size,
-	    struct drm_file *file_priv)
-{
-	/* Maybe cut off the start of an existing block */
-	if (start > p->start) {
-		struct mem_block *newblock =
-			kmalloc(sizeof(*newblock), GFP_KERNEL);
-		if (!newblock)
-			goto out;
-		newblock->start = start;
-		newblock->size = p->size - (start - p->start);
-		newblock->file_priv = NULL;
-		newblock->next = p->next;
-		newblock->prev = p;
-		p->next->prev = newblock;
-		p->next = newblock;
-		p->size -= newblock->size;
-		p = newblock;
-	}
-
-	/* Maybe cut off the end of an existing block */
-	if (size < p->size) {
-		struct mem_block *newblock =
-			kmalloc(sizeof(*newblock), GFP_KERNEL);
-		if (!newblock)
-			goto out;
-		newblock->start = start + size;
-		newblock->size = p->size - size;
-		newblock->file_priv = NULL;
-		newblock->next = p->next;
-		newblock->prev = p;
-		p->next->prev = newblock;
-		p->next = newblock;
-		p->size = size;
-	}
-
-out:
-	/* Our block is in the middle */
-	p->file_priv = file_priv;
-	return p;
-}
-
-struct mem_block *
-nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
-			int align2, struct drm_file *file_priv, int tail)
-{
-	struct mem_block *p;
-	uint64_t mask = (1 << align2) - 1;
-
-	if (!heap)
-		return NULL;
-
-	if (tail) {
-		list_for_each_prev(p, heap) {
-			uint64_t start = ((p->start + p->size) - size) & ~mask;
-
-			if (p->file_priv == NULL && start >= p->start &&
-			    start + size <= p->start + p->size)
-				return split_block(p, start, size, file_priv);
-		}
-	} else {
-		list_for_each(p, heap) {
-			uint64_t start = (p->start + mask) & ~mask;
-
-			if (p->file_priv == NULL &&
-			    start + size <= p->start + p->size)
-				return split_block(p, start, size, file_priv);
-		}
-	}
-
-	return NULL;
-}
-
-void nouveau_mem_free_block(struct mem_block *p)
-{
-	p->file_priv = NULL;
-
-	/* Assumes a single contiguous range. Needs a special file_priv in
-	 * 'heap' to stop it being subsumed.
-	 */
-	if (p->next->file_priv == NULL) {
-		struct mem_block *q = p->next;
-		p->size += q->size;
-		p->next = q->next;
-		p->next->prev = p;
-		kfree(q);
-	}
-
-	if (p->prev->file_priv == NULL) {
-		struct mem_block *q = p->prev;
-		q->size += p->size;
-		q->next = p->next;
-		q->next->prev = q;
-		kfree(p);
-	}
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
-			  uint64_t size)
-{
-	struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
-	if (!blocks)
-		return -ENOMEM;
-
-	*heap = kmalloc(sizeof(**heap), GFP_KERNEL);
-	if (!*heap) {
-		kfree(blocks);
-		return -ENOMEM;
-	}
-
-	blocks->start = start;
-	blocks->size = size;
-	blocks->file_priv = NULL;
-	blocks->next = blocks->prev = *heap;
-
-	memset(*heap, 0, sizeof(**heap));
-	(*heap)->file_priv = (struct drm_file *) -1;
-	(*heap)->next = (*heap)->prev = blocks;
-	return 0;
-}
-
-/*
- * Free all blocks associated with the releasing file_priv
- */
-void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
-{
-	struct mem_block *p;
-
-	if (!heap || !heap->next)
-		return;
-
-	list_for_each(p, heap) {
-		if (p->file_priv == file_priv)
-			p->file_priv = NULL;
-	}
-
-	/* Assumes a single contiguous range. Needs a special file_priv in
-	 * 'heap' to stop it being subsumed.
-	 */
-	list_for_each(p, heap) {
-		while ((p->file_priv == NULL) &&
-		       (p->next->file_priv == NULL) &&
-		       (p->next != heap)) {
-			struct mem_block *q = p->next;
-			p->size += q->size;
-			p->next = q->next;
-			p->next->prev = p;
-			kfree(q);
-		}
-	}
-}
-
 /*
  * NV10-NV40 tiling helpers
  */
@@ -421,24 +265,8 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 /*
  * Cleanup everything
  */
-void nouveau_mem_takedown(struct mem_block **heap)
-{
-	struct mem_block *p;
-
-	if (!*heap)
-		return;
-
-	for (p = (*heap)->next; p != *heap;) {
-		struct mem_block *q = p;
-		p = p->next;
-		kfree(q);
-	}
-
-	kfree(*heap);
-	*heap = NULL;
-}
-
-void nouveau_mem_close(struct drm_device *dev)
+void
+nouveau_mem_close(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 9537f3e30115..32f7fbd7484d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -55,7 +55,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
 	if (ret)
 		goto out_err;
 
-	ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
+	ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
 	if (ret)
 		goto out_err;
 
@@ -80,7 +80,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	nouveau_bo_unpin(chan->notifier_bo);
 	mutex_unlock(&dev->struct_mutex);
 	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
-	nouveau_mem_takedown(&chan->notifier_heap);
+	drm_mm_takedown(&chan->notifier_heap);
 }
 
 static void
@@ -90,7 +90,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
 	NV_DEBUG(dev, "\n");
 
 	if (gpuobj->priv)
-		nouveau_mem_free_block(gpuobj->priv);
+		drm_mm_put_block(gpuobj->priv);
 }
 
 int
@@ -100,18 +100,13 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *nobj = NULL;
-	struct mem_block *mem;
+	struct drm_mm_node *mem;
 	uint32_t offset;
 	int target, ret;
 
-	if (!chan->notifier_heap) {
-		NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
-			 chan->id);
-		return -EINVAL;
-	}
-
-	mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
-				      (struct drm_file *)-2, 0);
+	mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+	if (mem)
+		mem = drm_mm_get_block(mem, size, 0);
 	if (!mem) {
 		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
 		return -ENOMEM;
@@ -144,17 +139,17 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 				     mem->size, NV_DMA_ACCESS_RW, target,
 				     &nobj);
 	if (ret) {
-		nouveau_mem_free_block(mem);
+		drm_mm_put_block(mem);
 		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
 		return ret;
 	}
 	nobj->dtor = nouveau_notifier_gpuobj_dtor;
 	nobj->priv = mem;
 
 	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
 	if (ret) {
 		nouveau_gpuobj_del(dev, &nobj);
-		nouveau_mem_free_block(mem);
+		drm_mm_put_block(mem);
 		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
 		return ret;
 	}
@@ -170,7 +165,7 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
 		return -EINVAL;
 
 	if (poffset) {
-		struct mem_block *mem = nobj->priv;
+		struct drm_mm_node *mem = nobj->priv;
 
 		if (*poffset >= mem->size)
 			return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e7c100ba63a1..d436c3c7f4f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -209,7 +209,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
 	struct nouveau_gpuobj *gpuobj;
-	struct mem_block *pramin = NULL;
+	struct drm_mm *pramin = NULL;
 	int ret;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -233,17 +233,17 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	 * available.
 	 */
 	if (chan) {
-		if (chan->ramin_heap) {
+		if (chan->ramin_heap.ml_entry.next) {
 			NV_DEBUG(dev, "private heap\n");
-			pramin = chan->ramin_heap;
+			pramin = &chan->ramin_heap;
 		} else
 		if (dev_priv->card_type < NV_50) {
 			NV_DEBUG(dev, "global heap fallback\n");
-			pramin = dev_priv->ramin_heap;
+			pramin = &dev_priv->ramin_heap;
 		}
 	} else {
 		NV_DEBUG(dev, "global heap\n");
-		pramin = dev_priv->ramin_heap;
+		pramin = &dev_priv->ramin_heap;
 	}
 
 	if (!pramin) {
@@ -260,9 +260,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	}
 
 	/* Allocate a chunk of the PRAMIN aperture */
-	gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
-						    drm_order(align),
-						    (struct drm_file *)-2, 0);
+	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
+	if (gpuobj->im_pramin)
+		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+
 	if (!gpuobj->im_pramin) {
 		nouveau_gpuobj_del(dev, &gpuobj);
 		return -ENOMEM;
@@ -386,7 +387,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
 		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
 			kfree(gpuobj->im_pramin);
 		else
-			nouveau_mem_free_block(gpuobj->im_pramin);
+			drm_mm_put_block(gpuobj->im_pramin);
 	}
 
 	list_del(&gpuobj->list);
@@ -589,7 +590,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
 	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
 
 	if (p_offset != ~0) {
-		gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
 					    GFP_KERNEL);
 		if (!gpuobj->im_pramin) {
 			nouveau_gpuobj_del(dev, &gpuobj);
@@ -944,8 +945,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 	}
 	pramin = chan->ramin->gpuobj;
 
-	ret = nouveau_mem_init_heap(&chan->ramin_heap,
-				    pramin->im_pramin->start + base, size);
+	ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
 	if (ret) {
 		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
 		nouveau_gpuobj_ref_del(dev, &chan->ramin);
@@ -1130,8 +1130,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
 		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
 
-	if (chan->ramin_heap)
-		nouveau_mem_takedown(&chan->ramin_heap);
+	if (chan->ramin_heap.free_stack.next)
+		drm_mm_takedown(&chan->ramin_heap);
 	if (chan->ramin)
 		nouveau_gpuobj_ref_del(dev, &chan->ramin);
 
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a3b9563a6f60..17af702d6ddc 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -106,7 +106,7 @@ int nv04_instmem_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t offset;
-	int ret = 0;
+	int ret;
 
 	nv04_instmem_determine_amount(dev);
 	nv04_instmem_configure_fixed_tables(dev);
@@ -129,14 +129,14 @@ int nv04_instmem_init(struct drm_device *dev)
 		offset = 0x40000;
 	}
 
-	ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
-				    offset, dev_priv->ramin_rsvd_vram - offset);
+	ret = drm_mm_init(&dev_priv->ramin_heap, offset,
+			  dev_priv->ramin_rsvd_vram - offset);
 	if (ret) {
-		dev_priv->ramin_heap = NULL;
-		NV_ERROR(dev, "Failed to init RAMIN heap\n");
+		NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
+		return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 void
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 580a5d10be93..515edde2c59f 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -110,8 +110,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
 		return ret;
 	}
 
-	ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
-				    im_pramin->start, 32768);
+	ret = drm_mm_init(&chan->ramin_heap,
+			  chan->ramin->gpuobj->im_pramin->start, 32768);
 	if (ret) {
 		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
 		nv50_evo_channel_del(pchan);
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 71c01b6e5731..a361d1612bd7 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -147,7 +147,7 @@ nv50_instmem_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+	if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base))
 		return -ENOMEM;
 
 	/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
@@ -276,9 +276,7 @@ nv50_instmem_init(struct drm_device *dev)
 	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
 
 	/* Global PRAMIN heap */
-	if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
-				  c_size, dev_priv->ramin_size - c_size)) {
-		dev_priv->ramin_heap = NULL;
+	if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
 		NV_ERROR(dev, "Failed to init RAMIN heap\n");
 	}
 
@@ -321,7 +319,7 @@ nv50_instmem_takedown(struct drm_device *dev)
 	nouveau_gpuobj_del(dev, &chan->vm_pd);
 	nouveau_gpuobj_ref_del(dev, &chan->ramfc);
 	nouveau_gpuobj_ref_del(dev, &chan->ramin);
-	nouveau_mem_takedown(&chan->ramin_heap);
+	drm_mm_takedown(&chan->ramin_heap);
 
 	dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
 	kfree(chan);
@@ -436,14 +434,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
 
-	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
 	pte     = (gpuobj->im_pramin->start >> 12) << 1;
 	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->im_backing_start;
 
-	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 