-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h    |  44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c  |  56
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c   |  50
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c   | 130
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c   | 121
6 files changed, 270 insertions(+), 243 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 18a611e1ab8..822cd40b3eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,15 +146,16 @@ enum nouveau_flags {
 
 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+
+#define NVOBJ_CINST_GLOBAL 0xdeadbeef
+
 struct nouveau_gpuobj {
 	struct drm_device *dev;
 	struct kref refcount;
 	struct list_head list;
 
-	struct drm_mm_node *im_pramin;
-	struct nouveau_bo *im_backing;
+	void *node;
 	u32 *suspend;
-	int im_bound;
 
 	uint32_t flags;
 
@@ -288,11 +289,11 @@ struct nouveau_instmem_engine {
 	int  (*suspend)(struct drm_device *dev);
 	void (*resume)(struct drm_device *dev);
 
-	int  (*populate)(struct drm_device *, struct nouveau_gpuobj *,
-			 u32 *size, u32 align);
-	void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
-	int  (*bind)(struct drm_device *, struct nouveau_gpuobj *);
-	int  (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+	int  (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+	void (*put)(struct nouveau_gpuobj *);
+	int  (*map)(struct nouveau_gpuobj *);
+	void (*unmap)(struct nouveau_gpuobj *);
+
 	void (*flush)(struct drm_device *);
 };
 
@@ -1182,11 +1183,10 @@ extern int nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				 u32 *size, u32 align);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
@@ -1194,11 +1194,10 @@ extern int nv50_instmem_init(struct drm_device *);
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				 u32 *size, u32 align);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
 extern void nv50_vm_flush(struct drm_device *, int engine);
@@ -1208,11 +1207,10 @@ extern int nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				 u32 *size, u32 align);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nvc0_instmem_put(struct nouveau_gpuobj *);
+extern int nvc0_instmem_map(struct nouveau_gpuobj *);
+extern void nvc0_instmem_unmap(struct nouveau_gpuobj *);
 extern void nvc0_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
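
Note: for orientation, the sketch below shows how a caller could drive the reworked vtable for a global (non-channel) object: get() reserves backing VRAM and sets vinst, map() assigns PRAMIN aperture space and sets pinst, and unmap()/put() undo the two steps. This is an illustrative sketch under kernel context, not part of the patch; real callers go through nouveau_gpuobj_new() (see nouveau_object.c below), and the size/align values here are arbitrary.

	static int example_global_gpuobj(struct drm_device *dev,
					 struct nouveau_gpuobj *obj)
	{
		struct drm_nouveau_private *dev_priv = dev->dev_private;
		struct nouveau_instmem_engine *im = &dev_priv->engine.instmem;
		int ret;

		ret = im->get(obj, 0x1000, 0x100);	/* reserve backing VRAM, sets vinst */
		if (ret)
			return ret;

		ret = im->map(obj);			/* assign PRAMIN space, sets pinst */
		if (ret) {
			im->put(obj);
			return ret;
		}

		nv_wo32(obj, 0, 0xcafe0001);		/* object is now CPU-accessible */
		im->flush(dev);

		im->unmap(obj);
		im->put(obj);
		return 0;
	}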
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8c5e35cc04d..e8c74de905e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -168,17 +168,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 		   struct nouveau_gpuobj **gpuobj_ret)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *gpuobj;
 	struct drm_mm_node *ramin = NULL;
-	int ret;
+	int ret, i;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
 		 chan ? chan->id : -1, size, align, flags);
 
-	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -193,88 +190,45 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	spin_unlock(&dev_priv->ramin_lock);
 
 	if (chan) {
-		NV_DEBUG(dev, "channel heap\n");
-
 		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
 		if (ramin)
 			ramin = drm_mm_get_block(ramin, size, align);
-
 		if (!ramin) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return -ENOMEM;
 		}
-	} else {
-		NV_DEBUG(dev, "global heap\n");
-
-		/* allocate backing pages, sets vinst */
-		ret = engine->instmem.populate(dev, gpuobj, &size, align);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
 
-		/* try and get aperture space */
-		do {
-			if (drm_mm_pre_get(&dev_priv->ramin_heap))
-				return -ENOMEM;
-
-			spin_lock(&dev_priv->ramin_lock);
-			ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-						   align, 0);
-			if (ramin == NULL) {
-				spin_unlock(&dev_priv->ramin_lock);
-				nouveau_gpuobj_ref(NULL, &gpuobj);
-				return -ENOMEM;
-			}
-
-			ramin = drm_mm_get_block_atomic(ramin, size, align);
-			spin_unlock(&dev_priv->ramin_lock);
-		} while (ramin == NULL);
+		gpuobj->pinst = chan->ramin->pinst;
+		if (gpuobj->pinst != ~0)
+			gpuobj->pinst += ramin->start;
 
-		/* on nv50 it's ok to fail, we have a fallback path */
-		if (!ramin && dev_priv->card_type < NV_50) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return -ENOMEM;
-		}
-	}
+		if (dev_priv->card_type < NV_50)
+			gpuobj->cinst = gpuobj->pinst;
+		else
+			gpuobj->cinst = ramin->start;
 
-	/* if we got a chunk of the aperture, map pages into it */
-	gpuobj->im_pramin = ramin;
-	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-		ret = engine->instmem.bind(dev, gpuobj);
+		gpuobj->vinst = ramin->start + chan->ramin->vinst;
+		gpuobj->node = ramin;
+	} else {
+		ret = instmem->get(gpuobj, size, align);
 		if (ret) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return ret;
 		}
-	}
-
-	/* calculate the various different addresses for the object */
-	if (chan) {
-		gpuobj->pinst = chan->ramin->pinst;
-		if (gpuobj->pinst != ~0)
-			gpuobj->pinst += gpuobj->im_pramin->start;
 
-		if (dev_priv->card_type < NV_50) {
-			gpuobj->cinst = gpuobj->pinst;
-		} else {
-			gpuobj->cinst = gpuobj->im_pramin->start;
-			gpuobj->vinst = gpuobj->im_pramin->start +
-					chan->ramin->vinst;
-		}
-	} else {
-		if (gpuobj->im_pramin)
-			gpuobj->pinst = gpuobj->im_pramin->start;
-		else
+		ret = -ENOSYS;
+		if (dev_priv->ramin_available)
+			ret = instmem->map(gpuobj);
+		if (ret)
 			gpuobj->pinst = ~0;
-		gpuobj->cinst = 0xdeadbeef;
+
+		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		int i;
-
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 
@@ -326,26 +280,34 @@ nouveau_gpuobj_del(struct kref *ref)
 		container_of(ref, struct nouveau_gpuobj, refcount);
 	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int i;
 
 	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 	if (gpuobj->dtor)
 		gpuobj->dtor(dev, gpuobj);
 
-	if (gpuobj->im_backing)
-		engine->instmem.clear(dev, gpuobj);
+	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+		if (gpuobj->node) {
+			instmem->unmap(gpuobj);
+			instmem->put(gpuobj);
+		}
+	} else {
+		if (gpuobj->node) {
+			spin_lock(&dev_priv->ramin_lock);
+			drm_mm_put_block(gpuobj->node);
+			spin_unlock(&dev_priv->ramin_lock);
+		}
+	}
 
 	spin_lock(&dev_priv->ramin_lock);
-	if (gpuobj->im_pramin)
-		drm_mm_put_block(gpuobj->im_pramin);
 	list_del(&gpuobj->list);
 	spin_unlock(&dev_priv->ramin_lock);
 
@@ -385,7 +347,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
 	kref_init(&gpuobj->refcount);
 	gpuobj->size = size;
 	gpuobj->pinst = pinst;
-	gpuobj->cinst = 0xdeadbeef;
+	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	gpuobj->vinst = vinst;
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -935,7 +897,7 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
 	int i;
 
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (gpuobj->cinst != 0xdeadbeef)
+		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
 			continue;
 
 		gpuobj->suspend = vmalloc(gpuobj->size);
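
Note: the nouveau_gpuobj_new() hunk above splits the address bookkeeping cleanly between the two allocation paths. As a recap (illustrative, derived from the code above; not additional patch content):

	/* channel object (suballocated from chan->ramin_heap):
	 *   pinst = chan->ramin->pinst + node->start   (if the channel is in PRAMIN)
	 *   cinst = pinst                              (pre-NV50)
	 *   cinst = node->start                        (NV50+: offset within channel)
	 *   vinst = chan->ramin->vinst + node->start
	 *
	 * global object (instmem-backed):
	 *   vinst set by instmem->get(); pinst set by instmem->map() (~0 if unmapped)
	 *   cinst = NVOBJ_CINST_GLOBAL                 (sentinel: no channel offset)
	 */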
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index b26b34c419c..b42e29d1935 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nv04_instmem_takedown;
 		engine->instmem.suspend = nv04_instmem_suspend;
 		engine->instmem.resume = nv04_instmem_resume;
-		engine->instmem.populate = nv04_instmem_populate;
-		engine->instmem.clear = nv04_instmem_clear;
-		engine->instmem.bind = nv04_instmem_bind;
-		engine->instmem.unbind = nv04_instmem_unbind;
+		engine->instmem.get = nv04_instmem_get;
+		engine->instmem.put = nv04_instmem_put;
+		engine->instmem.map = nv04_instmem_map;
+		engine->instmem.unmap = nv04_instmem_unmap;
 		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
@@ -106,10 +106,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nv04_instmem_takedown;
 		engine->instmem.suspend = nv04_instmem_suspend;
 		engine->instmem.resume = nv04_instmem_resume;
-		engine->instmem.populate = nv04_instmem_populate;
-		engine->instmem.clear = nv04_instmem_clear;
-		engine->instmem.bind = nv04_instmem_bind;
-		engine->instmem.unbind = nv04_instmem_unbind;
+		engine->instmem.get = nv04_instmem_get;
+		engine->instmem.put = nv04_instmem_put;
+		engine->instmem.map = nv04_instmem_map;
+		engine->instmem.unmap = nv04_instmem_unmap;
 		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
@@ -163,10 +163,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nv04_instmem_takedown;
 		engine->instmem.suspend = nv04_instmem_suspend;
 		engine->instmem.resume = nv04_instmem_resume;
-		engine->instmem.populate = nv04_instmem_populate;
-		engine->instmem.clear = nv04_instmem_clear;
-		engine->instmem.bind = nv04_instmem_bind;
-		engine->instmem.unbind = nv04_instmem_unbind;
+		engine->instmem.get = nv04_instmem_get;
+		engine->instmem.put = nv04_instmem_put;
+		engine->instmem.map = nv04_instmem_map;
+		engine->instmem.unmap = nv04_instmem_unmap;
 		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
@@ -220,10 +220,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nv04_instmem_takedown;
 		engine->instmem.suspend = nv04_instmem_suspend;
 		engine->instmem.resume = nv04_instmem_resume;
-		engine->instmem.populate = nv04_instmem_populate;
-		engine->instmem.clear = nv04_instmem_clear;
-		engine->instmem.bind = nv04_instmem_bind;
-		engine->instmem.unbind = nv04_instmem_unbind;
+		engine->instmem.get = nv04_instmem_get;
+		engine->instmem.put = nv04_instmem_put;
+		engine->instmem.map = nv04_instmem_map;
+		engine->instmem.unmap = nv04_instmem_unmap;
 		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv04_mc_init;
 		engine->mc.takedown = nv04_mc_takedown;
@@ -280,10 +280,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nv04_instmem_takedown;
 		engine->instmem.suspend = nv04_instmem_suspend;
 		engine->instmem.resume = nv04_instmem_resume;
-		engine->instmem.populate = nv04_instmem_populate;
-		engine->instmem.clear = nv04_instmem_clear;
-		engine->instmem.bind = nv04_instmem_bind;
-		engine->instmem.unbind = nv04_instmem_unbind;
+		engine->instmem.get = nv04_instmem_get;
+		engine->instmem.put = nv04_instmem_put;
+		engine->instmem.map = nv04_instmem_map;
+		engine->instmem.unmap = nv04_instmem_unmap;
 		engine->instmem.flush = nv04_instmem_flush;
 		engine->mc.init = nv40_mc_init;
 		engine->mc.takedown = nv40_mc_takedown;
@@ -343,10 +343,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nv50_instmem_takedown;
 		engine->instmem.suspend = nv50_instmem_suspend;
 		engine->instmem.resume = nv50_instmem_resume;
-		engine->instmem.populate = nv50_instmem_populate;
-		engine->instmem.clear = nv50_instmem_clear;
-		engine->instmem.bind = nv50_instmem_bind;
-		engine->instmem.unbind = nv50_instmem_unbind;
+		engine->instmem.get = nv50_instmem_get;
+		engine->instmem.put = nv50_instmem_put;
+		engine->instmem.map = nv50_instmem_map;
+		engine->instmem.unmap = nv50_instmem_unmap;
 		if (dev_priv->chipset == 0x50)
 			engine->instmem.flush = nv50_instmem_flush;
 		else
@@ -449,10 +449,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->instmem.takedown = nvc0_instmem_takedown;
 		engine->instmem.suspend = nvc0_instmem_suspend;
 		engine->instmem.resume = nvc0_instmem_resume;
-		engine->instmem.populate = nvc0_instmem_populate;
-		engine->instmem.clear = nvc0_instmem_clear;
-		engine->instmem.bind = nvc0_instmem_bind;
-		engine->instmem.unbind = nvc0_instmem_unbind;
+		engine->instmem.get = nvc0_instmem_get;
+		engine->instmem.put = nvc0_instmem_put;
+		engine->instmem.map = nvc0_instmem_map;
+		engine->instmem.unmap = nvc0_instmem_unmap;
 		engine->instmem.flush = nvc0_instmem_flush;
 		engine->mc.init = nv50_mc_init;
 		engine->mc.takedown = nv50_mc_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 554e55d0ec4..b8e3edb5c06 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      u32 *size, u32 align)
+nv04_instmem_suspend(struct drm_device *dev)
 {
 	return 0;
 }
 
 void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_resume(struct drm_device *dev)
 {
-	return 0;
 }
 
 int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct drm_mm_node *ramin = NULL;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, size, align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	gpuobj->node = ramin;
+	gpuobj->vinst = ramin->start;
 	return 0;
 }
 
 void
-nv04_instmem_flush(struct drm_device *dev)
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(gpuobj->node);
+	gpuobj->node = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 int
-nv04_instmem_suspend(struct drm_device *dev)
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
+	gpuobj->pinst = gpuobj->vinst;
 	return 0;
 }
 
 void
-nv04_instmem_resume(struct drm_device *dev)
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
 }
 
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
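
Note: the do/while in nv04_instmem_get() above is the standard two-phase drm_mm pattern of this era, annotated below for clarity (same code as the hunk; the comments are editorial):

	do {
		/* preallocate node memory while no lock is held; may sleep */
		if (drm_mm_pre_get(&dev_priv->ramin_heap))
			return -ENOMEM;

		spin_lock(&dev_priv->ramin_lock);
		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
		if (ramin == NULL) {
			/* heap genuinely full: give up */
			spin_unlock(&dev_priv->ramin_lock);
			return -ENOMEM;
		}

		/* cannot sleep under the spinlock, hence the _atomic variant */
		ramin = drm_mm_get_block_atomic(ramin, size, align);
		spin_unlock(&dev_priv->ramin_lock);
	} while (ramin == NULL);	/* lost a race for the block: retry */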
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 1640c12d8b3..87160952a30 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -157,10 +157,7 @@ nv50_instmem_init(struct drm_device *dev)
 	nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
 	nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
 
-	/* map channel into PRAMIN, gpuobj didn't do it for us */
-	ret = nv50_instmem_bind(dev, chan->ramin);
-	if (ret)
-		return ret;
+	nv50_instmem_map(chan->ramin);
 
 	/* poke regs... */
 	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
@@ -305,72 +302,91 @@ nv50_instmem_resume(struct drm_device *dev)
 	dev_priv->ramin_available = true;
 }
 
+struct nv50_gpuobj_node {
+	struct nouveau_bo *vram;
+	struct drm_mm_node *ramin;
+	u32 align;
+};
+
+
 int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      u32 *size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_device *dev = gpuobj->dev;
+	struct nv50_gpuobj_node *node = NULL;
 	int ret;
 
-	if (gpuobj->im_backing)
-		return -EINVAL;
-
-	*size = ALIGN(*size, 4096);
-	if (*size == 0)
-		return -EINVAL;
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->align = align;
 
-	ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, true, false, &gpuobj->im_backing);
+	ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, true, false, &node->vram);
 	if (ret) {
 		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
 		return ret;
 	}
 
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+	ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
 	if (ret) {
 		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
+		nouveau_bo_ref(NULL, &node->vram);
 		return ret;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+	gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
+	gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
+	gpuobj->node = node;
 	return 0;
 }
 
 void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_gpuobj_node *node;
 
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
-	}
+	node = gpuobj->node;
+	gpuobj->node = NULL;
+
+	nouveau_bo_unpin(node->vram);
+	nouveau_bo_ref(NULL, &node->vram);
+	kfree(node);
 }
 
 int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
-	uint32_t pte, pte_end;
-	uint64_t vram;
-
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
+	struct nv50_gpuobj_node *node = gpuobj->node;
+	struct drm_device *dev = gpuobj->dev;
+	struct drm_mm_node *ramin = NULL;
+	u32 pte, pte_end;
+	u64 vram;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
					   node->align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
 
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+		ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+	pte     = (ramin->start >> 12) << 1;
+	pte_end = ((ramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->vinst;
 
 	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
+		 ramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
 	vram |= 1;
@@ -380,8 +396,8 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	}
 
 	while (pte < pte_end) {
-		nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
-		nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
+		nv_wo32(priv->pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
+		nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
 		vram += 0x1000;
 		pte += 2;
 	}
@@ -389,36 +405,36 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 
 	nv50_vm_flush(dev, 6);
 
-	gpuobj->im_bound = 1;
+	node->ramin = ramin;
+	gpuobj->pinst = ramin->start;
 	return 0;
 }
 
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end;
-
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
+	struct nv50_gpuobj_node *node = gpuobj->node;
+	u32 pte, pte_end;
 
-	/* can happen during late takedown */
-	if (unlikely(!dev_priv->ramin_available))
-		return 0;
+	if (!node->ramin || !dev_priv->ramin_available)
+		return;
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+	pte     = (node->ramin->start >> 12) << 1;
+	pte_end = ((node->ramin->size >> 12) << 1) + pte;
 
 	while (pte < pte_end) {
 		nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
 		nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
 		pte += 2;
 	}
-	dev_priv->engine.instmem.flush(dev);
+	dev_priv->engine.instmem.flush(gpuobj->dev);
 
-	gpuobj->im_bound = 0;
-	return 0;
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(node->ramin);
+	node->ramin = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
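
Note: the PTE arithmetic in nv50_instmem_map()/unmap() above follows from each 4 KiB PRAMIN page being described by two 32-bit page-table words: the word index for a page is (offset >> 12) << 1, and its byte offset in the table is pte * 4, with the low half of the VRAM address (valid bit set) in the first word and the high half in the second. A standalone user-space illustration of that indexing (the start/size/vram values are hypothetical, not driver data):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t start = 0x2000;          /* hypothetical PRAMIN offset */
		uint64_t size  = 0x3000;          /* three 4 KiB pages */
		uint64_t vram  = 0x40000000 | 1;  /* backing VRAM address, valid bit set */
		uint32_t pte     = (start >> 12) << 1;
		uint32_t pte_end = ((size >> 12) << 1) + pte;

		while (pte < pte_end) {
			printf("PT+0x%02x = 0x%08x  PT+0x%02x = 0x%08x\n",
			       (unsigned)(pte * 4 + 0), (unsigned)(vram & 0xffffffff),
			       (unsigned)(pte * 4 + 4), (unsigned)(vram >> 32));
			vram += 0x1000;   /* next 4 KiB VRAM page */
			pte  += 2;        /* next PTE pair */
		}
		return 0;
	}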
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 7b4e71f5c27..39232085193 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -26,67 +26,89 @@
 
 #include "nouveau_drv.h"
 
+struct nvc0_gpuobj_node {
+	struct nouveau_bo *vram;
+	struct drm_mm_node *ramin;
+	u32 align;
+};
+
 int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      u32 *size, u32 align)
+nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_device *dev = gpuobj->dev;
+	struct nvc0_gpuobj_node *node = NULL;
 	int ret;
 
-	*size = ALIGN(*size, 4096);
-	if (*size == 0)
-		return -EINVAL;
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->align = align;
 
-	ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, true, false, &gpuobj->im_backing);
+	ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, true, false, &node->vram);
 	if (ret) {
 		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
 		return ret;
 	}
 
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+	ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
 	if (ret) {
 		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
+		nouveau_bo_ref(NULL, &node->vram);
 		return ret;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+	gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
+	gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
+	gpuobj->node = node;
 	return 0;
 }
 
 void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_gpuobj_node *node;
 
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
-	}
+	node = gpuobj->node;
+	gpuobj->node = NULL;
+
+	nouveau_bo_unpin(node->vram);
+	nouveau_bo_ref(NULL, &node->vram);
+	kfree(node);
 }
 
 int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
-	uint64_t vram;
-
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nvc0_gpuobj_node *node = gpuobj->node;
+	struct drm_device *dev = gpuobj->dev;
+	struct drm_mm_node *ramin = NULL;
+	u32 pte, pte_end;
+	u64 vram;
 
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
					   node->align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	pte     = (ramin->start >> 12) << 1;
+	pte_end = ((ramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->vinst;
 
 	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
+		 ramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
 	while (pte < pte_end) {
@@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 		nv_wr32(dev, 0x100cbc, 0x80000005);
 	}
 
-	gpuobj->im_bound = 1;
+	node->ramin = ramin;
+	gpuobj->pinst = ramin->start;
 	return 0;
 }
 
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nvc0_gpuobj_node *node = gpuobj->node;
+	u32 pte, pte_end;
 
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
+	if (!node->ramin || !dev_priv->ramin_available)
+		return;
+
+	pte     = (node->ramin->start >> 12) << 1;
+	pte_end = ((node->ramin->size >> 12) << 1) + pte;
 
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
 	while (pte < pte_end) {
-		nv_wr32(dev, 0x702000 + (pte * 8), 0);
-		nv_wr32(dev, 0x702004 + (pte * 8), 0);
+		nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
+		nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
 		pte++;
 	}
-	dev_priv->engine.instmem.flush(dev);
+	dev_priv->engine.instmem.flush(gpuobj->dev);
 
-	gpuobj->im_bound = 0;
-	return 0;
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(node->ramin);
+	node->ramin = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
 }
 
 void