author     Ben Skeggs <bskeggs@redhat.com>   2010-10-31 21:45:02 -0400
committer  Ben Skeggs <bskeggs@redhat.com>   2010-12-03 00:11:48 -0500
commit     e41115d0ad5c40a7ea4d85b1c77b4c02185a5581
tree       ee8c82c17857fdd7989edb89735009b85c3a045c /drivers/gpu/drm/nouveau/nouveau_object.c
parent     dc1e5c0dbff27c2b5147eaea16c578d2337870c3
drm/nouveau: rework gpu-specific instmem interfaces
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
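
The patch below swaps the old monolithic instmem hooks (populate/bind/clear, plus an open-coded aperture search) for a narrower get/map/unmap/put lifecycle, and replaces the bare 0xdeadbeef magic with the named NVOBJ_CINST_GLOBAL sentinel. As a reading aid, here is a minimal sketch of the reworked ops table as it can be inferred from the call sites in this file alone; the authoritative definition lives in nouveau_drv.h, and any hooks or exact prototypes beyond the five used here are assumptions:

/*
 * Sketch only: hook names and argument lists are inferred from the
 * call sites in this diff, not copied from nouveau_drv.h, which
 * carries more hooks than shown here.
 */
struct nouveau_instmem_engine {
	/* allocate backing store for a global object (sets ->vinst) */
	int  (*get)(struct nouveau_gpuobj *gpuobj, u32 size, u32 align);
	/* release that backing store again */
	void (*put)(struct nouveau_gpuobj *gpuobj);
	/* make the object reachable through the aperture (sets ->pinst) */
	int  (*map)(struct nouveau_gpuobj *gpuobj);
	void (*unmap)(struct nouveau_gpuobj *gpuobj);
	/* flush pending nv_wo32() writes to instance memory */
	void (*flush)(struct drm_device *dev);
};

Allocation in nouveau_gpuobj_new() becomes get() followed by map(); teardown in nouveau_gpuobj_del() is the mirror image, unmap() followed by put().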
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_object.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c  112
1 file changed, 37 insertions(+), 75 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8c5e35cc04df..e8c74de905ec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -168,17 +168,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 		   struct nouveau_gpuobj **gpuobj_ret)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *gpuobj;
 	struct drm_mm_node *ramin = NULL;
-	int ret;
+	int ret, i;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
 		 chan ? chan->id : -1, size, align, flags);
 
-	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -193,88 +190,45 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
 	spin_unlock(&dev_priv->ramin_lock);
 
 	if (chan) {
-		NV_DEBUG(dev, "channel heap\n");
-
 		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
 		if (ramin)
 			ramin = drm_mm_get_block(ramin, size, align);
-
 		if (!ramin) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return -ENOMEM;
 		}
-	} else {
-		NV_DEBUG(dev, "global heap\n");
-
-		/* allocate backing pages, sets vinst */
-		ret = engine->instmem.populate(dev, gpuobj, &size, align);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
 
-		/* try and get aperture space */
-		do {
-			if (drm_mm_pre_get(&dev_priv->ramin_heap))
-				return -ENOMEM;
-
-			spin_lock(&dev_priv->ramin_lock);
-			ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-						   align, 0);
-			if (ramin == NULL) {
-				spin_unlock(&dev_priv->ramin_lock);
-				nouveau_gpuobj_ref(NULL, &gpuobj);
-				return -ENOMEM;
-			}
-
-			ramin = drm_mm_get_block_atomic(ramin, size, align);
-			spin_unlock(&dev_priv->ramin_lock);
-		} while (ramin == NULL);
+		gpuobj->pinst = chan->ramin->pinst;
+		if (gpuobj->pinst != ~0)
+			gpuobj->pinst += ramin->start;
 
-		/* on nv50 it's ok to fail, we have a fallback path */
-		if (!ramin && dev_priv->card_type < NV_50) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return -ENOMEM;
-		}
-	}
+		if (dev_priv->card_type < NV_50)
+			gpuobj->cinst = gpuobj->pinst;
+		else
+			gpuobj->cinst = ramin->start;
 
-	/* if we got a chunk of the aperture, map pages into it */
-	gpuobj->im_pramin = ramin;
-	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-		ret = engine->instmem.bind(dev, gpuobj);
+		gpuobj->vinst = ramin->start + chan->ramin->vinst;
+		gpuobj->node = ramin;
+	} else {
+		ret = instmem->get(gpuobj, size, align);
 		if (ret) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return ret;
 		}
-	}
-
-	/* calculate the various different addresses for the object */
-	if (chan) {
-		gpuobj->pinst = chan->ramin->pinst;
-		if (gpuobj->pinst != ~0)
-			gpuobj->pinst += gpuobj->im_pramin->start;
 
-		if (dev_priv->card_type < NV_50) {
-			gpuobj->cinst = gpuobj->pinst;
-		} else {
-			gpuobj->cinst = gpuobj->im_pramin->start;
-			gpuobj->vinst = gpuobj->im_pramin->start +
-					chan->ramin->vinst;
-		}
-	} else {
-		if (gpuobj->im_pramin)
-			gpuobj->pinst = gpuobj->im_pramin->start;
-		else
+		ret = -ENOSYS;
+		if (dev_priv->ramin_available)
+			ret = instmem->map(gpuobj);
+		if (ret)
 			gpuobj->pinst = ~0;
-		gpuobj->cinst = 0xdeadbeef;
+
+		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		int i;
-
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 
@@ -326,26 +280,34 @@ nouveau_gpuobj_del(struct kref *ref)
 		container_of(ref, struct nouveau_gpuobj, refcount);
 	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int i;
 
 	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 	if (gpuobj->dtor)
 		gpuobj->dtor(dev, gpuobj);
 
-	if (gpuobj->im_backing)
-		engine->instmem.clear(dev, gpuobj);
+	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+		if (gpuobj->node) {
+			instmem->unmap(gpuobj);
+			instmem->put(gpuobj);
+		}
+	} else {
+		if (gpuobj->node) {
+			spin_lock(&dev_priv->ramin_lock);
+			drm_mm_put_block(gpuobj->node);
+			spin_unlock(&dev_priv->ramin_lock);
+		}
+	}
 
 	spin_lock(&dev_priv->ramin_lock);
-	if (gpuobj->im_pramin)
-		drm_mm_put_block(gpuobj->im_pramin);
 	list_del(&gpuobj->list);
 	spin_unlock(&dev_priv->ramin_lock);
 
@@ -385,7 +347,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
 	kref_init(&gpuobj->refcount);
 	gpuobj->size  = size;
 	gpuobj->pinst = pinst;
-	gpuobj->cinst = 0xdeadbeef;
+	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	gpuobj->vinst = vinst;
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -935,7 +897,7 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
 	int i;
 
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (gpuobj->cinst != 0xdeadbeef)
+		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
 			continue;
 
 		gpuobj->suspend = vmalloc(gpuobj->size);
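
The last two hunks retire the bare 0xdeadbeef magic in favour of the NVOBJ_CINST_GLOBAL name, so nouveau_gpuobj_del() and the suspend walk above test the same self-describing condition. A hedged sketch of the sentinel follows; its definition lives in nouveau_drv.h rather than this diff, and the value shown is an assumption carried over from the old literal:

/* Assumed definition; not part of this diff.  A channel-local object
 * stores its RAMIN offset in ->cinst, so the sentinel only needs a
 * value that can never be a valid offset. */
#define NVOBJ_CINST_GLOBAL	0xdeadbeef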