| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-21 14:14:52 -0400 |
|-----------|------------------------------------------------|---------------------------|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-05-21 14:14:52 -0400 |
| commit    | 59534f7298c5e28aaa64e6ed550e247f64ee72ae (patch) | |
| tree      | b9fef7756abf897d9e1b10950cdf10bf6dfe5cb7 /drivers/gpu/drm/radeon/radeon_ring.c | |
| parent    | ac3ee84c604502240122c47b52f0542ec8774f15 (diff) | |
| parent    | b486787ee4797d6e42a9bd3a6f079385ad0f4472 (diff) | |
Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits)
drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile
drm/radeon: fix power supply kconfig interaction.
drm/radeon/kms: record object that have been list reserved
drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU.
drm/radeon/kms: don't default display priority to high on rs4xx
drm/edid: fix typo in 1600x1200@75 mode
drm/nouveau: fix i2c-related init table handlers
drm/nouveau: support init table i2c device identifier 0x81
drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
drm/nouveau: display error message for any failed init table opcode
drm/nouveau: fix init table handlers to return proper error codes
drm/nv50: support fractional feedback divider on newer chips
drm/nv50: fix monitor detection on certain chipsets
drm/nv50: store full dcb i2c entry from vbios
drm/nv50: fix suspend/resume with DP outputs
drm/nv50: output calculated crtc pll when debugging on
drm/nouveau: dump pll limits entries when debugging is on
drm/nouveau: bios parser fixes for eDP boards
drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
drm/nv40: remove some completed ctxprog TODOs
...
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ring.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c  66
1 file changed, 43 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index f6e1e8d4d986..261e98a276db 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -219,24 +219,26 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
         int r;
+        struct radeon_bo *robj;

         if (!rdev->ib_pool.ready) {
                 return;
         }
         mutex_lock(&rdev->ib_pool.mutex);
         radeon_ib_bogus_cleanup(rdev);
+        robj = rdev->ib_pool.robj;
+        rdev->ib_pool.robj = NULL;
+        mutex_unlock(&rdev->ib_pool.mutex);

-        if (rdev->ib_pool.robj) {
-                r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+        if (robj) {
+                r = radeon_bo_reserve(robj, false);
                 if (likely(r == 0)) {
-                        radeon_bo_kunmap(rdev->ib_pool.robj);
-                        radeon_bo_unpin(rdev->ib_pool.robj);
-                        radeon_bo_unreserve(rdev->ib_pool.robj);
+                        radeon_bo_kunmap(robj);
+                        radeon_bo_unpin(robj);
+                        radeon_bo_unreserve(robj);
                 }
-                radeon_bo_unref(&rdev->ib_pool.robj);
-                rdev->ib_pool.robj = NULL;
+                radeon_bo_unref(&robj);
         }
-        mutex_unlock(&rdev->ib_pool.mutex);
 }

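Applied, this hunk has radeon_ib_pool_fini() detach the pool's buffer object from rdev->ib_pool while ib_pool.mutex is held, then do the unmap/unpin/unref after the lock has been dropped. Reassembled from the hunk above for readability; it relies on the driver's existing radeon_bo_* helpers and structures, so it is a reading aid rather than standalone code:

```c
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *robj;

        if (!rdev->ib_pool.ready) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        radeon_ib_bogus_cleanup(rdev);
        /* Detach the pool's buffer object while the mutex is held... */
        robj = rdev->ib_pool.robj;
        rdev->ib_pool.robj = NULL;
        mutex_unlock(&rdev->ib_pool.mutex);

        /* ...and release it outside the lock. */
        if (robj) {
                r = radeon_bo_reserve(robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(robj);
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
                radeon_bo_unref(&robj);
        }
}
```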
@@ -258,31 +260,41 @@ void radeon_ring_free_size(struct radeon_device *rdev)
         }
 }

-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
 {
         int r;

         /* Align requested size with padding so unlock_commit can
          * pad safely */
         ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
-        mutex_lock(&rdev->cp.mutex);
         while (ndw > (rdev->cp.ring_free_dw - 1)) {
                 radeon_ring_free_size(rdev);
                 if (ndw < rdev->cp.ring_free_dw) {
                         break;
                 }
                 r = radeon_fence_wait_next(rdev);
-                if (r) {
-                        mutex_unlock(&rdev->cp.mutex);
+                if (r)
                         return r;
-                }
         }
         rdev->cp.count_dw = ndw;
         rdev->cp.wptr_old = rdev->cp.wptr;
         return 0;
 }

-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+{
+        int r;
+
+        mutex_lock(&rdev->cp.mutex);
+        r = radeon_ring_alloc(rdev, ndw);
+        if (r) {
+                mutex_unlock(&rdev->cp.mutex);
+                return r;
+        }
+        return 0;
+}
+
+void radeon_ring_commit(struct radeon_device *rdev)
 {
         unsigned count_dw_pad;
         unsigned i;
@@ -295,6 +307,11 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev)
         }
         DRM_MEMORYBARRIER();
         radeon_cp_commit(rdev);
+}
+
+void radeon_ring_unlock_commit(struct radeon_device *rdev)
+{
+        radeon_ring_commit(rdev);
         mutex_unlock(&rdev->cp.mutex);
 }

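Taken together, the two hunks above split the ring entry points in two: radeon_ring_lock() and radeon_ring_unlock_commit() keep their old contract (take cp.mutex and reserve space; pad, commit and drop cp.mutex), while the new radeon_ring_alloc() and radeon_ring_commit() do the same work without touching the mutex, presumably for callers that already hold it. A minimal usage sketch of the locked pair follows; the function name and the dwords written are illustrative only, and radeon_ring_write() is assumed to be the driver's existing helper that stores one dword into the ring:

```c
/* Illustrative only: the usual emit sequence using the locked wrappers. */
static int example_emit_two_dwords(struct radeon_device *rdev)
{
        int r;

        /* Takes cp.mutex and waits on fences until 2 dwords are free. */
        r = radeon_ring_lock(rdev, 2);
        if (r)
                return r;       /* cp.mutex is not held on failure */

        radeon_ring_write(rdev, 0);     /* placeholder dwords, not a real packet */
        radeon_ring_write(rdev, 0);

        /* Pads to the ring alignment, commits, and drops cp.mutex. */
        radeon_ring_unlock_commit(rdev);
        return 0;
}
```

A path that already holds cp.mutex would call radeon_ring_alloc() and radeon_ring_commit() directly, which appears to be the point of the split.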
@@ -344,20 +361,23 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 void radeon_ring_fini(struct radeon_device *rdev)
 {
         int r;
+        struct radeon_bo *ring_obj;

         mutex_lock(&rdev->cp.mutex);
-        if (rdev->cp.ring_obj) {
-                r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+        ring_obj = rdev->cp.ring_obj;
+        rdev->cp.ring = NULL;
+        rdev->cp.ring_obj = NULL;
+        mutex_unlock(&rdev->cp.mutex);
+
+        if (ring_obj) {
+                r = radeon_bo_reserve(ring_obj, false);
                 if (likely(r == 0)) {
-                        radeon_bo_kunmap(rdev->cp.ring_obj);
-                        radeon_bo_unpin(rdev->cp.ring_obj);
-                        radeon_bo_unreserve(rdev->cp.ring_obj);
+                        radeon_bo_kunmap(ring_obj);
+                        radeon_bo_unpin(ring_obj);
+                        radeon_bo_unreserve(ring_obj);
                 }
-                radeon_bo_unref(&rdev->cp.ring_obj);
-                rdev->cp.ring = NULL;
-                rdev->cp.ring_obj = NULL;
+                radeon_bo_unref(&ring_obj);
         }
-        mutex_unlock(&rdev->cp.mutex);
 }

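The final hunk applies the same detach-then-release pattern to the CP ring object: rdev->cp.ring and rdev->cp.ring_obj are cleared under cp.mutex, and the buffer object is torn down after the mutex is released. Reassembled from the hunk above, as a reading aid only:

```c
void radeon_ring_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->cp.mutex);
        /* Detach the ring object and its CPU mapping under the lock... */
        ring_obj = rdev->cp.ring_obj;
        rdev->cp.ring = NULL;
        rdev->cp.ring_obj = NULL;
        mutex_unlock(&rdev->cp.mutex);

        /* ...and release the buffer object outside it. */
        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}
```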