diff options
author | Dave Airlie <airlied@redhat.com> | 2010-04-20 00:15:09 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2010-04-20 00:15:09 -0400 |
commit | 7547a917fa5f3b2406f52c7dcf7ec9ad3c8532eb (patch) | |
tree | 59b0d0e9b6c251c4df5799b93395454592004d57 /drivers/gpu/drm/radeon/radeon_ttm.c | |
parent | a8089e849a32c5b6bfd6c88dbd09c0ea4a779b71 (diff) | |
parent | 6b8b1786a8c29ce6e32298b93ac8d4a18a2b11c4 (diff) |
Merge branch 'drm-ttm-unmappable' into drm-core-next
* drm-ttm-unmappable:
drm/radeon/kms: enable use of unmappable VRAM V2
drm/ttm: remove io_ field from TTM V6
drm/vmwgfx: add support for new TTM fault callback V5
drm/nouveau/kms: add support for new TTM fault callback V5
drm/radeon/kms: add support for new fault callback V7
drm/ttm: ttm_fault callback to allow driver to handle bo placement V6
drm/ttm: split no_wait argument in 2 GPU or reserve wait
Conflicts:
drivers/gpu/drm/nouveau/nouveau_bo.c
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_ttm.c | 103 |
1 file changed, 69 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index f06533676e7..af98f45954b 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -163,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
163 | (unsigned)type); | 163 | (unsigned)type); |
164 | return -EINVAL; | 164 | return -EINVAL; |
165 | } | 165 | } |
166 | man->io_offset = rdev->mc.agp_base; | ||
167 | man->io_size = rdev->mc.gtt_size; | ||
168 | man->io_addr = NULL; | ||
169 | if (!rdev->ddev->agp->cant_use_aperture) | 166 | if (!rdev->ddev->agp->cant_use_aperture) |
170 | man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | | 167 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
171 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
172 | man->available_caching = TTM_PL_FLAG_UNCACHED | | 168 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
173 | TTM_PL_FLAG_WC; | 169 | TTM_PL_FLAG_WC; |
174 | man->default_caching = TTM_PL_FLAG_WC; | 170 | man->default_caching = TTM_PL_FLAG_WC; |
175 | } else | ||
176 | #endif | ||
177 | { | ||
178 | man->io_offset = 0; | ||
179 | man->io_size = 0; | ||
180 | man->io_addr = NULL; | ||
181 | } | 171 | } |
172 | #endif | ||
182 | break; | 173 | break; |
183 | case TTM_PL_VRAM: | 174 | case TTM_PL_VRAM: |
184 | /* "On-card" video ram */ | 175 | /* "On-card" video ram */ |
185 | man->gpu_offset = rdev->mc.vram_start; | 176 | man->gpu_offset = rdev->mc.vram_start; |
186 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 177 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
187 | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | | ||
188 | TTM_MEMTYPE_FLAG_MAPPABLE; | 178 | TTM_MEMTYPE_FLAG_MAPPABLE; |
189 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; | 179 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; |
190 | man->default_caching = TTM_PL_FLAG_WC; | 180 | man->default_caching = TTM_PL_FLAG_WC; |
191 | man->io_addr = NULL; | ||
192 | man->io_offset = rdev->mc.aper_base; | ||
193 | man->io_size = rdev->mc.aper_size; | ||
194 | break; | 181 | break; |
195 | default: | 182 | default: |
196 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | 183 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); |
@@ -245,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo, | |||
245 | } | 232 | } |
246 | 233 | ||
247 | static int radeon_move_blit(struct ttm_buffer_object *bo, | 234 | static int radeon_move_blit(struct ttm_buffer_object *bo, |
248 | bool evict, int no_wait, | 235 | bool evict, int no_wait_reserve, bool no_wait_gpu, |
249 | struct ttm_mem_reg *new_mem, | 236 | struct ttm_mem_reg *new_mem, |
250 | struct ttm_mem_reg *old_mem) | 237 | struct ttm_mem_reg *old_mem) |
251 | { | 238 | { |
252 | struct radeon_device *rdev; | 239 | struct radeon_device *rdev; |
253 | uint64_t old_start, new_start; | 240 | uint64_t old_start, new_start; |
@@ -291,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
291 | r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); | 278 | r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence); |
292 | /* FIXME: handle copy error */ | 279 | /* FIXME: handle copy error */ |
293 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, | 280 | r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, |
294 | evict, no_wait, new_mem); | 281 | evict, no_wait_reserve, no_wait_gpu, new_mem); |
295 | radeon_fence_unref(&fence); | 282 | radeon_fence_unref(&fence); |
296 | return r; | 283 | return r; |
297 | } | 284 | } |
298 | 285 | ||
299 | static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | 286 | static int radeon_move_vram_ram(struct ttm_buffer_object *bo, |
300 | bool evict, bool interruptible, bool no_wait, | 287 | bool evict, bool interruptible, |
288 | bool no_wait_reserve, bool no_wait_gpu, | ||
301 | struct ttm_mem_reg *new_mem) | 289 | struct ttm_mem_reg *new_mem) |
302 | { | 290 | { |
303 | struct radeon_device *rdev; | 291 | struct radeon_device *rdev; |
@@ -318,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | |||
318 | placement.busy_placement = &placements; | 306 | placement.busy_placement = &placements; |
319 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 307 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
320 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, | 308 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
321 | interruptible, no_wait); | 309 | interruptible, no_wait_reserve, no_wait_gpu); |
322 | if (unlikely(r)) { | 310 | if (unlikely(r)) { |
323 | return r; | 311 | return r; |
324 | } | 312 | } |
@@ -332,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, | |||
332 | if (unlikely(r)) { | 320 | if (unlikely(r)) { |
333 | goto out_cleanup; | 321 | goto out_cleanup; |
334 | } | 322 | } |
335 | r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem); | 323 | r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); |
336 | if (unlikely(r)) { | 324 | if (unlikely(r)) { |
337 | goto out_cleanup; | 325 | goto out_cleanup; |
338 | } | 326 | } |
339 | r = ttm_bo_move_ttm(bo, true, no_wait, new_mem); | 327 | r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
340 | out_cleanup: | 328 | out_cleanup: |
341 | if (tmp_mem.mm_node) { | 329 | if (tmp_mem.mm_node) { |
342 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | 330 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; |
@@ -350,7 +338,8 @@ out_cleanup: | |||
350 | } | 338 | } |
351 | 339 | ||
352 | static int radeon_move_ram_vram(struct ttm_buffer_object *bo, | 340 | static int radeon_move_ram_vram(struct ttm_buffer_object *bo, |
353 | bool evict, bool interruptible, bool no_wait, | 341 | bool evict, bool interruptible, |
342 | bool no_wait_reserve, bool no_wait_gpu, | ||
354 | struct ttm_mem_reg *new_mem) | 343 | struct ttm_mem_reg *new_mem) |
355 | { | 344 | { |
356 | struct radeon_device *rdev; | 345 | struct radeon_device *rdev; |
@@ -370,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, | |||
370 | placement.num_busy_placement = 1; | 359 | placement.num_busy_placement = 1; |
371 | placement.busy_placement = &placements; | 360 | placement.busy_placement = &placements; |
372 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 361 | placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
373 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait); | 362 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); |
374 | if (unlikely(r)) { | 363 | if (unlikely(r)) { |
375 | return r; | 364 | return r; |
376 | } | 365 | } |
377 | r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem); | 366 | r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); |
378 | if (unlikely(r)) { | 367 | if (unlikely(r)) { |
379 | goto out_cleanup; | 368 | goto out_cleanup; |
380 | } | 369 | } |
381 | r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem); | 370 | r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); |
382 | if (unlikely(r)) { | 371 | if (unlikely(r)) { |
383 | goto out_cleanup; | 372 | goto out_cleanup; |
384 | } | 373 | } |
@@ -395,8 +384,9 @@ out_cleanup: | |||
395 | } | 384 | } |
396 | 385 | ||
397 | static int radeon_bo_move(struct ttm_buffer_object *bo, | 386 | static int radeon_bo_move(struct ttm_buffer_object *bo, |
398 | bool evict, bool interruptible, bool no_wait, | 387 | bool evict, bool interruptible, |
399 | struct ttm_mem_reg *new_mem) | 388 | bool no_wait_reserve, bool no_wait_gpu, |
389 | struct ttm_mem_reg *new_mem) | ||
400 | { | 390 | { |
401 | struct radeon_device *rdev; | 391 | struct radeon_device *rdev; |
402 | struct ttm_mem_reg *old_mem = &bo->mem; | 392 | struct ttm_mem_reg *old_mem = &bo->mem; |
@@ -423,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
423 | if (old_mem->mem_type == TTM_PL_VRAM && | 413 | if (old_mem->mem_type == TTM_PL_VRAM && |
424 | new_mem->mem_type == TTM_PL_SYSTEM) { | 414 | new_mem->mem_type == TTM_PL_SYSTEM) { |
425 | r = radeon_move_vram_ram(bo, evict, interruptible, | 415 | r = radeon_move_vram_ram(bo, evict, interruptible, |
426 | no_wait, new_mem); | 416 | no_wait_reserve, no_wait_gpu, new_mem); |
427 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && | 417 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
428 | new_mem->mem_type == TTM_PL_VRAM) { | 418 | new_mem->mem_type == TTM_PL_VRAM) { |
429 | r = radeon_move_ram_vram(bo, evict, interruptible, | 419 | r = radeon_move_ram_vram(bo, evict, interruptible, |
430 | no_wait, new_mem); | 420 | no_wait_reserve, no_wait_gpu, new_mem); |
431 | } else { | 421 | } else { |
432 | r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem); | 422 | r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); |
433 | } | 423 | } |
434 | 424 | ||
435 | if (r) { | 425 | if (r) { |
436 | memcpy: | 426 | memcpy: |
437 | r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 427 | r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); |
438 | } | 428 | } |
439 | |||
440 | return r; | 429 | return r; |
441 | } | 430 | } |
442 | 431 | ||
432 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | ||
433 | { | ||
434 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
435 | struct radeon_device *rdev = radeon_get_rdev(bdev); | ||
436 | |||
437 | mem->bus.addr = NULL; | ||
438 | mem->bus.offset = 0; | ||
439 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | ||
440 | mem->bus.base = 0; | ||
441 | mem->bus.is_iomem = false; | ||
442 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | ||
443 | return -EINVAL; | ||
444 | switch (mem->mem_type) { | ||
445 | case TTM_PL_SYSTEM: | ||
446 | /* system memory */ | ||
447 | return 0; | ||
448 | case TTM_PL_TT: | ||
449 | #if __OS_HAS_AGP | ||
450 | if (rdev->flags & RADEON_IS_AGP) { | ||
451 | /* RADEON_IS_AGP is set only if AGP is active */ | ||
452 | mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; | ||
453 | mem->bus.base = rdev->mc.agp_base; | ||
454 | mem->bus.is_iomem = true; | ||
455 | } | ||
456 | #endif | ||
457 | break; | ||
458 | case TTM_PL_VRAM: | ||
459 | mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; | ||
460 | /* check if it's visible */ | ||
461 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) | ||
462 | return -EINVAL; | ||
463 | mem->bus.base = rdev->mc.aper_base; | ||
464 | mem->bus.is_iomem = true; | ||
465 | break; | ||
466 | default: | ||
467 | return -EINVAL; | ||
468 | } | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | ||
473 | { | ||
474 | } | ||
475 | |||
443 | static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, | 476 | static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, |
444 | bool lazy, bool interruptible) | 477 | bool lazy, bool interruptible) |
445 | { | 478 | { |
@@ -480,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = { | |||
480 | .sync_obj_ref = &radeon_sync_obj_ref, | 513 | .sync_obj_ref = &radeon_sync_obj_ref, |
481 | .move_notify = &radeon_bo_move_notify, | 514 | .move_notify = &radeon_bo_move_notify, |
482 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, | 515 | .fault_reserve_notify = &radeon_bo_fault_reserve_notify, |
516 | .io_mem_reserve = &radeon_ttm_io_mem_reserve, | ||
517 | .io_mem_free = &radeon_ttm_io_mem_free, | ||
483 | }; | 518 | }; |
484 | 519 | ||
485 | int radeon_ttm_init(struct radeon_device *rdev) | 520 | int radeon_ttm_init(struct radeon_device *rdev) |