-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c     114
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c      2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c         5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c              5
-rw-r--r--  drivers/gpu/drm/radeon/r600.c              5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c    32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h     2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c      103
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c             5
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c              84
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c        122
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c           41
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c    50
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c    4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c         4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c    2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h              29
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h           57
18 files changed, 397 insertions(+), 269 deletions(-)
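
The series replaces TTM's static per-memory-type io_offset/io_size/io_addr bookkeeping with a per-buffer bus placement that drivers fill on demand through two new ttm_bo_driver hooks, and it splits the old no_wait argument into no_wait_reserve/no_wait_gpu across the whole call chain. The header hunks (include/drm/ttm/ttm_bo_api.h, ttm_bo_driver.h) are not reproduced below; the following is a condensed C sketch of the new interface as inferred from the driver code, with comments that are ours rather than the headers':

/* Per-buffer bus placement, embedded as mem->bus in struct ttm_mem_reg. */
struct ttm_bus_placement {
	void		*addr;		/* premapped kernel virtual address, or NULL */
	unsigned long	base;		/* bus base address of the aperture */
	unsigned long	offset;		/* buffer offset within the aperture */
	unsigned long	size;		/* size of the io region, in bytes */
	bool		is_iomem;	/* true if backed by io memory */
	bool		io_reserved;	/* set once ttm_mem_io_reserve() has run */
};

/* New and changed ttm_bo_driver hooks (all other members omitted): */
struct ttm_bo_driver {
	/* ... */
	int  (*fault_reserve_notify)(struct ttm_buffer_object *bo); /* was void */
	int  (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
	void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};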
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 957d17629840..fb164efada3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -261,7 +261,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -391,25 +391,16 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-
-		man->io_addr = NULL;
-		man->io_offset = drm_get_resource_start(dev, 1);
-		man->io_size = drm_get_resource_len(dev, 1);
-		if (man->io_size > dev_priv->vram_size)
-			man->io_size = dev_priv->vram_size;
-
 		man->gpu_offset = dev_priv->vm_vram_base;
 		break;
 	case TTM_PL_TT:
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 			man->available_caching = TTM_PL_FLAG_UNCACHED;
 			man->default_caching = TTM_PL_FLAG_UNCACHED;
 			break;
@@ -424,10 +415,6 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-
-		man->io_offset = dev_priv->gart_info.aper_base;
-		man->io_size = dev_priv->gart_info.aper_size;
-		man->io_addr = NULL;
 		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
@@ -462,7 +449,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
-			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
+			      struct nouveau_bo *nvbo, bool evict,
+			      bool no_wait_reserve, bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_fence *fence = NULL;
@@ -473,7 +461,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 		return ret;
 
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict, no_wait, new_mem);
+					evict, no_wait_reserve, no_wait_gpu, new_mem);
 	if (nvbo->channel && nvbo->channel != chan)
 		ret = nouveau_fence_wait(fence, NULL, false, false);
 	nouveau_fence_unref((void *)&fence);
@@ -497,7 +485,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 
 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     int no_wait, struct ttm_mem_reg *new_mem)
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -575,12 +564,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		dst_offset += (PAGE_SIZE * line_count);
 	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -593,7 +583,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
@@ -601,11 +591,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	if (tmp_mem.mm_node) {
 		spin_lock(&bo->bdev->glob->lru_lock);
@@ -618,7 +608,8 @@ out:
 
 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait, struct ttm_mem_reg *new_mem)
+		      bool no_wait_reserve, bool no_wait_gpu,
+		      struct ttm_mem_reg *new_mem)
 {
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
@@ -631,15 +622,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
@@ -706,7 +697,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+		bool no_wait_reserve, bool no_wait_gpu,
+		struct ttm_mem_reg *new_mem)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -721,7 +713,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
 	    !dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 		goto out;
 	}
 
@@ -735,17 +727,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else if (old_mem->mem_type == TTM_PL_SYSTEM)
-		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	else
-		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
 
 	if (!ret)
 		goto out;
 
 	/* Fallback to software copy. */
-	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 
 out:
 	if (ret)
@@ -762,6 +754,55 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 	return 0;
 }
 
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* System memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = dev_priv->gart_info.aper_base;
+			mem->bus.is_iomem = true;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.base = drm_get_resource_start(dev, 1);
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int
+nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	return 0;
+}
+
 struct ttm_bo_driver nouveau_bo_driver = {
 	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
@@ -774,5 +815,8 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.sync_obj_flush = nouveau_fence_flush,
 	.sync_obj_unref = nouveau_fence_unref,
 	.sync_obj_ref = nouveau_fence_ref,
+	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
+	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
+	.io_mem_free = &nouveau_ttm_io_mem_free,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6d1aa89ec870..69c76cf93407 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false);
+				      false, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3295154e5934..b3d168fb89e5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1266,11 +1266,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 9d3b47deecb3..9bdccb964999 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2036,11 +2036,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
 		else
 			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	}
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 }
 
 void r100_vga_set_state(struct radeon_device *rdev, bool state)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9b08c5743c86..c325cb121059 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -730,11 +730,6 @@ int r600_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 
 	if (rdev->flags & RADEON_IS_IGP)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 122774742bd5..6a8617bac142 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -192,7 +192,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 	}
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -216,7 +216,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
 	if (unlikely(r != 0))
 		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
@@ -331,7 +331,7 @@ int radeon_bo_list_validate(struct list_head *head)
 						lobj->rdomain);
 		}
 		r = ttm_bo_validate(&bo->tbo, &bo->placement,
-				    true, false);
+				    true, false, false);
 		if (unlikely(r))
 			return r;
 	}
@@ -499,11 +499,33 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 	radeon_bo_check_tiling(rbo, 0, 1);
 }
 
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
+	unsigned long offset, size;
+	int r;
+
 	if (!radeon_ttm_bo_is_radeon_bo(bo))
-		return;
+		return 0;
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	radeon_bo_check_tiling(rbo, 0, 0);
+	rdev = rbo->rdev;
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		size = bo->mem.num_pages << PAGE_SHIFT;
+		offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		if ((offset + size) > rdev->mc.visible_vram_size) {
+			/* hurrah the memory is not visible ! */
+			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
+			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
+			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
+			if (unlikely(r != 0))
+				return r;
+			offset = bo->mem.mm_node->start << PAGE_SHIFT;
+			/* this should not happen */
+			if ((offset + size) > rdev->mc.visible_vram_size)
+				return -EINVAL;
+		}
+	}
+	return 0;
 }
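
The reworked fault hook above is what makes unmappable VRAM usable: when the faulting buffer sits past the CPU-visible aperture, it is re-validated into the visible range by clamping the placement's last allowed page frame. A minimal restatement of that pattern as a hypothetical helper (ours, not part of the patch):

static void radeon_clamp_to_visible_vram(struct radeon_bo *rbo)
{
	struct radeon_device *rdev = rbo->rdev;

	/* permit VRAM placements only below the CPU-visible limit */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	/* a subsequent ttm_bo_validate() migrates the BO under the limit */
}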
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 7ab43de1e244..353998dc2c03 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -168,6 +168,6 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 				bool force_drop);
 extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 					struct ttm_mem_reg *mem);
-extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index f06533676e7d..af98f45954b3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -163,34 +163,21 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 				  (unsigned)type);
 			return -EINVAL;
 		}
-		man->io_offset = rdev->mc.agp_base;
-		man->io_size = rdev->mc.gtt_size;
-		man->io_addr = NULL;
 		if (!rdev->ddev->agp->cant_use_aperture)
-			man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
-				     TTM_MEMTYPE_FLAG_MAPPABLE;
+			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-	} else
-#endif
-	{
-		man->io_offset = 0;
-		man->io_size = 0;
-		man->io_addr = NULL;
 	}
+#endif
 	break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->gpu_offset = rdev->mc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->io_addr = NULL;
-		man->io_offset = rdev->mc.aper_base;
-		man->io_size = rdev->mc.aper_size;
 		break;
 	default:
 		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
@@ -245,9 +232,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait,
+			bool evict, int no_wait_reserve, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
 	struct radeon_device *rdev;
 	uint64_t old_start, new_start;
@@ -291,13 +278,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
-				      evict, no_wait, new_mem);
+				      evict, no_wait_reserve, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -318,7 +306,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait);
+			     interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -332,11 +320,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out_cleanup:
 	if (tmp_mem.mm_node) {
 		struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -350,7 +338,8 @@ out_cleanup:
 }
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
-				bool evict, bool interruptible, bool no_wait,
+				bool evict, bool interruptible,
+				bool no_wait_reserve, bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -370,15 +359,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -395,8 +384,9 @@ out_cleanup:
 }
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
-			bool evict, bool interruptible, bool no_wait,
-			struct ttm_mem_reg *new_mem)
+			bool evict, bool interruptible,
+			bool no_wait_reserve, bool no_wait_gpu,
+			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -423,23 +413,66 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					no_wait, new_mem);
+					no_wait_reserve, no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 	}
-
 	return r;
 }
 
+static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	struct radeon_device *rdev = radeon_get_rdev(bdev);
+
+	mem->bus.addr = NULL;
+	mem->bus.offset = 0;
+	mem->bus.size = mem->num_pages << PAGE_SHIFT;
+	mem->bus.base = 0;
+	mem->bus.is_iomem = false;
+	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+		return -EINVAL;
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_TT:
+#if __OS_HAS_AGP
+		if (rdev->flags & RADEON_IS_AGP) {
+			/* RADEON_IS_AGP is set only if AGP is active */
+			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.base = rdev->mc.agp_base;
+			mem->bus.is_iomem = true;
+		}
+#endif
+		break;
+	case TTM_PL_VRAM:
+		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		/* check if it's visible */
+		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
+			return -EINVAL;
+		mem->bus.base = rdev->mc.aper_base;
+		mem->bus.is_iomem = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -480,6 +513,8 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.sync_obj_ref = &radeon_sync_obj_ref,
 	.move_notify = &radeon_bo_move_notify,
 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
+	.io_mem_free = &radeon_ttm_io_mem_free,
 };
 
 int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c14f3be25b4b..a74683e18612 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -910,11 +910,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	/* FIXME remove this once we support unmappable VRAM */
-	if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
-		rdev->mc.mc_vram_size = rdev->mc.aper_size;
-		rdev->mc.real_vram_size = rdev->mc.aper_size;
-	}
 	r600_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
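
All four memory-controller init paths above (evergreen, r100, r600, rv770) drop the identical FIXME clamp that capped reported VRAM at the PCI aperture size. The clamp is obsolete because visibility is now enforced per buffer: radeon_ttm_io_mem_reserve() refuses to map VRAM beyond visible_vram_size, and the fault hook migrates such buffers into the visible range. A hypothetical predicate (ours, not the patch's) stating the new invariant:

static bool radeon_mem_is_cpu_visible(struct radeon_device *rdev,
				      struct ttm_mem_reg *mem)
{
	unsigned long offset = mem->mm_node->start << PAGE_SHIFT;
	unsigned long size = mem->num_pages << PAGE_SHIFT;

	/* VRAM past this limit stays GPU-accessible but is never CPU-mapped */
	return (offset + size) <= rdev->mc.visible_vram_size;
}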
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index dd47b2a9a791..3b5b094b1397 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
 	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
 	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
 	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
 	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
 		man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
 	if (ret)
 		goto out_err;
@@ -606,7 +605,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 EXPORT_SYMBOL(ttm_bo_unref);
 
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +614,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
@@ -631,6 +630,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved = false;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -638,7 +638,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait);
+				no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +650,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +670,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +688,11 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +714,7 @@ retry:
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +765,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 					uint32_t mem_type,
 					struct ttm_placement *placement,
 					struct ttm_mem_reg *mem,
-					bool interruptible, bool no_wait)
+					bool interruptible,
+					bool no_wait_reserve,
+					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +788,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +858,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +956,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-						interruptible, no_wait);
+						interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret == 0 && mem->mm_node) {
 		mem->placement = cur_flags;
 		mem->mm_node->private = bo;
@@ -978,7 +982,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,20 +997,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved = false;
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
 
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1153,6 +1160,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved = false;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1183,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
 
@@ -1249,7 +1257,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1553,26 +1561,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-		      struct ttm_mem_reg *mem,
-		      unsigned long *bus_base,
-		      unsigned long *bus_offset, unsigned long *bus_size)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	*bus_size = 0;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
-
-	if (ttm_mem_reg_is_pci(bdev, mem)) {
-		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
-		*bus_size = mem->num_pages << PAGE_SHIFT;
-		*bus_base = man->io_offset;
-	}
-
-	return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1569,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 	if (!bdev->dev_mapping)
 		return;
-
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1839,7 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		evict_mem.mem_type = TTM_PL_SYSTEM;
 
 		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-					     false, false);
+					     false, false, false);
 		if (unlikely(ret != 0))
 			goto out;
 	}
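
Every entry point above now takes the no_wait_reserve/no_wait_gpu pair instead of a single no_wait. Passing false for both preserves the old blocking behaviour; the radeon fault path above is the one caller that deliberately passes (false, true, false) so it never blocks on another buffer's reservation while servicing a fault. A hedged caller-side sketch (ours, not from the patch):

static int pin_bo_example(struct ttm_buffer_object *bo,
			  struct ttm_placement *placement)
{
	/* interruptible, block on reserve, block on GPU: the direct
	 * translation of the old ttm_bo_validate(bo, placement, true, false) */
	return ttm_bo_validate(bo, placement, true, false, false);
}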
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..a37a94872a14 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	int ret;
+
+	if (!mem->bus.io_reserved) {
+		mem->bus.io_reserved = true;
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	if (bdev->driver->io_mem_reserve) {
+		if (mem->bus.io_reserved) {
+			mem->bus.io_reserved = false;
+			bdev->driver->io_mem_free(bdev, mem);
+		}
+	}
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret)
 		return ret;
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			ttm_mem_io_free(bdev, mem);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	man = &bdev->man[mem->mem_type];
 
-	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-			  unsigned long bus_base,
-			  unsigned long bus_offset,
-			  unsigned long bus_size,
+			  unsigned long offset,
+			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bus_base + bus_offset,
-						  bus_size);
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
 		else
-			map->virtual = ioremap_nocache(bus_base + bus_offset,
-						       bus_size);
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	unsigned long offset, size;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 
 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
 	if (ret)
 		return ret;
-	if (bus_size == 0) {
+	if (!bo->mem.bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
+		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
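
With ttm_bo_pci_offset() and ttm_bo_pfn_prot() gone, every CPU mapping is funnelled through ttm_mem_io_reserve()/ttm_mem_io_free(), and the kmap object now records its buffer (map->bo) so that ttm_bo_kunmap() can drop the io reservation it took. A hedged usage sketch (ours, not from the patch):

static int read_first_byte(struct ttm_buffer_object *bo, u8 *out)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);	/* map the first page */
	if (ret)
		return ret;
	/* ttm_kmap_obj_virtual() also reports whether this is io memory */
	*out = *(u8 *)ttm_kmap_obj_virtual(&map, &is_iomem);
	ttm_bo_kunmap(&map);	/* releases the io reservation as well */
	return 0;
}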
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *) 74 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
75 vma->vm_private_data; 75 vma->vm_private_data;
76 struct ttm_bo_device *bdev = bo->bdev; 76 struct ttm_bo_device *bdev = bo->bdev;
77 unsigned long bus_base;
78 unsigned long bus_offset;
79 unsigned long bus_size;
80 unsigned long page_offset; 77 unsigned long page_offset;
81 unsigned long page_last; 78 unsigned long page_last;
82 unsigned long pfn; 79 unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
84 struct page *page; 81 struct page *page;
85 int ret; 82 int ret;
86 int i; 83 int i;
87 bool is_iomem;
88 unsigned long address = (unsigned long)vmf->virtual_address; 84 unsigned long address = (unsigned long)vmf->virtual_address;
89 int retval = VM_FAULT_NOPAGE; 85 int retval = VM_FAULT_NOPAGE;
90 86
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
101 return VM_FAULT_NOPAGE; 97 return VM_FAULT_NOPAGE;
102 } 98 }
103 99
104 if (bdev->driver->fault_reserve_notify) 100 if (bdev->driver->fault_reserve_notify) {
105 bdev->driver->fault_reserve_notify(bo); 101 ret = bdev->driver->fault_reserve_notify(bo);
102 switch (ret) {
103 case 0:
104 break;
105 case -EBUSY:
106 set_need_resched();
107 case -ERESTARTSYS:
108 retval = VM_FAULT_NOPAGE;
109 goto out_unlock;
110 default:
111 retval = VM_FAULT_SIGBUS;
112 goto out_unlock;
113 }
114 }
106 115
107 /* 116 /*
108 * Wait for buffer data in transit, due to a pipelined 117 * Wait for buffer data in transit, due to a pipelined
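fault_reserve_notify() now returns an error code instead of void: 0 proceeds with the fault, -EBUSY deliberately falls through after set_need_resched() and, like -ERESTARTSYS, retries via VM_FAULT_NOPAGE, and anything else becomes VM_FAULT_SIGBUS. A hedged sketch of a driver-side implementation honouring that contract (all example_* names are hypothetical):

struct example_bo {
	struct ttm_buffer_object tbo;
	/* driver-private placement state ... */
};

static int example_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct example_bo *ebo = container_of(bo, struct example_bo, tbo);

	/* Nothing to do if the BO is already CPU-mappable. */
	if (example_bo_is_mappable(ebo))
		return 0;

	/* Try to migrate into the mappable part of VRAM; a busy GPU may
	 * legitimately fail this with -EBUSY, which makes the fault
	 * handler reschedule and retry instead of raising SIGBUS. */
	return example_bo_move_to_mappable(ebo);
}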
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
122 spin_unlock(&bo->lock); 131 spin_unlock(&bo->lock);
123 132
124 133
125 ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset, 134 ret = ttm_mem_io_reserve(bdev, &bo->mem);
126 &bus_size); 135 if (ret) {
127 if (unlikely(ret != 0)) {
128 retval = VM_FAULT_SIGBUS; 136 retval = VM_FAULT_SIGBUS;
129 goto out_unlock; 137 goto out_unlock;
130 } 138 }
131 139
132 is_iomem = (bus_size != 0);
133
134 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + 140 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
135 bo->vm_node->start - vma->vm_pgoff; 141 bo->vm_node->start - vma->vm_pgoff;
136 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + 142 page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
154 * vma->vm_page_prot when the object changes caching policy, with 160 * vma->vm_page_prot when the object changes caching policy, with
155 * the correct locks held. 161 * the correct locks held.
156 */ 162 */
157 163 if (bo->mem.bus.is_iomem) {
158 if (is_iomem) {
159 vma->vm_page_prot = ttm_io_prot(bo->mem.placement, 164 vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
160 vma->vm_page_prot); 165 vma->vm_page_prot);
161 } else { 166 } else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
171 */ 176 */
172 177
173 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { 178 for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
174 179 if (bo->mem.bus.is_iomem)
175 if (is_iomem) 180 pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
176 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
177 page_offset;
178 else { 181 else {
179 page = ttm_tt_get_page(ttm, page_offset); 182 page = ttm_tt_get_page(ttm, page_offset);
180 if (unlikely(!page && i == 0)) { 183 if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
198 retval = 201 retval =
199 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; 202 (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
200 goto out_unlock; 203 goto out_unlock;
201
202 } 204 }
203 205
204 address += PAGE_SIZE; 206 address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
221 223
222static void ttm_bo_vm_close(struct vm_area_struct *vma) 224static void ttm_bo_vm_close(struct vm_area_struct *vma)
223{ 225{
224 struct ttm_buffer_object *bo = 226 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
225 (struct ttm_buffer_object *)vma->vm_private_data;
226 227
227 ttm_bo_unref(&bo); 228 ttm_bo_unref(&bo);
228 vma->vm_private_data = NULL; 229 vma->vm_private_data = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 825ebe3d89d5..c4f5114aee7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -137,9 +137,6 @@ int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
137int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 137int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
138 struct ttm_mem_type_manager *man) 138 struct ttm_mem_type_manager *man)
139{ 139{
140 struct vmw_private *dev_priv =
141 container_of(bdev, struct vmw_private, bdev);
142
143 switch (type) { 140 switch (type) {
144 case TTM_PL_SYSTEM: 141 case TTM_PL_SYSTEM:
145 /* System memory */ 142 /* System memory */
@@ -151,11 +148,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
151 case TTM_PL_VRAM: 148 case TTM_PL_VRAM:
152 /* "On-card" video ram */ 149 /* "On-card" video ram */
153 man->gpu_offset = 0; 150 man->gpu_offset = 0;
154 man->io_offset = dev_priv->vram_start; 151 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
155 man->io_size = dev_priv->vram_size;
156 man->flags = TTM_MEMTYPE_FLAG_FIXED |
157 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
158 man->io_addr = NULL;
159 man->available_caching = TTM_PL_MASK_CACHING; 152 man->available_caching = TTM_PL_MASK_CACHING;
160 man->default_caching = TTM_PL_FLAG_WC; 153 man->default_caching = TTM_PL_FLAG_WC;
161 break; 154 break;
@@ -193,6 +186,42 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
193 vmw_dmabuf_gmr_unbind(bo); 186 vmw_dmabuf_gmr_unbind(bo);
194} 187}
195 188
189static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
190{
191 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
192 struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
193
194 mem->bus.addr = NULL;
195 mem->bus.is_iomem = false;
196 mem->bus.offset = 0;
197 mem->bus.size = mem->num_pages << PAGE_SHIFT;
198 mem->bus.base = 0;
199 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
200 return -EINVAL;
201 switch (mem->mem_type) {
202 case TTM_PL_SYSTEM:
203 /* System memory */
204 return 0;
205 case TTM_PL_VRAM:
206 mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
207 mem->bus.base = dev_priv->vram_start;
208 mem->bus.is_iomem = true;
209 break;
210 default:
211 return -EINVAL;
212 }
213 return 0;
214}
215
216static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
217{
218}
219
220static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
221{
222 return 0;
223}
224
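The empty vmw hooks above are legal because the callbacks form an optional, balanced pair. The core-side wrapper declared in ttm_bo_driver.h presumably just forwards to the driver hook when one is provided; a sketch of that forwarding (an assumption here, the real body lives in the ttm_bo_util.c hunk of this series):

static int example_mem_io_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_bo_driver *driver = bdev->driver;

	if (!driver->io_mem_reserve)
		return 0;	/* no io memory to reserve for this driver */
	return driver->io_mem_reserve(bdev, mem);
}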
196/** 225/**
197 * FIXME: We're using the old vmware polling method to sync. 226 * FIXME: We're using the old vmware polling method to sync.
198 * Do this with fences instead. 227 * Do this with fences instead.
@@ -248,5 +277,8 @@ struct ttm_bo_driver vmw_bo_driver = {
248 .sync_obj_unref = vmw_sync_obj_unref, 277 .sync_obj_unref = vmw_sync_obj_unref,
249 .sync_obj_ref = vmw_sync_obj_ref, 278 .sync_obj_ref = vmw_sync_obj_ref,
250 .move_notify = vmw_move_notify, 279 .move_notify = vmw_move_notify,
251 .swap_notify = vmw_swap_notify 280 .swap_notify = vmw_swap_notify,
281 .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
282 .io_mem_reserve = &vmw_ttm_io_mem_reserve,
283 .io_mem_free = &vmw_ttm_io_mem_free,
252}; 284};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0897359b3e4e..dbd36b8910cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
570 * Put BO in VRAM, only if there is space. 570 * Put BO in VRAM, only if there is space.
571 */ 571 */
572 572
573 ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false); 573 ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
574 if (unlikely(ret == -ERESTARTSYS)) 574 if (unlikely(ret == -ERESTARTSYS))
575 return ret; 575 return ret;
576 576
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
590 * previous contents. 590 * previous contents.
591 */ 591 */
592 592
593 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 593 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
594 return ret; 594 return ret;
595} 595}
596 596
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a93367041cdc..80125ffc4e28 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
628 if (unlikely(ret != 0)) 628 if (unlikely(ret != 0))
629 return ret; 629 return ret;
630 630
631 ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false); 631 ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
632 ttm_bo_unreserve(bo); 632 ttm_bo_unreserve(bo);
633 633
634 return ret; 634 return ret;
@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
652 if (unlikely(ret != 0)) 652 if (unlikely(ret != 0))
653 goto err_unlock; 653 goto err_unlock;
654 654
655 ret = ttm_bo_validate(bo, &ne_placement, false, false); 655 ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
656 ttm_bo_unreserve(bo); 656 ttm_bo_unreserve(bo);
657err_unlock: 657err_unlock:
658 ttm_write_unlock(&vmw_priv->active_master->lock); 658 ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 5b6eabeb7f51..ad566c85b075 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
118 if (pin) 118 if (pin)
119 overlay_placement = &vmw_vram_ne_placement; 119 overlay_placement = &vmw_vram_ne_placement;
120 120
121 ret = ttm_bo_validate(bo, overlay_placement, interruptible, false); 121 ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
122 122
123 ttm_bo_unreserve(bo); 123 ttm_bo_unreserve(bo);
124 124
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 81eb9f45883c..3e273e0b9417 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -66,6 +66,26 @@ struct ttm_placement {
66 const uint32_t *busy_placement; 66 const uint32_t *busy_placement;
67}; 67};
68 68
69/**
70 * struct ttm_bus_placement
71 *
72 * @addr: mapped virtual address
73 * @base: bus base address
74 * @is_iomem: is this io memory?
75 * @size: size in bytes
76 * @offset: offset from the base address
77 *
78 * Structure indicating the bus placement of an object.
79 */
80struct ttm_bus_placement {
81 void *addr;
82 unsigned long base;
83 unsigned long size;
84 unsigned long offset;
85 bool is_iomem;
86 bool io_reserved;
87};
88
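Taken together, base + offset gives the CPU-physical address of the region, addr optionally caches a pre-existing kernel mapping, and io_reserved is, as far as this series shows, internal bookkeeping so that reserve/free calls stay balanced. A sketch of how these fields are consumed, mirroring what the reworked ttm_bo_ioremap() does:

static void __iomem *example_map_bus(struct ttm_mem_reg *mem)
{
	if (!mem->bus.is_iomem)
		return NULL;	/* system pages: go through the ttm_tt instead */

	if (mem->placement & TTM_PL_FLAG_WC)
		return ioremap_wc(mem->bus.base + mem->bus.offset,
				  mem->bus.size);
	return ioremap_nocache(mem->bus.base + mem->bus.offset,
			       mem->bus.size);
}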
69 89
70/** 90/**
71 * struct ttm_mem_reg 91 * struct ttm_mem_reg
@@ -75,6 +95,7 @@ struct ttm_placement {
75 * @num_pages: Actual size of memory region in pages. 95 * @num_pages: Actual size of memory region in pages.
76 * @page_alignment: Page alignment. 96 * @page_alignment: Page alignment.
77 * @placement: Placement flags. 97 * @placement: Placement flags.
98 * @bus: Placement on the io bus accessible to the CPU
78 * 99 *
79 * Structure indicating the placement and space resources used by a 100 * Structure indicating the placement and space resources used by a
80 * buffer object. 101 * buffer object.
@@ -87,6 +108,7 @@ struct ttm_mem_reg {
87 uint32_t page_alignment; 108 uint32_t page_alignment;
88 uint32_t mem_type; 109 uint32_t mem_type;
89 uint32_t placement; 110 uint32_t placement;
111 struct ttm_bus_placement bus;
90}; 112};
91 113
92/** 114/**
@@ -274,6 +296,7 @@ struct ttm_bo_kmap_obj {
274 ttm_bo_map_kmap = 3, 296 ttm_bo_map_kmap = 3,
275 ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK, 297 ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
276 } bo_kmap_type; 298 } bo_kmap_type;
299 struct ttm_buffer_object *bo;
277}; 300};
278 301
279/** 302/**
@@ -313,7 +336,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
313 * @bo: The buffer object. 336 * @bo: The buffer object.
314 * @placement: Proposed placement for the buffer object. 337 * @placement: Proposed placement for the buffer object.
315 * @interruptible: Sleep interruptible if sleeping. 338 * @interruptible: Sleep interruptible if sleeping.
316 * @no_wait: Return immediately if the buffer is busy. 339 * @no_wait_reserve: Return immediately if other buffers are busy.
340 * @no_wait_gpu: Return immediately if the GPU is busy.
317 * 341 *
318 * Changes placement and caching policy of the buffer object 342 * Changes placement and caching policy of the buffer object
319 * according to the proposed placement. 343 * according to the proposed placement.
@@ -325,7 +349,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
325 */ 349 */
326extern int ttm_bo_validate(struct ttm_buffer_object *bo, 350extern int ttm_bo_validate(struct ttm_buffer_object *bo,
327 struct ttm_placement *placement, 351 struct ttm_placement *placement,
328 bool interruptible, bool no_wait); 352 bool interruptible, bool no_wait_reserve,
353 bool no_wait_gpu);
329 354
330/** 355/**
331 * ttm_bo_unref 356 * ttm_bo_unref
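The single no_wait flag is thus split in two: no_wait_reserve governs waiting on other buffers' reservations, while no_wait_gpu governs waiting for the hardware itself. A hypothetical caller migrating to the new prototype:

static int example_pin_interruptible(struct ttm_buffer_object *bo,
				     struct ttm_placement *placement)
{
	/* Sleep interruptibly, wait for contended reservations, but
	 * return -EBUSY rather than stall if the GPU still owns bo. */
	return ttm_bo_validate(bo, placement,
			       true,	/* interruptible */
			       false,	/* no_wait_reserve */
			       true);	/* no_wait_gpu */
}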
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e929c27ede22..7720b1787e23 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -176,8 +176,6 @@ struct ttm_tt {
176 176
177#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */ 177#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
178#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */ 178#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
179#define TTM_MEMTYPE_FLAG_NEEDS_IOREMAP (1 << 2) /* Fixed memory needs ioremap
180 before kernel access. */
181#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */ 179#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
182 180
183/** 181/**
@@ -189,13 +187,6 @@ struct ttm_tt {
189 * managed by this memory type. 187 * managed by this memory type.
190 * @gpu_offset: If used, the GPU offset of the first managed page of 188 * @gpu_offset: If used, the GPU offset of the first managed page of
191 * fixed memory or the first managed location in an aperture. 189 * fixed memory or the first managed location in an aperture.
192 * @io_offset: The io_offset of the first managed page of IO memory or
193 * the first managed location in an aperture. For TTM_MEMTYPE_FLAG_CMA
194 * memory, this should be set to NULL.
195 * @io_size: The size of a managed IO region (fixed memory or aperture).
196 * @io_addr: Virtual kernel address if the io region is pre-mapped. For
197 * TTM_MEMTYPE_FLAG_NEEDS_IOREMAP there is no pre-mapped io map and
198 * @io_addr should be set to NULL.
199 * @size: Size of the managed region. 190 * @size: Size of the managed region.
200 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX, 191 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
201 * as defined in ttm_placement_common.h 192 * as defined in ttm_placement_common.h
@@ -221,9 +212,6 @@ struct ttm_mem_type_manager {
221 bool use_type; 212 bool use_type;
222 uint32_t flags; 213 uint32_t flags;
223 unsigned long gpu_offset; 214 unsigned long gpu_offset;
224 unsigned long io_offset;
225 unsigned long io_size;
226 void *io_addr;
227 uint64_t size; 215 uint64_t size;
228 uint32_t available_caching; 216 uint32_t available_caching;
229 uint32_t default_caching; 217 uint32_t default_caching;
@@ -311,7 +299,8 @@ struct ttm_bo_driver {
311 */ 299 */
312 int (*move) (struct ttm_buffer_object *bo, 300 int (*move) (struct ttm_buffer_object *bo,
313 bool evict, bool interruptible, 301 bool evict, bool interruptible,
314 bool no_wait, struct ttm_mem_reg *new_mem); 302 bool no_wait_reserve, bool no_wait_gpu,
303 struct ttm_mem_reg *new_mem);
315 304
316 /** 305 /**
317 * struct ttm_bo_driver_member verify_access 306 * struct ttm_bo_driver_member verify_access
@@ -351,12 +340,21 @@ struct ttm_bo_driver {
351 struct ttm_mem_reg *new_mem); 340 struct ttm_mem_reg *new_mem);
352 /* notify the driver we are taking a fault on this BO 341 /* notify the driver we are taking a fault on this BO
353 * and have reserved it */ 342 * and have reserved it */
354 void (*fault_reserve_notify)(struct ttm_buffer_object *bo); 343 int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
355 344
356 /** 345 /**
357 * notify the driver that we're about to swap out this bo 346 * notify the driver that we're about to swap out this bo
358 */ 347 */
359 void (*swap_notify) (struct ttm_buffer_object *bo); 348 void (*swap_notify) (struct ttm_buffer_object *bo);
349
350 /**
351 * Driver callback invoked when io memory is about to be mapped (by
352 * bo_move_memcpy, for instance). TTM takes care of calling io_mem_free
353 * whenever the mapping is no longer in use; io_mem_reserve and
354 * io_mem_free calls are balanced.
355 */
356 int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
357 void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
360}; 358};
361 359
362/** 360/**
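For a device whose VRAM sits behind a PCI BAR, io_mem_reserve() typically just translates the mm_node offset into BAR-relative coordinates, much as the vmwgfx hunk above (and the radeon hunks in this series) do. A generic sketch, with all example_* names illustrative:

static int example_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct example_device *edev = example_from_bdev(bdev); /* hypothetical */

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		return 0;	/* nothing to reserve for system RAM */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = edev->vram_bar_base;	/* pci_resource_start() */
		mem->bus.is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}

static void example_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
	/* Nothing reserved above, so nothing to release; a driver that
	 * ioremaps or takes an aperture lock in reserve would undo it here. */
}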
@@ -633,7 +631,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
633 * @proposed_placement: Proposed new placement for the buffer object. 631 * @proposed_placement: Proposed new placement for the buffer object.
634 * @mem: A struct ttm_mem_reg. 632 * @mem: A struct ttm_mem_reg.
635 * @interruptible: Sleep interruptible when sleeping. 633 * @interruptible: Sleep interruptible when sleeping.
636 * @no_wait: Don't sleep waiting for space to become available. 634 * @no_wait_reserve: Return immediately if other buffers are busy.
635 * @no_wait_gpu: Return immediately if the GPU is busy.
637 * 636 *
638 * Allocate memory space for the buffer object pointed to by @bo, using 637 * Allocate memory space for the buffer object pointed to by @bo, using
639 * the placement flags in @mem, potentially evicting other idle buffer objects. 638 * the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +646,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
647extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, 646extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
648 struct ttm_placement *placement, 647 struct ttm_placement *placement,
649 struct ttm_mem_reg *mem, 648 struct ttm_mem_reg *mem,
650 bool interruptible, bool no_wait); 649 bool interruptible,
650 bool no_wait_reserve, bool no_wait_gpu);
651/** 651/**
652 * ttm_bo_wait_for_cpu 652 * ttm_bo_wait_for_cpu
653 * 653 *
@@ -682,6 +682,11 @@ extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
682 unsigned long *bus_offset, 682 unsigned long *bus_offset,
683 unsigned long *bus_size); 683 unsigned long *bus_size);
684 684
685extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
686 struct ttm_mem_reg *mem);
687extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
688 struct ttm_mem_reg *mem);
689
685extern void ttm_bo_global_release(struct ttm_global_reference *ref); 690extern void ttm_bo_global_release(struct ttm_global_reference *ref);
686extern int ttm_bo_global_init(struct ttm_global_reference *ref); 691extern int ttm_bo_global_init(struct ttm_global_reference *ref);
687 692
@@ -826,7 +831,8 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
826 * 831 *
827 * @bo: A pointer to a struct ttm_buffer_object. 832 * @bo: A pointer to a struct ttm_buffer_object.
828 * @evict: 1: This is an eviction. Don't try to pipeline. 833 * @evict: 1: This is an eviction. Don't try to pipeline.
829 * @no_wait: Never sleep, but rather return with -EBUSY. 834 * @no_wait_reserve: Return immediately if other buffers are busy.
835 * @no_wait_gpu: Return immediately if the GPU is busy.
830 * @new_mem: struct ttm_mem_reg indicating where to move. 836 * @new_mem: struct ttm_mem_reg indicating where to move.
831 * 837 *
832 * Optimized move function for a buffer object with both old and 838 * Optimized move function for a buffer object with both old and
@@ -840,15 +846,16 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
840 */ 846 */
841 847
842extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 848extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
843 bool evict, bool no_wait, 849 bool evict, bool no_wait_reserve,
844 struct ttm_mem_reg *new_mem); 850 bool no_wait_gpu, struct ttm_mem_reg *new_mem);
845 851
846/** 852/**
847 * ttm_bo_move_memcpy 853 * ttm_bo_move_memcpy
848 * 854 *
849 * @bo: A pointer to a struct ttm_buffer_object. 855 * @bo: A pointer to a struct ttm_buffer_object.
850 * @evict: 1: This is an eviction. Don't try to pipeline. 856 * @evict: 1: This is an eviction. Don't try to pipeline.
851 * @no_wait: Never sleep, but rather return with -EBUSY. 857 * @no_wait_reserve: Return immediately if other buffers are busy.
858 * @no_wait_gpu: Return immediately if the GPU is busy.
852 * @new_mem: struct ttm_mem_reg indicating where to move. 859 * @new_mem: struct ttm_mem_reg indicating where to move.
853 * 860 *
854 * Fallback move function for a mappable buffer object in mappable memory. 861 * Fallback move function for a mappable buffer object in mappable memory.
@@ -862,8 +869,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
862 */ 869 */
863 870
864extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 871extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
865 bool evict, 872 bool evict, bool no_wait_reserve,
866 bool no_wait, struct ttm_mem_reg *new_mem); 873 bool no_wait_gpu, struct ttm_mem_reg *new_mem);
867 874
868/** 875/**
869 * ttm_bo_free_old_node 876 * ttm_bo_free_old_node
@@ -882,7 +889,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
882 * @sync_obj_arg: An argument to pass to the sync object idle / wait 889 * @sync_obj_arg: An argument to pass to the sync object idle / wait
883 * functions. 890 * functions.
884 * @evict: This is an evict move. Don't return until the buffer is idle. 891 * @evict: This is an evict move. Don't return until the buffer is idle.
885 * @no_wait: Never sleep, but rather return with -EBUSY. 892 * @no_wait_reserve: Return immediately if other buffers are busy.
893 * @no_wait_gpu: Return immediately if the GPU is busy.
886 * @new_mem: struct ttm_mem_reg indicating where to move. 894 * @new_mem: struct ttm_mem_reg indicating where to move.
887 * 895 *
888 * Accelerated move function to be called when an accelerated move 896 * Accelerated move function to be called when an accelerated move
@@ -896,7 +904,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
896extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 904extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
897 void *sync_obj, 905 void *sync_obj,
898 void *sync_obj_arg, 906 void *sync_obj_arg,
899 bool evict, bool no_wait, 907 bool evict, bool no_wait_reserve,
908 bool no_wait_gpu,
900 struct ttm_mem_reg *new_mem); 909 struct ttm_mem_reg *new_mem);
901/** 910/**
902 * ttm_io_prot 911 * ttm_io_prot
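Accelerated-move implementations get the same flag split. A sketch of a driver move hook finishing a blit through the updated helper, where example_emit_copy() is hypothetical and stands in for the driver's ring copy:

static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   bool interruptible, bool no_wait_reserve,
			   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	void *fence = example_emit_copy(bo, new_mem);	/* hypothetical blit */

	/* Hand the fence to TTM; it pipelines the move or waits for it,
	 * honouring the split no_wait flags. */
	return ttm_bo_move_accel_cleanup(bo, fence, NULL /* sync_obj_arg */,
					 evict, no_wait_reserve, no_wait_gpu,
					 new_mem);
}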