author	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 14:14:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-21 14:14:52 -0400
commit	59534f7298c5e28aaa64e6ed550e247f64ee72ae (patch)
tree	b9fef7756abf897d9e1b10950cdf10bf6dfe5cb7 /drivers/gpu/drm/ttm
parent	ac3ee84c604502240122c47b52f0542ec8774f15 (diff)
parent	b486787ee4797d6e42a9bd3a6f079385ad0f4472 (diff)
Merge branch 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-for-2.6.35' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (207 commits)
  drm/radeon/kms/pm/r600: select the mid clock mode for single head low profile
  drm/radeon: fix power supply kconfig interaction.
  drm/radeon/kms: record object that have been list reserved
  drm/radeon: AGP memory is only I/O if the aperture can be mapped by the CPU.
  drm/radeon/kms: don't default display priority to high on rs4xx
  drm/edid: fix typo in 1600x1200@75 mode
  drm/nouveau: fix i2c-related init table handlers
  drm/nouveau: support init table i2c device identifier 0x81
  drm/nouveau: ensure we've parsed i2c table entry for INIT_*I2C* handlers
  drm/nouveau: display error message for any failed init table opcode
  drm/nouveau: fix init table handlers to return proper error codes
  drm/nv50: support fractional feedback divider on newer chips
  drm/nv50: fix monitor detection on certain chipsets
  drm/nv50: store full dcb i2c entry from vbios
  drm/nv50: fix suspend/resume with DP outputs
  drm/nv50: output calculated crtc pll when debugging on
  drm/nouveau: dump pll limits entries when debugging is on
  drm/nouveau: bios parser fixes for eDP boards
  drm/nouveau: fix a nouveau_bo dereference after it's been destroyed
  drm/nv40: remove some completed ctxprog TODOs
  ...
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/Makefile		2
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c		98
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	122
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c		41
-rw-r--r--	drivers/gpu/drm/ttm/ttm_memory.c	7
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c	845
-rw-r--r--	drivers/gpu/drm/ttm/ttm_tt.c		44
7 files changed, 1001 insertions(+), 158 deletions(-)
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 1e138f5bae09..4256e2006476 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,6 @@
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
-	ttm_object.o ttm_lock.o ttm_execbuf_util.o
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0e3754a3a303..555ebb12ace8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -79,8 +79,6 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
 	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
 	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
-	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
-	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
 	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
 	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
 		man->available_caching);
@@ -357,7 +355,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 
 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem,
-				  bool evict, bool interruptible, bool no_wait)
+				  bool evict, bool interruptible,
+				  bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +401,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
-		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 	else if (bdev->driver->move)
 		ret = bdev->driver->move(bo, evict, interruptible,
-					 no_wait, mem);
+					 no_wait_reserve, no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
 	if (ret)
 		goto out_err;
@@ -605,8 +604,22 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
+int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+{
+	return cancel_delayed_work_sync(&bdev->wq);
+}
+EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
+
+void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+{
+	if (resched)
+		schedule_delayed_work(&bdev->wq,
+				      ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
+
 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
-			bool no_wait)
+			bool no_wait_reserve, bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +628,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	int ret = 0;
 
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
@@ -631,6 +644,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
+	evict_mem.bus.io_reserved = false;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -638,7 +652,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	placement.num_busy_placement = 0;
 	bdev->driver->evict_flags(bo, &placement);
 	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
-				no_wait);
+				no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
@@ -650,7 +664,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
-				     no_wait);
+				     no_wait_reserve, no_wait_gpu);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +684,8 @@ out:
 
 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 				uint32_t mem_type,
-				bool interruptible, bool no_wait)
+				bool interruptible, bool no_wait_reserve,
+				bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +702,11 @@ retry:
 	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
 	kref_get(&bo->list_kref);
 
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
 
 	if (unlikely(ret == -EBUSY)) {
 		spin_unlock(&glob->lru_lock);
-		if (likely(!no_wait))
+		if (likely(!no_wait_gpu))
 			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
 		kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +728,7 @@ retry:
 	while (put_count--)
 		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +779,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 				  uint32_t mem_type,
 				  struct ttm_placement *placement,
 				  struct ttm_mem_reg *mem,
-				  bool interruptible, bool no_wait)
+				  bool interruptible,
+				  bool no_wait_reserve,
+				  bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +802,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 		}
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
-						no_wait);
+						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
 			return ret;
 	} while (1);
@@ -855,7 +872,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			struct ttm_mem_reg *mem,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -952,7 +970,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 	}
 
 	ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
-					interruptible, no_wait);
+					interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret == 0 && mem->mm_node) {
 		mem->placement = cur_flags;
 		mem->mm_node->private = bo;
@@ -978,7 +996,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -992,20 +1011,21 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * instead of doing it here.
 	 */
 	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
 	spin_unlock(&bo->lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
+	mem.bus.io_reserved = false;
 	/*
 	 * Determine where to move the buffer.
 	 */
-	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
 	if (ret)
 		goto out_unlock;
-	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
@@ -1039,7 +1059,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
-			bool interruptible, bool no_wait)
+			bool interruptible, bool no_wait_reserve,
+			bool no_wait_gpu)
 {
 	int ret;
 
@@ -1054,7 +1075,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 */
 	ret = ttm_bo_mem_compat(placement, &bo->mem);
 	if (ret < 0) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1153,6 +1174,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
+	bo->mem.bus.io_reserved = false;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1175,7 +1197,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 		goto out_err;
 	}
 
-	ret = ttm_bo_validate(bo, placement, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
 	if (ret)
 		goto out_err;
 
@@ -1249,7 +1271,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	spin_lock(&glob->lru_lock);
 	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
 		if (ret) {
 			if (allow_errors) {
 				return ret;
@@ -1553,26 +1575,6 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-		      struct ttm_mem_reg *mem,
-		      unsigned long *bus_base,
-		      unsigned long *bus_offset, unsigned long *bus_size)
-{
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-
-	*bus_size = 0;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
-
-	if (ttm_mem_reg_is_pci(bdev, mem)) {
-		*bus_offset = mem->mm_node->start << PAGE_SHIFT;
-		*bus_size = mem->num_pages << PAGE_SHIFT;
-		*bus_base = man->io_offset;
-	}
-
-	return 0;
-}
-
 void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -1581,8 +1583,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 	if (!bdev->dev_mapping)
 		return;
-
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	ttm_mem_io_free(bdev, &bo->mem);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
@@ -1811,7 +1813,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	evict_mem.mem_type = TTM_PL_SYSTEM;
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
-				     false, false);
+				     false, false, false);
 	if (unlikely(ret != 0))
 		goto out;
 	}
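
Note on the API change running through ttm_bo.c: every path that used to take a single no_wait flag now takes no_wait_reserve and no_wait_gpu separately, so a caller can fail fast on a contended reservation while still being willing to wait for the GPU, or vice versa. A minimal sketch of a driver-side pin helper against the new ttm_bo_validate() signature (the helper name and placement argument are hypothetical, not part of this patch):

    /* Hypothetical driver helper: pin a BO into VRAM, failing fast on a
     * contended reservation but still waiting for the GPU to finish. */
    static int foo_bo_pin_vram(struct ttm_buffer_object *bo,
    			       struct ttm_placement *vram_placement)
    {
    	/* interruptible = true, no_wait_reserve = true, no_wait_gpu = false */
    	return ttm_bo_validate(bo, vram_placement, true, true, false);
    }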
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d764e82e799b..13012a1f1486 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -50,7 +50,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict, bool no_wait_reserve,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
@@ -81,30 +82,51 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	int ret;
+
+	if (!mem->bus.io_reserved) {
+		mem->bus.io_reserved = true;
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+	if (bdev->driver->io_mem_reserve) {
+		if (mem->bus.io_reserved) {
+			mem->bus.io_reserved = false;
+			bdev->driver->io_mem_free(bdev, mem);
+		}
+	}
+}
+
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	ret = ttm_mem_io_reserve(bdev, mem);
+	if (ret || !mem->bus.is_iomem)
 		return ret;
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			ttm_mem_io_free(bdev, mem);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
@@ -117,8 +139,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	man = &bdev->man[mem->mem_type];
 
-	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	ttm_mem_io_free(bdev, mem);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -208,7 +231,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -369,26 +393,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-			  unsigned long bus_base,
-			  unsigned long bus_offset,
-			  unsigned long bus_size,
+			  unsigned long offset,
+			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bus_base + bus_offset,
-						  bus_size);
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
 		else
-			map->virtual = ioremap_nocache(bus_base + bus_offset,
-						       bus_size);
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -441,13 +462,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	unsigned long offset, size;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 
 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
@@ -456,16 +476,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
 	if (ret)
 		return ret;
-	if (bus_size == 0) {
+	if (!bo->mem.bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
@@ -477,6 +496,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
+		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -494,39 +514,11 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
 			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict, bool no_wait_reserve,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
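
The ttm_mem_io_reserve()/ttm_mem_io_free() pair introduced above replaces ttm_bo_pci_offset(): instead of TTM computing a PCI offset itself, the driver's io_mem_reserve() callback fills in the new mem->bus fields, and TTM caches the result via mem->bus.io_reserved. A rough sketch of what such a callback might fill in for a simple VRAM aperture (the driver type and its fields are hypothetical; the mem->bus fields and the mm_node->start arithmetic come from this patch and the removed ttm_bo_pci_offset()):

    /* Hypothetical io_mem_reserve: describe where mem lives on the bus. */
    static int foo_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
    				      struct ttm_mem_reg *mem)
    {
    	struct foo_device *fdev = container_of(bdev, struct foo_device, bdev);

    	mem->bus.addr = NULL;	/* not pre-mapped; TTM will ioremap */
    	mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
    	mem->bus.size = mem->num_pages << PAGE_SHIFT;
    	mem->bus.base = fdev->vram_aperture_base;	/* PCI BAR start */
    	mem->bus.is_iomem = true;
    	return 0;
    }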
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 668dbe8b8dd3..fe6cb77899f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -74,9 +74,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
 	    vma->vm_private_data;
 	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 	unsigned long page_offset;
 	unsigned long page_last;
 	unsigned long pfn;
@@ -84,7 +81,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct page *page;
 	int ret;
 	int i;
-	bool is_iomem;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
 
@@ -101,8 +97,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_NOPAGE;
 	}
 
-	if (bdev->driver->fault_reserve_notify)
-		bdev->driver->fault_reserve_notify(bo);
+	if (bdev->driver->fault_reserve_notify) {
+		ret = bdev->driver->fault_reserve_notify(bo);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			set_need_resched();
+		case -ERESTARTSYS:
+			retval = VM_FAULT_NOPAGE;
+			goto out_unlock;
+		default:
+			retval = VM_FAULT_SIGBUS;
+			goto out_unlock;
+		}
+	}
 
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
@@ -122,15 +131,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	spin_unlock(&bo->lock);
 
 
-	ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
-				&bus_size);
-	if (unlikely(ret != 0)) {
+	ret = ttm_mem_io_reserve(bdev, &bo->mem);
+	if (ret) {
 		retval = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
-	is_iomem = (bus_size != 0);
-
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
 	page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
@@ -154,8 +160,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 * vma->vm_page_prot when the object changes caching policy, with
 	 * the correct locks held.
 	 */
-
-	if (is_iomem) {
+	if (bo->mem.bus.is_iomem) {
 		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 						vma->vm_page_prot);
 	} else {
@@ -171,10 +176,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	 */
 
 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
-		if (is_iomem)
-			pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
-			    page_offset;
+		if (bo->mem.bus.is_iomem)
+			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
 		else {
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
@@ -198,7 +201,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 				retval =
 				    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 				goto out_unlock;
-
 			}
 
 			address += PAGE_SIZE;
@@ -221,8 +223,7 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
 
 static void ttm_bo_vm_close(struct vm_area_struct *vma)
 {
-	struct ttm_buffer_object *bo =
-	    (struct ttm_buffer_object *)vma->vm_private_data;
+	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
 
 	ttm_bo_unref(&bo);
 	vma->vm_private_data = NULL;
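
With the fault-handler hunk above, fault_reserve_notify() gains a return value: 0 proceeds with the fault, -EBUSY calls set_need_resched() and then deliberately falls through into the -ERESTARTSYS case so the fault is retried as VM_FAULT_NOPAGE, and anything else raises SIGBUS. A hedged sketch of a driver callback that uses this to migrate a BO into a CPU-mappable region on fault (all foo_* names are illustrative only, not from this patch):

    /* Hypothetical callback: make sure the BO is CPU-mappable on fault. */
    static int foo_fault_reserve_notify(struct ttm_buffer_object *bo)
    {
    	if (foo_bo_is_mappable(bo))
    		return 0;
    	/* Report contention as -EBUSY so the fault is retried after a
    	 * resched instead of the task getting SIGBUS. */
    	return foo_bo_move_to_mappable(bo);
    }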
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 801b702566e6..e70ddd82dc02 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -27,6 +27,7 @@
 
 #include "ttm/ttm_memory.h"
 #include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
@@ -393,6 +394,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
393 "Zone %7s: Available graphics memory: %llu kiB.\n", 394 "Zone %7s: Available graphics memory: %llu kiB.\n",
394 zone->name, (unsigned long long) zone->max_mem >> 10); 395 zone->name, (unsigned long long) zone->max_mem >> 10);
395 } 396 }
397 ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
396 return 0; 398 return 0;
397out_no_zone: 399out_no_zone:
398 ttm_mem_global_release(glob); 400 ttm_mem_global_release(glob);
@@ -405,6 +407,9 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	unsigned int i;
 	struct ttm_mem_zone *zone;
 
+	/* let the page allocator first stop the shrink work. */
+	ttm_page_alloc_fini();
+
 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);
 	glob->swap_queue = NULL;
@@ -412,7 +417,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 		zone = glob->zones[i];
 		kobject_del(&zone->kobj);
 		kobject_put(&zone->kobj);
 	}
 	kobject_del(&glob->kobj);
 	kobject_put(&glob->kobj);
 }
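
For scale: ttm_page_alloc_init() caps the new pools at half the kernel zone, expressed in pages (max_mem / (2 * PAGE_SIZE)). With a 2 GiB kernel zone and 4 KiB pages that is 2^31 / (2 * 2^12) = 262,144 pages, i.e. the pools may keep at most 1 GiB of uc/wc pages cached for reuse.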
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
new file mode 100644
index 000000000000..0d9a42c2394f
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie <airlied@redhat.com>
+ *          Jerome Glisse <jglisse@redhat.com>
+ *          Pauli Nieminen <suokkos@gmail.com>
+ */
+
+/* simple list based uncached page pool
+ * - Pool collects recently freed pages for reuse
+ * - Use page->lru to keep a free list
+ * - doesn't track currently in use pages
+ */
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+
+#include <asm/atomic.h>
+#include <asm/agp.h>
+
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+
+
+#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION	16
+#define FREE_ALL_PAGES		(~0U)
+/* times are in msecs */
+#define PAGE_FREE_INTERVAL	1000
+
+/**
+ * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
+ *
+ * @lock: Protects the shared pool from concurrent access. Must be used with
+ * irqsave/irqrestore variants because the pool allocator may be called from
+ * delayed work.
+ * @fill_lock: Prevent concurrent calls to fill.
+ * @list: Pool of free uc/wc pages for fast reuse.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @npages: Number of pages in pool.
+ */
+struct ttm_page_pool {
+	spinlock_t		lock;
+	bool			fill_lock;
+	struct list_head	list;
+	int			gfp_flags;
+	unsigned		npages;
+	char			*name;
+	unsigned long		nfrees;
+	unsigned long		nrefills;
+};
+
+/**
+ * Limits for the pool. They are handled without locks because the only place
+ * where they may change is in sysfs store. They won't have immediate effect
+ * anyway so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+	unsigned	alloc_size;
+	unsigned	max_size;
+	unsigned	small;
+};
+
+#define NUM_POOLS 4
+
+/**
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * Manager is read only object for pool code so it doesn't need locking.
+ *
+ * @free_interval: minimum number of jiffies between freeing pages from pool.
+ * @page_alloc_inited: reference counting for pool allocation.
+ * @work: Work that is used to shrink the pool. Work is only run when there
+ * are some pages to free.
+ * @small_allocation: Limit in number of pages below which an allocation is
+ * considered small.
+ *
+ * @pools: All pool objects in use.
+ **/
+struct ttm_pool_manager {
+	struct kobject		kobj;
+	struct shrinker		mm_shrink;
+	atomic_t		page_alloc_inited;
+	struct ttm_pool_opts	options;
+
+	union {
+		struct ttm_page_pool	pools[NUM_POOLS];
+		struct {
+			struct ttm_page_pool	wc_pool;
+			struct ttm_page_pool	uc_pool;
+			struct ttm_page_pool	wc_pool_dma32;
+			struct ttm_page_pool	uc_pool_dma32;
+		};
+	};
+};
+
+static struct attribute ttm_page_pool_max = {
+	.name = "pool_max_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+	.name = "pool_small_allocation",
+	.mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+	.name = "pool_allocation_size",
+	.mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+	&ttm_page_pool_max,
+	&ttm_page_pool_small,
+	&ttm_page_pool_alloc_size,
+	NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	(void)m;
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj,
+		struct attribute *attr, const char *buffer, size_t size)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	int chars;
+	unsigned val;
+	chars = sscanf(buffer, "%u", &val);
+	if (chars == 0)
+		return size;
+
+	/* Convert kb to number of pages */
+	val = val / (PAGE_SIZE >> 10);
+
+	if (attr == &ttm_page_pool_max)
+		m->options.max_size = val;
+	else if (attr == &ttm_page_pool_small)
+		m->options.small = val;
+	else if (attr == &ttm_page_pool_alloc_size) {
+		if (val > NUM_PAGES_TO_ALLOC*8) {
+			printk(KERN_ERR "[ttm] Setting allocation size to %lu "
+					"is not allowed. Recommended size is "
+					"%lu\n",
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+			return size;
+		} else if (val > NUM_PAGES_TO_ALLOC) {
+			printk(KERN_WARNING "[ttm] Setting allocation size to "
+					"larger than %lu is not recommended.\n",
+					NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+		}
+		m->options.alloc_size = val;
+	}
+
+	return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj,
+		struct attribute *attr, char *buffer)
+{
+	struct ttm_pool_manager *m =
+		container_of(kobj, struct ttm_pool_manager, kobj);
+	unsigned val = 0;
+
+	if (attr == &ttm_page_pool_max)
+		val = m->options.max_size;
+	else if (attr == &ttm_page_pool_small)
+		val = m->options.small;
+	else if (attr == &ttm_page_pool_alloc_size)
+		val = m->options.alloc_size;
+
+	val = val * (PAGE_SIZE >> 10);
+
+	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+	.show = &ttm_pool_show,
+	.store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+	.release = &ttm_pool_kobj_release,
+	.sysfs_ops = &ttm_pool_sysfs_ops,
+	.default_attrs = ttm_pool_attrs,
+};
+
+static struct ttm_pool_manager _manager = {
+	.page_alloc_inited	= ATOMIC_INIT(0)
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		unmap_page_from_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+	int i;
+
+	for (i = 0; i < addrinarray; i++)
+		map_page_into_agp(pages[i]);
+#endif
+	return 0;
+}
+#endif
+
+/**
+ * Select the right pool for the requested caching state and ttm flags. */
+static struct ttm_page_pool *ttm_get_pool(int flags,
+		enum ttm_caching_state cstate)
+{
+	int pool_index;
+
+	if (cstate == tt_cached)
+		return NULL;
+
+	if (cstate == tt_wc)
+		pool_index = 0x0;
+	else
+		pool_index = 0x1;
+
+	if (flags & TTM_PAGE_FLAG_DMA32)
+		pool_index |= 0x2;
+
+	return &_manager.pools[pool_index];
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_pages_put(struct page *pages[], unsigned npages)
+{
+	unsigned i;
+	if (set_pages_array_wb(pages, npages))
+		printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+				npages);
+	for (i = 0; i < npages; ++i)
+		__free_page(pages[i]);
+}
+
+static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
+		unsigned freed_pages)
+{
+	pool->npages -= freed_pages;
+	pool->nfrees += freed_pages;
+}
+
+/**
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: If set to FREE_ALL_PAGES will free all pages in pool
+ **/
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+{
+	unsigned long irq_flags;
+	struct page *p;
+	struct page **pages_to_free;
+	unsigned freed_pages = 0,
+		 npages_to_free = nr_free;
+
+	if (NUM_PAGES_TO_ALLOC < nr_free)
+		npages_to_free = NUM_PAGES_TO_ALLOC;
+
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+			GFP_KERNEL);
+	if (!pages_to_free) {
+		printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+		return 0;
+	}
+
+restart:
+	spin_lock_irqsave(&pool->lock, irq_flags);
+
+	list_for_each_entry_reverse(p, &pool->list, lru) {
+		if (freed_pages >= npages_to_free)
+			break;
+
+		pages_to_free[freed_pages++] = p;
+		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+			/* remove range of pages from the pool */
+			__list_del(p->lru.prev, &pool->list);
+
+			ttm_pool_update_free_locked(pool, freed_pages);
+			/**
+			 * Because changing page caching is costly
+			 * we unlock the pool to prevent stalling.
+			 */
+			spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+			ttm_pages_put(pages_to_free, freed_pages);
+			if (likely(nr_free != FREE_ALL_PAGES))
+				nr_free -= freed_pages;
+
+			if (NUM_PAGES_TO_ALLOC >= nr_free)
+				npages_to_free = nr_free;
+			else
+				npages_to_free = NUM_PAGES_TO_ALLOC;
+
+			freed_pages = 0;
+
+			/* free all so restart the processing */
+			if (nr_free)
+				goto restart;
+
+			/* Not allowed to fall through or break because
+			 * following context is inside spinlock while we are
+			 * outside here.
+			 */
+			goto out;
+
+		}
+	}
+
+	/* remove range of pages from the pool */
+	if (freed_pages) {
+		__list_del(&p->lru, &pool->list);
+
+		ttm_pool_update_free_locked(pool, freed_pages);
+		nr_free -= freed_pages;
+	}
+
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+	if (freed_pages)
+		ttm_pages_put(pages_to_free, freed_pages);
+out:
+	kfree(pages_to_free);
+	return nr_free;
+}
+
+/* Get a good estimate of how many pages are free in pools */
+static int ttm_pool_get_num_unused_pages(void)
+{
+	unsigned i;
+	int total = 0;
+	for (i = 0; i < NUM_POOLS; ++i)
+		total += _manager.pools[i].npages;
+
+	return total;
+}
+
+/**
+ * Callback for mm to request pool to reduce number of pages held.
+ */
+static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+{
+	static atomic_t start_pool = ATOMIC_INIT(0);
+	unsigned i;
+	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	struct ttm_page_pool *pool;
+
+	pool_offset = pool_offset % NUM_POOLS;
+	/* select start pool in round robin fashion */
+	for (i = 0; i < NUM_POOLS; ++i) {
+		unsigned nr_free = shrink_pages;
+		if (shrink_pages == 0)
+			break;
+		pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+		shrink_pages = ttm_page_pool_free(pool, nr_free);
+	}
+	/* return estimated number of unused pages in pool */
+	return ttm_pool_get_num_unused_pages();
+}
+
+static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.seeks = 1;
+	register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+	unregister_shrinker(&manager->mm_shrink);
+}
+
+static int ttm_set_pages_caching(struct page **pages,
+		enum ttm_caching_state cstate, unsigned cpages)
+{
+	int r = 0;
+	/* Set page caching */
+	switch (cstate) {
+	case tt_uncached:
+		r = set_pages_array_uc(pages, cpages);
+		if (r)
+			printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
+					cpages);
+		break;
+	case tt_wc:
+		r = set_pages_array_wc(pages, cpages);
+		if (r)
+			printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
+					cpages);
+		break;
+	default:
+		break;
+	}
+	return r;
+}
+
+/**
+ * Free the pages that failed to change the caching state. If there are any
+ * pages that have already changed their caching state, put them back to the
+ * pool.
+ */
+static void ttm_handle_caching_state_failure(struct list_head *pages,
+		int ttm_flags, enum ttm_caching_state cstate,
+		struct page **failed_pages, unsigned cpages)
+{
+	unsigned i;
+	/* Failed pages have to be freed */
+	for (i = 0; i < cpages; ++i) {
+		list_del(&failed_pages[i]->lru);
+		__free_page(failed_pages[i]);
+	}
+}
+
+/**
+ * Allocate new pages with correct caching.
+ *
+ * This function is reentrant if caller updates count depending on number of
+ * pages returned in pages array.
+ */
+static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+{
+	struct page **caching_array;
+	struct page *p;
+	int r = 0;
+	unsigned i, cpages;
+	unsigned max_cpages = min(count,
+			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+	/* allocate array for page caching change */
+	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+	if (!caching_array) {
+		printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+		return -ENOMEM;
+	}
+
+	for (i = 0, cpages = 0; i < count; ++i) {
+		p = alloc_page(gfp_flags);
+
+		if (!p) {
+			printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+
+			/* store already allocated pages in the pool after
+			 * setting the caching state */
+			if (cpages) {
+				r = ttm_set_pages_caching(caching_array, cstate, cpages);
+				if (r)
+					ttm_handle_caching_state_failure(pages,
+							ttm_flags, cstate,
+							caching_array, cpages);
+			}
+			r = -ENOMEM;
+			goto out;
+		}
+
+#ifdef CONFIG_HIGHMEM
+		/* gfp flags of highmem page should never be dma32 so we
+		 * should be fine in such case
+		 */
+		if (!PageHighMem(p))
+#endif
+		{
+			caching_array[cpages++] = p;
+			if (cpages == max_cpages) {
+
+				r = ttm_set_pages_caching(caching_array,
+						cstate, cpages);
+				if (r) {
+					ttm_handle_caching_state_failure(pages,
+							ttm_flags, cstate,
+							caching_array, cpages);
+					goto out;
+				}
+				cpages = 0;
+			}
+		}
+
+		list_add(&p->lru, pages);
+	}
+
+	if (cpages) {
+		r = ttm_set_pages_caching(caching_array, cstate, cpages);
+		if (r)
+			ttm_handle_caching_state_failure(pages,
+					ttm_flags, cstate,
+					caching_array, cpages);
+	}
+out:
+	kfree(caching_array);
+
+	return r;
+}
+
+/**
+ * Fill the given pool if there aren't enough pages and the requested number
+ * of pages is small.
+ */
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
+		unsigned long *irq_flags)
+{
+	struct page *p;
+	int r;
+	unsigned cpages = 0;
+	/**
+	 * Only allow one pool fill operation at a time.
+	 * If pool doesn't have enough pages for the allocation new pages are
+	 * allocated from outside of pool.
+	 */
+	if (pool->fill_lock)
+		return;
+
+	pool->fill_lock = true;
+
+	/* If allocation request is small and there aren't enough
+	 * pages in the pool we fill the pool first */
+	if (count < _manager.options.small
+		&& count > pool->npages) {
+		struct list_head new_pages;
+		unsigned alloc_size = _manager.options.alloc_size;
+
+		/**
+		 * Can't change page caching if in irqsave context. We have to
+		 * drop the pool->lock.
+		 */
+		spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+		INIT_LIST_HEAD(&new_pages);
+		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
+				cstate, alloc_size);
+		spin_lock_irqsave(&pool->lock, *irq_flags);
+
+		if (!r) {
+			list_splice(&new_pages, &pool->list);
+			++pool->nrefills;
+			pool->npages += alloc_size;
+		} else {
+			printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+			/* If we have any pages left put them to the pool. */
+			list_for_each_entry(p, &pool->list, lru) {
+				++cpages;
+			}
+			list_splice(&new_pages, &pool->list);
+			pool->npages += cpages;
+		}
+
+	}
+	pool->fill_lock = false;
+}
+
+/**
+ * Cut count number of pages from the pool and put them on the return list.
+ *
+ * @return count of pages still to allocate to fill the request.
+ */
+static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
+		struct list_head *pages, int ttm_flags,
+		enum ttm_caching_state cstate, unsigned count)
+{
+	unsigned long irq_flags;
+	struct list_head *p;
+	unsigned i;
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+
+	if (count >= pool->npages) {
+		/* take all pages from the pool */
+		list_splice_init(&pool->list, pages);
+		count -= pool->npages;
+		pool->npages = 0;
+		goto out;
+	}
+	/* find the last page to include for the requested number of pages.
+	 * Walk from whichever end of the list is closer to reduce the
+	 * search space. */
+	if (count <= pool->npages/2) {
+		i = 0;
+		list_for_each(p, &pool->list) {
+			if (++i == count)
+				break;
+		}
+	} else {
+		i = pool->npages + 1;
+		list_for_each_prev(p, &pool->list) {
+			if (--i == count)
+				break;
+		}
+	}
+	/* Cut count number of pages from pool */
+	list_cut_position(pages, &pool->list, p);
+	pool->npages -= count;
+	count = 0;
+out:
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	return count;
+}
+
+/*
+ * On success pages list will hold count number of correctly
+ * cached pages.
+ */
+int ttm_get_pages(struct list_head *pages, int flags,
+		enum ttm_caching_state cstate, unsigned count)
+{
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct page *p = NULL;
+	int gfp_flags = 0;
+	int r;
+
+	/* set zero flag for page allocation if required */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+		gfp_flags |= __GFP_ZERO;
+
+	/* No pool for cached pages */
+	if (pool == NULL) {
+		if (flags & TTM_PAGE_FLAG_DMA32)
+			gfp_flags |= GFP_DMA32;
+		else
+			gfp_flags |= __GFP_HIGHMEM;
+
+		for (r = 0; r < count; ++r) {
+			p = alloc_page(gfp_flags);
+			if (!p) {
+
+				printk(KERN_ERR "[ttm] unable to allocate page.");
+				return -ENOMEM;
+			}
+
+			list_add(&p->lru, pages);
+		}
+		return 0;
+	}
+
+
+	/* combine zero flag to pool flags */
+	gfp_flags |= pool->gfp_flags;
+
+	/* First we take pages from the pool */
+	count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+
+	/* clear the pages coming from the pool if requested */
+	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
+		list_for_each_entry(p, pages, lru) {
+			clear_page(page_address(p));
+		}
+	}
+
+	/* If pool didn't have enough pages allocate new ones. */
+	if (count > 0) {
+		/* ttm_alloc_new_pages doesn't reference pool so we can run
+		 * multiple requests in parallel.
+		 **/
+		r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+		if (r) {
+			/* If there are any pages in the list put them back to
+			 * the pool. */
+			printk(KERN_ERR "[ttm] Failed to allocate extra pages "
+					"for large request.");
+			ttm_put_pages(pages, 0, flags, cstate);
+			return r;
+		}
+	}
+
+
+	return 0;
+}
+
+/* Put all pages in pages list to correct pool to wait for reuse */
+void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
+		enum ttm_caching_state cstate)
+{
+	unsigned long irq_flags;
+	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct page *p, *tmp;
+
+	if (pool == NULL) {
+		/* No pool for this memory type so free the pages */
+
+		list_for_each_entry_safe(p, tmp, pages, lru) {
+			__free_page(p);
+		}
+		/* Make the pages list empty */
+		INIT_LIST_HEAD(pages);
+		return;
+	}
+	if (page_count == 0) {
+		list_for_each_entry_safe(p, tmp, pages, lru) {
+			++page_count;
+		}
+	}
+
+	spin_lock_irqsave(&pool->lock, irq_flags);
+	list_splice_init(pages, &pool->list);
+	pool->npages += page_count;
+	/* Check that we don't go over the pool limit */
+	page_count = 0;
+	if (pool->npages > _manager.options.max_size) {
+		page_count = pool->npages - _manager.options.max_size;
+		/* free at least NUM_PAGES_TO_ALLOC number of pages
+		 * to reduce calls to set_memory_wb */
+		if (page_count < NUM_PAGES_TO_ALLOC)
+			page_count = NUM_PAGES_TO_ALLOC;
+	}
+	spin_unlock_irqrestore(&pool->lock, irq_flags);
+	if (page_count)
+		ttm_page_pool_free(pool, page_count);
+}
+
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+		char *name)
+{
+	spin_lock_init(&pool->lock);
+	pool->fill_lock = false;
+	INIT_LIST_HEAD(&pool->list);
+	pool->npages = pool->nfrees = 0;
+	pool->gfp_flags = flags;
+	pool->name = name;
+}
+
+int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+	int ret;
+	if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
+		return 0;
+
+	printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+
+	ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+
+	ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+
+	ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
+			"wc dma");
+
+	ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
+			"uc dma");
+
+	_manager.options.max_size = max_pages;
+	_manager.options.small = SMALL_ALLOCATION;
+	_manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+	kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
+	ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+	if (unlikely(ret != 0)) {
+		kobject_put(&_manager.kobj);
+		return ret;
+	}
+
+	ttm_pool_mm_shrink_init(&_manager);
+
+	return 0;
+}
+
+void ttm_page_alloc_fini()
+{
+	int i;
+
+	if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
+		return;
+
+	printk(KERN_INFO "[ttm] Finalizing pool allocator.\n");
+	ttm_pool_mm_shrink_fini(&_manager);
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+
+	kobject_put(&_manager.kobj);
+}
+
+int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+	struct ttm_page_pool *p;
+	unsigned i;
+	char *h[] = {"pool", "refills", "pages freed", "size"};
+	if (atomic_read(&_manager.page_alloc_inited) == 0) {
+		seq_printf(m, "No pool allocator running.\n");
+		return 0;
+	}
+	seq_printf(m, "%6s %12s %13s %8s\n",
+			h[0], h[1], h[2], h[3]);
+	for (i = 0; i < NUM_POOLS; ++i) {
+		p = &_manager.pools[i];
+
+		seq_printf(m, "%6s %12ld %13ld %8d\n",
+				p->name, p->nrefills,
+				p->nfrees, p->npages);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_page_alloc_debugfs);
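
The allocator's external contract, as defined in this new file: ttm_get_pages() fills a caller-provided list with count pages whose caching state already matches cstate, and ttm_put_pages() returns them to the matching pool (or frees them outright for cached pages, which have no pool). A minimal usage sketch under those signatures (error handling trimmed):

    /* Allocate four zeroed write-combined pages, then give them back. */
    LIST_HEAD(pages);
    int r;

    r = ttm_get_pages(&pages, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, 4);
    if (r == 0) {
    	/* ... use the pages; they stay WC until released ... */
    	ttm_put_pages(&pages, 4, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
    }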
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5fd5b8faeb3..a7bab87a548b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -39,6 +39,7 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static int ttm_tt_swapin(struct ttm_tt *ttm);
 
@@ -56,21 +57,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->pages = NULL;
 }
 
-static struct page *ttm_tt_alloc_page(unsigned page_flags)
-{
-	gfp_t gfp_flags = GFP_USER;
-
-	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		gfp_flags |= __GFP_ZERO;
-
-	if (page_flags & TTM_PAGE_FLAG_DMA32)
-		gfp_flags |= __GFP_DMA32;
-	else
-		gfp_flags |= __GFP_HIGHMEM;
-
-	return alloc_page(gfp_flags);
-}
-
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 {
 	int write;
@@ -111,15 +97,21 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
 static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
 {
 	struct page *p;
+	struct list_head h;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
 	int ret;
 
 	while (NULL == (p = ttm->pages[index])) {
-		p = ttm_tt_alloc_page(ttm->page_flags);
 
-		if (!p)
+		INIT_LIST_HEAD(&h);
+
+		ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+
+		if (ret != 0)
 			return NULL;
 
+		p = list_first_entry(&h, struct page, lru);
+
 		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
 		if (unlikely(ret != 0))
 			goto out_err;
@@ -228,10 +220,10 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
 	if (ttm->caching_state == c_state)
 		return 0;
 
-	if (c_state != tt_cached) {
-		ret = ttm_tt_populate(ttm);
-		if (unlikely(ret != 0))
-			return ret;
+	if (ttm->state == tt_unpopulated) {
+		/* Change caching but don't populate */
+		ttm->caching_state = c_state;
+		return 0;
 	}
 
 	if (ttm->caching_state == tt_cached)
@@ -282,13 +274,17 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
 {
 	int i;
+	unsigned count = 0;
+	struct list_head h;
 	struct page *cur_page;
 	struct ttm_backend *be = ttm->be;
 
+	INIT_LIST_HEAD(&h);
+
 	if (be)
 		be->func->clear(be);
-	(void)ttm_tt_set_caching(ttm, tt_cached);
 	for (i = 0; i < ttm->num_pages; ++i) {
+
 		cur_page = ttm->pages[i];
 		ttm->pages[i] = NULL;
 		if (cur_page) {
@@ -298,9 +294,11 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
298 "Leaking pages.\n"); 294 "Leaking pages.\n");
299 ttm_mem_global_free_page(ttm->glob->mem_glob, 295 ttm_mem_global_free_page(ttm->glob->mem_glob,
300 cur_page); 296 cur_page);
301 __free_page(cur_page); 297 list_add(&cur_page->lru, &h);
298 count++;
302 } 299 }
303 } 300 }
301 ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
304 ttm->state = tt_unpopulated; 302 ttm->state = tt_unpopulated;
305 ttm->first_himem_page = ttm->num_pages; 303 ttm->first_himem_page = ttm->num_pages;
306 ttm->last_lomem_page = -1; 304 ttm->last_lomem_page = -1;