-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo.c        44
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_util.c  129
-rw-r--r--   drivers/gpu/drm/ttm/ttm_bo_vm.c     23
-rw-r--r--   include/drm/ttm/ttm_bo_api.h         6
-rw-r--r--   include/drm/ttm/ttm_bo_driver.h    104
5 files changed, 226 insertions, 80 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 25e4c2a1d1d8..cf2ec562550e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -378,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
@@ -466,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -665,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -672,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -728,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -1065,7 +1074,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1184,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1193,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1367,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1574,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1583,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
 }
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
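
Note that ttm_bo_init_mm() above defaults every memory type to the lock-free fastpath (io_reserve_fastpath = true, use_io_reserve_lru = false), so drivers whose io_mem_reserve hook only returns static information keep their old behaviour. A driver that wants refcounted, evictable io reservations has to opt out in its init_mem_type hook. A minimal sketch of such a hook follows; the foo_ prefix and the VRAM-only policy are illustrative assumptions, not part of this patch:

static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* Reservations are dynamic: serialize them behind
		 * man->io_reserve_mutex instead of the fastpath. */
		man->io_reserve_fastpath = false;
		/* Let ttm_mem_io_evict() recycle VM-mapped regions when
		 * our io_mem_reserve hook returns -EAGAIN. */
		man->use_io_reserve_lru = true;
		break;
	default:
		/* Static types keep the lock-free fastpath default. */
		break;
	}
	return 0;
}
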
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4b75133d6606..a89839f83f6c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
-	int ret;
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
 
-	if (!mem->bus.io_reserved) {
-		mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
 		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
 		if (unlikely(ret != 0))
 			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
 	}
 	return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-	if (bdev->driver->io_mem_reserve) {
-		if (mem->bus.io_reserved) {
-			mem->bus.io_reserved = false;
-			bdev->driver->io_mem_free(bdev, mem);
-		}
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
 	}
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 		else
 			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_mem_reg old_copy;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -281,7 +371,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	mb();
 out2:
 	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
@@ -292,7 +382,7 @@ out2:
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
 	return ret;
@@ -341,6 +431,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -452,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -466,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -481,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
-		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -499,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8dd446cb778e..221b924acebe 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int i;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -130,12 +132,16 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	} else
 		spin_unlock(&bdev->fence_lock);
 
-
-	ret = ttm_mem_io_reserve(bdev, &bo->mem);
-	if (ret) {
-		retval = VM_FAULT_SIGBUS;
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		goto out_io_unlock;
 	}
 
 	/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		page = ttm_tt_get_page(ttm, page_offset);
 		if (unlikely(!page && i == 0)) {
 			retval = VM_FAULT_OOM;
-			goto out_unlock;
+			goto out_io_unlock;
 		} else if (unlikely(!page)) {
 			break;
 		}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-			goto out_unlock;
+			goto out_io_unlock;
 		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-
+out_io_unlock:
+	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
 	return retval;
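
Condensed from the hunks above, the fault handler's exit paths now pair up as follows (control-flow sketch only, not additional code):

	ret = ttm_mem_io_lock(man, true);	/* interruptible */
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;	/* interrupted: fault retried */
		goto out_unlock;		/* lock not held, skip io unlock */
	}
	ret = ttm_mem_io_reserve_vm(bo);	/* refcounted; adds bo to LRU */
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;		/* lock held: release it */
	}
	/* ... insert PFNs; every later error also jumps to out_io_unlock */
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
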
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index edacd483c59c..50852aad260a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@ struct ttm_placement {
  * @is_iomem: is this io memory ?
  * @size: size in byte
  * @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
@@ -83,7 +85,8 @@ struct ttm_bus_placement {
 	unsigned long	size;
 	unsigned long	offset;
 	bool		is_iomem;
-	bool		io_reserved;
+	bool		io_reserved_vm;
+	uint64_t	io_reserved_count;
 };
 
 
@@ -235,6 +238,7 @@ struct ttm_buffer_object {
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
+	struct list_head io_reserve_lru;
 	uint32_t val_seq;
 	bool seq_valid;
 
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3b2e245db1b..1da8af6ac884 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@ struct ttm_tt {
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
 
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@ struct ttm_mem_type_manager_func {
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
 
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
 struct ttm_mem_type_manager {
 	struct ttm_bo_device *bdev;
 
@@ -303,6 +309,15 @@ struct ttm_mem_type_manager {
 	uint32_t default_caching;
 	const struct ttm_mem_type_manager_func *func;
 	void *priv;
+	struct mutex io_reserve_mutex;
+	bool use_io_reserve_lru;
+	bool io_reserve_fastpath;
+
+	/*
+	 * Protected by @io_reserve_mutex:
+	 */
+
+	struct list_head io_reserve_lru;
 
 	/*
 	 * Protected by the global->lru_lock.
@@ -758,31 +773,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 
 extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
 
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-			     struct ttm_mem_reg *mem,
-			     unsigned long *bus_base,
-			     unsigned long *bus_offset,
-			     unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-			      struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
-			    struct ttm_mem_reg *mem);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -815,6 +805,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+			   bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
  * ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
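
With ttm_mem_io_reserve()/ttm_mem_io_free() now static to ttm_bo_util.c, code outside TTM gets only the vm-facing, refcounted wrappers declared above. A caller would presumably follow the same lock/reserve pattern as the fault handler; a sketch under that assumption (foo_pin_bo_iomem is illustrative, not from this patch):

static int foo_pin_bo_iomem(struct ttm_buffer_object *bo)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	int ret;

	/* Interruptible here; pass false in paths that cannot restart. */
	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0))
		return ret;
	/* Refcounted reserve; also puts the bo on man->io_reserve_lru
	 * when use_io_reserve_lru is set, making it evictable later. */
	ret = ttm_mem_io_reserve_vm(bo);
	ttm_mem_io_unlock(man);
	return ret;
}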