author	Thomas Hellstrom <thellstrom@vmware.com>	2010-11-11 03:41:57 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-11-21 22:25:22 -0500
commit	eba67093f535322cb4f1c4b737319c0907a0c81d
tree	695b9a43d558a2870b23d6813f60387d7d61c614 /drivers/gpu/drm/ttm
parent	65705962025df490d13df59ec57c5329d1bd0a16
drm/ttm: Fix up io_mem_reserve / io_mem_free calling
This patch attempts to fix up shortcomings with the current calling sequences.

1) There's a fastpath where no locking occurs and only io_mem_reserve is
   called to obtain needed info for mapping. The fastpath is set per
   memory type manager.
2) If the fastpath is disabled, io_mem_reserve and io_mem_free will be
   exactly balanced and not called recursively for the same struct
   ttm_mem_reg.
3) Optionally the driver can choose to enable a per memory type manager
   LRU eviction mechanism that, when io_mem_reserve returns -EAGAIN,
   will attempt to kill user-space mappings of memory in that manager to
   free up needed resources.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
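As a rough driver-side illustration of the calling convention described above (a minimal sketch only: the foo_* functions, the aperture helpers and the choice of TTM_PL_VRAM are hypothetical and not part of this patch), a driver that wants the LRU eviction path could wire it up along these lines:

static int foo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	if (type == TTM_PL_VRAM) {
		/* Opt out of the lockless fastpath for this manager and
		 * let TTM tear down user-space mappings on -EAGAIN. */
		man->io_reserve_fastpath = false;
		man->use_io_reserve_lru = true;
	}
	/* ... remaining per-type setup ... */
	return 0;
}

static int foo_io_mem_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	if (mem->mem_type != TTM_PL_VRAM)
		return 0;
	mem->bus.is_iomem = true;
	/* Hypothetical aperture mapper: returning -EAGAIN asks TTM to
	 * unmap the least recently used io-reserved buffer in this
	 * manager and retry the reservation. */
	return foo_aperture_map(bdev, mem);
}

static void foo_io_mem_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	/* Balanced against io_mem_reserve: only called once the last
	 * reservation of this struct ttm_mem_reg has been released. */
	foo_aperture_unmap(bdev, mem);
}

With those flags set, ttm_mem_io_reserve() takes the manager's io_reserve_mutex instead of the fastpath and falls back to ttm_mem_io_evict() whenever the driver callback reports -EAGAIN.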
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	44
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c	129
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c	23
3 files changed, 166 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 25e4c2a1d1d..cf2ec562550 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -378,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
@@ -466,7 +471,6 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -665,6 +669,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -672,6 +677,9 @@ static void ttm_bo_release(struct kref *kref)
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -728,7 +736,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -1065,7 +1074,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1184,6 +1194,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1193,7 +1204,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1367,6 +1379,10 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1574,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1583,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
 }
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 4b75133d660..a89839f83f6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
-	int ret;
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
 
-	if (!mem->bus.io_reserved) {
-		mem->bus.io_reserved = true;
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
 		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
 		if (unlikely(ret != 0))
 			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
 	}
 	return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-	if (bdev->driver->io_mem_reserve) {
-		if (mem->bus.io_reserved) {
-			mem->bus.io_reserved = false;
-			bdev->driver->io_mem_free(bdev, mem);
-		}
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
 	}
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -117,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 		else
 			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -134,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -231,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_mem_reg old_copy;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -281,7 +371,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	mb();
 out2:
 	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
@@ -292,7 +382,7 @@ out2:
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
 	return ret;
@@ -341,6 +431,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -452,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -466,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -481,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
-		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -499,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8dd446cb778..221b924aceb 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int i;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -130,12 +132,16 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	} else
 		spin_unlock(&bdev->fence_lock);
 
-
-	ret = ttm_mem_io_reserve(bdev, &bo->mem);
-	if (ret) {
-		retval = VM_FAULT_SIGBUS;
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		goto out_io_unlock;
 	}
 
 	/*
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
 				retval = VM_FAULT_OOM;
-				goto out_unlock;
+				goto out_io_unlock;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-			goto out_unlock;
+			goto out_io_unlock;
 		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-
+out_io_unlock:
+	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
 	return retval;