Diffstat (limited to 'drivers/gpu/drm/ttm')
 drivers/gpu/drm/ttm/Makefile           |   3
 drivers/gpu/drm/ttm/ttm_agp_backend.c  |   6
 drivers/gpu/drm/ttm/ttm_bo.c           | 550
 drivers/gpu/drm/ttm/ttm_bo_manager.c   | 157
 drivers/gpu/drm/ttm/ttm_bo_util.c      | 158
 drivers/gpu/drm/ttm/ttm_bo_vm.c        |  29
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 169
 drivers/gpu/drm/ttm/ttm_object.c       |   2
 drivers/gpu/drm/ttm/ttm_page_alloc.c   |  12
 drivers/gpu/drm/ttm/ttm_tt.c           |  37
10 files changed, 747 insertions, 376 deletions
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b256d4adfafe..f3cf6f02c997 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,6 +4,7 @@ | |||
4 | ccflags-y := -Iinclude/drm | 4 | ccflags-y := -Iinclude/drm |
5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ | 5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ |
6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ | 6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ |
7 | ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o | 7 | ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ |
8 | ttm_bo_manager.o | ||
8 | 9 | ||
9 | obj-$(CONFIG_DRM_TTM) += ttm.o | 10 | obj-$(CONFIG_DRM_TTM) += ttm.o |
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4bf69c404491..1c4a72f681c1 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend { | |||
47 | 47 | ||
48 | static int ttm_agp_populate(struct ttm_backend *backend, | 48 | static int ttm_agp_populate(struct ttm_backend *backend, |
49 | unsigned long num_pages, struct page **pages, | 49 | unsigned long num_pages, struct page **pages, |
50 | struct page *dummy_read_page) | 50 | struct page *dummy_read_page, |
51 | dma_addr_t *dma_addrs) | ||
51 | { | 52 | { |
52 | struct ttm_agp_backend *agp_be = | 53 | struct ttm_agp_backend *agp_be = |
53 | container_of(backend, struct ttm_agp_backend, backend); | 54 | container_of(backend, struct ttm_agp_backend, backend); |
@@ -74,6 +75,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) | |||
74 | { | 75 | { |
75 | struct ttm_agp_backend *agp_be = | 76 | struct ttm_agp_backend *agp_be = |
76 | container_of(backend, struct ttm_agp_backend, backend); | 77 | container_of(backend, struct ttm_agp_backend, backend); |
78 | struct drm_mm_node *node = bo_mem->mm_node; | ||
77 | struct agp_memory *mem = agp_be->mem; | 79 | struct agp_memory *mem = agp_be->mem; |
78 | int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); | 80 | int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); |
79 | int ret; | 81 | int ret; |
@@ -81,7 +83,7 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem) | |||
81 | mem->is_flushed = 1; | 83 | mem->is_flushed = 1; |
82 | mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; | 84 | mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY; |
83 | 85 | ||
84 | ret = agp_bind_memory(mem, bo_mem->mm_node->start); | 86 | ret = agp_bind_memory(mem, node->start); |
85 | if (ret) | 87 | if (ret) |
86 | printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); | 88 | printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n"); |
87 | 89 | ||
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index db809e034cc4..2e618b5ac465 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@ | |||
27 | /* | 27 | /* |
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
29 | */ | 29 | */ |
30 | /* Notes: | ||
31 | * | ||
32 | * We store bo pointer in drm_mm_node struct so we know which bo own a | ||
33 | * specific node. There is no protection on the pointer, thus to make | ||
34 | * sure things don't go berserk you have to access this pointer while | ||
35 | * holding the global lru lock and make sure anytime you free a node you | ||
36 | * reset the pointer to NULL. | ||
37 | */ | ||
38 | 30 | ||
39 | #include "ttm/ttm_module.h" | 31 | #include "ttm/ttm_module.h" |
40 | #include "ttm/ttm_bo_driver.h" | 32 | #include "ttm/ttm_bo_driver.h" |
@@ -45,6 +37,7 @@ | |||
45 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
46 | #include <linux/file.h> | 38 | #include <linux/file.h> |
47 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <asm/atomic.h> | ||
48 | 41 | ||
49 | #define TTM_ASSERT_LOCKED(param) | 42 | #define TTM_ASSERT_LOCKED(param) |
50 | #define TTM_DEBUG(fmt, arg...) | 43 | #define TTM_DEBUG(fmt, arg...) |
@@ -84,11 +77,8 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type) | |||
84 | man->available_caching); | 77 | man->available_caching); |
85 | printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", | 78 | printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", |
86 | man->default_caching); | 79 | man->default_caching); |
87 | if (mem_type != TTM_PL_SYSTEM) { | 80 | if (mem_type != TTM_PL_SYSTEM) |
88 | spin_lock(&bdev->glob->lru_lock); | 81 | (*man->func->debug)(man, TTM_PFX); |
89 | drm_mm_debug_table(&man->manager, TTM_PFX); | ||
90 | spin_unlock(&bdev->glob->lru_lock); | ||
91 | } | ||
92 | } | 82 | } |
93 | 83 | ||
94 | static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, | 84 | static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, |
@@ -169,22 +159,17 @@ static void ttm_bo_release_list(struct kref *list_kref) | |||
169 | 159 | ||
170 | int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) | 160 | int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) |
171 | { | 161 | { |
172 | |||
173 | if (interruptible) { | 162 | if (interruptible) { |
174 | int ret = 0; | 163 | return wait_event_interruptible(bo->event_queue, |
175 | |||
176 | ret = wait_event_interruptible(bo->event_queue, | ||
177 | atomic_read(&bo->reserved) == 0); | 164 | atomic_read(&bo->reserved) == 0); |
178 | if (unlikely(ret != 0)) | ||
179 | return ret; | ||
180 | } else { | 165 | } else { |
181 | wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); | 166 | wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); |
167 | return 0; | ||
182 | } | 168 | } |
183 | return 0; | ||
184 | } | 169 | } |
185 | EXPORT_SYMBOL(ttm_bo_wait_unreserved); | 170 | EXPORT_SYMBOL(ttm_bo_wait_unreserved); |
186 | 171 | ||
187 | static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) | 172 | void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) |
188 | { | 173 | { |
189 | struct ttm_bo_device *bdev = bo->bdev; | 174 | struct ttm_bo_device *bdev = bo->bdev; |
190 | struct ttm_mem_type_manager *man; | 175 | struct ttm_mem_type_manager *man; |
@@ -206,11 +191,7 @@ static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) | |||
206 | } | 191 | } |
207 | } | 192 | } |
208 | 193 | ||
209 | /** | 194 | int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) |
210 | * Call with the lru_lock held. | ||
211 | */ | ||
212 | |||
213 | static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo) | ||
214 | { | 195 | { |
215 | int put_count = 0; | 196 | int put_count = 0; |
216 | 197 | ||
@@ -239,9 +220,21 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
239 | int ret; | 220 | int ret; |
240 | 221 | ||
241 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { | 222 | while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { |
242 | if (use_sequence && bo->seq_valid && | 223 | /** |
243 | (sequence - bo->val_seq < (1 << 31))) { | 224 | * Deadlock avoidance for multi-bo reserving. |
244 | return -EAGAIN; | 225 | */ |
226 | if (use_sequence && bo->seq_valid) { | ||
227 | /** | ||
228 | * We've already reserved this one. | ||
229 | */ | ||
230 | if (unlikely(sequence == bo->val_seq)) | ||
231 | return -EDEADLK; | ||
232 | /** | ||
233 | * Already reserved by a thread that will not back | ||
234 | * off for us. We need to back off. | ||
235 | */ | ||
236 | if (unlikely(sequence - bo->val_seq < (1 << 31))) | ||
237 | return -EAGAIN; | ||
245 | } | 238 | } |
246 | 239 | ||
247 | if (no_wait) | 240 | if (no_wait) |
@@ -256,6 +249,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, | |||
256 | } | 249 | } |
257 | 250 | ||
258 | if (use_sequence) { | 251 | if (use_sequence) { |
252 | /** | ||
253 | * Wake up waiters that may need to recheck for deadlock, | ||
254 | * if we decreased the sequence number. | ||
255 | */ | ||
256 | if (unlikely((bo->val_seq - sequence < (1 << 31)) | ||
257 | || !bo->seq_valid)) | ||
258 | wake_up_all(&bo->event_queue); | ||
259 | |||
259 | bo->val_seq = sequence; | 260 | bo->val_seq = sequence; |
260 | bo->seq_valid = true; | 261 | bo->seq_valid = true; |
261 | } else { | 262 | } else { |
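The sequence checks added above give TTM wound-wait style deadlock avoidance when several threads each reserve several buffers. A minimal caller-side sketch of the intended protocol follows; reserve_bo_array() and the buffer array are hypothetical, for illustration only, and are not part of this patch:

/*
 * Illustration only: reserve a whole array of buffer objects under a
 * single sequence number. -EAGAIN means a thread holding an older
 * sequence owns one of the buffers and will not back off, so we release
 * everything we already hold, wait for that buffer and retry.
 */
static int reserve_bo_array(struct ttm_buffer_object **bos, int n,
			    uint32_t seq)
{
	struct ttm_buffer_object *busy;
	int i, ret;

retry:
	for (i = 0; i < n; i++) {
		ret = ttm_bo_reserve(bos[i], true, false, true, seq);
		if (likely(ret == 0))
			continue;

		busy = bos[i];
		while (i--)
			ttm_bo_unreserve(bos[i]);

		if (ret != -EAGAIN)
			return ret;	/* -EDEADLK, -ERESTARTSYS, ... */

		ret = ttm_bo_wait_unreserved(busy, true);
		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}
	return 0;
}

Reserving the same object twice under one sequence returns -EDEADLK immediately ("we've already reserved this one"), so a caller must never list the same buffer twice.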
@@ -271,6 +272,13 @@ static void ttm_bo_ref_bug(struct kref *list_kref) | |||
271 | BUG(); | 272 | BUG(); |
272 | } | 273 | } |
273 | 274 | ||
275 | void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count, | ||
276 | bool never_free) | ||
277 | { | ||
278 | kref_sub(&bo->list_kref, count, | ||
279 | (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list); | ||
280 | } | ||
281 | |||
274 | int ttm_bo_reserve(struct ttm_buffer_object *bo, | 282 | int ttm_bo_reserve(struct ttm_buffer_object *bo, |
275 | bool interruptible, | 283 | bool interruptible, |
276 | bool no_wait, bool use_sequence, uint32_t sequence) | 284 | bool no_wait, bool use_sequence, uint32_t sequence) |
@@ -286,20 +294,24 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo, | |||
286 | put_count = ttm_bo_del_from_lru(bo); | 294 | put_count = ttm_bo_del_from_lru(bo); |
287 | spin_unlock(&glob->lru_lock); | 295 | spin_unlock(&glob->lru_lock); |
288 | 296 | ||
289 | while (put_count--) | 297 | ttm_bo_list_ref_sub(bo, put_count, true); |
290 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
291 | 298 | ||
292 | return ret; | 299 | return ret; |
293 | } | 300 | } |
294 | 301 | ||
302 | void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) | ||
303 | { | ||
304 | ttm_bo_add_to_lru(bo); | ||
305 | atomic_set(&bo->reserved, 0); | ||
306 | wake_up_all(&bo->event_queue); | ||
307 | } | ||
308 | |||
295 | void ttm_bo_unreserve(struct ttm_buffer_object *bo) | 309 | void ttm_bo_unreserve(struct ttm_buffer_object *bo) |
296 | { | 310 | { |
297 | struct ttm_bo_global *glob = bo->glob; | 311 | struct ttm_bo_global *glob = bo->glob; |
298 | 312 | ||
299 | spin_lock(&glob->lru_lock); | 313 | spin_lock(&glob->lru_lock); |
300 | ttm_bo_add_to_lru(bo); | 314 | ttm_bo_unreserve_locked(bo); |
301 | atomic_set(&bo->reserved, 0); | ||
302 | wake_up_all(&bo->event_queue); | ||
303 | spin_unlock(&glob->lru_lock); | 315 | spin_unlock(&glob->lru_lock); |
304 | } | 316 | } |
305 | EXPORT_SYMBOL(ttm_bo_unreserve); | 317 | EXPORT_SYMBOL(ttm_bo_unreserve); |
@@ -366,8 +378,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
366 | int ret = 0; | 378 | int ret = 0; |
367 | 379 | ||
368 | if (old_is_pci || new_is_pci || | 380 | if (old_is_pci || new_is_pci || |
369 | ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) | 381 | ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) { |
370 | ttm_bo_unmap_virtual(bo); | 382 | ret = ttm_mem_io_lock(old_man, true); |
383 | if (unlikely(ret != 0)) | ||
384 | goto out_err; | ||
385 | ttm_bo_unmap_virtual_locked(bo); | ||
386 | ttm_mem_io_unlock(old_man); | ||
387 | } | ||
371 | 388 | ||
372 | /* | 389 | /* |
373 | * Create and bind a ttm if required. | 390 | * Create and bind a ttm if required. |
@@ -389,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, | |||
389 | } | 406 | } |
390 | 407 | ||
391 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { | 408 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
409 | if (bdev->driver->move_notify) | ||
410 | bdev->driver->move_notify(bo, mem); | ||
392 | bo->mem = *mem; | 411 | bo->mem = *mem; |
393 | mem->mm_node = NULL; | 412 | mem->mm_node = NULL; |
394 | goto moved; | 413 | goto moved; |
395 | } | 414 | } |
396 | |||
397 | } | 415 | } |
398 | 416 | ||
399 | if (bdev->driver->move_notify) | 417 | if (bdev->driver->move_notify) |
@@ -420,11 +438,9 @@ moved: | |||
420 | } | 438 | } |
421 | 439 | ||
422 | if (bo->mem.mm_node) { | 440 | if (bo->mem.mm_node) { |
423 | spin_lock(&bo->lock); | 441 | bo->offset = (bo->mem.start << PAGE_SHIFT) + |
424 | bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) + | ||
425 | bdev->man[bo->mem.mem_type].gpu_offset; | 442 | bdev->man[bo->mem.mem_type].gpu_offset; |
426 | bo->cur_placement = bo->mem.placement; | 443 | bo->cur_placement = bo->mem.placement; |
427 | spin_unlock(&bo->lock); | ||
428 | } else | 444 | } else |
429 | bo->offset = 0; | 445 | bo->offset = 0; |
430 | 446 | ||
@@ -442,135 +458,150 @@ out_err: | |||
442 | } | 458 | } |
443 | 459 | ||
444 | /** | 460 | /** |
445 | * Call bo::reserved and with the lru lock held. | 461 | * Call bo::reserved. |
446 | * Will release GPU memory type usage on destruction. | 462 | * Will release GPU memory type usage on destruction. |
447 | * This is the place to put in driver specific hooks. | 463 | * This is the place to put in driver specific hooks to release |
448 | * Will release the bo::reserved lock and the | 464 | * driver private resources. |
449 | * lru lock on exit. | 465 | * Will release the bo::reserved lock. |
450 | */ | 466 | */ |
451 | 467 | ||
452 | static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) | 468 | static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) |
453 | { | 469 | { |
454 | struct ttm_bo_global *glob = bo->glob; | ||
455 | |||
456 | if (bo->ttm) { | 470 | if (bo->ttm) { |
457 | |||
458 | /** | ||
459 | * Release the lru_lock, since we don't want to have | ||
460 | * an atomic requirement on ttm_tt[unbind|destroy]. | ||
461 | */ | ||
462 | |||
463 | spin_unlock(&glob->lru_lock); | ||
464 | ttm_tt_unbind(bo->ttm); | 471 | ttm_tt_unbind(bo->ttm); |
465 | ttm_tt_destroy(bo->ttm); | 472 | ttm_tt_destroy(bo->ttm); |
466 | bo->ttm = NULL; | 473 | bo->ttm = NULL; |
467 | spin_lock(&glob->lru_lock); | ||
468 | } | ||
469 | |||
470 | if (bo->mem.mm_node) { | ||
471 | drm_mm_put_block(bo->mem.mm_node); | ||
472 | bo->mem.mm_node = NULL; | ||
473 | } | 474 | } |
475 | ttm_bo_mem_put(bo, &bo->mem); | ||
474 | 476 | ||
475 | atomic_set(&bo->reserved, 0); | 477 | atomic_set(&bo->reserved, 0); |
478 | |||
479 | /* | ||
480 | * Make processes trying to reserve really pick it up. | ||
481 | */ | ||
482 | smp_mb__after_atomic_dec(); | ||
476 | wake_up_all(&bo->event_queue); | 483 | wake_up_all(&bo->event_queue); |
477 | spin_unlock(&glob->lru_lock); | ||
478 | } | 484 | } |
479 | 485 | ||
480 | 486 | static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) | |
481 | /** | ||
482 | * If bo idle, remove from delayed- and lru lists, and unref. | ||
483 | * If not idle, and already on delayed list, do nothing. | ||
484 | * If not idle, and not on delayed list, put on delayed list, | ||
485 | * up the list_kref and schedule a delayed list check. | ||
486 | */ | ||
487 | |||
488 | static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) | ||
489 | { | 487 | { |
490 | struct ttm_bo_device *bdev = bo->bdev; | 488 | struct ttm_bo_device *bdev = bo->bdev; |
491 | struct ttm_bo_global *glob = bo->glob; | 489 | struct ttm_bo_global *glob = bo->glob; |
492 | struct ttm_bo_driver *driver = bdev->driver; | 490 | struct ttm_bo_driver *driver; |
491 | void *sync_obj = NULL; | ||
492 | void *sync_obj_arg; | ||
493 | int put_count; | ||
493 | int ret; | 494 | int ret; |
494 | 495 | ||
495 | spin_lock(&bo->lock); | 496 | spin_lock(&bdev->fence_lock); |
496 | retry: | 497 | (void) ttm_bo_wait(bo, false, false, true); |
497 | (void) ttm_bo_wait(bo, false, false, !remove_all); | ||
498 | |||
499 | if (!bo->sync_obj) { | 498 | if (!bo->sync_obj) { |
500 | int put_count; | ||
501 | |||
502 | spin_unlock(&bo->lock); | ||
503 | 499 | ||
504 | spin_lock(&glob->lru_lock); | 500 | spin_lock(&glob->lru_lock); |
505 | ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0); | ||
506 | 501 | ||
507 | /** | 502 | /** |
508 | * Someone else has the object reserved. Bail and retry. | 503 | * Lock inversion between bo:reserve and bdev::fence_lock here, |
504 | * but that's OK, since we're only trylocking. | ||
509 | */ | 505 | */ |
510 | 506 | ||
511 | if (unlikely(ret == -EBUSY)) { | 507 | ret = ttm_bo_reserve_locked(bo, false, true, false, 0); |
512 | spin_unlock(&glob->lru_lock); | ||
513 | spin_lock(&bo->lock); | ||
514 | goto requeue; | ||
515 | } | ||
516 | |||
517 | /** | ||
518 | * We can re-check for sync object without taking | ||
519 | * the bo::lock since setting the sync object requires | ||
520 | * also bo::reserved. A busy object at this point may | ||
521 | * be caused by another thread starting an accelerated | ||
522 | * eviction. | ||
523 | */ | ||
524 | 508 | ||
525 | if (unlikely(bo->sync_obj)) { | 509 | if (unlikely(ret == -EBUSY)) |
526 | atomic_set(&bo->reserved, 0); | 510 | goto queue; |
527 | wake_up_all(&bo->event_queue); | ||
528 | spin_unlock(&glob->lru_lock); | ||
529 | spin_lock(&bo->lock); | ||
530 | if (remove_all) | ||
531 | goto retry; | ||
532 | else | ||
533 | goto requeue; | ||
534 | } | ||
535 | 511 | ||
512 | spin_unlock(&bdev->fence_lock); | ||
536 | put_count = ttm_bo_del_from_lru(bo); | 513 | put_count = ttm_bo_del_from_lru(bo); |
537 | 514 | ||
538 | if (!list_empty(&bo->ddestroy)) { | 515 | spin_unlock(&glob->lru_lock); |
539 | list_del_init(&bo->ddestroy); | ||
540 | ++put_count; | ||
541 | } | ||
542 | |||
543 | ttm_bo_cleanup_memtype_use(bo); | 516 | ttm_bo_cleanup_memtype_use(bo); |
544 | 517 | ||
545 | while (put_count--) | 518 | ttm_bo_list_ref_sub(bo, put_count, true); |
546 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
547 | 519 | ||
548 | return 0; | 520 | return; |
521 | } else { | ||
522 | spin_lock(&glob->lru_lock); | ||
523 | } | ||
524 | queue: | ||
525 | driver = bdev->driver; | ||
526 | if (bo->sync_obj) | ||
527 | sync_obj = driver->sync_obj_ref(bo->sync_obj); | ||
528 | sync_obj_arg = bo->sync_obj_arg; | ||
529 | |||
530 | kref_get(&bo->list_kref); | ||
531 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); | ||
532 | spin_unlock(&glob->lru_lock); | ||
533 | spin_unlock(&bdev->fence_lock); | ||
534 | |||
535 | if (sync_obj) { | ||
536 | driver->sync_obj_flush(sync_obj, sync_obj_arg); | ||
537 | driver->sync_obj_unref(&sync_obj); | ||
549 | } | 538 | } |
550 | requeue: | 539 | schedule_delayed_work(&bdev->wq, |
540 | ((HZ / 100) < 1) ? 1 : HZ / 100); | ||
541 | } | ||
542 | |||
543 | /** | ||
544 | * function ttm_bo_cleanup_refs | ||
545 | * If bo idle, remove from delayed- and lru lists, and unref. | ||
546 | * If not idle, do nothing. | ||
547 | * | ||
548 | * @interruptible Any sleeps should occur interruptibly. | ||
549 | * @no_wait_reserve Never wait for reserve. Return -EBUSY instead. | ||
550 | * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. | ||
551 | */ | ||
552 | |||
553 | static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, | ||
554 | bool interruptible, | ||
555 | bool no_wait_reserve, | ||
556 | bool no_wait_gpu) | ||
557 | { | ||
558 | struct ttm_bo_device *bdev = bo->bdev; | ||
559 | struct ttm_bo_global *glob = bo->glob; | ||
560 | int put_count; | ||
561 | int ret = 0; | ||
562 | |||
563 | retry: | ||
564 | spin_lock(&bdev->fence_lock); | ||
565 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); | ||
566 | spin_unlock(&bdev->fence_lock); | ||
567 | |||
568 | if (unlikely(ret != 0)) | ||
569 | return ret; | ||
570 | |||
551 | spin_lock(&glob->lru_lock); | 571 | spin_lock(&glob->lru_lock); |
552 | if (list_empty(&bo->ddestroy)) { | 572 | ret = ttm_bo_reserve_locked(bo, interruptible, |
553 | void *sync_obj = bo->sync_obj; | 573 | no_wait_reserve, false, 0); |
554 | void *sync_obj_arg = bo->sync_obj_arg; | ||
555 | 574 | ||
556 | kref_get(&bo->list_kref); | 575 | if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { |
557 | list_add_tail(&bo->ddestroy, &bdev->ddestroy); | ||
558 | spin_unlock(&glob->lru_lock); | 576 | spin_unlock(&glob->lru_lock); |
559 | spin_unlock(&bo->lock); | 577 | return ret; |
578 | } | ||
560 | 579 | ||
561 | if (sync_obj) | 580 | /** |
562 | driver->sync_obj_flush(sync_obj, sync_obj_arg); | 581 | * We can re-check for sync object without taking |
563 | schedule_delayed_work(&bdev->wq, | 582 | * the bo::lock since setting the sync object requires |
564 | ((HZ / 100) < 1) ? 1 : HZ / 100); | 583 | * also bo::reserved. A busy object at this point may |
565 | ret = 0; | 584 | * be caused by another thread recently starting an accelerated |
585 | * eviction. | ||
586 | */ | ||
566 | 587 | ||
567 | } else { | 588 | if (unlikely(bo->sync_obj)) { |
589 | atomic_set(&bo->reserved, 0); | ||
590 | wake_up_all(&bo->event_queue); | ||
568 | spin_unlock(&glob->lru_lock); | 591 | spin_unlock(&glob->lru_lock); |
569 | spin_unlock(&bo->lock); | 592 | goto retry; |
570 | ret = -EBUSY; | ||
571 | } | 593 | } |
572 | 594 | ||
573 | return ret; | 595 | put_count = ttm_bo_del_from_lru(bo); |
596 | list_del_init(&bo->ddestroy); | ||
597 | ++put_count; | ||
598 | |||
599 | spin_unlock(&glob->lru_lock); | ||
600 | ttm_bo_cleanup_memtype_use(bo); | ||
601 | |||
602 | ttm_bo_list_ref_sub(bo, put_count, true); | ||
603 | |||
604 | return 0; | ||
574 | } | 605 | } |
575 | 606 | ||
576 | /** | 607 | /** |
@@ -602,7 +633,8 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) | |||
602 | } | 633 | } |
603 | 634 | ||
604 | spin_unlock(&glob->lru_lock); | 635 | spin_unlock(&glob->lru_lock); |
605 | ret = ttm_bo_cleanup_refs(entry, remove_all); | 636 | ret = ttm_bo_cleanup_refs(entry, false, !remove_all, |
637 | !remove_all); | ||
606 | kref_put(&entry->list_kref, ttm_bo_release_list); | 638 | kref_put(&entry->list_kref, ttm_bo_release_list); |
607 | entry = nentry; | 639 | entry = nentry; |
608 | 640 | ||
@@ -638,6 +670,7 @@ static void ttm_bo_release(struct kref *kref) | |||
638 | struct ttm_buffer_object *bo = | 670 | struct ttm_buffer_object *bo = |
639 | container_of(kref, struct ttm_buffer_object, kref); | 671 | container_of(kref, struct ttm_buffer_object, kref); |
640 | struct ttm_bo_device *bdev = bo->bdev; | 672 | struct ttm_bo_device *bdev = bo->bdev; |
673 | struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; | ||
641 | 674 | ||
642 | if (likely(bo->vm_node != NULL)) { | 675 | if (likely(bo->vm_node != NULL)) { |
643 | rb_erase(&bo->vm_rb, &bdev->addr_space_rb); | 676 | rb_erase(&bo->vm_rb, &bdev->addr_space_rb); |
@@ -645,7 +678,10 @@ static void ttm_bo_release(struct kref *kref) | |||
645 | bo->vm_node = NULL; | 678 | bo->vm_node = NULL; |
646 | } | 679 | } |
647 | write_unlock(&bdev->vm_lock); | 680 | write_unlock(&bdev->vm_lock); |
648 | ttm_bo_cleanup_refs(bo, false); | 681 | ttm_mem_io_lock(man, false); |
682 | ttm_mem_io_free_vm(bo); | ||
683 | ttm_mem_io_unlock(man); | ||
684 | ttm_bo_cleanup_refs_or_queue(bo); | ||
649 | kref_put(&bo->list_kref, ttm_bo_release_list); | 685 | kref_put(&bo->list_kref, ttm_bo_release_list); |
650 | write_lock(&bdev->vm_lock); | 686 | write_lock(&bdev->vm_lock); |
651 | } | 687 | } |
@@ -680,14 +716,13 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
680 | bool no_wait_reserve, bool no_wait_gpu) | 716 | bool no_wait_reserve, bool no_wait_gpu) |
681 | { | 717 | { |
682 | struct ttm_bo_device *bdev = bo->bdev; | 718 | struct ttm_bo_device *bdev = bo->bdev; |
683 | struct ttm_bo_global *glob = bo->glob; | ||
684 | struct ttm_mem_reg evict_mem; | 719 | struct ttm_mem_reg evict_mem; |
685 | struct ttm_placement placement; | 720 | struct ttm_placement placement; |
686 | int ret = 0; | 721 | int ret = 0; |
687 | 722 | ||
688 | spin_lock(&bo->lock); | 723 | spin_lock(&bdev->fence_lock); |
689 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); | 724 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
690 | spin_unlock(&bo->lock); | 725 | spin_unlock(&bdev->fence_lock); |
691 | 726 | ||
692 | if (unlikely(ret != 0)) { | 727 | if (unlikely(ret != 0)) { |
693 | if (ret != -ERESTARTSYS) { | 728 | if (ret != -ERESTARTSYS) { |
@@ -702,7 +737,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
702 | 737 | ||
703 | evict_mem = bo->mem; | 738 | evict_mem = bo->mem; |
704 | evict_mem.mm_node = NULL; | 739 | evict_mem.mm_node = NULL; |
705 | evict_mem.bus.io_reserved = false; | 740 | evict_mem.bus.io_reserved_vm = false; |
741 | evict_mem.bus.io_reserved_count = 0; | ||
706 | 742 | ||
707 | placement.fpfn = 0; | 743 | placement.fpfn = 0; |
708 | placement.lpfn = 0; | 744 | placement.lpfn = 0; |
@@ -726,12 +762,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, | |||
726 | if (ret) { | 762 | if (ret) { |
727 | if (ret != -ERESTARTSYS) | 763 | if (ret != -ERESTARTSYS) |
728 | printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); | 764 | printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); |
729 | spin_lock(&glob->lru_lock); | 765 | ttm_bo_mem_put(bo, &evict_mem); |
730 | if (evict_mem.mm_node) { | ||
731 | drm_mm_put_block(evict_mem.mm_node); | ||
732 | evict_mem.mm_node = NULL; | ||
733 | } | ||
734 | spin_unlock(&glob->lru_lock); | ||
735 | goto out; | 766 | goto out; |
736 | } | 767 | } |
737 | bo->evicted = true; | 768 | bo->evicted = true; |
@@ -759,6 +790,18 @@ retry: | |||
759 | bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); | 790 | bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); |
760 | kref_get(&bo->list_kref); | 791 | kref_get(&bo->list_kref); |
761 | 792 | ||
793 | if (!list_empty(&bo->ddestroy)) { | ||
794 | spin_unlock(&glob->lru_lock); | ||
795 | ret = ttm_bo_cleanup_refs(bo, interruptible, | ||
796 | no_wait_reserve, no_wait_gpu); | ||
797 | kref_put(&bo->list_kref, ttm_bo_release_list); | ||
798 | |||
799 | if (likely(ret == 0 || ret == -ERESTARTSYS)) | ||
800 | return ret; | ||
801 | |||
802 | goto retry; | ||
803 | } | ||
804 | |||
762 | ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); | 805 | ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0); |
763 | 806 | ||
764 | if (unlikely(ret == -EBUSY)) { | 807 | if (unlikely(ret == -EBUSY)) { |
@@ -782,8 +825,7 @@ retry: | |||
782 | 825 | ||
783 | BUG_ON(ret != 0); | 826 | BUG_ON(ret != 0); |
784 | 827 | ||
785 | while (put_count--) | 828 | ttm_bo_list_ref_sub(bo, put_count, true); |
786 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
787 | 829 | ||
788 | ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); | 830 | ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); |
789 | ttm_bo_unreserve(bo); | 831 | ttm_bo_unreserve(bo); |
@@ -792,41 +834,14 @@ retry: | |||
792 | return ret; | 834 | return ret; |
793 | } | 835 | } |
794 | 836 | ||
795 | static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, | 837 | void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) |
796 | struct ttm_mem_type_manager *man, | ||
797 | struct ttm_placement *placement, | ||
798 | struct ttm_mem_reg *mem, | ||
799 | struct drm_mm_node **node) | ||
800 | { | 838 | { |
801 | struct ttm_bo_global *glob = bo->glob; | 839 | struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type]; |
802 | unsigned long lpfn; | ||
803 | int ret; | ||
804 | |||
805 | lpfn = placement->lpfn; | ||
806 | if (!lpfn) | ||
807 | lpfn = man->size; | ||
808 | *node = NULL; | ||
809 | do { | ||
810 | ret = drm_mm_pre_get(&man->manager); | ||
811 | if (unlikely(ret)) | ||
812 | return ret; | ||
813 | 840 | ||
814 | spin_lock(&glob->lru_lock); | 841 | if (mem->mm_node) |
815 | *node = drm_mm_search_free_in_range(&man->manager, | 842 | (*man->func->put_node)(man, mem); |
816 | mem->num_pages, mem->page_alignment, | ||
817 | placement->fpfn, lpfn, 1); | ||
818 | if (unlikely(*node == NULL)) { | ||
819 | spin_unlock(&glob->lru_lock); | ||
820 | return 0; | ||
821 | } | ||
822 | *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, | ||
823 | mem->page_alignment, | ||
824 | placement->fpfn, | ||
825 | lpfn); | ||
826 | spin_unlock(&glob->lru_lock); | ||
827 | } while (*node == NULL); | ||
828 | return 0; | ||
829 | } | 843 | } |
844 | EXPORT_SYMBOL(ttm_bo_mem_put); | ||
830 | 845 | ||
831 | /** | 846 | /** |
832 | * Repeatedly evict memory from the LRU for @mem_type until we create enough | 847 | * Repeatedly evict memory from the LRU for @mem_type until we create enough |
@@ -841,31 +856,22 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, | |||
841 | bool no_wait_gpu) | 856 | bool no_wait_gpu) |
842 | { | 857 | { |
843 | struct ttm_bo_device *bdev = bo->bdev; | 858 | struct ttm_bo_device *bdev = bo->bdev; |
844 | struct ttm_bo_global *glob = bdev->glob; | ||
845 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; | 859 | struct ttm_mem_type_manager *man = &bdev->man[mem_type]; |
846 | struct drm_mm_node *node; | ||
847 | int ret; | 860 | int ret; |
848 | 861 | ||
849 | do { | 862 | do { |
850 | ret = ttm_bo_man_get_node(bo, man, placement, mem, &node); | 863 | ret = (*man->func->get_node)(man, bo, placement, mem); |
851 | if (unlikely(ret != 0)) | 864 | if (unlikely(ret != 0)) |
852 | return ret; | 865 | return ret; |
853 | if (node) | 866 | if (mem->mm_node) |
854 | break; | ||
855 | spin_lock(&glob->lru_lock); | ||
856 | if (list_empty(&man->lru)) { | ||
857 | spin_unlock(&glob->lru_lock); | ||
858 | break; | 867 | break; |
859 | } | ||
860 | spin_unlock(&glob->lru_lock); | ||
861 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, | 868 | ret = ttm_mem_evict_first(bdev, mem_type, interruptible, |
862 | no_wait_reserve, no_wait_gpu); | 869 | no_wait_reserve, no_wait_gpu); |
863 | if (unlikely(ret != 0)) | 870 | if (unlikely(ret != 0)) |
864 | return ret; | 871 | return ret; |
865 | } while (1); | 872 | } while (1); |
866 | if (node == NULL) | 873 | if (mem->mm_node == NULL) |
867 | return -ENOMEM; | 874 | return -ENOMEM; |
868 | mem->mm_node = node; | ||
869 | mem->mem_type = mem_type; | 875 | mem->mem_type = mem_type; |
870 | return 0; | 876 | return 0; |
871 | } | 877 | } |
@@ -939,7 +945,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
939 | bool type_found = false; | 945 | bool type_found = false; |
940 | bool type_ok = false; | 946 | bool type_ok = false; |
941 | bool has_erestartsys = false; | 947 | bool has_erestartsys = false; |
942 | struct drm_mm_node *node = NULL; | ||
943 | int i, ret; | 948 | int i, ret; |
944 | 949 | ||
945 | mem->mm_node = NULL; | 950 | mem->mm_node = NULL; |
@@ -973,17 +978,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, | |||
973 | 978 | ||
974 | if (man->has_type && man->use_type) { | 979 | if (man->has_type && man->use_type) { |
975 | type_found = true; | 980 | type_found = true; |
976 | ret = ttm_bo_man_get_node(bo, man, placement, mem, | 981 | ret = (*man->func->get_node)(man, bo, placement, mem); |
977 | &node); | ||
978 | if (unlikely(ret)) | 982 | if (unlikely(ret)) |
979 | return ret; | 983 | return ret; |
980 | } | 984 | } |
981 | if (node) | 985 | if (mem->mm_node) |
982 | break; | 986 | break; |
983 | } | 987 | } |
984 | 988 | ||
985 | if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) { | 989 | if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { |
986 | mem->mm_node = node; | ||
987 | mem->mem_type = mem_type; | 990 | mem->mem_type = mem_type; |
988 | mem->placement = cur_flags; | 991 | mem->placement = cur_flags; |
989 | return 0; | 992 | return 0; |
@@ -1053,9 +1056,9 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
1053 | bool interruptible, bool no_wait_reserve, | 1056 | bool interruptible, bool no_wait_reserve, |
1054 | bool no_wait_gpu) | 1057 | bool no_wait_gpu) |
1055 | { | 1058 | { |
1056 | struct ttm_bo_global *glob = bo->glob; | ||
1057 | int ret = 0; | 1059 | int ret = 0; |
1058 | struct ttm_mem_reg mem; | 1060 | struct ttm_mem_reg mem; |
1061 | struct ttm_bo_device *bdev = bo->bdev; | ||
1059 | 1062 | ||
1060 | BUG_ON(!atomic_read(&bo->reserved)); | 1063 | BUG_ON(!atomic_read(&bo->reserved)); |
1061 | 1064 | ||
@@ -1064,15 +1067,16 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
1064 | * Have the driver move function wait for idle when necessary, | 1067 | * Have the driver move function wait for idle when necessary, |
1065 | * instead of doing it here. | 1068 | * instead of doing it here. |
1066 | */ | 1069 | */ |
1067 | spin_lock(&bo->lock); | 1070 | spin_lock(&bdev->fence_lock); |
1068 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); | 1071 | ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); |
1069 | spin_unlock(&bo->lock); | 1072 | spin_unlock(&bdev->fence_lock); |
1070 | if (ret) | 1073 | if (ret) |
1071 | return ret; | 1074 | return ret; |
1072 | mem.num_pages = bo->num_pages; | 1075 | mem.num_pages = bo->num_pages; |
1073 | mem.size = mem.num_pages << PAGE_SHIFT; | 1076 | mem.size = mem.num_pages << PAGE_SHIFT; |
1074 | mem.page_alignment = bo->mem.page_alignment; | 1077 | mem.page_alignment = bo->mem.page_alignment; |
1075 | mem.bus.io_reserved = false; | 1078 | mem.bus.io_reserved_vm = false; |
1079 | mem.bus.io_reserved_count = 0; | ||
1076 | /* | 1080 | /* |
1077 | * Determine where to move the buffer. | 1081 | * Determine where to move the buffer. |
1078 | */ | 1082 | */ |
@@ -1081,11 +1085,8 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, | |||
1081 | goto out_unlock; | 1085 | goto out_unlock; |
1082 | ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); | 1086 | ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); |
1083 | out_unlock: | 1087 | out_unlock: |
1084 | if (ret && mem.mm_node) { | 1088 | if (ret && mem.mm_node) |
1085 | spin_lock(&glob->lru_lock); | 1089 | ttm_bo_mem_put(bo, &mem); |
1086 | drm_mm_put_block(mem.mm_node); | ||
1087 | spin_unlock(&glob->lru_lock); | ||
1088 | } | ||
1089 | return ret; | 1090 | return ret; |
1090 | } | 1091 | } |
1091 | 1092 | ||
@@ -1093,11 +1094,10 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement, | |||
1093 | struct ttm_mem_reg *mem) | 1094 | struct ttm_mem_reg *mem) |
1094 | { | 1095 | { |
1095 | int i; | 1096 | int i; |
1096 | struct drm_mm_node *node = mem->mm_node; | ||
1097 | 1097 | ||
1098 | if (node && placement->lpfn != 0 && | 1098 | if (mem->mm_node && placement->lpfn != 0 && |
1099 | (node->start < placement->fpfn || | 1099 | (mem->start < placement->fpfn || |
1100 | node->start + node->size > placement->lpfn)) | 1100 | mem->start + mem->num_pages > placement->lpfn)) |
1101 | return -1; | 1101 | return -1; |
1102 | 1102 | ||
1103 | for (i = 0; i < placement->num_placement; i++) { | 1103 | for (i = 0; i < placement->num_placement; i++) { |
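With mm_node now opaque, range constraints are checked against mem->start and mem->num_pages, while callers keep expressing them through fpfn/lpfn in struct ttm_placement. A caller-side sketch, with arbitrary example values (the VRAM/cached flags and the 256 MiB limit are placeholders, not taken from this patch):

/*
 * Illustration only: ask TTM to place a buffer in the first 256 MiB of
 * VRAM by encoding the range as fpfn/lpfn.
 */
static int move_to_low_vram(struct ttm_buffer_object *bo)
{
	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
	struct ttm_placement placement = {
		.fpfn = 0,
		.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT,
		.num_placement = 1,
		.placement = &flags,
		.num_busy_placement = 1,
		.busy_placement = &flags,
	};
	int ret;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_bo_validate(bo, &placement, true, false, false);
	ttm_bo_unreserve(bo);
	return ret;
}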
@@ -1154,35 +1154,9 @@ EXPORT_SYMBOL(ttm_bo_validate); | |||
1154 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, | 1154 | int ttm_bo_check_placement(struct ttm_buffer_object *bo, |
1155 | struct ttm_placement *placement) | 1155 | struct ttm_placement *placement) |
1156 | { | 1156 | { |
1157 | int i; | 1157 | BUG_ON((placement->fpfn || placement->lpfn) && |
1158 | (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); | ||
1158 | 1159 | ||
1159 | if (placement->fpfn || placement->lpfn) { | ||
1160 | if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) { | ||
1161 | printk(KERN_ERR TTM_PFX "Page number range to small " | ||
1162 | "Need %lu pages, range is [%u, %u]\n", | ||
1163 | bo->mem.num_pages, placement->fpfn, | ||
1164 | placement->lpfn); | ||
1165 | return -EINVAL; | ||
1166 | } | ||
1167 | } | ||
1168 | for (i = 0; i < placement->num_placement; i++) { | ||
1169 | if (!capable(CAP_SYS_ADMIN)) { | ||
1170 | if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1171 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1172 | "modify NO_EVICT status.\n"); | ||
1173 | return -EINVAL; | ||
1174 | } | ||
1175 | } | ||
1176 | } | ||
1177 | for (i = 0; i < placement->num_busy_placement; i++) { | ||
1178 | if (!capable(CAP_SYS_ADMIN)) { | ||
1179 | if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) { | ||
1180 | printk(KERN_ERR TTM_PFX "Need to be root to " | ||
1181 | "modify NO_EVICT status.\n"); | ||
1182 | return -EINVAL; | ||
1183 | } | ||
1184 | } | ||
1185 | } | ||
1186 | return 0; | 1160 | return 0; |
1187 | } | 1161 | } |
1188 | 1162 | ||
@@ -1194,7 +1168,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1194 | uint32_t page_alignment, | 1168 | uint32_t page_alignment, |
1195 | unsigned long buffer_start, | 1169 | unsigned long buffer_start, |
1196 | bool interruptible, | 1170 | bool interruptible, |
1197 | struct file *persistant_swap_storage, | 1171 | struct file *persistent_swap_storage, |
1198 | size_t acc_size, | 1172 | size_t acc_size, |
1199 | void (*destroy) (struct ttm_buffer_object *)) | 1173 | void (*destroy) (struct ttm_buffer_object *)) |
1200 | { | 1174 | { |
@@ -1205,11 +1179,14 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1205 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | 1179 | num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1206 | if (num_pages == 0) { | 1180 | if (num_pages == 0) { |
1207 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); | 1181 | printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); |
1182 | if (destroy) | ||
1183 | (*destroy)(bo); | ||
1184 | else | ||
1185 | kfree(bo); | ||
1208 | return -EINVAL; | 1186 | return -EINVAL; |
1209 | } | 1187 | } |
1210 | bo->destroy = destroy; | 1188 | bo->destroy = destroy; |
1211 | 1189 | ||
1212 | spin_lock_init(&bo->lock); | ||
1213 | kref_init(&bo->kref); | 1190 | kref_init(&bo->kref); |
1214 | kref_init(&bo->list_kref); | 1191 | kref_init(&bo->list_kref); |
1215 | atomic_set(&bo->cpu_writers, 0); | 1192 | atomic_set(&bo->cpu_writers, 0); |
@@ -1218,6 +1195,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1218 | INIT_LIST_HEAD(&bo->lru); | 1195 | INIT_LIST_HEAD(&bo->lru); |
1219 | INIT_LIST_HEAD(&bo->ddestroy); | 1196 | INIT_LIST_HEAD(&bo->ddestroy); |
1220 | INIT_LIST_HEAD(&bo->swap); | 1197 | INIT_LIST_HEAD(&bo->swap); |
1198 | INIT_LIST_HEAD(&bo->io_reserve_lru); | ||
1221 | bo->bdev = bdev; | 1199 | bo->bdev = bdev; |
1222 | bo->glob = bdev->glob; | 1200 | bo->glob = bdev->glob; |
1223 | bo->type = type; | 1201 | bo->type = type; |
@@ -1227,12 +1205,13 @@ int ttm_bo_init(struct ttm_bo_device *bdev, | |||
1227 | bo->mem.num_pages = bo->num_pages; | 1205 | bo->mem.num_pages = bo->num_pages; |
1228 | bo->mem.mm_node = NULL; | 1206 | bo->mem.mm_node = NULL; |
1229 | bo->mem.page_alignment = page_alignment; | 1207 | bo->mem.page_alignment = page_alignment; |
1230 | bo->mem.bus.io_reserved = false; | 1208 | bo->mem.bus.io_reserved_vm = false; |
1209 | bo->mem.bus.io_reserved_count = 0; | ||
1231 | bo->buffer_start = buffer_start & PAGE_MASK; | 1210 | bo->buffer_start = buffer_start & PAGE_MASK; |
1232 | bo->priv_flags = 0; | 1211 | bo->priv_flags = 0; |
1233 | bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); | 1212 | bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); |
1234 | bo->seq_valid = false; | 1213 | bo->seq_valid = false; |
1235 | bo->persistant_swap_storage = persistant_swap_storage; | 1214 | bo->persistent_swap_storage = persistent_swap_storage; |
1236 | bo->acc_size = acc_size; | 1215 | bo->acc_size = acc_size; |
1237 | atomic_inc(&bo->glob->bo_count); | 1216 | atomic_inc(&bo->glob->bo_count); |
1238 | 1217 | ||
@@ -1281,7 +1260,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev, | |||
1281 | uint32_t page_alignment, | 1260 | uint32_t page_alignment, |
1282 | unsigned long buffer_start, | 1261 | unsigned long buffer_start, |
1283 | bool interruptible, | 1262 | bool interruptible, |
1284 | struct file *persistant_swap_storage, | 1263 | struct file *persistent_swap_storage, |
1285 | struct ttm_buffer_object **p_bo) | 1264 | struct ttm_buffer_object **p_bo) |
1286 | { | 1265 | { |
1287 | struct ttm_buffer_object *bo; | 1266 | struct ttm_buffer_object *bo; |
@@ -1303,7 +1282,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev, | |||
1303 | 1282 | ||
1304 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, | 1283 | ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, |
1305 | buffer_start, interruptible, | 1284 | buffer_start, interruptible, |
1306 | persistant_swap_storage, acc_size, NULL); | 1285 | persistent_swap_storage, acc_size, NULL); |
1307 | if (likely(ret == 0)) | 1286 | if (likely(ret == 0)) |
1308 | *p_bo = bo; | 1287 | *p_bo = bo; |
1309 | 1288 | ||
@@ -1341,7 +1320,6 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, | |||
1341 | 1320 | ||
1342 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | 1321 | int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) |
1343 | { | 1322 | { |
1344 | struct ttm_bo_global *glob = bdev->glob; | ||
1345 | struct ttm_mem_type_manager *man; | 1323 | struct ttm_mem_type_manager *man; |
1346 | int ret = -EINVAL; | 1324 | int ret = -EINVAL; |
1347 | 1325 | ||
@@ -1364,13 +1342,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) | |||
1364 | if (mem_type > 0) { | 1342 | if (mem_type > 0) { |
1365 | ttm_bo_force_list_clean(bdev, mem_type, false); | 1343 | ttm_bo_force_list_clean(bdev, mem_type, false); |
1366 | 1344 | ||
1367 | spin_lock(&glob->lru_lock); | 1345 | ret = (*man->func->takedown)(man); |
1368 | if (drm_mm_clean(&man->manager)) | ||
1369 | drm_mm_takedown(&man->manager); | ||
1370 | else | ||
1371 | ret = -EBUSY; | ||
1372 | |||
1373 | spin_unlock(&glob->lru_lock); | ||
1374 | } | 1346 | } |
1375 | 1347 | ||
1376 | return ret; | 1348 | return ret; |
@@ -1405,32 +1377,22 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, | |||
1405 | int ret = -EINVAL; | 1377 | int ret = -EINVAL; |
1406 | struct ttm_mem_type_manager *man; | 1378 | struct ttm_mem_type_manager *man; |
1407 | 1379 | ||
1408 | if (type >= TTM_NUM_MEM_TYPES) { | 1380 | BUG_ON(type >= TTM_NUM_MEM_TYPES); |
1409 | printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type); | ||
1410 | return ret; | ||
1411 | } | ||
1412 | |||
1413 | man = &bdev->man[type]; | 1381 | man = &bdev->man[type]; |
1414 | if (man->has_type) { | 1382 | BUG_ON(man->has_type); |
1415 | printk(KERN_ERR TTM_PFX | 1383 | man->io_reserve_fastpath = true; |
1416 | "Memory manager already initialized for type %d\n", | 1384 | man->use_io_reserve_lru = false; |
1417 | type); | 1385 | mutex_init(&man->io_reserve_mutex); |
1418 | return ret; | 1386 | INIT_LIST_HEAD(&man->io_reserve_lru); |
1419 | } | ||
1420 | 1387 | ||
1421 | ret = bdev->driver->init_mem_type(bdev, type, man); | 1388 | ret = bdev->driver->init_mem_type(bdev, type, man); |
1422 | if (ret) | 1389 | if (ret) |
1423 | return ret; | 1390 | return ret; |
1391 | man->bdev = bdev; | ||
1424 | 1392 | ||
1425 | ret = 0; | 1393 | ret = 0; |
1426 | if (type != TTM_PL_SYSTEM) { | 1394 | if (type != TTM_PL_SYSTEM) { |
1427 | if (!p_size) { | 1395 | ret = (*man->func->init)(man, p_size); |
1428 | printk(KERN_ERR TTM_PFX | ||
1429 | "Zero size memory manager type %d\n", | ||
1430 | type); | ||
1431 | return ret; | ||
1432 | } | ||
1433 | ret = drm_mm_init(&man->manager, 0, p_size); | ||
1434 | if (ret) | 1396 | if (ret) |
1435 | return ret; | 1397 | return ret; |
1436 | } | 1398 | } |
@@ -1539,8 +1501,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) | |||
1539 | list_del(&bdev->device_list); | 1501 | list_del(&bdev->device_list); |
1540 | mutex_unlock(&glob->device_list_mutex); | 1502 | mutex_unlock(&glob->device_list_mutex); |
1541 | 1503 | ||
1542 | if (!cancel_delayed_work(&bdev->wq)) | 1504 | cancel_delayed_work_sync(&bdev->wq); |
1543 | flush_scheduled_work(); | ||
1544 | 1505 | ||
1545 | while (ttm_bo_delayed_delete(bdev, true)) | 1506 | while (ttm_bo_delayed_delete(bdev, true)) |
1546 | ; | 1507 | ; |
@@ -1594,7 +1555,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1594 | bdev->dev_mapping = NULL; | 1555 | bdev->dev_mapping = NULL; |
1595 | bdev->glob = glob; | 1556 | bdev->glob = glob; |
1596 | bdev->need_dma32 = need_dma32; | 1557 | bdev->need_dma32 = need_dma32; |
1597 | 1558 | bdev->val_seq = 0; | |
1559 | spin_lock_init(&bdev->fence_lock); | ||
1598 | mutex_lock(&glob->device_list_mutex); | 1560 | mutex_lock(&glob->device_list_mutex); |
1599 | list_add_tail(&bdev->device_list, &glob->device_list); | 1561 | list_add_tail(&bdev->device_list, &glob->device_list); |
1600 | mutex_unlock(&glob->device_list_mutex); | 1562 | mutex_unlock(&glob->device_list_mutex); |
@@ -1628,7 +1590,7 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | |||
1628 | return true; | 1590 | return true; |
1629 | } | 1591 | } |
1630 | 1592 | ||
1631 | void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | 1593 | void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo) |
1632 | { | 1594 | { |
1633 | struct ttm_bo_device *bdev = bo->bdev; | 1595 | struct ttm_bo_device *bdev = bo->bdev; |
1634 | loff_t offset = (loff_t) bo->addr_space_offset; | 1596 | loff_t offset = (loff_t) bo->addr_space_offset; |
@@ -1637,8 +1599,20 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | |||
1637 | if (!bdev->dev_mapping) | 1599 | if (!bdev->dev_mapping) |
1638 | return; | 1600 | return; |
1639 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); | 1601 | unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1); |
1640 | ttm_mem_io_free(bdev, &bo->mem); | 1602 | ttm_mem_io_free_vm(bo); |
1603 | } | ||
1604 | |||
1605 | void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo) | ||
1606 | { | ||
1607 | struct ttm_bo_device *bdev = bo->bdev; | ||
1608 | struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; | ||
1609 | |||
1610 | ttm_mem_io_lock(man, false); | ||
1611 | ttm_bo_unmap_virtual_locked(bo); | ||
1612 | ttm_mem_io_unlock(man); | ||
1641 | } | 1613 | } |
1614 | |||
1615 | |||
1642 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); | 1616 | EXPORT_SYMBOL(ttm_bo_unmap_virtual); |
1643 | 1617 | ||
1644 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) | 1618 | static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo) |
@@ -1718,6 +1692,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1718 | bool lazy, bool interruptible, bool no_wait) | 1692 | bool lazy, bool interruptible, bool no_wait) |
1719 | { | 1693 | { |
1720 | struct ttm_bo_driver *driver = bo->bdev->driver; | 1694 | struct ttm_bo_driver *driver = bo->bdev->driver; |
1695 | struct ttm_bo_device *bdev = bo->bdev; | ||
1721 | void *sync_obj; | 1696 | void *sync_obj; |
1722 | void *sync_obj_arg; | 1697 | void *sync_obj_arg; |
1723 | int ret = 0; | 1698 | int ret = 0; |
@@ -1731,9 +1706,9 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1731 | void *tmp_obj = bo->sync_obj; | 1706 | void *tmp_obj = bo->sync_obj; |
1732 | bo->sync_obj = NULL; | 1707 | bo->sync_obj = NULL; |
1733 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 1708 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
1734 | spin_unlock(&bo->lock); | 1709 | spin_unlock(&bdev->fence_lock); |
1735 | driver->sync_obj_unref(&tmp_obj); | 1710 | driver->sync_obj_unref(&tmp_obj); |
1736 | spin_lock(&bo->lock); | 1711 | spin_lock(&bdev->fence_lock); |
1737 | continue; | 1712 | continue; |
1738 | } | 1713 | } |
1739 | 1714 | ||
@@ -1742,29 +1717,29 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, | |||
1742 | 1717 | ||
1743 | sync_obj = driver->sync_obj_ref(bo->sync_obj); | 1718 | sync_obj = driver->sync_obj_ref(bo->sync_obj); |
1744 | sync_obj_arg = bo->sync_obj_arg; | 1719 | sync_obj_arg = bo->sync_obj_arg; |
1745 | spin_unlock(&bo->lock); | 1720 | spin_unlock(&bdev->fence_lock); |
1746 | ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, | 1721 | ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, |
1747 | lazy, interruptible); | 1722 | lazy, interruptible); |
1748 | if (unlikely(ret != 0)) { | 1723 | if (unlikely(ret != 0)) { |
1749 | driver->sync_obj_unref(&sync_obj); | 1724 | driver->sync_obj_unref(&sync_obj); |
1750 | spin_lock(&bo->lock); | 1725 | spin_lock(&bdev->fence_lock); |
1751 | return ret; | 1726 | return ret; |
1752 | } | 1727 | } |
1753 | spin_lock(&bo->lock); | 1728 | spin_lock(&bdev->fence_lock); |
1754 | if (likely(bo->sync_obj == sync_obj && | 1729 | if (likely(bo->sync_obj == sync_obj && |
1755 | bo->sync_obj_arg == sync_obj_arg)) { | 1730 | bo->sync_obj_arg == sync_obj_arg)) { |
1756 | void *tmp_obj = bo->sync_obj; | 1731 | void *tmp_obj = bo->sync_obj; |
1757 | bo->sync_obj = NULL; | 1732 | bo->sync_obj = NULL; |
1758 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, | 1733 | clear_bit(TTM_BO_PRIV_FLAG_MOVING, |
1759 | &bo->priv_flags); | 1734 | &bo->priv_flags); |
1760 | spin_unlock(&bo->lock); | 1735 | spin_unlock(&bdev->fence_lock); |
1761 | driver->sync_obj_unref(&sync_obj); | 1736 | driver->sync_obj_unref(&sync_obj); |
1762 | driver->sync_obj_unref(&tmp_obj); | 1737 | driver->sync_obj_unref(&tmp_obj); |
1763 | spin_lock(&bo->lock); | 1738 | spin_lock(&bdev->fence_lock); |
1764 | } else { | 1739 | } else { |
1765 | spin_unlock(&bo->lock); | 1740 | spin_unlock(&bdev->fence_lock); |
1766 | driver->sync_obj_unref(&sync_obj); | 1741 | driver->sync_obj_unref(&sync_obj); |
1767 | spin_lock(&bo->lock); | 1742 | spin_lock(&bdev->fence_lock); |
1768 | } | 1743 | } |
1769 | } | 1744 | } |
1770 | return 0; | 1745 | return 0; |
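Throughout this hunk the per-object spinlock bo->lock is replaced by the per-device bdev->fence_lock, so any code that reads or replaces bo->sync_obj must now take that lock instead. A sketch of attaching a driver fence under the new locking, loosely modelled on what the accelerated-move cleanup path does; the fence object and sync_obj_* hooks are whatever the driver provides:

/*
 * Illustration only: attach a driver fence to a buffer object under the
 * per-device fence_lock (previously this relied on the per-bo bo->lock).
 */
static void attach_fence(struct ttm_buffer_object *bo, void *fence,
			 void *fence_arg)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	void *old_obj;

	spin_lock(&bdev->fence_lock);
	old_obj = bo->sync_obj;
	bo->sync_obj = driver->sync_obj_ref(fence);
	bo->sync_obj_arg = fence_arg;
	spin_unlock(&bdev->fence_lock);

	if (old_obj)
		driver->sync_obj_unref(&old_obj);
}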
@@ -1773,6 +1748,7 @@ EXPORT_SYMBOL(ttm_bo_wait); | |||
1773 | 1748 | ||
1774 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) | 1749 | int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) |
1775 | { | 1750 | { |
1751 | struct ttm_bo_device *bdev = bo->bdev; | ||
1776 | int ret = 0; | 1752 | int ret = 0; |
1777 | 1753 | ||
1778 | /* | 1754 | /* |
@@ -1782,9 +1758,9 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) | |||
1782 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); | 1758 | ret = ttm_bo_reserve(bo, true, no_wait, false, 0); |
1783 | if (unlikely(ret != 0)) | 1759 | if (unlikely(ret != 0)) |
1784 | return ret; | 1760 | return ret; |
1785 | spin_lock(&bo->lock); | 1761 | spin_lock(&bdev->fence_lock); |
1786 | ret = ttm_bo_wait(bo, false, true, no_wait); | 1762 | ret = ttm_bo_wait(bo, false, true, no_wait); |
1787 | spin_unlock(&bo->lock); | 1763 | spin_unlock(&bdev->fence_lock); |
1788 | if (likely(ret == 0)) | 1764 | if (likely(ret == 0)) |
1789 | atomic_inc(&bo->cpu_writers); | 1765 | atomic_inc(&bo->cpu_writers); |
1790 | ttm_bo_unreserve(bo); | 1766 | ttm_bo_unreserve(bo); |
@@ -1824,6 +1800,13 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1824 | struct ttm_buffer_object, swap); | 1800 | struct ttm_buffer_object, swap); |
1825 | kref_get(&bo->list_kref); | 1801 | kref_get(&bo->list_kref); |
1826 | 1802 | ||
1803 | if (!list_empty(&bo->ddestroy)) { | ||
1804 | spin_unlock(&glob->lru_lock); | ||
1805 | (void) ttm_bo_cleanup_refs(bo, false, false, false); | ||
1806 | kref_put(&bo->list_kref, ttm_bo_release_list); | ||
1807 | continue; | ||
1808 | } | ||
1809 | |||
1827 | /** | 1810 | /** |
1828 | * Reserve buffer. Since we unlock while sleeping, we need | 1811 | * Reserve buffer. Since we unlock while sleeping, we need |
1829 | * to re-check that nobody removed us from the swap-list while | 1812 | * to re-check that nobody removed us from the swap-list while |
@@ -1843,16 +1826,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1843 | put_count = ttm_bo_del_from_lru(bo); | 1826 | put_count = ttm_bo_del_from_lru(bo); |
1844 | spin_unlock(&glob->lru_lock); | 1827 | spin_unlock(&glob->lru_lock); |
1845 | 1828 | ||
1846 | while (put_count--) | 1829 | ttm_bo_list_ref_sub(bo, put_count, true); |
1847 | kref_put(&bo->list_kref, ttm_bo_ref_bug); | ||
1848 | 1830 | ||
1849 | /** | 1831 | /** |
1850 | * Wait for GPU, then move to system cached. | 1832 | * Wait for GPU, then move to system cached. |
1851 | */ | 1833 | */ |
1852 | 1834 | ||
1853 | spin_lock(&bo->lock); | 1835 | spin_lock(&bo->bdev->fence_lock); |
1854 | ret = ttm_bo_wait(bo, false, false, false); | 1836 | ret = ttm_bo_wait(bo, false, false, false); |
1855 | spin_unlock(&bo->lock); | 1837 | spin_unlock(&bo->bdev->fence_lock); |
1856 | 1838 | ||
1857 | if (unlikely(ret != 0)) | 1839 | if (unlikely(ret != 0)) |
1858 | goto out; | 1840 | goto out; |
@@ -1881,7 +1863,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1881 | if (bo->bdev->driver->swap_notify) | 1863 | if (bo->bdev->driver->swap_notify) |
1882 | bo->bdev->driver->swap_notify(bo); | 1864 | bo->bdev->driver->swap_notify(bo); |
1883 | 1865 | ||
1884 | ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); | 1866 | ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage); |
1885 | out: | 1867 | out: |
1886 | 1868 | ||
1887 | /** | 1869 | /** |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
new file mode 100644
index 000000000000..038e947d00f9
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -0,0 +1,157 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | |||
31 | #include "ttm/ttm_module.h" | ||
32 | #include "ttm/ttm_bo_driver.h" | ||
33 | #include "ttm/ttm_placement.h" | ||
34 | #include "drm_mm.h" | ||
35 | #include <linux/slab.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/module.h> | ||
38 | |||
39 | /** | ||
40 | * Currently we use a spinlock for the lock, but a mutex *may* be | ||
41 | * more appropriate to reduce scheduling latency if the range manager | ||
42 | * ends up with very fragmented allocation patterns. | ||
43 | */ | ||
44 | |||
45 | struct ttm_range_manager { | ||
46 | struct drm_mm mm; | ||
47 | spinlock_t lock; | ||
48 | }; | ||
49 | |||
50 | static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, | ||
51 | struct ttm_buffer_object *bo, | ||
52 | struct ttm_placement *placement, | ||
53 | struct ttm_mem_reg *mem) | ||
54 | { | ||
55 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; | ||
56 | struct drm_mm *mm = &rman->mm; | ||
57 | struct drm_mm_node *node = NULL; | ||
58 | unsigned long lpfn; | ||
59 | int ret; | ||
60 | |||
61 | lpfn = placement->lpfn; | ||
62 | if (!lpfn) | ||
63 | lpfn = man->size; | ||
64 | do { | ||
65 | ret = drm_mm_pre_get(mm); | ||
66 | if (unlikely(ret)) | ||
67 | return ret; | ||
68 | |||
69 | spin_lock(&rman->lock); | ||
70 | node = drm_mm_search_free_in_range(mm, | ||
71 | mem->num_pages, mem->page_alignment, | ||
72 | placement->fpfn, lpfn, 1); | ||
73 | if (unlikely(node == NULL)) { | ||
74 | spin_unlock(&rman->lock); | ||
75 | return 0; | ||
76 | } | ||
77 | node = drm_mm_get_block_atomic_range(node, mem->num_pages, | ||
78 | mem->page_alignment, | ||
79 | placement->fpfn, | ||
80 | lpfn); | ||
81 | spin_unlock(&rman->lock); | ||
82 | } while (node == NULL); | ||
83 | |||
84 | mem->mm_node = node; | ||
85 | mem->start = node->start; | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, | ||
90 | struct ttm_mem_reg *mem) | ||
91 | { | ||
92 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; | ||
93 | |||
94 | if (mem->mm_node) { | ||
95 | spin_lock(&rman->lock); | ||
96 | drm_mm_put_block(mem->mm_node); | ||
97 | spin_unlock(&rman->lock); | ||
98 | mem->mm_node = NULL; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | static int ttm_bo_man_init(struct ttm_mem_type_manager *man, | ||
103 | unsigned long p_size) | ||
104 | { | ||
105 | struct ttm_range_manager *rman; | ||
106 | int ret; | ||
107 | |||
108 | rman = kzalloc(sizeof(*rman), GFP_KERNEL); | ||
109 | if (!rman) | ||
110 | return -ENOMEM; | ||
111 | |||
112 | ret = drm_mm_init(&rman->mm, 0, p_size); | ||
113 | if (ret) { | ||
114 | kfree(rman); | ||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | spin_lock_init(&rman->lock); | ||
119 | man->priv = rman; | ||
120 | return 0; | ||
121 | } | ||
122 | |||
123 | static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) | ||
124 | { | ||
125 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; | ||
126 | struct drm_mm *mm = &rman->mm; | ||
127 | |||
128 | spin_lock(&rman->lock); | ||
129 | if (drm_mm_clean(mm)) { | ||
130 | drm_mm_takedown(mm); | ||
131 | spin_unlock(&rman->lock); | ||
132 | kfree(rman); | ||
133 | man->priv = NULL; | ||
134 | return 0; | ||
135 | } | ||
136 | spin_unlock(&rman->lock); | ||
137 | return -EBUSY; | ||
138 | } | ||
139 | |||
140 | static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, | ||
141 | const char *prefix) | ||
142 | { | ||
143 | struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; | ||
144 | |||
145 | spin_lock(&rman->lock); | ||
146 | drm_mm_debug_table(&rman->mm, prefix); | ||
147 | spin_unlock(&rman->lock); | ||
148 | } | ||
149 | |||
150 | const struct ttm_mem_type_manager_func ttm_bo_manager_func = { | ||
151 | ttm_bo_man_init, | ||
152 | ttm_bo_man_takedown, | ||
153 | ttm_bo_man_get_node, | ||
154 | ttm_bo_man_put_node, | ||
155 | ttm_bo_man_debug | ||
156 | }; | ||
157 | EXPORT_SYMBOL(ttm_bo_manager_func); | ||
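Note: the new ttm_bo_manager.c factors the old built-in drm_mm handling into a pluggable ttm_mem_type_manager_func, so a driver now selects the generic range manager per memory type instead of TTM hard-coding it. A minimal sketch of the driver-side wiring, assuming the usual init_mem_type() hook; the example_ prefix and the flag choices are illustrative, not part of this patch:

	static int example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
					 struct ttm_mem_type_manager *man)
	{
		switch (type) {
		case TTM_PL_VRAM:
			/* Use the generic range manager exported above. */
			man->func = &ttm_bo_manager_func;
			man->flags = TTM_MEMTYPE_FLAG_FIXED |
				     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

ttm_bo_init_mm() then reaches ttm_bo_man_init() above through man->func->init(), passing the region size as p_size.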
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3451a82adba7..77dbf408c0d0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -39,14 +39,7 @@ | |||
39 | 39 | ||
40 | void ttm_bo_free_old_node(struct ttm_buffer_object *bo) | 40 | void ttm_bo_free_old_node(struct ttm_buffer_object *bo) |
41 | { | 41 | { |
42 | struct ttm_mem_reg *old_mem = &bo->mem; | 42 | ttm_bo_mem_put(bo, &bo->mem); |
43 | |||
44 | if (old_mem->mm_node) { | ||
45 | spin_lock(&bo->glob->lru_lock); | ||
46 | drm_mm_put_block(old_mem->mm_node); | ||
47 | spin_unlock(&bo->glob->lru_lock); | ||
48 | } | ||
49 | old_mem->mm_node = NULL; | ||
50 | } | 43 | } |
51 | 44 | ||
52 | int ttm_bo_move_ttm(struct ttm_buffer_object *bo, | 45 | int ttm_bo_move_ttm(struct ttm_buffer_object *bo, |
@@ -82,37 +75,123 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, | |||
82 | } | 75 | } |
83 | EXPORT_SYMBOL(ttm_bo_move_ttm); | 76 | EXPORT_SYMBOL(ttm_bo_move_ttm); |
84 | 77 | ||
85 | int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 78 | int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) |
86 | { | 79 | { |
87 | int ret; | 80 | if (likely(man->io_reserve_fastpath)) |
81 | return 0; | ||
82 | |||
83 | if (interruptible) | ||
84 | return mutex_lock_interruptible(&man->io_reserve_mutex); | ||
85 | |||
86 | mutex_lock(&man->io_reserve_mutex); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) | ||
91 | { | ||
92 | if (likely(man->io_reserve_fastpath)) | ||
93 | return; | ||
94 | |||
95 | mutex_unlock(&man->io_reserve_mutex); | ||
96 | } | ||
97 | |||
98 | static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) | ||
99 | { | ||
100 | struct ttm_buffer_object *bo; | ||
101 | |||
102 | if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru)) | ||
103 | return -EAGAIN; | ||
104 | |||
105 | bo = list_first_entry(&man->io_reserve_lru, | ||
106 | struct ttm_buffer_object, | ||
107 | io_reserve_lru); | ||
108 | list_del_init(&bo->io_reserve_lru); | ||
109 | ttm_bo_unmap_virtual_locked(bo); | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int ttm_mem_io_reserve(struct ttm_bo_device *bdev, | ||
115 | struct ttm_mem_reg *mem) | ||
116 | { | ||
117 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
118 | int ret = 0; | ||
119 | |||
120 | if (!bdev->driver->io_mem_reserve) | ||
121 | return 0; | ||
122 | if (likely(man->io_reserve_fastpath)) | ||
123 | return bdev->driver->io_mem_reserve(bdev, mem); | ||
88 | 124 | ||
89 | if (!mem->bus.io_reserved) { | 125 | if (bdev->driver->io_mem_reserve && |
90 | mem->bus.io_reserved = true; | 126 | mem->bus.io_reserved_count++ == 0) { |
127 | retry: | ||
91 | ret = bdev->driver->io_mem_reserve(bdev, mem); | 128 | ret = bdev->driver->io_mem_reserve(bdev, mem); |
129 | if (ret == -EAGAIN) { | ||
130 | ret = ttm_mem_io_evict(man); | ||
131 | if (ret == 0) | ||
132 | goto retry; | ||
133 | } | ||
134 | } | ||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | static void ttm_mem_io_free(struct ttm_bo_device *bdev, | ||
139 | struct ttm_mem_reg *mem) | ||
140 | { | ||
141 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
142 | |||
143 | if (likely(man->io_reserve_fastpath)) | ||
144 | return; | ||
145 | |||
146 | if (bdev->driver->io_mem_reserve && | ||
147 | --mem->bus.io_reserved_count == 0 && | ||
148 | bdev->driver->io_mem_free) | ||
149 | bdev->driver->io_mem_free(bdev, mem); | ||
150 | |||
151 | } | ||
152 | |||
153 | int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) | ||
154 | { | ||
155 | struct ttm_mem_reg *mem = &bo->mem; | ||
156 | int ret; | ||
157 | |||
158 | if (!mem->bus.io_reserved_vm) { | ||
159 | struct ttm_mem_type_manager *man = | ||
160 | &bo->bdev->man[mem->mem_type]; | ||
161 | |||
162 | ret = ttm_mem_io_reserve(bo->bdev, mem); | ||
92 | if (unlikely(ret != 0)) | 163 | if (unlikely(ret != 0)) |
93 | return ret; | 164 | return ret; |
165 | mem->bus.io_reserved_vm = true; | ||
166 | if (man->use_io_reserve_lru) | ||
167 | list_add_tail(&bo->io_reserve_lru, | ||
168 | &man->io_reserve_lru); | ||
94 | } | 169 | } |
95 | return 0; | 170 | return 0; |
96 | } | 171 | } |
97 | 172 | ||
98 | void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) | 173 | void ttm_mem_io_free_vm(struct ttm_buffer_object *bo) |
99 | { | 174 | { |
100 | if (bdev->driver->io_mem_reserve) { | 175 | struct ttm_mem_reg *mem = &bo->mem; |
101 | if (mem->bus.io_reserved) { | 176 | |
102 | mem->bus.io_reserved = false; | 177 | if (mem->bus.io_reserved_vm) { |
103 | bdev->driver->io_mem_free(bdev, mem); | 178 | mem->bus.io_reserved_vm = false; |
104 | } | 179 | list_del_init(&bo->io_reserve_lru); |
180 | ttm_mem_io_free(bo->bdev, mem); | ||
105 | } | 181 | } |
106 | } | 182 | } |
107 | 183 | ||
108 | int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | 184 | int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
109 | void **virtual) | 185 | void **virtual) |
110 | { | 186 | { |
187 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
111 | int ret; | 188 | int ret; |
112 | void *addr; | 189 | void *addr; |
113 | 190 | ||
114 | *virtual = NULL; | 191 | *virtual = NULL; |
192 | (void) ttm_mem_io_lock(man, false); | ||
115 | ret = ttm_mem_io_reserve(bdev, mem); | 193 | ret = ttm_mem_io_reserve(bdev, mem); |
194 | ttm_mem_io_unlock(man); | ||
116 | if (ret || !mem->bus.is_iomem) | 195 | if (ret || !mem->bus.is_iomem) |
117 | return ret; | 196 | return ret; |
118 | 197 | ||
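Note: with the reservation refcounted in mem->bus.io_reserved_count, a driver's io_mem_reserve() may legitimately fail with -EAGAIN when a limited mapping resource (for example a remappable BAR window) is exhausted; ttm_mem_io_evict() then unmaps the least recently used entry on man->io_reserve_lru and the reserve is retried. A hedged sketch of a callback written against that contract; the aperture helpers are hypothetical:

	static int example_io_mem_reserve(struct ttm_bo_device *bdev,
					  struct ttm_mem_reg *mem)
	{
		if (mem->mem_type != TTM_PL_VRAM)
			return 0;	/* system pages need no bus mapping */

		mem->bus.is_iomem = true;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.size = mem->num_pages << PAGE_SHIFT;
		mem->bus.base = example_vram_bus_base(bdev);	/* hypothetical helper */

		if (!example_aperture_room(bdev, mem->bus.size))	/* hypothetical */
			return -EAGAIN;	/* let ttm_mem_io_evict() free an LRU mapping, then retry */

		return 0;
	}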
@@ -124,7 +203,9 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | |||
124 | else | 203 | else |
125 | addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); | 204 | addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size); |
126 | if (!addr) { | 205 | if (!addr) { |
206 | (void) ttm_mem_io_lock(man, false); | ||
127 | ttm_mem_io_free(bdev, mem); | 207 | ttm_mem_io_free(bdev, mem); |
208 | ttm_mem_io_unlock(man); | ||
128 | return -ENOMEM; | 209 | return -ENOMEM; |
129 | } | 210 | } |
130 | } | 211 | } |
@@ -141,7 +222,9 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, | |||
141 | 222 | ||
142 | if (virtual && mem->bus.addr == NULL) | 223 | if (virtual && mem->bus.addr == NULL) |
143 | iounmap(virtual); | 224 | iounmap(virtual); |
225 | (void) ttm_mem_io_lock(man, false); | ||
144 | ttm_mem_io_free(bdev, mem); | 226 | ttm_mem_io_free(bdev, mem); |
227 | ttm_mem_io_unlock(man); | ||
145 | } | 228 | } |
146 | 229 | ||
147 | static int ttm_copy_io_page(void *dst, void *src, unsigned long page) | 230 | static int ttm_copy_io_page(void *dst, void *src, unsigned long page) |
@@ -170,7 +253,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
170 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); | 253 | src = (void *)((unsigned long)src + (page << PAGE_SHIFT)); |
171 | 254 | ||
172 | #ifdef CONFIG_X86 | 255 | #ifdef CONFIG_X86 |
173 | dst = kmap_atomic_prot(d, KM_USER0, prot); | 256 | dst = kmap_atomic_prot(d, prot); |
174 | #else | 257 | #else |
175 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | 258 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
176 | dst = vmap(&d, 1, 0, prot); | 259 | dst = vmap(&d, 1, 0, prot); |
@@ -183,7 +266,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src, | |||
183 | memcpy_fromio(dst, src, PAGE_SIZE); | 266 | memcpy_fromio(dst, src, PAGE_SIZE); |
184 | 267 | ||
185 | #ifdef CONFIG_X86 | 268 | #ifdef CONFIG_X86 |
186 | kunmap_atomic(dst, KM_USER0); | 269 | kunmap_atomic(dst); |
187 | #else | 270 | #else |
188 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | 271 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
189 | vunmap(dst); | 272 | vunmap(dst); |
@@ -206,7 +289,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
206 | 289 | ||
207 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); | 290 | dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT)); |
208 | #ifdef CONFIG_X86 | 291 | #ifdef CONFIG_X86 |
209 | src = kmap_atomic_prot(s, KM_USER0, prot); | 292 | src = kmap_atomic_prot(s, prot); |
210 | #else | 293 | #else |
211 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | 294 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
212 | src = vmap(&s, 1, 0, prot); | 295 | src = vmap(&s, 1, 0, prot); |
@@ -219,7 +302,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, | |||
219 | memcpy_toio(dst, src, PAGE_SIZE); | 302 | memcpy_toio(dst, src, PAGE_SIZE); |
220 | 303 | ||
221 | #ifdef CONFIG_X86 | 304 | #ifdef CONFIG_X86 |
222 | kunmap_atomic(src, KM_USER0); | 305 | kunmap_atomic(src); |
223 | #else | 306 | #else |
224 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) | 307 | if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) |
225 | vunmap(src); | 308 | vunmap(src); |
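Note: the KM_USER0 argument disappears in these hunks because the kernel moved to stack-based atomic kmaps; the slot is implicit and mappings are released in reverse order. A generic sketch of the new calling convention on architectures that provide kmap_atomic_prot(), as in the CONFIG_X86 branches above (not code from this file):

	static void example_copy_page_prot(struct page *dst_page,
					   struct page *src_page, pgprot_t prot)
	{
		void *dst = kmap_atomic_prot(dst_page, prot);	/* no KM_* slot */
		void *src = kmap_atomic(src_page);

		memcpy(dst, src, PAGE_SIZE);

		kunmap_atomic(src);	/* last mapped, first unmapped */
		kunmap_atomic(dst);
	}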
@@ -238,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
238 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; | 321 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; |
239 | struct ttm_tt *ttm = bo->ttm; | 322 | struct ttm_tt *ttm = bo->ttm; |
240 | struct ttm_mem_reg *old_mem = &bo->mem; | 323 | struct ttm_mem_reg *old_mem = &bo->mem; |
241 | struct ttm_mem_reg old_copy = *old_mem; | 324 | struct ttm_mem_reg old_copy; |
242 | void *old_iomap; | 325 | void *old_iomap; |
243 | void *new_iomap; | 326 | void *new_iomap; |
244 | int ret; | 327 | int ret; |
@@ -263,8 +346,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
263 | dir = 1; | 346 | dir = 1; |
264 | 347 | ||
265 | if ((old_mem->mem_type == new_mem->mem_type) && | 348 | if ((old_mem->mem_type == new_mem->mem_type) && |
266 | (new_mem->mm_node->start < | 349 | (new_mem->start < old_mem->start + old_mem->size)) { |
267 | old_mem->mm_node->start + old_mem->mm_node->size)) { | ||
268 | dir = -1; | 350 | dir = -1; |
269 | add = new_mem->num_pages - 1; | 351 | add = new_mem->num_pages - 1; |
270 | } | 352 | } |
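Note: the rewritten test relies on the generic mem->start field rather than reaching into the drm_mm node, but the logic is unchanged: if the move stays within one memory type and the destination range begins before the source range ends, the two may overlap, so the copy runs backwards. Restated next to the per-page loop that consumes dir and add:

	/*
	 * Overlap handling in ttm_bo_move_memcpy(), restated:
	 *
	 *	dir = 1;  add = 0;                       forward copy (default)
	 *	dir = -1; add = new_mem->num_pages - 1;  backward copy (overlap)
	 *
	 * The copy loop indexes pages as "page = i * dir + add", so the
	 * backward case starts at the last page and walks down, which is
	 * safe even when the destination overlaps the tail of the source.
	 */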
@@ -288,8 +370,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
288 | } | 370 | } |
289 | mb(); | 371 | mb(); |
290 | out2: | 372 | out2: |
291 | ttm_bo_free_old_node(bo); | 373 | old_copy = *old_mem; |
292 | |||
293 | *old_mem = *new_mem; | 374 | *old_mem = *new_mem; |
294 | new_mem->mm_node = NULL; | 375 | new_mem->mm_node = NULL; |
295 | 376 | ||
@@ -300,9 +381,10 @@ out2: | |||
300 | } | 381 | } |
301 | 382 | ||
302 | out1: | 383 | out1: |
303 | ttm_mem_reg_iounmap(bdev, new_mem, new_iomap); | 384 | ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); |
304 | out: | 385 | out: |
305 | ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); | 386 | ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); |
387 | ttm_bo_mem_put(bo, &old_copy); | ||
306 | return ret; | 388 | return ret; |
307 | } | 389 | } |
308 | EXPORT_SYMBOL(ttm_bo_move_memcpy); | 390 | EXPORT_SYMBOL(ttm_bo_move_memcpy); |
@@ -345,11 +427,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, | |||
345 | * TODO: Explicit member copy would probably be better here. | 427 | * TODO: Explicit member copy would probably be better here. |
346 | */ | 428 | */ |
347 | 429 | ||
348 | spin_lock_init(&fbo->lock); | ||
349 | init_waitqueue_head(&fbo->event_queue); | 430 | init_waitqueue_head(&fbo->event_queue); |
350 | INIT_LIST_HEAD(&fbo->ddestroy); | 431 | INIT_LIST_HEAD(&fbo->ddestroy); |
351 | INIT_LIST_HEAD(&fbo->lru); | 432 | INIT_LIST_HEAD(&fbo->lru); |
352 | INIT_LIST_HEAD(&fbo->swap); | 433 | INIT_LIST_HEAD(&fbo->swap); |
434 | INIT_LIST_HEAD(&fbo->io_reserve_lru); | ||
353 | fbo->vm_node = NULL; | 435 | fbo->vm_node = NULL; |
354 | atomic_set(&fbo->cpu_writers, 0); | 436 | atomic_set(&fbo->cpu_writers, 0); |
355 | 437 | ||
@@ -461,6 +543,8 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, | |||
461 | unsigned long start_page, unsigned long num_pages, | 543 | unsigned long start_page, unsigned long num_pages, |
462 | struct ttm_bo_kmap_obj *map) | 544 | struct ttm_bo_kmap_obj *map) |
463 | { | 545 | { |
546 | struct ttm_mem_type_manager *man = | ||
547 | &bo->bdev->man[bo->mem.mem_type]; | ||
464 | unsigned long offset, size; | 548 | unsigned long offset, size; |
465 | int ret; | 549 | int ret; |
466 | 550 | ||
@@ -475,7 +559,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, | |||
475 | if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) | 559 | if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) |
476 | return -EPERM; | 560 | return -EPERM; |
477 | #endif | 561 | #endif |
562 | (void) ttm_mem_io_lock(man, false); | ||
478 | ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); | 563 | ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); |
564 | ttm_mem_io_unlock(man); | ||
479 | if (ret) | 565 | if (ret) |
480 | return ret; | 566 | return ret; |
481 | if (!bo->mem.bus.is_iomem) { | 567 | if (!bo->mem.bus.is_iomem) { |
@@ -490,12 +576,15 @@ EXPORT_SYMBOL(ttm_bo_kmap); | |||
490 | 576 | ||
491 | void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) | 577 | void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) |
492 | { | 578 | { |
579 | struct ttm_buffer_object *bo = map->bo; | ||
580 | struct ttm_mem_type_manager *man = | ||
581 | &bo->bdev->man[bo->mem.mem_type]; | ||
582 | |||
493 | if (!map->virtual) | 583 | if (!map->virtual) |
494 | return; | 584 | return; |
495 | switch (map->bo_kmap_type) { | 585 | switch (map->bo_kmap_type) { |
496 | case ttm_bo_map_iomap: | 586 | case ttm_bo_map_iomap: |
497 | iounmap(map->virtual); | 587 | iounmap(map->virtual); |
498 | ttm_mem_io_free(map->bo->bdev, &map->bo->mem); | ||
499 | break; | 588 | break; |
500 | case ttm_bo_map_vmap: | 589 | case ttm_bo_map_vmap: |
501 | vunmap(map->virtual); | 590 | vunmap(map->virtual); |
@@ -508,6 +597,9 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) | |||
508 | default: | 597 | default: |
509 | BUG(); | 598 | BUG(); |
510 | } | 599 | } |
600 | (void) ttm_mem_io_lock(man, false); | ||
601 | ttm_mem_io_free(map->bo->bdev, &map->bo->mem); | ||
602 | ttm_mem_io_unlock(man); | ||
511 | map->virtual = NULL; | 603 | map->virtual = NULL; |
512 | map->page = NULL; | 604 | map->page = NULL; |
513 | } | 605 | } |
@@ -528,7 +620,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
528 | struct ttm_buffer_object *ghost_obj; | 620 | struct ttm_buffer_object *ghost_obj; |
529 | void *tmp_obj = NULL; | 621 | void *tmp_obj = NULL; |
530 | 622 | ||
531 | spin_lock(&bo->lock); | 623 | spin_lock(&bdev->fence_lock); |
532 | if (bo->sync_obj) { | 624 | if (bo->sync_obj) { |
533 | tmp_obj = bo->sync_obj; | 625 | tmp_obj = bo->sync_obj; |
534 | bo->sync_obj = NULL; | 626 | bo->sync_obj = NULL; |
@@ -537,7 +629,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
537 | bo->sync_obj_arg = sync_obj_arg; | 629 | bo->sync_obj_arg = sync_obj_arg; |
538 | if (evict) { | 630 | if (evict) { |
539 | ret = ttm_bo_wait(bo, false, false, false); | 631 | ret = ttm_bo_wait(bo, false, false, false); |
540 | spin_unlock(&bo->lock); | 632 | spin_unlock(&bdev->fence_lock); |
541 | if (tmp_obj) | 633 | if (tmp_obj) |
542 | driver->sync_obj_unref(&tmp_obj); | 634 | driver->sync_obj_unref(&tmp_obj); |
543 | if (ret) | 635 | if (ret) |
@@ -560,7 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, | |||
560 | */ | 652 | */ |
561 | 653 | ||
562 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); | 654 | set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
563 | spin_unlock(&bo->lock); | 655 | spin_unlock(&bdev->fence_lock); |
564 | if (tmp_obj) | 656 | if (tmp_obj) |
565 | driver->sync_obj_unref(&tmp_obj); | 657 | driver->sync_obj_unref(&tmp_obj); |
566 | 658 | ||
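Note: throughout this file the per-object spinlock (bo->lock) is replaced by the per-device bdev->fence_lock, which now serializes all sync_obj access; ttm_buffer_object_transfer() accordingly no longer initializes a private lock. A small sketch of the resulting convention for code that inspects fencing state (purely illustrative):

	static bool example_bo_is_idle(struct ttm_buffer_object *bo)
	{
		struct ttm_bo_device *bdev = bo->bdev;
		bool idle;

		spin_lock(&bdev->fence_lock);		/* was: spin_lock(&bo->lock) */
		idle = (bo->sync_obj == NULL);
		spin_unlock(&bdev->fence_lock);

		return idle;
	}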
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index fe6cb77899f4..221b924acebe 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c | |||
@@ -83,6 +83,8 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
83 | int i; | 83 | int i; |
84 | unsigned long address = (unsigned long)vmf->virtual_address; | 84 | unsigned long address = (unsigned long)vmf->virtual_address; |
85 | int retval = VM_FAULT_NOPAGE; | 85 | int retval = VM_FAULT_NOPAGE; |
86 | struct ttm_mem_type_manager *man = | ||
87 | &bdev->man[bo->mem.mem_type]; | ||
86 | 88 | ||
87 | /* | 89 | /* |
88 | * Work around locking order reversal in fault / nopfn | 90 | * Work around locking order reversal in fault / nopfn |
@@ -118,24 +120,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
118 | * move. | 120 | * move. |
119 | */ | 121 | */ |
120 | 122 | ||
121 | spin_lock(&bo->lock); | 123 | spin_lock(&bdev->fence_lock); |
122 | if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { | 124 | if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) { |
123 | ret = ttm_bo_wait(bo, false, true, false); | 125 | ret = ttm_bo_wait(bo, false, true, false); |
124 | spin_unlock(&bo->lock); | 126 | spin_unlock(&bdev->fence_lock); |
125 | if (unlikely(ret != 0)) { | 127 | if (unlikely(ret != 0)) { |
126 | retval = (ret != -ERESTARTSYS) ? | 128 | retval = (ret != -ERESTARTSYS) ? |
127 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; | 129 | VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; |
128 | goto out_unlock; | 130 | goto out_unlock; |
129 | } | 131 | } |
130 | } else | 132 | } else |
131 | spin_unlock(&bo->lock); | 133 | spin_unlock(&bdev->fence_lock); |
132 | 134 | ||
133 | 135 | ret = ttm_mem_io_lock(man, true); | |
134 | ret = ttm_mem_io_reserve(bdev, &bo->mem); | 136 | if (unlikely(ret != 0)) { |
135 | if (ret) { | 137 | retval = VM_FAULT_NOPAGE; |
136 | retval = VM_FAULT_SIGBUS; | ||
137 | goto out_unlock; | 138 | goto out_unlock; |
138 | } | 139 | } |
140 | ret = ttm_mem_io_reserve_vm(bo); | ||
141 | if (unlikely(ret != 0)) { | ||
142 | retval = VM_FAULT_SIGBUS; | ||
143 | goto out_io_unlock; | ||
144 | } | ||
139 | 145 | ||
140 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + | 146 | page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + |
141 | bo->vm_node->start - vma->vm_pgoff; | 147 | bo->vm_node->start - vma->vm_pgoff; |
@@ -144,7 +150,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
144 | 150 | ||
145 | if (unlikely(page_offset >= bo->num_pages)) { | 151 | if (unlikely(page_offset >= bo->num_pages)) { |
146 | retval = VM_FAULT_SIGBUS; | 152 | retval = VM_FAULT_SIGBUS; |
147 | goto out_unlock; | 153 | goto out_io_unlock; |
148 | } | 154 | } |
149 | 155 | ||
150 | /* | 156 | /* |
@@ -182,7 +188,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
182 | page = ttm_tt_get_page(ttm, page_offset); | 188 | page = ttm_tt_get_page(ttm, page_offset); |
183 | if (unlikely(!page && i == 0)) { | 189 | if (unlikely(!page && i == 0)) { |
184 | retval = VM_FAULT_OOM; | 190 | retval = VM_FAULT_OOM; |
185 | goto out_unlock; | 191 | goto out_io_unlock; |
186 | } else if (unlikely(!page)) { | 192 | } else if (unlikely(!page)) { |
187 | break; | 193 | break; |
188 | } | 194 | } |
@@ -200,14 +206,15 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
200 | else if (unlikely(ret != 0)) { | 206 | else if (unlikely(ret != 0)) { |
201 | retval = | 207 | retval = |
202 | (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; | 208 | (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS; |
203 | goto out_unlock; | 209 | goto out_io_unlock; |
204 | } | 210 | } |
205 | 211 | ||
206 | address += PAGE_SIZE; | 212 | address += PAGE_SIZE; |
207 | if (unlikely(++page_offset >= page_last)) | 213 | if (unlikely(++page_offset >= page_last)) |
208 | break; | 214 | break; |
209 | } | 215 | } |
210 | 216 | out_io_unlock: | |
217 | ttm_mem_io_unlock(man); | ||
211 | out_unlock: | 218 | out_unlock: |
212 | ttm_bo_unreserve(bo); | 219 | ttm_bo_unreserve(bo); |
213 | return retval; | 220 | return retval; |
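Note: the fault handler now brackets the bus-space reservation with ttm_mem_io_lock()/ttm_mem_io_unlock() and uses the refcounted, LRU-tracked ttm_mem_io_reserve_vm(); every later failure jumps to out_io_unlock so the mutex cannot leak. A simplified sketch of just that prologue (not the handler itself); the caller installs the PTEs and then drops the lock:

	static int example_fault_prepare(struct ttm_buffer_object *bo)
	{
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[bo->mem.mem_type];
		int ret;

		ret = ttm_mem_io_lock(man, true);	/* interruptible for user faults */
		if (unlikely(ret != 0))
			return ret;			/* fault retries via VM_FAULT_NOPAGE */

		ret = ttm_mem_io_reserve_vm(bo);	/* refcounted + io_reserve_lru */
		if (unlikely(ret != 0)) {
			ttm_mem_io_unlock(man);
			return ret;			/* fault fails via VM_FAULT_SIGBUS */
		}

		/* ... caller maps the pages, then calls ttm_mem_io_unlock(man). */
		return 0;
	}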
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index c285c2902d15..3832fe10b4df 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
34 | 34 | ||
35 | void ttm_eu_backoff_reservation(struct list_head *list) | 35 | static void ttm_eu_backoff_reservation_locked(struct list_head *list) |
36 | { | 36 | { |
37 | struct ttm_validate_buffer *entry; | 37 | struct ttm_validate_buffer *entry; |
38 | 38 | ||
@@ -41,10 +41,77 @@ void ttm_eu_backoff_reservation(struct list_head *list) | |||
41 | if (!entry->reserved) | 41 | if (!entry->reserved) |
42 | continue; | 42 | continue; |
43 | 43 | ||
44 | if (entry->removed) { | ||
45 | ttm_bo_add_to_lru(bo); | ||
46 | entry->removed = false; | ||
47 | |||
48 | } | ||
44 | entry->reserved = false; | 49 | entry->reserved = false; |
45 | ttm_bo_unreserve(bo); | 50 | atomic_set(&bo->reserved, 0); |
51 | wake_up_all(&bo->event_queue); | ||
52 | } | ||
53 | } | ||
54 | |||
55 | static void ttm_eu_del_from_lru_locked(struct list_head *list) | ||
56 | { | ||
57 | struct ttm_validate_buffer *entry; | ||
58 | |||
59 | list_for_each_entry(entry, list, head) { | ||
60 | struct ttm_buffer_object *bo = entry->bo; | ||
61 | if (!entry->reserved) | ||
62 | continue; | ||
63 | |||
64 | if (!entry->removed) { | ||
65 | entry->put_count = ttm_bo_del_from_lru(bo); | ||
66 | entry->removed = true; | ||
67 | } | ||
46 | } | 68 | } |
47 | } | 69 | } |
70 | |||
71 | static void ttm_eu_list_ref_sub(struct list_head *list) | ||
72 | { | ||
73 | struct ttm_validate_buffer *entry; | ||
74 | |||
75 | list_for_each_entry(entry, list, head) { | ||
76 | struct ttm_buffer_object *bo = entry->bo; | ||
77 | |||
78 | if (entry->put_count) { | ||
79 | ttm_bo_list_ref_sub(bo, entry->put_count, true); | ||
80 | entry->put_count = 0; | ||
81 | } | ||
82 | } | ||
83 | } | ||
84 | |||
85 | static int ttm_eu_wait_unreserved_locked(struct list_head *list, | ||
86 | struct ttm_buffer_object *bo) | ||
87 | { | ||
88 | struct ttm_bo_global *glob = bo->glob; | ||
89 | int ret; | ||
90 | |||
91 | ttm_eu_del_from_lru_locked(list); | ||
92 | spin_unlock(&glob->lru_lock); | ||
93 | ret = ttm_bo_wait_unreserved(bo, true); | ||
94 | spin_lock(&glob->lru_lock); | ||
95 | if (unlikely(ret != 0)) | ||
96 | ttm_eu_backoff_reservation_locked(list); | ||
97 | return ret; | ||
98 | } | ||
99 | |||
100 | |||
101 | void ttm_eu_backoff_reservation(struct list_head *list) | ||
102 | { | ||
103 | struct ttm_validate_buffer *entry; | ||
104 | struct ttm_bo_global *glob; | ||
105 | |||
106 | if (list_empty(list)) | ||
107 | return; | ||
108 | |||
109 | entry = list_first_entry(list, struct ttm_validate_buffer, head); | ||
110 | glob = entry->bo->glob; | ||
111 | spin_lock(&glob->lru_lock); | ||
112 | ttm_eu_backoff_reservation_locked(list); | ||
113 | spin_unlock(&glob->lru_lock); | ||
114 | } | ||
48 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | 115 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); |
49 | 116 | ||
50 | /* | 117 | /* |
@@ -59,37 +126,76 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); | |||
59 | * buffers in different orders. | 126 | * buffers in different orders. |
60 | */ | 127 | */ |
61 | 128 | ||
62 | int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) | 129 | int ttm_eu_reserve_buffers(struct list_head *list) |
63 | { | 130 | { |
131 | struct ttm_bo_global *glob; | ||
64 | struct ttm_validate_buffer *entry; | 132 | struct ttm_validate_buffer *entry; |
65 | int ret; | 133 | int ret; |
134 | uint32_t val_seq; | ||
135 | |||
136 | if (list_empty(list)) | ||
137 | return 0; | ||
138 | |||
139 | list_for_each_entry(entry, list, head) { | ||
140 | entry->reserved = false; | ||
141 | entry->put_count = 0; | ||
142 | entry->removed = false; | ||
143 | } | ||
144 | |||
145 | entry = list_first_entry(list, struct ttm_validate_buffer, head); | ||
146 | glob = entry->bo->glob; | ||
66 | 147 | ||
67 | retry: | 148 | retry: |
149 | spin_lock(&glob->lru_lock); | ||
150 | val_seq = entry->bo->bdev->val_seq++; | ||
151 | |||
68 | list_for_each_entry(entry, list, head) { | 152 | list_for_each_entry(entry, list, head) { |
69 | struct ttm_buffer_object *bo = entry->bo; | 153 | struct ttm_buffer_object *bo = entry->bo; |
70 | 154 | ||
71 | entry->reserved = false; | 155 | retry_this_bo: |
72 | ret = ttm_bo_reserve(bo, true, false, true, val_seq); | 156 | ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq); |
73 | if (ret != 0) { | 157 | switch (ret) { |
74 | ttm_eu_backoff_reservation(list); | 158 | case 0: |
75 | if (ret == -EAGAIN) { | 159 | break; |
76 | ret = ttm_bo_wait_unreserved(bo, true); | 160 | case -EBUSY: |
77 | if (unlikely(ret != 0)) | 161 | ret = ttm_eu_wait_unreserved_locked(list, bo); |
78 | return ret; | 162 | if (unlikely(ret != 0)) { |
79 | goto retry; | 163 | spin_unlock(&glob->lru_lock); |
80 | } else | 164 | ttm_eu_list_ref_sub(list); |
81 | return ret; | 165 | return ret; |
166 | } | ||
167 | goto retry_this_bo; | ||
168 | case -EAGAIN: | ||
169 | ttm_eu_backoff_reservation_locked(list); | ||
170 | spin_unlock(&glob->lru_lock); | ||
171 | ttm_eu_list_ref_sub(list); | ||
172 | ret = ttm_bo_wait_unreserved(bo, true); | ||
173 | if (unlikely(ret != 0)) | ||
174 | return ret; | ||
175 | goto retry; | ||
176 | default: | ||
177 | ttm_eu_backoff_reservation_locked(list); | ||
178 | spin_unlock(&glob->lru_lock); | ||
179 | ttm_eu_list_ref_sub(list); | ||
180 | return ret; | ||
82 | } | 181 | } |
83 | 182 | ||
84 | entry->reserved = true; | 183 | entry->reserved = true; |
85 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { | 184 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { |
86 | ttm_eu_backoff_reservation(list); | 185 | ttm_eu_backoff_reservation_locked(list); |
186 | spin_unlock(&glob->lru_lock); | ||
187 | ttm_eu_list_ref_sub(list); | ||
87 | ret = ttm_bo_wait_cpu(bo, false); | 188 | ret = ttm_bo_wait_cpu(bo, false); |
88 | if (ret) | 189 | if (ret) |
89 | return ret; | 190 | return ret; |
90 | goto retry; | 191 | goto retry; |
91 | } | 192 | } |
92 | } | 193 | } |
194 | |||
195 | ttm_eu_del_from_lru_locked(list); | ||
196 | spin_unlock(&glob->lru_lock); | ||
197 | ttm_eu_list_ref_sub(list); | ||
198 | |||
93 | return 0; | 199 | return 0; |
94 | } | 200 | } |
95 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | 201 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); |
@@ -97,21 +203,36 @@ EXPORT_SYMBOL(ttm_eu_reserve_buffers); | |||
97 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) | 203 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) |
98 | { | 204 | { |
99 | struct ttm_validate_buffer *entry; | 205 | struct ttm_validate_buffer *entry; |
206 | struct ttm_buffer_object *bo; | ||
207 | struct ttm_bo_global *glob; | ||
208 | struct ttm_bo_device *bdev; | ||
209 | struct ttm_bo_driver *driver; | ||
100 | 210 | ||
101 | list_for_each_entry(entry, list, head) { | 211 | if (list_empty(list)) |
102 | struct ttm_buffer_object *bo = entry->bo; | 212 | return; |
103 | struct ttm_bo_driver *driver = bo->bdev->driver; | 213 | |
104 | void *old_sync_obj; | 214 | bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; |
215 | bdev = bo->bdev; | ||
216 | driver = bdev->driver; | ||
217 | glob = bo->glob; | ||
105 | 218 | ||
106 | spin_lock(&bo->lock); | 219 | spin_lock(&bdev->fence_lock); |
107 | old_sync_obj = bo->sync_obj; | 220 | spin_lock(&glob->lru_lock); |
221 | |||
222 | list_for_each_entry(entry, list, head) { | ||
223 | bo = entry->bo; | ||
224 | entry->old_sync_obj = bo->sync_obj; | ||
108 | bo->sync_obj = driver->sync_obj_ref(sync_obj); | 225 | bo->sync_obj = driver->sync_obj_ref(sync_obj); |
109 | bo->sync_obj_arg = entry->new_sync_obj_arg; | 226 | bo->sync_obj_arg = entry->new_sync_obj_arg; |
110 | spin_unlock(&bo->lock); | 227 | ttm_bo_unreserve_locked(bo); |
111 | ttm_bo_unreserve(bo); | ||
112 | entry->reserved = false; | 228 | entry->reserved = false; |
113 | if (old_sync_obj) | 229 | } |
114 | driver->sync_obj_unref(&old_sync_obj); | 230 | spin_unlock(&glob->lru_lock); |
231 | spin_unlock(&bdev->fence_lock); | ||
232 | |||
233 | list_for_each_entry(entry, list, head) { | ||
234 | if (entry->old_sync_obj) | ||
235 | driver->sync_obj_unref(&entry->old_sync_obj); | ||
115 | } | 236 | } |
116 | } | 237 | } |
117 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); | 238 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); |
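Note: with the validation sequence number now drawn from bdev->val_seq inside ttm_eu_reserve_buffers(), and the old sync objects stashed in entry->old_sync_obj so they are unreferenced outside the spinlocks, a driver's submission path reduces to the pattern below. A hedged sketch; the example_ helper is hypothetical:

	static int example_submit(struct list_head *validate_list, void *fence)
	{
		int ret;

		ret = ttm_eu_reserve_buffers(validate_list);	/* no val_seq argument */
		if (unlikely(ret != 0))
			return ret;	/* e.g. -ERESTARTSYS from an interrupted wait */

		ret = example_validate_and_emit(validate_list);	/* hypothetical */
		if (unlikely(ret != 0)) {
			ttm_eu_backoff_reservation(validate_list);
			return ret;
		}

		/* Publishes the fence on every buffer and unreserves the list. */
		ttm_eu_fence_buffer_objects(validate_list, fence);
		return 0;
	}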
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c index 75e9d6f86ba4..ebddd443d91a 100644 --- a/drivers/gpu/drm/ttm/ttm_object.c +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -206,7 +206,7 @@ void ttm_base_object_unref(struct ttm_base_object **p_base) | |||
206 | */ | 206 | */ |
207 | 207 | ||
208 | write_lock(&tdev->object_lock); | 208 | write_lock(&tdev->object_lock); |
209 | (void)kref_put(&base->refcount, &ttm_release_base); | 209 | kref_put(&base->refcount, ttm_release_base); |
210 | write_unlock(&tdev->object_lock); | 210 | write_unlock(&tdev->object_lock); |
211 | } | 211 | } |
212 | EXPORT_SYMBOL(ttm_base_object_unref); | 212 | EXPORT_SYMBOL(ttm_base_object_unref); |
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index b1e02fffd3cc..d948575717bf 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
39 | #include <linux/seq_file.h> /* for seq_printf */ | 39 | #include <linux/seq_file.h> /* for seq_printf */ |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/dma-mapping.h> | ||
41 | 42 | ||
42 | #include <asm/atomic.h> | 43 | #include <asm/atomic.h> |
43 | 44 | ||
@@ -394,12 +395,14 @@ static int ttm_pool_get_num_unused_pages(void) | |||
394 | /** | 395 | /** |
395 | * Callback for mm to request pool to reduce number of page held. | 396 | * Callback for mm to request pool to reduce number of page held. |
396 | */ | 397 | */ |
397 | static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) | 398 | static int ttm_pool_mm_shrink(struct shrinker *shrink, |
399 | struct shrink_control *sc) | ||
398 | { | 400 | { |
399 | static atomic_t start_pool = ATOMIC_INIT(0); | 401 | static atomic_t start_pool = ATOMIC_INIT(0); |
400 | unsigned i; | 402 | unsigned i; |
401 | unsigned pool_offset = atomic_add_return(1, &start_pool); | 403 | unsigned pool_offset = atomic_add_return(1, &start_pool); |
402 | struct ttm_page_pool *pool; | 404 | struct ttm_page_pool *pool; |
405 | int shrink_pages = sc->nr_to_scan; | ||
403 | 406 | ||
404 | pool_offset = pool_offset % NUM_POOLS; | 407 | pool_offset = pool_offset % NUM_POOLS; |
405 | /* select start pool in round robin fashion */ | 408 | /* select start pool in round robin fashion */ |
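Note: this tracks the core MM change that folds the shrinker arguments into struct shrink_control; the callback keeps its struct shrinker pointer and reads nr_to_scan (and gfp_mask, if needed) from sc. A minimal, generic sketch against the new prototype; the example_ helpers and the registration details are assumptions, not taken from this file:

	static int example_shrink(struct shrinker *shrink, struct shrink_control *sc)
	{
		if (sc->nr_to_scan)
			example_reclaim(sc->nr_to_scan);	/* hypothetical */

		return example_cached_count();	/* objects still held; hypothetical */
	}

	static struct shrinker example_shrinker = {
		.shrink	= example_shrink,
		.seeks	= DEFAULT_SEEKS,
	};

	/* register_shrinker(&example_shrinker) at init,
	 * unregister_shrinker(&example_shrinker) at teardown.
	 */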
@@ -662,7 +665,8 @@ out: | |||
662 | * cached pages. | 665 | * cached pages. |
663 | */ | 666 | */ |
664 | int ttm_get_pages(struct list_head *pages, int flags, | 667 | int ttm_get_pages(struct list_head *pages, int flags, |
665 | enum ttm_caching_state cstate, unsigned count) | 668 | enum ttm_caching_state cstate, unsigned count, |
669 | dma_addr_t *dma_address) | ||
666 | { | 670 | { |
667 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 671 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
668 | struct page *p = NULL; | 672 | struct page *p = NULL; |
@@ -720,7 +724,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
720 | printk(KERN_ERR TTM_PFX | 724 | printk(KERN_ERR TTM_PFX |
721 | "Failed to allocate extra pages " | 725 | "Failed to allocate extra pages " |
722 | "for large request."); | 726 | "for large request."); |
723 | ttm_put_pages(pages, 0, flags, cstate); | 727 | ttm_put_pages(pages, 0, flags, cstate, NULL); |
724 | return r; | 728 | return r; |
725 | } | 729 | } |
726 | } | 730 | } |
@@ -731,7 +735,7 @@ int ttm_get_pages(struct list_head *pages, int flags, | |||
731 | 735 | ||
732 | /* Put all pages in pages list to correct pool to wait for reuse */ | 736 | /* Put all pages in pages list to correct pool to wait for reuse */ |
733 | void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, | 737 | void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags, |
734 | enum ttm_caching_state cstate) | 738 | enum ttm_caching_state cstate, dma_addr_t *dma_address) |
735 | { | 739 | { |
736 | unsigned long irq_flags; | 740 | unsigned long irq_flags; |
737 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); | 741 | struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); |
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a7bab87a548b..58c271ebc0f7 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
32 | #include <linux/highmem.h> | 32 | #include <linux/highmem.h> |
33 | #include <linux/pagemap.h> | 33 | #include <linux/pagemap.h> |
34 | #include <linux/shmem_fs.h> | ||
34 | #include <linux/file.h> | 35 | #include <linux/file.h> |
35 | #include <linux/swap.h> | 36 | #include <linux/swap.h> |
36 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
@@ -49,12 +50,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm); | |||
49 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) | 50 | static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) |
50 | { | 51 | { |
51 | ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); | 52 | ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages)); |
53 | ttm->dma_address = drm_calloc_large(ttm->num_pages, | ||
54 | sizeof(*ttm->dma_address)); | ||
52 | } | 55 | } |
53 | 56 | ||
54 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) | 57 | static void ttm_tt_free_page_directory(struct ttm_tt *ttm) |
55 | { | 58 | { |
56 | drm_free_large(ttm->pages); | 59 | drm_free_large(ttm->pages); |
57 | ttm->pages = NULL; | 60 | ttm->pages = NULL; |
61 | drm_free_large(ttm->dma_address); | ||
62 | ttm->dma_address = NULL; | ||
58 | } | 63 | } |
59 | 64 | ||
60 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) | 65 | static void ttm_tt_free_user_pages(struct ttm_tt *ttm) |
@@ -105,7 +110,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index) | |||
105 | 110 | ||
106 | INIT_LIST_HEAD(&h); | 111 | INIT_LIST_HEAD(&h); |
107 | 112 | ||
108 | ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1); | 113 | ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1, |
114 | &ttm->dma_address[index]); | ||
109 | 115 | ||
110 | if (ret != 0) | 116 | if (ret != 0) |
111 | return NULL; | 117 | return NULL; |
@@ -164,7 +170,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) | |||
164 | } | 170 | } |
165 | 171 | ||
166 | be->func->populate(be, ttm->num_pages, ttm->pages, | 172 | be->func->populate(be, ttm->num_pages, ttm->pages, |
167 | ttm->dummy_read_page); | 173 | ttm->dummy_read_page, ttm->dma_address); |
168 | ttm->state = tt_unbound; | 174 | ttm->state = tt_unbound; |
169 | return 0; | 175 | return 0; |
170 | } | 176 | } |
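Note: the populate() backend hook gains a dma_addr_t array alongside the page pointers, matching the ttm_get_pages() change, so backends that program bus addresses (for instance into a GART) can pick them up directly. A sketch of a backend consuming the widened interface; the example_backend structure is illustrative:

	struct example_backend {
		struct ttm_backend backend;
		struct page **pages;
		dma_addr_t *dma_addrs;
		unsigned long num_pages;
	};

	static int example_populate(struct ttm_backend *backend,
				    unsigned long num_pages, struct page **pages,
				    struct page *dummy_read_page,
				    dma_addr_t *dma_addrs)
	{
		struct example_backend *be =
			container_of(backend, struct example_backend, backend);

		be->pages = pages;
		be->dma_addrs = dma_addrs;	/* per-page bus addresses, if the allocator filled them */
		be->num_pages = num_pages;
		return 0;
	}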
@@ -298,7 +304,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm) | |||
298 | count++; | 304 | count++; |
299 | } | 305 | } |
300 | } | 306 | } |
301 | ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state); | 307 | ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state, |
308 | ttm->dma_address); | ||
302 | ttm->state = tt_unpopulated; | 309 | ttm->state = tt_unpopulated; |
303 | ttm->first_himem_page = ttm->num_pages; | 310 | ttm->first_himem_page = ttm->num_pages; |
304 | ttm->last_lomem_page = -1; | 311 | ttm->last_lomem_page = -1; |
@@ -326,7 +333,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm) | |||
326 | ttm_tt_free_page_directory(ttm); | 333 | ttm_tt_free_page_directory(ttm); |
327 | } | 334 | } |
328 | 335 | ||
329 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) && | 336 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) && |
330 | ttm->swap_storage) | 337 | ttm->swap_storage) |
331 | fput(ttm->swap_storage); | 338 | fput(ttm->swap_storage); |
332 | 339 | ||
@@ -440,10 +447,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) | |||
440 | return ret; | 447 | return ret; |
441 | 448 | ||
442 | ret = be->func->bind(be, bo_mem); | 449 | ret = be->func->bind(be, bo_mem); |
443 | if (ret) { | 450 | if (unlikely(ret != 0)) |
444 | printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n"); | ||
445 | return ret; | 451 | return ret; |
446 | } | ||
447 | 452 | ||
448 | ttm->state = tt_bound; | 453 | ttm->state = tt_bound; |
449 | 454 | ||
@@ -480,7 +485,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) | |||
480 | swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; | 485 | swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; |
481 | 486 | ||
482 | for (i = 0; i < ttm->num_pages; ++i) { | 487 | for (i = 0; i < ttm->num_pages; ++i) { |
483 | from_page = read_mapping_page(swap_space, i, NULL); | 488 | from_page = shmem_read_mapping_page(swap_space, i); |
484 | if (IS_ERR(from_page)) { | 489 | if (IS_ERR(from_page)) { |
485 | ret = PTR_ERR(from_page); | 490 | ret = PTR_ERR(from_page); |
486 | goto out_err; | 491 | goto out_err; |
@@ -499,7 +504,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) | |||
499 | page_cache_release(from_page); | 504 | page_cache_release(from_page); |
500 | } | 505 | } |
501 | 506 | ||
502 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP)) | 507 | if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) |
503 | fput(swap_storage); | 508 | fput(swap_storage); |
504 | ttm->swap_storage = NULL; | 509 | ttm->swap_storage = NULL; |
505 | ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; | 510 | ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED; |
@@ -510,7 +515,7 @@ out_err: | |||
510 | return ret; | 515 | return ret; |
511 | } | 516 | } |
512 | 517 | ||
513 | int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | 518 | int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) |
514 | { | 519 | { |
515 | struct address_space *swap_space; | 520 | struct address_space *swap_space; |
516 | struct file *swap_storage; | 521 | struct file *swap_storage; |
@@ -536,7 +541,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
536 | return 0; | 541 | return 0; |
537 | } | 542 | } |
538 | 543 | ||
539 | if (!persistant_swap_storage) { | 544 | if (!persistent_swap_storage) { |
540 | swap_storage = shmem_file_setup("ttm swap", | 545 | swap_storage = shmem_file_setup("ttm swap", |
541 | ttm->num_pages << PAGE_SHIFT, | 546 | ttm->num_pages << PAGE_SHIFT, |
542 | 0); | 547 | 0); |
@@ -545,7 +550,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
545 | return PTR_ERR(swap_storage); | 550 | return PTR_ERR(swap_storage); |
546 | } | 551 | } |
547 | } else | 552 | } else |
548 | swap_storage = persistant_swap_storage; | 553 | swap_storage = persistent_swap_storage; |
549 | 554 | ||
550 | swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; | 555 | swap_space = swap_storage->f_path.dentry->d_inode->i_mapping; |
551 | 556 | ||
@@ -553,7 +558,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
553 | from_page = ttm->pages[i]; | 558 | from_page = ttm->pages[i]; |
554 | if (unlikely(from_page == NULL)) | 559 | if (unlikely(from_page == NULL)) |
555 | continue; | 560 | continue; |
556 | to_page = read_mapping_page(swap_space, i, NULL); | 561 | to_page = shmem_read_mapping_page(swap_space, i); |
557 | if (unlikely(IS_ERR(to_page))) { | 562 | if (unlikely(IS_ERR(to_page))) { |
558 | ret = PTR_ERR(to_page); | 563 | ret = PTR_ERR(to_page); |
559 | goto out_err; | 564 | goto out_err; |
@@ -573,12 +578,12 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) | |||
573 | ttm_tt_free_alloced_pages(ttm); | 578 | ttm_tt_free_alloced_pages(ttm); |
574 | ttm->swap_storage = swap_storage; | 579 | ttm->swap_storage = swap_storage; |
575 | ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; | 580 | ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED; |
576 | if (persistant_swap_storage) | 581 | if (persistent_swap_storage) |
577 | ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP; | 582 | ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP; |
578 | 583 | ||
579 | return 0; | 584 | return 0; |
580 | out_err: | 585 | out_err: |
581 | if (!persistant_swap_storage) | 586 | if (!persistent_swap_storage) |
582 | fput(swap_storage); | 587 | fput(swap_storage); |
583 | 588 | ||
584 | return ret; | 589 | return ret; |
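Note: the persistant -> persistent rename touches the parameter, the local plumbing and TTM_PAGE_FLAG_PERSISTENT_SWAP but not the behaviour: with a NULL argument TTM creates, and later fput()s, an anonymous shmem file, while a caller-supplied file survives swapin and is flagged as persistent. A small usage sketch (illustrative only):

	static int example_swap_out(struct ttm_tt *ttm, struct file *persistent_file)
	{
		if (!persistent_file)
			return ttm_tt_swapout(ttm, NULL);	/* anonymous shmem backing */

		/* Caller-owned backing: TTM sets TTM_PAGE_FLAG_PERSISTENT_SWAP and
		 * leaves the file's lifetime to the caller.
		 */
		return ttm_tt_swapout(ttm, persistent_file);
	}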