Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
 -rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 781
 1 file changed, 400 insertions(+), 381 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d464..0e3754a3a303 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, thus to
+ * make sure things don't go berserk you have to access this pointer
+ * while holding the global lru lock, and make sure that any time you
+ * free a node you reset the pointer to NULL.
+ */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
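
The note added above describes a locking convention rather than anything the API enforces. A minimal sketch of a free site that honors it (the helper name is invented; the body mirrors the pattern this patch repeats wherever a node is released):

/* Hypothetical helper, not part of this patch: free a bo's mm_node
 * following the convention from the note above. */
static void ttm_bo_put_node(struct ttm_bo_global *glob,
			    struct ttm_mem_reg *mem)
{
	spin_lock(&glob->lru_lock);	/* bo pointer only valid under lru_lock */
	if (mem->mm_node) {
		mem->mm_node->private = NULL;	/* reset before freeing */
		drm_mm_put_block(mem->mm_node);
		mem->mm_node = NULL;
	}
	spin_unlock(&glob->lru_lock);
}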
@@ -51,6 +59,59 @@ static struct attribute ttm_bo_count = {
 	.mode = S_IRUGO
 };
 
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
+}
+
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+
+	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
+	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
+	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
+	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
+	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
+	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
+	printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
+	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
+		man->available_caching);
+	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
+		man->default_caching);
+	if (mem_type != TTM_PL_SYSTEM) {
+		spin_lock(&bdev->glob->lru_lock);
+		drm_mm_debug_table(&man->manager, TTM_PFX);
+		spin_unlock(&bdev->glob->lru_lock);
+	}
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+					struct ttm_placement *placement)
+{
+	int i, ret, mem_type;
+
+	printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
+		bo, bo->mem.num_pages, bo->mem.size >> 10,
+		bo->mem.size >> 20);
+	for (i = 0; i < placement->num_placement; i++) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return;
+		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
+			i, placement->placement[i], mem_type);
+		ttm_mem_type_debug(bo->bdev, mem_type);
+	}
+}
+
 static ssize_t ttm_bo_global_show(struct kobject *kobj,
 				  struct attribute *attr,
 				  char *buffer)
@@ -67,7 +128,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
 	NULL
 };
 
-static struct sysfs_ops ttm_bo_global_ops = {
+static const struct sysfs_ops ttm_bo_global_ops = {
 	.show = &ttm_bo_global_show
 };
 
@@ -117,12 +178,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 		ret = wait_event_interruptible(bo->event_queue,
 					       atomic_read(&bo->reserved) == 0);
 		if (unlikely(ret != 0))
-			return -ERESTART;
+			return ret;
 	} else {
 		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 /*
  * Call bo->mutex locked.
  */
-
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 					page_flags | TTM_PAGE_FLAG_USER,
 					glob->dummy_read_page);
-		if (unlikely(bo->ttm == NULL))
+		if (unlikely(bo->ttm == NULL)) {
 			ret = -ENOMEM;
 			break;
+		}
 
 		ret = ttm_tt_set_user(bo->ttm, current,
 				      bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	}
 
 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
-		struct ttm_mem_reg *old_mem = &bo->mem;
-		uint32_t save_flags = old_mem->placement;
-
-		*old_mem = *mem;
+		bo->mem = *mem;
 		mem->mm_node = NULL;
-		ttm_flag_masked(&save_flags, mem->placement,
-				TTM_PL_MASK_MEMTYPE);
 		goto moved;
 	}
 
@@ -370,7 +426,8 @@ moved:
 			bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
 		spin_unlock(&bo->lock);
-	}
+	} else
+		bo->offset = 0;
 
 	return 0;
 
@@ -408,6 +465,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 	spin_unlock(&bo->lock);
 
 	spin_lock(&glob->lru_lock);
+	put_count = ttm_bo_del_from_lru(bo);
+
 	ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
 	BUG_ON(ret);
 	if (bo->ttm)
@@ -415,19 +474,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 
 	if (!list_empty(&bo->ddestroy)) {
 		list_del_init(&bo->ddestroy);
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		++put_count;
 	}
 	if (bo->mem.mm_node) {
+		bo->mem.mm_node->private = NULL;
 		drm_mm_put_block(bo->mem.mm_node);
 		bo->mem.mm_node = NULL;
 	}
-	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
 	atomic_set(&bo->reserved, 0);
 
 	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_release_list);
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 	return 0;
 }
@@ -465,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry, *nentry;
-	struct list_head *list, *next;
-	int ret;
+	struct ttm_buffer_object *entry = NULL;
+	int ret = 0;
 
 	spin_lock(&glob->lru_lock);
-	list_for_each_safe(list, next, &bdev->ddestroy) {
-		entry = list_entry(list, struct ttm_buffer_object, ddestroy);
-		nentry = NULL;
+	if (list_empty(&bdev->ddestroy))
+		goto out_unlock;
 
-		/*
-		 * Protect the next list entry from destruction while we
-		 * unlock the lru_lock.
-		 */
+	entry = list_first_entry(&bdev->ddestroy,
+		struct ttm_buffer_object, ddestroy);
+	kref_get(&entry->list_kref);
 
-		if (next != &bdev->ddestroy) {
-			nentry = list_entry(next, struct ttm_buffer_object,
-					    ddestroy);
+	for (;;) {
+		struct ttm_buffer_object *nentry = NULL;
+
+		if (entry->ddestroy.next != &bdev->ddestroy) {
+			nentry = list_first_entry(&entry->ddestroy,
+				struct ttm_buffer_object, ddestroy);
 			kref_get(&nentry->list_kref);
 		}
-		kref_get(&entry->list_kref);
 
 		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_cleanup_refs(entry, remove_all);
 		kref_put(&entry->list_kref, ttm_bo_release_list);
+		entry = nentry;
+
+		if (ret || !entry)
+			goto out;
 
 		spin_lock(&glob->lru_lock);
-		if (nentry) {
-			bool next_onlist = !list_empty(next);
-			spin_unlock(&glob->lru_lock);
-			kref_put(&nentry->list_kref, ttm_bo_release_list);
-			spin_lock(&glob->lru_lock);
-			/*
-			 * Someone might have raced us and removed the
-			 * next entry from the list. We don't bother restarting
-			 * list traversal.
-			 */
-
-			if (!next_onlist)
-				break;
-		}
-		if (ret)
+		if (list_empty(&entry->ddestroy))
 			break;
 	}
-	ret = !list_empty(&bdev->ddestroy);
-	spin_unlock(&glob->lru_lock);
 
+out_unlock:
+	spin_unlock(&glob->lru_lock);
+out:
+	if (entry)
+		kref_put(&entry->list_kref, ttm_bo_release_list);
 	return ret;
 }
 
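
The core of the rewritten ttm_bo_delayed_delete() is the "pin the next entry before dropping the lock" idiom: a kref is taken on the successor while lru_lock is still held, so the successor can neither be freed nor torn off the list while the current entry is cleaned up unlocked. A self-contained user-space analogue of the idiom (all names invented for the demo; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int refcount;		/* protected by list_lock */
	struct node *next;
	int payload;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void node_get(struct node *n) { n->refcount++; }

static void node_put(struct node *n)
{
	if (--n->refcount == 0)
		free(n);
}

/* Process every node without holding the lock across the slow work. */
static void walk_and_process(void)
{
	struct node *cur, *next;

	pthread_mutex_lock(&list_lock);
	cur = head;
	if (cur)
		node_get(cur);
	while (cur) {
		next = cur->next;
		if (next)
			node_get(next);		/* pin the successor first */
		pthread_mutex_unlock(&list_lock);

		printf("processing %d\n", cur->payload);	/* "slow" work */

		pthread_mutex_lock(&list_lock);
		node_put(cur);		/* successor is still pinned */
		cur = next;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	for (int i = 3; i > 0; i--) {
		struct node *n = malloc(sizeof(*n));
		n->refcount = 1;	/* the list's own reference */
		n->payload = i;
		n->next = head;
		head = n;
	}
	walk_and_process();
	while (head) {			/* teardown: drop the list's refs */
		struct node *n = head;
		head = n->next;
		node_put(n);
	}
	return 0;
}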
@@ -554,24 +605,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-			bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+			bool no_wait)
 {
-	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
-	uint32_t proposed_placement;
-
-	if (bo->mem.mem_type != mem_type)
-		goto out;
+	struct ttm_placement placement;
+	int ret = 0;
 
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
-		if (ret != -ERESTART) {
+		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
 			       "Failed to expire sync object before "
 			       "buffer eviction.\n");
@@ -584,116 +632,165 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	proposed_placement = bdev->driver->evict_flags(bo);
-
-	ret = ttm_bo_mem_space(bo, proposed_placement,
-			       &evict_mem, interruptible, no_wait);
-	if (unlikely(ret != 0 && ret != -ERESTART))
-		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
-				       &evict_mem, interruptible, no_wait);
-
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 0;
+	placement.num_busy_placement = 0;
+	bdev->driver->evict_flags(bo, &placement);
+	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+			       no_wait);
 	if (ret) {
-		if (ret != -ERESTART)
+		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
 			       "Failed to find memory space for "
 			       "buffer 0x%p eviction.\n", bo);
+			ttm_bo_mem_space_debug(bo, &placement);
+		}
 		goto out;
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
 				     no_wait);
 	if (ret) {
-		if (ret != -ERESTART)
+		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+		spin_lock(&glob->lru_lock);
+		if (evict_mem.mm_node) {
+			evict_mem.mm_node->private = NULL;
+			drm_mm_put_block(evict_mem.mm_node);
+			evict_mem.mm_node = NULL;
+		}
+		spin_unlock(&glob->lru_lock);
 		goto out;
 	}
-
-	spin_lock(&glob->lru_lock);
-	if (evict_mem.mm_node) {
-		drm_mm_put_block(evict_mem.mm_node);
-		evict_mem.mm_node = NULL;
-	}
-	spin_unlock(&glob->lru_lock);
 	bo->evicted = true;
 out:
 	return ret;
 }
 
-/**
- * Repeatedly evict memory from the LRU for @mem_type until we create enough
- * space, or we've evicted everything and there isn't enough space.
- */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
-				  struct ttm_mem_reg *mem,
-				  uint32_t mem_type,
-				  bool interruptible, bool no_wait)
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+				uint32_t mem_type,
+				bool interruptible, bool no_wait)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct drm_mm_node *node;
-	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct list_head *lru;
-	unsigned long num_pages = mem->num_pages;
-	int put_count = 0;
-	int ret;
-
-retry_pre_get:
-	ret = drm_mm_pre_get(&man->manager);
-	if (unlikely(ret != 0))
-		return ret;
+	struct ttm_buffer_object *bo;
+	int ret, put_count = 0;
 
+retry:
 	spin_lock(&glob->lru_lock);
-	do {
-		node = drm_mm_search_free(&man->manager, num_pages,
-					  mem->page_alignment, 1);
-		if (node)
-			break;
+	if (list_empty(&man->lru)) {
+		spin_unlock(&glob->lru_lock);
+		return -EBUSY;
+	}
 
-		lru = &man->lru;
-		if (list_empty(lru))
-			break;
+	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+	kref_get(&bo->list_kref);
 
-		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
+	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
 
-		ret =
-		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
-					  false, 0);
+	if (unlikely(ret == -EBUSY)) {
+		spin_unlock(&glob->lru_lock);
+		if (likely(!no_wait))
+			ret = ttm_bo_wait_unreserved(bo, interruptible);
 
-		if (likely(ret == 0))
-			put_count = ttm_bo_del_from_lru(entry);
+		kref_put(&bo->list_kref, ttm_bo_release_list);
 
-		spin_unlock(&glob->lru_lock);
+		/**
+		 * We *need* to retry after releasing the lru lock.
+		 */
 
 		if (unlikely(ret != 0))
 			return ret;
+		goto retry;
+	}
 
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
+	put_count = ttm_bo_del_from_lru(bo);
+	spin_unlock(&glob->lru_lock);
 
-		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
+	BUG_ON(ret != 0);
 
-		ttm_bo_unreserve(entry);
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-		kref_put(&entry->list_kref, ttm_bo_release_list);
-		if (ret)
+	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ttm_bo_unreserve(bo);
+
+	kref_put(&bo->list_kref, ttm_bo_release_list);
+	return ret;
+}
+
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+				struct ttm_mem_type_manager *man,
+				struct ttm_placement *placement,
+				struct ttm_mem_reg *mem,
+				struct drm_mm_node **node)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	*node = NULL;
+	do {
+		ret = drm_mm_pre_get(&man->manager);
+		if (unlikely(ret))
 			return ret;
 
 		spin_lock(&glob->lru_lock);
-	} while (1);
-
-	if (!node) {
+		*node = drm_mm_search_free_in_range(&man->manager,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(*node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+							mem->page_alignment,
+							placement->fpfn,
+							lpfn);
 		spin_unlock(&glob->lru_lock);
-		return -ENOMEM;
-	}
+	} while (*node == NULL);
+	return 0;
+}
 
-	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
-	if (unlikely(!node)) {
-		spin_unlock(&glob->lru_lock);
-		goto retry_pre_get;
-	}
-
-	spin_unlock(&glob->lru_lock);
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+					uint32_t mem_type,
+					struct ttm_placement *placement,
+					struct ttm_mem_reg *mem,
+					bool interruptible, bool no_wait)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct drm_mm_node *node;
+	int ret;
+
+	do {
+		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		if (unlikely(ret != 0))
+			return ret;
+		if (node)
+			break;
+		spin_lock(&glob->lru_lock);
+		if (list_empty(&man->lru)) {
+			spin_unlock(&glob->lru_lock);
+			break;
+		}
+		spin_unlock(&glob->lru_lock);
+		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+						no_wait);
+		if (unlikely(ret != 0))
+			return ret;
+	} while (1);
+	if (node == NULL)
+		return -ENOMEM;
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
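
Why ttm_bo_man_get_node() loops: drm_mm_pre_get() preallocates node memory while no spinlock is held, so the later atomic allocation under lru_lock never has to sleep; but a concurrent allocator can drain the preallocated nodes between the two steps, in which case the atomic call fails and the sequence must be retried. The idiom in isolation (a sketch using the same drm_mm calls as the patch; the wrapper name is invented):

/* Sketch only: two-step drm_mm allocation under a spinlock. */
static struct drm_mm_node *alloc_range_sketch(struct ttm_bo_global *glob,
					      struct ttm_mem_type_manager *man,
					      unsigned long num_pages,
					      unsigned long alignment,
					      unsigned long fpfn,
					      unsigned long lpfn)
{
	struct drm_mm_node *hole, *node;

	do {
		if (drm_mm_pre_get(&man->manager))	/* may sleep, no lock */
			return NULL;
		spin_lock(&glob->lru_lock);
		hole = drm_mm_search_free_in_range(&man->manager, num_pages,
						   alignment, fpfn, lpfn, 1);
		if (!hole) {			/* nothing free: caller evicts */
			spin_unlock(&glob->lru_lock);
			return NULL;
		}
		node = drm_mm_get_block_atomic_range(hole, num_pages,
						     alignment, fpfn, lpfn);
		spin_unlock(&glob->lru_lock);
	} while (!node);	/* raced out of preallocated nodes: retry */

	return node;
}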
@@ -724,7 +821,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 	return result;
 }
 
-
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 bool disallow_fixed,
 				 uint32_t mem_type,
@@ -757,66 +853,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-		     uint32_t proposed_placement,
+			struct ttm_placement *placement,
 		     struct ttm_mem_reg *mem,
 		     bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
-
-	uint32_t num_prios = bdev->driver->num_mem_type_prio;
-	const uint32_t *prios = bdev->driver->mem_type_prio;
-	uint32_t i;
 	uint32_t mem_type = TTM_PL_SYSTEM;
 	uint32_t cur_flags = 0;
 	bool type_found = false;
 	bool type_ok = false;
-	bool has_eagain = false;
+	bool has_erestartsys = false;
 	struct drm_mm_node *node = NULL;
-	int ret;
+	int i, ret;
 
 	mem->mm_node = NULL;
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i < placement->num_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
 
 		type_ok = ttm_bo_mt_compatible(man,
 					       bo->type == ttm_bo_type_user,
-					       mem_type, proposed_placement,
-					       &cur_flags);
+					       mem_type,
+					       placement->placement[i],
+					       &cur_flags);
 
 		if (!type_ok)
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Use the access and other non-mapping-related flag bits from
+		 * the memory placement flags to the current flags
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			do {
-				ret = drm_mm_pre_get(&man->manager);
-				if (unlikely(ret))
-					return ret;
-
-				spin_lock(&glob->lru_lock);
-				node = drm_mm_search_free(&man->manager,
-							  mem->num_pages,
-							  mem->page_alignment,
-							  1);
-				if (unlikely(!node)) {
-					spin_unlock(&glob->lru_lock);
-					break;
-				}
-				node = drm_mm_get_block_atomic(node,
-							       mem->num_pages,
-							       mem->
-							       page_alignment);
-				spin_unlock(&glob->lru_lock);
-			} while (!node);
+			ret = ttm_bo_man_get_node(bo, man, placement, mem,
+							&node);
+			if (unlikely(ret))
+				return ret;
 		}
 		if (node)
 			break;
@@ -826,67 +911,74 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		mem->mm_node = node;
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
+		if (node)
+			node->private = bo;
 		return 0;
 	}
 
 	if (!type_found)
 		return -EINVAL;
 
-	num_prios = bdev->driver->num_mem_busy_prio;
-	prios = bdev->driver->mem_busy_prio;
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i < placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
-
 		if (!man->has_type)
 			continue;
-
 		if (!ttm_bo_mt_compatible(man,
 					  bo->type == ttm_bo_type_user,
 					  mem_type,
-					  proposed_placement, &cur_flags))
+					  placement->busy_placement[i],
+					  &cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Use the access and other non-mapping-related flag bits from
+		 * the memory placement flags to the current flags
+		 */
+		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
-		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
-					     interruptible, no_wait);
-
-		if (ret == 0 && mem->mm_node) {
+		if (mem_type == TTM_PL_SYSTEM) {
+			mem->mem_type = mem_type;
 			mem->placement = cur_flags;
+			mem->mm_node = NULL;
 			return 0;
 		}
 
-		if (ret == -ERESTART)
-			has_eagain = true;
+		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+						interruptible, no_wait);
+		if (ret == 0 && mem->mm_node) {
+			mem->placement = cur_flags;
+			mem->mm_node->private = bo;
+			return 0;
+		}
+		if (ret == -ERESTARTSYS)
+			has_erestartsys = true;
 	}
-
-	ret = (has_eagain) ? -ERESTART : -ENOMEM;
+	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 {
-	int ret = 0;
-
 	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
 		return -EBUSY;
 
-	ret = wait_event_interruptible(bo->event_queue,
-				       atomic_read(&bo->cpu_writers) == 0);
-
-	if (ret == -ERESTARTSYS)
-		ret = -ERESTART;
-
-	return ret;
+	return wait_event_interruptible(bo->event_queue,
+					atomic_read(&bo->cpu_writers) == 0);
 }
+EXPORT_SYMBOL(ttm_bo_wait_cpu);
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-		       uint32_t proposed_placement,
+			struct ttm_placement *placement,
 		       bool interruptible, bool no_wait)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
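
With this rework, drivers no longer hand TTM a single flag word; they pass a struct ttm_placement carrying an ordered list of preferred placements plus a "busy" fallback list that ttm_bo_mem_space() walks in order. A sketch of what a driver-side caller might look like (the function name and flag choices are illustrative, not taken from this patch):

/* Sketch: move a buffer into VRAM, falling back to cached system
 * memory under pressure. */
static uint32_t placements[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,		/* preferred */
};
static uint32_t busy_placements[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,	/* fallback */
};

static int driver_move_to_vram(struct ttm_buffer_object *bo)
{
	struct ttm_placement placement = {
		.fpfn = 0,
		.lpfn = 0,		/* 0 == no page-range restriction */
		.num_placement = ARRAY_SIZE(placements),
		.placement = placements,
		.num_busy_placement = ARRAY_SIZE(busy_placements),
		.busy_placement = busy_placements,
	};

	return ttm_bo_validate(bo, &placement, false, false);
}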
@@ -899,147 +991,138 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
-
 	if (ret)
 		return ret;
-
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-
 	/*
 	 * Determine where to move the buffer.
 	 */
-
-	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
-			       interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
 	if (ret)
 		goto out_unlock;
-
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
+		mem.mm_node->private = NULL;
 		drm_mm_put_block(mem.mm_node);
 		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
 
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
-		return 0;
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
-		return 0;
-
-	return 1;
+	int i;
+	struct drm_mm_node *node = mem->mm_node;
+
+	if (node && placement->lpfn != 0 &&
+	    (node->start < placement->fpfn ||
+	     node->start + node->size > placement->lpfn))
+		return -1;
+
+	for (i = 0; i < placement->num_placement; i++) {
+		if ((placement->placement[i] & mem->placement &
+			TTM_PL_MASK_CACHING) &&
+			(placement->placement[i] & mem->placement &
+			TTM_PL_MASK_MEM))
+			return i;
+	}
+	return -1;
 }
 
-int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-			       uint32_t proposed_placement,
-			       bool interruptible, bool no_wait)
+int ttm_bo_validate(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement,
+			bool interruptible, bool no_wait)
 {
 	int ret;
 
 	BUG_ON(!atomic_read(&bo->reserved));
-	bo->proposed_placement = proposed_placement;
-
-	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
-		  (unsigned long)proposed_placement,
-		  (unsigned long)bo->mem.placement);
-
+	/* Check that range is valid */
+	if (placement->lpfn || placement->fpfn)
+		if (placement->fpfn > placement->lpfn ||
+			(placement->lpfn - placement->fpfn) < bo->num_pages)
+			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
-
-	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
-		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
-					 interruptible, no_wait);
-		if (ret) {
-			if (ret != -ERESTART)
-				printk(KERN_ERR TTM_PFX
-				       "Failed moving buffer. "
-				       "Proposed placement 0x%08x\n",
-				       bo->proposed_placement);
-			if (ret == -ENOMEM)
-				printk(KERN_ERR TTM_PFX
-				       "Out of aperture space or "
-				       "DRM memory quota.\n");
+	ret = ttm_bo_mem_compat(placement, &bo->mem);
+	if (ret < 0) {
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		if (ret)
 			return ret;
-		}
+	} else {
+		/*
+		 * Use the access and other non-mapping-related flag bits from
+		 * the compatible memory placement flags to the active flags
+		 */
+		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+				~TTM_PL_MASK_MEMTYPE);
 	}
-
 	/*
 	 * We might need to add a TTM.
 	 */
-
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		ret = ttm_bo_add_ttm(bo, true);
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Validation has succeeded, move the access and other
-	 * non-mapping-related flag bits from the proposed flags to
-	 * the active flags
-	 */
-
-	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
-			~TTM_PL_MASK_MEMTYPE);
-
 	return 0;
 }
-EXPORT_SYMBOL(ttm_buffer_object_validate);
+EXPORT_SYMBOL(ttm_bo_validate);
 
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
-		       uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t new_mask = set_flags | clr_flags;
-
-	if ((bo->type == ttm_bo_type_user) &&
-	    (clr_flags & TTM_PL_FLAG_CACHED)) {
-		printk(KERN_ERR TTM_PFX
-		       "User buffers require cache-coherent memory.\n");
-		return -EINVAL;
-	}
-
-	if (!capable(CAP_SYS_ADMIN)) {
-		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
-			printk(KERN_ERR TTM_PFX "Need to be root to modify"
-			       " NO_EVICT status.\n");
+	int i;
+
+	if (placement->fpfn || placement->lpfn) {
+		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+			printk(KERN_ERR TTM_PFX "Page number range too small: "
+				"need %lu pages, range is [%u, %u]\n",
+				bo->mem.num_pages, placement->fpfn,
+				placement->lpfn);
 			return -EINVAL;
 		}
-
-		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
-		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-			printk(KERN_ERR TTM_PFX
-			       "Incompatible memory specification"
-			       " for NO_EVICT buffer.\n");
-			return -EINVAL;
+	}
+	for (i = 0; i < placement->num_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+					"modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
+		}
+	}
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+					"modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
 		}
 	}
 	return 0;
 }
 
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
-			   struct ttm_buffer_object *bo,
-			   unsigned long size,
-			   enum ttm_bo_type type,
-			   uint32_t flags,
-			   uint32_t page_alignment,
-			   unsigned long buffer_start,
-			   bool interruptible,
-			   struct file *persistant_swap_storage,
-			   size_t acc_size,
-			   void (*destroy) (struct ttm_buffer_object *))
+int ttm_bo_init(struct ttm_bo_device *bdev,
+		struct ttm_buffer_object *bo,
+		unsigned long size,
+		enum ttm_bo_type type,
+		struct ttm_placement *placement,
+		uint32_t page_alignment,
+		unsigned long buffer_start,
+		bool interruptible,
+		struct file *persistant_swap_storage,
+		size_t acc_size,
+		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
@@ -1065,6 +1148,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
+	bo->mem.size = num_pages << PAGE_SHIFT;
 	bo->mem.mem_type = TTM_PL_SYSTEM;
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
@@ -1077,29 +1161,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	bo->acc_size = acc_size;
 	atomic_inc(&bo->glob->bo_count);
 
-	ret = ttm_bo_check_placement(bo, flags, 0ULL);
+	ret = ttm_bo_check_placement(bo, placement);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	/*
-	 * If no caching attributes are set, accept any form of caching.
-	 */
-
-	if ((flags & TTM_PL_MASK_CACHING) == 0)
-		flags |= TTM_PL_MASK_CACHING;
-
-	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-
 	if (bo->type == ttm_bo_type_device) {
 		ret = ttm_bo_setup_vm(bo);
 		if (ret)
 			goto out_err;
 	}
 
-	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 	if (ret)
 		goto out_err;
 
@@ -1112,7 +1188,7 @@ out_err:
 
 	return ret;
 }
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
 
 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 				 unsigned long num_pages)
@@ -1123,19 +1199,19 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 	return glob->ttm_bo_size + 2 * page_array_size;
 }
 
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
-			     unsigned long size,
-			     enum ttm_bo_type type,
-			     uint32_t flags,
-			     uint32_t page_alignment,
-			     unsigned long buffer_start,
-			     bool interruptible,
-			     struct file *persistant_swap_storage,
-			     struct ttm_buffer_object **p_bo)
+int ttm_bo_create(struct ttm_bo_device *bdev,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			unsigned long buffer_start,
+			bool interruptible,
+			struct file *persistant_swap_storage,
+			struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo;
-	int ret;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	int ret;
 
 	size_t acc_size =
 	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
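
For reference, a call to the renamed ttm_bo_create() might look like this (a sketch; `placement` is assumed to be filled in as in the earlier placement sketch, and `bdev` comes from the driver):

struct ttm_buffer_object *bo;
int ret;

ret = ttm_bo_create(bdev, 1024 * 1024 /* bytes */, ttm_bo_type_device,
		    &placement, 0 /* page_alignment */, 0 /* buffer_start */,
		    false /* interruptible */, NULL /* swap storage */, &bo);
if (ret)
	return ret;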
@@ -1150,76 +1226,41 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 		return -ENOMEM;
 	}
 
-	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
-				     page_alignment, buffer_start,
-				     interruptible,
-				     persistant_swap_storage, acc_size, NULL);
+	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+				buffer_start, interruptible,
+				persistant_swap_storage, acc_size, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
 	return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-			     uint32_t mem_type, bool allow_errors)
-{
-	int ret;
-
-	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
-
-	if (ret && allow_errors)
-		goto out;
-
-	if (bo->mem.mem_type == mem_type)
-		ret = ttm_bo_evict(bo, mem_type, false, false);
-
-	if (ret) {
-		if (allow_errors) {
-			goto out;
-		} else {
-			ret = 0;
-			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-		}
-	}
-
-out:
-	return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   struct list_head *head,
-				   unsigned mem_type, bool allow_errors)
+				unsigned mem_type, bool allow_errors)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry;
 	int ret;
-	int put_count;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-
-	while (!list_empty(head)) {
-		entry = list_first_entry(head, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-		put_count = ttm_bo_del_from_lru(entry);
+	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-		BUG_ON(ret);
-		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-		ttm_bo_unreserve(entry);
-		kref_put(&entry->list_kref, ttm_bo_release_list);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				printk(KERN_ERR TTM_PFX
+					"Cleanup eviction failed\n");
+			}
+		}
 		spin_lock(&glob->lru_lock);
 	}
-
 	spin_unlock(&glob->lru_lock);
-
 	return 0;
 }
 
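
ttm_bo_force_list_clean() now funnels through the same ttm_mem_evict_first() path as runtime eviction, so ttm_bo_evict_mm() can drain an entire memory type. A sketch of a driver using it at suspend time (radeon's suspend path does something of this shape; the function name is invented):

/* Sketch: evict everything out of VRAM before suspend. */
static int driver_suspend_evict(struct ttm_bo_device *bdev)
{
	int ret;

	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
	if (ret)
		printk(KERN_ERR "failed to evict VRAM before suspend\n");
	return ret;
}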
@@ -1246,7 +1287,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+		ttm_bo_force_list_clean(bdev, mem_type, false);
 
 		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
@@ -1279,12 +1320,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_offset, unsigned long p_size)
+			unsigned long p_size)
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
@@ -1314,7 +1355,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			type);
 		return ret;
 	}
-	ret = drm_mm_init(&man->manager, p_offset, p_size);
+	ret = drm_mm_init(&man->manager, 0, p_size);
 	if (ret)
 		return ret;
 	}
@@ -1384,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
 
 	atomic_set(&glob->bo_count, 0);
 
-	kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
-	ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+	ret = kobject_init_and_add(
+		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
 	if (unlikely(ret != 0))
 		kobject_put(&glob->kobj);
 	return ret;
@@ -1463,7 +1504,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
@@ -1675,40 +1716,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
-void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
-{
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
-}
-
-int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
-			     bool no_wait)
-{
-	int ret;
-
-	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
-		if (no_wait)
-			return -EBUSY;
-		else if (interruptible) {
-			ret = wait_event_interruptible
-			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
-			if (unlikely(ret != 0))
-				return -ERESTART;
-		} else {
-			wait_event(bo->event_queue,
-				   atomic_read(&bo->reserved) == 0);
-		}
-	}
-	return 0;
-}
-
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
 	int ret = 0;
 
 	/*
-	 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
-	 * makes sure the lru lists are updated.
+	 * Using ttm_bo_reserve makes sure the lru lists are updated.
 	 */
 
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
@@ -1722,12 +1735,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ttm_bo_unreserve(bo);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
 	if (atomic_dec_and_test(&bo->cpu_writers))
 		wake_up_all(&bo->event_queue);
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
 
 /**
  * A buffer object shrink method that tries to swap out the first
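
With the two synccpu helpers now exported, a driver can bracket direct CPU writes like this (a minimal sketch):

int ret = ttm_bo_synccpu_write_grab(bo, false /* no_wait */);
if (ret)
	return ret;
/* ... CPU writes to the buffer's mapping go here ... */
ttm_bo_synccpu_write_release(bo);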
@@ -1808,6 +1823,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 	 * anyone tries to access a ttm page.
 	 */
 
+	if (bo->bdev->driver->swap_notify)
+		bo->bdev->driver->swap_notify(bo);
+
 	ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
 out:
 
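
The new, optional swap_notify hook lets a driver drop state tied to the pages before they are swapped out. A sketch of a driver implementation that releases a cached kernel mapping (the driver_bo struct and its field names are invented for illustration):

/* Sketch: driver swap_notify hook dropping a cached kmap. */
static void driver_bo_swap_notify(struct ttm_buffer_object *bo)
{
	struct driver_bo *dbo = container_of(bo, struct driver_bo, tbo);

	if (dbo->kptr) {
		ttm_bo_kunmap(&dbo->kmap);
		dbo->kptr = NULL;
	}
}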
@@ -1828,3 +1846,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
 		;
 }
+EXPORT_SYMBOL(ttm_bo_swapout_all);