author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750
tree       644b88f8a71896307d71438e9b3af49126ffb22b  /drivers/gpu/drm/ttm
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5

Merge branch 'wip-2.6.34' into old-private-master  (archived-private-master)
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/Makefile               3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c      1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c             781
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c         13
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c            9
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c   117
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c           310
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c          37
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c         452
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c              78
10 files changed, 1350 insertions(+), 451 deletions(-)
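
Most of the churn in this merge comes from replacing TTM's single proposed-placement flag word with a struct ttm_placement that carries an optional page-frame range plus ordered lists of preferred and busy (eviction-backed) placements. The sketch below shows that structure as the new ttm_bo.c code consumes it; the field set is inferred purely from the hunks that follow, and the authoritative definition lives in the TTM headers, which are not part of this diff:

    /* Inferred sketch, not copied from a header: fields as consumed by
     * ttm_bo_mem_space(), ttm_bo_man_get_node() and ttm_bo_check_placement()
     * in the new code below. */
    struct ttm_placement {
            unsigned        fpfn;                   /* first acceptable page frame (0 = unrestricted) */
            unsigned        lpfn;                   /* last acceptable page frame (0 = unrestricted) */
            unsigned        num_placement;          /* entries in placement[] */
            const uint32_t  *placement;             /* preferred TTM_PL_FLAG_* masks, highest priority first */
            unsigned        num_busy_placement;     /* entries in busy_placement[] */
            const uint32_t  *busy_placement;        /* fallbacks tried with eviction (ttm_bo_mem_force_space) */
    };
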
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c2..1e138f5bae09 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
3 3
4ccflags-y := -Iinclude/drm 4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o
7 8
8obj-$(CONFIG_DRM_TTM) += ttm.o 9obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4648ed2f0143..4bf69c404491 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -35,6 +35,7 @@
35#include "ttm/ttm_placement.h" 35#include "ttm/ttm_placement.h"
36#include <linux/agp_backend.h> 36#include <linux/agp_backend.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/slab.h>
38#include <linux/io.h> 39#include <linux/io.h>
39#include <asm/agp.h> 40#include <asm/agp.h>
40 41
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d464..0e3754a3a303 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo own a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
30 38
31#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,59 @@ static struct attribute ttm_bo_count = {
51 .mode = S_IRUGO 59 .mode = S_IRUGO
52}; 60};
53 61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64 int i;
65
66 for (i = 0; i <= TTM_PL_PRIV5; i++)
67 if (flags & (1 << i)) {
68 *mem_type = i;
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
75{
76 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
77
78 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
79 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
80 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
81 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
82 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
83 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
84 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
85 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
86 man->available_caching);
87 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
88 man->default_caching);
89 if (mem_type != TTM_PL_SYSTEM) {
90 spin_lock(&bdev->glob->lru_lock);
91 drm_mm_debug_table(&man->manager, TTM_PFX);
92 spin_unlock(&bdev->glob->lru_lock);
93 }
94}
95
96static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
97 struct ttm_placement *placement)
98{
99 int i, ret, mem_type;
100
101 printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) {
105 ret = ttm_mem_type_from_flags(placement->placement[i],
106 &mem_type);
107 if (ret)
108 return;
109 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
110 i, placement->placement[i], mem_type);
111 ttm_mem_type_debug(bo->bdev, mem_type);
112 }
113}
114
54static ssize_t ttm_bo_global_show(struct kobject *kobj, 115static ssize_t ttm_bo_global_show(struct kobject *kobj,
55 struct attribute *attr, 116 struct attribute *attr,
56 char *buffer) 117 char *buffer)
@@ -67,7 +128,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
67 NULL 128 NULL
68}; 129};
69 130
70static struct sysfs_ops ttm_bo_global_ops = { 131static const struct sysfs_ops ttm_bo_global_ops = {
71 .show = &ttm_bo_global_show 132 .show = &ttm_bo_global_show
72}; 133};
73 134
@@ -117,12 +178,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
117 ret = wait_event_interruptible(bo->event_queue, 178 ret = wait_event_interruptible(bo->event_queue,
118 atomic_read(&bo->reserved) == 0); 179 atomic_read(&bo->reserved) == 0);
119 if (unlikely(ret != 0)) 180 if (unlikely(ret != 0))
120 return -ERESTART; 181 return ret;
121 } else { 182 } else {
122 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); 183 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
123 } 184 }
124 return 0; 185 return 0;
125} 186}
187EXPORT_SYMBOL(ttm_bo_wait_unreserved);
126 188
127static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) 189static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
128{ 190{
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
247/* 309/*
248 * Call bo->mutex locked. 310 * Call bo->mutex locked.
249 */ 311 */
250
251static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) 312static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
252{ 313{
253 struct ttm_bo_device *bdev = bo->bdev; 314 struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
275 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 336 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
276 page_flags | TTM_PAGE_FLAG_USER, 337 page_flags | TTM_PAGE_FLAG_USER,
277 glob->dummy_read_page); 338 glob->dummy_read_page);
278 if (unlikely(bo->ttm == NULL)) 339 if (unlikely(bo->ttm == NULL)) {
279 ret = -ENOMEM; 340 ret = -ENOMEM;
280 break; 341 break;
342 }
281 343
282 ret = ttm_tt_set_user(bo->ttm, current, 344 ret = ttm_tt_set_user(bo->ttm, current,
283 bo->buffer_start, bo->num_pages); 345 bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
328 } 390 }
329 391
330 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 392 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
331 393 bo->mem = *mem;
332 struct ttm_mem_reg *old_mem = &bo->mem;
333 uint32_t save_flags = old_mem->placement;
334
335 *old_mem = *mem;
336 mem->mm_node = NULL; 394 mem->mm_node = NULL;
337 ttm_flag_masked(&save_flags, mem->placement,
338 TTM_PL_MASK_MEMTYPE);
339 goto moved; 395 goto moved;
340 } 396 }
341 397
@@ -370,7 +426,8 @@ moved:
370 bdev->man[bo->mem.mem_type].gpu_offset; 426 bdev->man[bo->mem.mem_type].gpu_offset;
371 bo->cur_placement = bo->mem.placement; 427 bo->cur_placement = bo->mem.placement;
372 spin_unlock(&bo->lock); 428 spin_unlock(&bo->lock);
373 } 429 } else
430 bo->offset = 0;
374 431
375 return 0; 432 return 0;
376 433
@@ -408,6 +465,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
408 spin_unlock(&bo->lock); 465 spin_unlock(&bo->lock);
409 466
410 spin_lock(&glob->lru_lock); 467 spin_lock(&glob->lru_lock);
468 put_count = ttm_bo_del_from_lru(bo);
469
411 ret = ttm_bo_reserve_locked(bo, false, false, false, 0); 470 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
412 BUG_ON(ret); 471 BUG_ON(ret);
413 if (bo->ttm) 472 if (bo->ttm)
@@ -415,19 +474,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
415 474
416 if (!list_empty(&bo->ddestroy)) { 475 if (!list_empty(&bo->ddestroy)) {
417 list_del_init(&bo->ddestroy); 476 list_del_init(&bo->ddestroy);
418 kref_put(&bo->list_kref, ttm_bo_ref_bug); 477 ++put_count;
419 } 478 }
420 if (bo->mem.mm_node) { 479 if (bo->mem.mm_node) {
480 bo->mem.mm_node->private = NULL;
421 drm_mm_put_block(bo->mem.mm_node); 481 drm_mm_put_block(bo->mem.mm_node);
422 bo->mem.mm_node = NULL; 482 bo->mem.mm_node = NULL;
423 } 483 }
424 put_count = ttm_bo_del_from_lru(bo);
425 spin_unlock(&glob->lru_lock); 484 spin_unlock(&glob->lru_lock);
426 485
427 atomic_set(&bo->reserved, 0); 486 atomic_set(&bo->reserved, 0);
428 487
429 while (put_count--) 488 while (put_count--)
430 kref_put(&bo->list_kref, ttm_bo_release_list); 489 kref_put(&bo->list_kref, ttm_bo_ref_bug);
431 490
432 return 0; 491 return 0;
433 } 492 }
@@ -465,52 +524,44 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
465static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) 524static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
466{ 525{
467 struct ttm_bo_global *glob = bdev->glob; 526 struct ttm_bo_global *glob = bdev->glob;
468 struct ttm_buffer_object *entry, *nentry; 527 struct ttm_buffer_object *entry = NULL;
469 struct list_head *list, *next; 528 int ret = 0;
470 int ret;
471 529
472 spin_lock(&glob->lru_lock); 530 spin_lock(&glob->lru_lock);
473 list_for_each_safe(list, next, &bdev->ddestroy) { 531 if (list_empty(&bdev->ddestroy))
474 entry = list_entry(list, struct ttm_buffer_object, ddestroy); 532 goto out_unlock;
475 nentry = NULL;
476 533
477 /* 534 entry = list_first_entry(&bdev->ddestroy,
478 * Protect the next list entry from destruction while we 535 struct ttm_buffer_object, ddestroy);
479 * unlock the lru_lock. 536 kref_get(&entry->list_kref);
480 */
481 537
482 if (next != &bdev->ddestroy) { 538 for (;;) {
483 nentry = list_entry(next, struct ttm_buffer_object, 539 struct ttm_buffer_object *nentry = NULL;
484 ddestroy); 540
541 if (entry->ddestroy.next != &bdev->ddestroy) {
542 nentry = list_first_entry(&entry->ddestroy,
543 struct ttm_buffer_object, ddestroy);
485 kref_get(&nentry->list_kref); 544 kref_get(&nentry->list_kref);
486 } 545 }
487 kref_get(&entry->list_kref);
488 546
489 spin_unlock(&glob->lru_lock); 547 spin_unlock(&glob->lru_lock);
490 ret = ttm_bo_cleanup_refs(entry, remove_all); 548 ret = ttm_bo_cleanup_refs(entry, remove_all);
491 kref_put(&entry->list_kref, ttm_bo_release_list); 549 kref_put(&entry->list_kref, ttm_bo_release_list);
550 entry = nentry;
551
552 if (ret || !entry)
553 goto out;
492 554
493 spin_lock(&glob->lru_lock); 555 spin_lock(&glob->lru_lock);
494 if (nentry) { 556 if (list_empty(&entry->ddestroy))
495 bool next_onlist = !list_empty(next);
496 spin_unlock(&glob->lru_lock);
497 kref_put(&nentry->list_kref, ttm_bo_release_list);
498 spin_lock(&glob->lru_lock);
499 /*
500 * Someone might have raced us and removed the
501 * next entry from the list. We don't bother restarting
502 * list traversal.
503 */
504
505 if (!next_onlist)
506 break;
507 }
508 if (ret)
509 break; 557 break;
510 } 558 }
511 ret = !list_empty(&bdev->ddestroy);
512 spin_unlock(&glob->lru_lock);
513 559
560out_unlock:
561 spin_unlock(&glob->lru_lock);
562out:
563 if (entry)
564 kref_put(&entry->list_kref, ttm_bo_release_list);
514 return ret; 565 return ret;
515} 566}
516 567
@@ -554,24 +605,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
554} 605}
555EXPORT_SYMBOL(ttm_bo_unref); 606EXPORT_SYMBOL(ttm_bo_unref);
556 607
557static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, 608static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
558 bool interruptible, bool no_wait) 609 bool no_wait)
559{ 610{
560 int ret = 0;
561 struct ttm_bo_device *bdev = bo->bdev; 611 struct ttm_bo_device *bdev = bo->bdev;
562 struct ttm_bo_global *glob = bo->glob; 612 struct ttm_bo_global *glob = bo->glob;
563 struct ttm_mem_reg evict_mem; 613 struct ttm_mem_reg evict_mem;
564 uint32_t proposed_placement; 614 struct ttm_placement placement;
565 615 int ret = 0;
566 if (bo->mem.mem_type != mem_type)
567 goto out;
568 616
569 spin_lock(&bo->lock); 617 spin_lock(&bo->lock);
570 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 618 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
571 spin_unlock(&bo->lock); 619 spin_unlock(&bo->lock);
572 620
573 if (unlikely(ret != 0)) { 621 if (unlikely(ret != 0)) {
574 if (ret != -ERESTART) { 622 if (ret != -ERESTARTSYS) {
575 printk(KERN_ERR TTM_PFX 623 printk(KERN_ERR TTM_PFX
576 "Failed to expire sync object before " 624 "Failed to expire sync object before "
577 "buffer eviction.\n"); 625 "buffer eviction.\n");
@@ -584,116 +632,165 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
584 evict_mem = bo->mem; 632 evict_mem = bo->mem;
585 evict_mem.mm_node = NULL; 633 evict_mem.mm_node = NULL;
586 634
587 proposed_placement = bdev->driver->evict_flags(bo); 635 placement.fpfn = 0;
588 636 placement.lpfn = 0;
589 ret = ttm_bo_mem_space(bo, proposed_placement, 637 placement.num_placement = 0;
590 &evict_mem, interruptible, no_wait); 638 placement.num_busy_placement = 0;
591 if (unlikely(ret != 0 && ret != -ERESTART)) 639 bdev->driver->evict_flags(bo, &placement);
592 ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, 640 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
593 &evict_mem, interruptible, no_wait); 641 no_wait);
594
595 if (ret) { 642 if (ret) {
596 if (ret != -ERESTART) 643 if (ret != -ERESTARTSYS) {
597 printk(KERN_ERR TTM_PFX 644 printk(KERN_ERR TTM_PFX
598 "Failed to find memory space for " 645 "Failed to find memory space for "
599 "buffer 0x%p eviction.\n", bo); 646 "buffer 0x%p eviction.\n", bo);
647 ttm_bo_mem_space_debug(bo, &placement);
648 }
600 goto out; 649 goto out;
601 } 650 }
602 651
603 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 652 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
604 no_wait); 653 no_wait);
605 if (ret) { 654 if (ret) {
606 if (ret != -ERESTART) 655 if (ret != -ERESTARTSYS)
607 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 656 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
657 spin_lock(&glob->lru_lock);
658 if (evict_mem.mm_node) {
659 evict_mem.mm_node->private = NULL;
660 drm_mm_put_block(evict_mem.mm_node);
661 evict_mem.mm_node = NULL;
662 }
663 spin_unlock(&glob->lru_lock);
608 goto out; 664 goto out;
609 } 665 }
610
611 spin_lock(&glob->lru_lock);
612 if (evict_mem.mm_node) {
613 drm_mm_put_block(evict_mem.mm_node);
614 evict_mem.mm_node = NULL;
615 }
616 spin_unlock(&glob->lru_lock);
617 bo->evicted = true; 666 bo->evicted = true;
618out: 667out:
619 return ret; 668 return ret;
620} 669}
621 670
622/** 671static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
623 * Repeatedly evict memory from the LRU for @mem_type until we create enough 672 uint32_t mem_type,
624 * space, or we've evicted everything and there isn't enough space. 673 bool interruptible, bool no_wait)
625 */
626static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
627 struct ttm_mem_reg *mem,
628 uint32_t mem_type,
629 bool interruptible, bool no_wait)
630{ 674{
631 struct ttm_bo_global *glob = bdev->glob; 675 struct ttm_bo_global *glob = bdev->glob;
632 struct drm_mm_node *node;
633 struct ttm_buffer_object *entry;
634 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 676 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
635 struct list_head *lru; 677 struct ttm_buffer_object *bo;
636 unsigned long num_pages = mem->num_pages; 678 int ret, put_count = 0;
637 int put_count = 0;
638 int ret;
639
640retry_pre_get:
641 ret = drm_mm_pre_get(&man->manager);
642 if (unlikely(ret != 0))
643 return ret;
644 679
680retry:
645 spin_lock(&glob->lru_lock); 681 spin_lock(&glob->lru_lock);
646 do { 682 if (list_empty(&man->lru)) {
647 node = drm_mm_search_free(&man->manager, num_pages, 683 spin_unlock(&glob->lru_lock);
648 mem->page_alignment, 1); 684 return -EBUSY;
649 if (node) 685 }
650 break;
651 686
652 lru = &man->lru; 687 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
653 if (list_empty(lru)) 688 kref_get(&bo->list_kref);
654 break;
655 689
656 entry = list_first_entry(lru, struct ttm_buffer_object, lru); 690 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
657 kref_get(&entry->list_kref);
658 691
659 ret = 692 if (unlikely(ret == -EBUSY)) {
660 ttm_bo_reserve_locked(entry, interruptible, no_wait, 693 spin_unlock(&glob->lru_lock);
661 false, 0); 694 if (likely(!no_wait))
695 ret = ttm_bo_wait_unreserved(bo, interruptible);
662 696
663 if (likely(ret == 0)) 697 kref_put(&bo->list_kref, ttm_bo_release_list);
664 put_count = ttm_bo_del_from_lru(entry);
665 698
666 spin_unlock(&glob->lru_lock); 699 /**
700 * We *need* to retry after releasing the lru lock.
701 */
667 702
668 if (unlikely(ret != 0)) 703 if (unlikely(ret != 0))
669 return ret; 704 return ret;
705 goto retry;
706 }
670 707
671 while (put_count--) 708 put_count = ttm_bo_del_from_lru(bo);
672 kref_put(&entry->list_kref, ttm_bo_ref_bug); 709 spin_unlock(&glob->lru_lock);
673 710
674 ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait); 711 BUG_ON(ret != 0);
675 712
676 ttm_bo_unreserve(entry); 713 while (put_count--)
714 kref_put(&bo->list_kref, ttm_bo_ref_bug);
677 715
678 kref_put(&entry->list_kref, ttm_bo_release_list); 716 ret = ttm_bo_evict(bo, interruptible, no_wait);
679 if (ret) 717 ttm_bo_unreserve(bo);
718
719 kref_put(&bo->list_kref, ttm_bo_release_list);
720 return ret;
721}
722
723static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
724 struct ttm_mem_type_manager *man,
725 struct ttm_placement *placement,
726 struct ttm_mem_reg *mem,
727 struct drm_mm_node **node)
728{
729 struct ttm_bo_global *glob = bo->glob;
730 unsigned long lpfn;
731 int ret;
732
733 lpfn = placement->lpfn;
734 if (!lpfn)
735 lpfn = man->size;
736 *node = NULL;
737 do {
738 ret = drm_mm_pre_get(&man->manager);
739 if (unlikely(ret))
680 return ret; 740 return ret;
681 741
682 spin_lock(&glob->lru_lock); 742 spin_lock(&glob->lru_lock);
683 } while (1); 743 *node = drm_mm_search_free_in_range(&man->manager,
684 744 mem->num_pages, mem->page_alignment,
685 if (!node) { 745 placement->fpfn, lpfn, 1);
746 if (unlikely(*node == NULL)) {
747 spin_unlock(&glob->lru_lock);
748 return 0;
749 }
750 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
751 mem->page_alignment,
752 placement->fpfn,
753 lpfn);
686 spin_unlock(&glob->lru_lock); 754 spin_unlock(&glob->lru_lock);
687 return -ENOMEM; 755 } while (*node == NULL);
688 } 756 return 0;
757}
689 758
690 node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment); 759/**
691 if (unlikely(!node)) { 760 * Repeatedly evict memory from the LRU for @mem_type until we create enough
692 spin_unlock(&glob->lru_lock); 761 * space, or we've evicted everything and there isn't enough space.
693 goto retry_pre_get; 762 */
694 } 763static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
764 uint32_t mem_type,
765 struct ttm_placement *placement,
766 struct ttm_mem_reg *mem,
767 bool interruptible, bool no_wait)
768{
769 struct ttm_bo_device *bdev = bo->bdev;
770 struct ttm_bo_global *glob = bdev->glob;
771 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
772 struct drm_mm_node *node;
773 int ret;
695 774
696 spin_unlock(&glob->lru_lock); 775 do {
776 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
777 if (unlikely(ret != 0))
778 return ret;
779 if (node)
780 break;
781 spin_lock(&glob->lru_lock);
782 if (list_empty(&man->lru)) {
783 spin_unlock(&glob->lru_lock);
784 break;
785 }
786 spin_unlock(&glob->lru_lock);
787 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
788 no_wait);
789 if (unlikely(ret != 0))
790 return ret;
791 } while (1);
792 if (node == NULL)
793 return -ENOMEM;
697 mem->mm_node = node; 794 mem->mm_node = node;
698 mem->mem_type = mem_type; 795 mem->mem_type = mem_type;
699 return 0; 796 return 0;
@@ -724,7 +821,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
724 return result; 821 return result;
725} 822}
726 823
727
728static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 824static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
729 bool disallow_fixed, 825 bool disallow_fixed,
730 uint32_t mem_type, 826 uint32_t mem_type,
@@ -757,66 +853,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
757 * space. 853 * space.
758 */ 854 */
759int ttm_bo_mem_space(struct ttm_buffer_object *bo, 855int ttm_bo_mem_space(struct ttm_buffer_object *bo,
760 uint32_t proposed_placement, 856 struct ttm_placement *placement,
761 struct ttm_mem_reg *mem, 857 struct ttm_mem_reg *mem,
762 bool interruptible, bool no_wait) 858 bool interruptible, bool no_wait)
763{ 859{
764 struct ttm_bo_device *bdev = bo->bdev; 860 struct ttm_bo_device *bdev = bo->bdev;
765 struct ttm_bo_global *glob = bo->glob;
766 struct ttm_mem_type_manager *man; 861 struct ttm_mem_type_manager *man;
767
768 uint32_t num_prios = bdev->driver->num_mem_type_prio;
769 const uint32_t *prios = bdev->driver->mem_type_prio;
770 uint32_t i;
771 uint32_t mem_type = TTM_PL_SYSTEM; 862 uint32_t mem_type = TTM_PL_SYSTEM;
772 uint32_t cur_flags = 0; 863 uint32_t cur_flags = 0;
773 bool type_found = false; 864 bool type_found = false;
774 bool type_ok = false; 865 bool type_ok = false;
775 bool has_eagain = false; 866 bool has_erestartsys = false;
776 struct drm_mm_node *node = NULL; 867 struct drm_mm_node *node = NULL;
777 int ret; 868 int i, ret;
778 869
779 mem->mm_node = NULL; 870 mem->mm_node = NULL;
780 for (i = 0; i < num_prios; ++i) { 871 for (i = 0; i < placement->num_placement; ++i) {
781 mem_type = prios[i]; 872 ret = ttm_mem_type_from_flags(placement->placement[i],
873 &mem_type);
874 if (ret)
875 return ret;
782 man = &bdev->man[mem_type]; 876 man = &bdev->man[mem_type];
783 877
784 type_ok = ttm_bo_mt_compatible(man, 878 type_ok = ttm_bo_mt_compatible(man,
785 bo->type == ttm_bo_type_user, 879 bo->type == ttm_bo_type_user,
786 mem_type, proposed_placement, 880 mem_type,
787 &cur_flags); 881 placement->placement[i],
882 &cur_flags);
788 883
789 if (!type_ok) 884 if (!type_ok)
790 continue; 885 continue;
791 886
792 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 887 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
793 cur_flags); 888 cur_flags);
889 /*
890 * Use the access and other non-mapping-related flag bits from
891 * the memory placement flags to the current flags
892 */
893 ttm_flag_masked(&cur_flags, placement->placement[i],
894 ~TTM_PL_MASK_MEMTYPE);
794 895
795 if (mem_type == TTM_PL_SYSTEM) 896 if (mem_type == TTM_PL_SYSTEM)
796 break; 897 break;
797 898
798 if (man->has_type && man->use_type) { 899 if (man->has_type && man->use_type) {
799 type_found = true; 900 type_found = true;
800 do { 901 ret = ttm_bo_man_get_node(bo, man, placement, mem,
801 ret = drm_mm_pre_get(&man->manager); 902 &node);
802 if (unlikely(ret)) 903 if (unlikely(ret))
803 return ret; 904 return ret;
804
805 spin_lock(&glob->lru_lock);
806 node = drm_mm_search_free(&man->manager,
807 mem->num_pages,
808 mem->page_alignment,
809 1);
810 if (unlikely(!node)) {
811 spin_unlock(&glob->lru_lock);
812 break;
813 }
814 node = drm_mm_get_block_atomic(node,
815 mem->num_pages,
816 mem->
817 page_alignment);
818 spin_unlock(&glob->lru_lock);
819 } while (!node);
820 } 905 }
821 if (node) 906 if (node)
822 break; 907 break;
@@ -826,67 +911,74 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
826 mem->mm_node = node; 911 mem->mm_node = node;
827 mem->mem_type = mem_type; 912 mem->mem_type = mem_type;
828 mem->placement = cur_flags; 913 mem->placement = cur_flags;
914 if (node)
915 node->private = bo;
829 return 0; 916 return 0;
830 } 917 }
831 918
832 if (!type_found) 919 if (!type_found)
833 return -EINVAL; 920 return -EINVAL;
834 921
835 num_prios = bdev->driver->num_mem_busy_prio; 922 for (i = 0; i < placement->num_busy_placement; ++i) {
836 prios = bdev->driver->mem_busy_prio; 923 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
837 924 &mem_type);
838 for (i = 0; i < num_prios; ++i) { 925 if (ret)
839 mem_type = prios[i]; 926 return ret;
840 man = &bdev->man[mem_type]; 927 man = &bdev->man[mem_type];
841
842 if (!man->has_type) 928 if (!man->has_type)
843 continue; 929 continue;
844
845 if (!ttm_bo_mt_compatible(man, 930 if (!ttm_bo_mt_compatible(man,
846 bo->type == ttm_bo_type_user, 931 bo->type == ttm_bo_type_user,
847 mem_type, 932 mem_type,
848 proposed_placement, &cur_flags)) 933 placement->busy_placement[i],
934 &cur_flags))
849 continue; 935 continue;
850 936
851 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 937 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
852 cur_flags); 938 cur_flags);
939 /*
940 * Use the access and other non-mapping-related flag bits from
941 * the memory placement flags to the current flags
942 */
943 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
944 ~TTM_PL_MASK_MEMTYPE);
853 945
854 ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
855 interruptible, no_wait);
856 946
857 if (ret == 0 && mem->mm_node) { 947 if (mem_type == TTM_PL_SYSTEM) {
948 mem->mem_type = mem_type;
858 mem->placement = cur_flags; 949 mem->placement = cur_flags;
950 mem->mm_node = NULL;
859 return 0; 951 return 0;
860 } 952 }
861 953
862 if (ret == -ERESTART) 954 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
863 has_eagain = true; 955 interruptible, no_wait);
956 if (ret == 0 && mem->mm_node) {
957 mem->placement = cur_flags;
958 mem->mm_node->private = bo;
959 return 0;
960 }
961 if (ret == -ERESTARTSYS)
962 has_erestartsys = true;
864 } 963 }
865 964 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
866 ret = (has_eagain) ? -ERESTART : -ENOMEM;
867 return ret; 965 return ret;
868} 966}
869EXPORT_SYMBOL(ttm_bo_mem_space); 967EXPORT_SYMBOL(ttm_bo_mem_space);
870 968
871int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) 969int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
872{ 970{
873 int ret = 0;
874
875 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) 971 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
876 return -EBUSY; 972 return -EBUSY;
877 973
878 ret = wait_event_interruptible(bo->event_queue, 974 return wait_event_interruptible(bo->event_queue,
879 atomic_read(&bo->cpu_writers) == 0); 975 atomic_read(&bo->cpu_writers) == 0);
880
881 if (ret == -ERESTARTSYS)
882 ret = -ERESTART;
883
884 return ret;
885} 976}
977EXPORT_SYMBOL(ttm_bo_wait_cpu);
886 978
887int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 979int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
888 uint32_t proposed_placement, 980 struct ttm_placement *placement,
889 bool interruptible, bool no_wait) 981 bool interruptible, bool no_wait)
890{ 982{
891 struct ttm_bo_global *glob = bo->glob; 983 struct ttm_bo_global *glob = bo->glob;
892 int ret = 0; 984 int ret = 0;
@@ -899,147 +991,138 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
899 * Have the driver move function wait for idle when necessary, 991 * Have the driver move function wait for idle when necessary,
900 * instead of doing it here. 992 * instead of doing it here.
901 */ 993 */
902
903 spin_lock(&bo->lock); 994 spin_lock(&bo->lock);
904 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 995 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
905 spin_unlock(&bo->lock); 996 spin_unlock(&bo->lock);
906
907 if (ret) 997 if (ret)
908 return ret; 998 return ret;
909
910 mem.num_pages = bo->num_pages; 999 mem.num_pages = bo->num_pages;
911 mem.size = mem.num_pages << PAGE_SHIFT; 1000 mem.size = mem.num_pages << PAGE_SHIFT;
912 mem.page_alignment = bo->mem.page_alignment; 1001 mem.page_alignment = bo->mem.page_alignment;
913
914 /* 1002 /*
915 * Determine where to move the buffer. 1003 * Determine where to move the buffer.
916 */ 1004 */
917 1005 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
918 ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
919 interruptible, no_wait);
920 if (ret) 1006 if (ret)
921 goto out_unlock; 1007 goto out_unlock;
922
923 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); 1008 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
924
925out_unlock: 1009out_unlock:
926 if (ret && mem.mm_node) { 1010 if (ret && mem.mm_node) {
927 spin_lock(&glob->lru_lock); 1011 spin_lock(&glob->lru_lock);
1012 mem.mm_node->private = NULL;
928 drm_mm_put_block(mem.mm_node); 1013 drm_mm_put_block(mem.mm_node);
929 spin_unlock(&glob->lru_lock); 1014 spin_unlock(&glob->lru_lock);
930 } 1015 }
931 return ret; 1016 return ret;
932} 1017}
933 1018
934static int ttm_bo_mem_compat(uint32_t proposed_placement, 1019static int ttm_bo_mem_compat(struct ttm_placement *placement,
935 struct ttm_mem_reg *mem) 1020 struct ttm_mem_reg *mem)
936{ 1021{
937 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) 1022 int i;
938 return 0; 1023 struct drm_mm_node *node = mem->mm_node;
939 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0) 1024
940 return 0; 1025 if (node && placement->lpfn != 0 &&
941 1026 (node->start < placement->fpfn ||
942 return 1; 1027 node->start + node->size > placement->lpfn))
1028 return -1;
1029
1030 for (i = 0; i < placement->num_placement; i++) {
1031 if ((placement->placement[i] & mem->placement &
1032 TTM_PL_MASK_CACHING) &&
1033 (placement->placement[i] & mem->placement &
1034 TTM_PL_MASK_MEM))
1035 return i;
1036 }
1037 return -1;
943} 1038}
944 1039
945int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 1040int ttm_bo_validate(struct ttm_buffer_object *bo,
946 uint32_t proposed_placement, 1041 struct ttm_placement *placement,
947 bool interruptible, bool no_wait) 1042 bool interruptible, bool no_wait)
948{ 1043{
949 int ret; 1044 int ret;
950 1045
951 BUG_ON(!atomic_read(&bo->reserved)); 1046 BUG_ON(!atomic_read(&bo->reserved));
952 bo->proposed_placement = proposed_placement; 1047 /* Check that range is valid */
953 1048 if (placement->lpfn || placement->fpfn)
954 TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", 1049 if (placement->fpfn > placement->lpfn ||
955 (unsigned long)proposed_placement, 1050 (placement->lpfn - placement->fpfn) < bo->num_pages)
956 (unsigned long)bo->mem.placement); 1051 return -EINVAL;
957
958 /* 1052 /*
959 * Check whether we need to move buffer. 1053 * Check whether we need to move buffer.
960 */ 1054 */
961 1055 ret = ttm_bo_mem_compat(placement, &bo->mem);
962 if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { 1056 if (ret < 0) {
963 ret = ttm_bo_move_buffer(bo, bo->proposed_placement, 1057 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
964 interruptible, no_wait); 1058 if (ret)
965 if (ret) {
966 if (ret != -ERESTART)
967 printk(KERN_ERR TTM_PFX
968 "Failed moving buffer. "
969 "Proposed placement 0x%08x\n",
970 bo->proposed_placement);
971 if (ret == -ENOMEM)
972 printk(KERN_ERR TTM_PFX
973 "Out of aperture space or "
974 "DRM memory quota.\n");
975 return ret; 1059 return ret;
976 } 1060 } else {
1061 /*
1062 * Use the access and other non-mapping-related flag bits from
1063 * the compatible memory placement flags to the active flags
1064 */
1065 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1066 ~TTM_PL_MASK_MEMTYPE);
977 } 1067 }
978
979 /* 1068 /*
980 * We might need to add a TTM. 1069 * We might need to add a TTM.
981 */ 1070 */
982
983 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1071 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
984 ret = ttm_bo_add_ttm(bo, true); 1072 ret = ttm_bo_add_ttm(bo, true);
985 if (ret) 1073 if (ret)
986 return ret; 1074 return ret;
987 } 1075 }
988 /*
989 * Validation has succeeded, move the access and other
990 * non-mapping-related flag bits from the proposed flags to
991 * the active flags
992 */
993
994 ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
995 ~TTM_PL_MASK_MEMTYPE);
996
997 return 0; 1076 return 0;
998} 1077}
999EXPORT_SYMBOL(ttm_buffer_object_validate); 1078EXPORT_SYMBOL(ttm_bo_validate);
1000 1079
1001int 1080int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1002ttm_bo_check_placement(struct ttm_buffer_object *bo, 1081 struct ttm_placement *placement)
1003 uint32_t set_flags, uint32_t clr_flags)
1004{ 1082{
1005 uint32_t new_mask = set_flags | clr_flags; 1083 int i;
1006 1084
1007 if ((bo->type == ttm_bo_type_user) && 1085 if (placement->fpfn || placement->lpfn) {
1008 (clr_flags & TTM_PL_FLAG_CACHED)) { 1086 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1009 printk(KERN_ERR TTM_PFX 1087 printk(KERN_ERR TTM_PFX "Page number range to small "
1010 "User buffers require cache-coherent memory.\n"); 1088 "Need %lu pages, range is [%u, %u]\n",
1011 return -EINVAL; 1089 bo->mem.num_pages, placement->fpfn,
1012 } 1090 placement->lpfn);
1013
1014 if (!capable(CAP_SYS_ADMIN)) {
1015 if (new_mask & TTM_PL_FLAG_NO_EVICT) {
1016 printk(KERN_ERR TTM_PFX "Need to be root to modify"
1017 " NO_EVICT status.\n");
1018 return -EINVAL; 1091 return -EINVAL;
1019 } 1092 }
1020 1093 }
1021 if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) && 1094 for (i = 0; i < placement->num_placement; i++) {
1022 (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { 1095 if (!capable(CAP_SYS_ADMIN)) {
1023 printk(KERN_ERR TTM_PFX 1096 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1024 "Incompatible memory specification" 1097 printk(KERN_ERR TTM_PFX "Need to be root to "
1025 " for NO_EVICT buffer.\n"); 1098 "modify NO_EVICT status.\n");
1026 return -EINVAL; 1099 return -EINVAL;
1100 }
1101 }
1102 }
1103 for (i = 0; i < placement->num_busy_placement; i++) {
1104 if (!capable(CAP_SYS_ADMIN)) {
1105 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1106 printk(KERN_ERR TTM_PFX "Need to be root to "
1107 "modify NO_EVICT status.\n");
1108 return -EINVAL;
1109 }
1027 } 1110 }
1028 } 1111 }
1029 return 0; 1112 return 0;
1030} 1113}
1031 1114
1032int ttm_buffer_object_init(struct ttm_bo_device *bdev, 1115int ttm_bo_init(struct ttm_bo_device *bdev,
1033 struct ttm_buffer_object *bo, 1116 struct ttm_buffer_object *bo,
1034 unsigned long size, 1117 unsigned long size,
1035 enum ttm_bo_type type, 1118 enum ttm_bo_type type,
1036 uint32_t flags, 1119 struct ttm_placement *placement,
1037 uint32_t page_alignment, 1120 uint32_t page_alignment,
1038 unsigned long buffer_start, 1121 unsigned long buffer_start,
1039 bool interruptible, 1122 bool interruptible,
1040 struct file *persistant_swap_storage, 1123 struct file *persistant_swap_storage,
1041 size_t acc_size, 1124 size_t acc_size,
1042 void (*destroy) (struct ttm_buffer_object *)) 1125 void (*destroy) (struct ttm_buffer_object *))
1043{ 1126{
1044 int ret = 0; 1127 int ret = 0;
1045 unsigned long num_pages; 1128 unsigned long num_pages;
@@ -1065,6 +1148,7 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1065 bo->glob = bdev->glob; 1148 bo->glob = bdev->glob;
1066 bo->type = type; 1149 bo->type = type;
1067 bo->num_pages = num_pages; 1150 bo->num_pages = num_pages;
1151 bo->mem.size = num_pages << PAGE_SHIFT;
1068 bo->mem.mem_type = TTM_PL_SYSTEM; 1152 bo->mem.mem_type = TTM_PL_SYSTEM;
1069 bo->mem.num_pages = bo->num_pages; 1153 bo->mem.num_pages = bo->num_pages;
1070 bo->mem.mm_node = NULL; 1154 bo->mem.mm_node = NULL;
@@ -1077,29 +1161,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1077 bo->acc_size = acc_size; 1161 bo->acc_size = acc_size;
1078 atomic_inc(&bo->glob->bo_count); 1162 atomic_inc(&bo->glob->bo_count);
1079 1163
1080 ret = ttm_bo_check_placement(bo, flags, 0ULL); 1164 ret = ttm_bo_check_placement(bo, placement);
1081 if (unlikely(ret != 0)) 1165 if (unlikely(ret != 0))
1082 goto out_err; 1166 goto out_err;
1083 1167
1084 /* 1168 /*
1085 * If no caching attributes are set, accept any form of caching.
1086 */
1087
1088 if ((flags & TTM_PL_MASK_CACHING) == 0)
1089 flags |= TTM_PL_MASK_CACHING;
1090
1091 /*
1092 * For ttm_bo_type_device buffers, allocate 1169 * For ttm_bo_type_device buffers, allocate
1093 * address space from the device. 1170 * address space from the device.
1094 */ 1171 */
1095
1096 if (bo->type == ttm_bo_type_device) { 1172 if (bo->type == ttm_bo_type_device) {
1097 ret = ttm_bo_setup_vm(bo); 1173 ret = ttm_bo_setup_vm(bo);
1098 if (ret) 1174 if (ret)
1099 goto out_err; 1175 goto out_err;
1100 } 1176 }
1101 1177
1102 ret = ttm_buffer_object_validate(bo, flags, interruptible, false); 1178 ret = ttm_bo_validate(bo, placement, interruptible, false);
1103 if (ret) 1179 if (ret)
1104 goto out_err; 1180 goto out_err;
1105 1181
@@ -1112,7 +1188,7 @@ out_err:
1112 1188
1113 return ret; 1189 return ret;
1114} 1190}
1115EXPORT_SYMBOL(ttm_buffer_object_init); 1191EXPORT_SYMBOL(ttm_bo_init);
1116 1192
1117static inline size_t ttm_bo_size(struct ttm_bo_global *glob, 1193static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1118 unsigned long num_pages) 1194 unsigned long num_pages)
@@ -1123,19 +1199,19 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1123 return glob->ttm_bo_size + 2 * page_array_size; 1199 return glob->ttm_bo_size + 2 * page_array_size;
1124} 1200}
1125 1201
1126int ttm_buffer_object_create(struct ttm_bo_device *bdev, 1202int ttm_bo_create(struct ttm_bo_device *bdev,
1127 unsigned long size, 1203 unsigned long size,
1128 enum ttm_bo_type type, 1204 enum ttm_bo_type type,
1129 uint32_t flags, 1205 struct ttm_placement *placement,
1130 uint32_t page_alignment, 1206 uint32_t page_alignment,
1131 unsigned long buffer_start, 1207 unsigned long buffer_start,
1132 bool interruptible, 1208 bool interruptible,
1133 struct file *persistant_swap_storage, 1209 struct file *persistant_swap_storage,
1134 struct ttm_buffer_object **p_bo) 1210 struct ttm_buffer_object **p_bo)
1135{ 1211{
1136 struct ttm_buffer_object *bo; 1212 struct ttm_buffer_object *bo;
1137 int ret;
1138 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1213 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1214 int ret;
1139 1215
1140 size_t acc_size = 1216 size_t acc_size =
1141 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); 1217 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1150,76 +1226,41 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1150 return -ENOMEM; 1226 return -ENOMEM;
1151 } 1227 }
1152 1228
1153 ret = ttm_buffer_object_init(bdev, bo, size, type, flags, 1229 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1154 page_alignment, buffer_start, 1230 buffer_start, interruptible,
1155 interruptible, 1231 persistant_swap_storage, acc_size, NULL);
1156 persistant_swap_storage, acc_size, NULL);
1157 if (likely(ret == 0)) 1232 if (likely(ret == 0))
1158 *p_bo = bo; 1233 *p_bo = bo;
1159 1234
1160 return ret; 1235 return ret;
1161} 1236}
1162 1237
1163static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
1164 uint32_t mem_type, bool allow_errors)
1165{
1166 int ret;
1167
1168 spin_lock(&bo->lock);
1169 ret = ttm_bo_wait(bo, false, false, false);
1170 spin_unlock(&bo->lock);
1171
1172 if (ret && allow_errors)
1173 goto out;
1174
1175 if (bo->mem.mem_type == mem_type)
1176 ret = ttm_bo_evict(bo, mem_type, false, false);
1177
1178 if (ret) {
1179 if (allow_errors) {
1180 goto out;
1181 } else {
1182 ret = 0;
1183 printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
1184 }
1185 }
1186
1187out:
1188 return ret;
1189}
1190
1191static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, 1238static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1192 struct list_head *head, 1239 unsigned mem_type, bool allow_errors)
1193 unsigned mem_type, bool allow_errors)
1194{ 1240{
1241 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1195 struct ttm_bo_global *glob = bdev->glob; 1242 struct ttm_bo_global *glob = bdev->glob;
1196 struct ttm_buffer_object *entry;
1197 int ret; 1243 int ret;
1198 int put_count;
1199 1244
1200 /* 1245 /*
1201 * Can't use standard list traversal since we're unlocking. 1246 * Can't use standard list traversal since we're unlocking.
1202 */ 1247 */
1203 1248
1204 spin_lock(&glob->lru_lock); 1249 spin_lock(&glob->lru_lock);
1205 1250 while (!list_empty(&man->lru)) {
1206 while (!list_empty(head)) {
1207 entry = list_first_entry(head, struct ttm_buffer_object, lru);
1208 kref_get(&entry->list_kref);
1209 ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
1210 put_count = ttm_bo_del_from_lru(entry);
1211 spin_unlock(&glob->lru_lock); 1251 spin_unlock(&glob->lru_lock);
1212 while (put_count--) 1252 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1213 kref_put(&entry->list_kref, ttm_bo_ref_bug); 1253 if (ret) {
1214 BUG_ON(ret); 1254 if (allow_errors) {
1215 ret = ttm_bo_leave_list(entry, mem_type, allow_errors); 1255 return ret;
1216 ttm_bo_unreserve(entry); 1256 } else {
1217 kref_put(&entry->list_kref, ttm_bo_release_list); 1257 printk(KERN_ERR TTM_PFX
1258 "Cleanup eviction failed\n");
1259 }
1260 }
1218 spin_lock(&glob->lru_lock); 1261 spin_lock(&glob->lru_lock);
1219 } 1262 }
1220
1221 spin_unlock(&glob->lru_lock); 1263 spin_unlock(&glob->lru_lock);
1222
1223 return 0; 1264 return 0;
1224} 1265}
1225 1266
@@ -1246,7 +1287,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1246 1287
1247 ret = 0; 1288 ret = 0;
1248 if (mem_type > 0) { 1289 if (mem_type > 0) {
1249 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); 1290 ttm_bo_force_list_clean(bdev, mem_type, false);
1250 1291
1251 spin_lock(&glob->lru_lock); 1292 spin_lock(&glob->lru_lock);
1252 if (drm_mm_clean(&man->manager)) 1293 if (drm_mm_clean(&man->manager))
@@ -1279,12 +1320,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1279 return 0; 1320 return 0;
1280 } 1321 }
1281 1322
1282 return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); 1323 return ttm_bo_force_list_clean(bdev, mem_type, true);
1283} 1324}
1284EXPORT_SYMBOL(ttm_bo_evict_mm); 1325EXPORT_SYMBOL(ttm_bo_evict_mm);
1285 1326
1286int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 1327int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1287 unsigned long p_offset, unsigned long p_size) 1328 unsigned long p_size)
1288{ 1329{
1289 int ret = -EINVAL; 1330 int ret = -EINVAL;
1290 struct ttm_mem_type_manager *man; 1331 struct ttm_mem_type_manager *man;
@@ -1314,7 +1355,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1314 type); 1355 type);
1315 return ret; 1356 return ret;
1316 } 1357 }
1317 ret = drm_mm_init(&man->manager, p_offset, p_size); 1358 ret = drm_mm_init(&man->manager, 0, p_size);
1318 if (ret) 1359 if (ret)
1319 return ret; 1360 return ret;
1320 } 1361 }
@@ -1384,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
1384 1425
1385 atomic_set(&glob->bo_count, 0); 1426 atomic_set(&glob->bo_count, 0);
1386 1427
1387 kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type); 1428 ret = kobject_init_and_add(
1388 ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects"); 1429 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1389 if (unlikely(ret != 0)) 1430 if (unlikely(ret != 0))
1390 kobject_put(&glob->kobj); 1431 kobject_put(&glob->kobj);
1391 return ret; 1432 return ret;
@@ -1463,7 +1504,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1463 * Initialize the system memory buffer type. 1504 * Initialize the system memory buffer type.
1464 * Other types need to be driver / IOCTL initialized. 1505 * Other types need to be driver / IOCTL initialized.
1465 */ 1506 */
1466 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); 1507 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1467 if (unlikely(ret != 0)) 1508 if (unlikely(ret != 0))
1468 goto out_no_sys; 1509 goto out_no_sys;
1469 1510
@@ -1675,40 +1716,12 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1675} 1716}
1676EXPORT_SYMBOL(ttm_bo_wait); 1717EXPORT_SYMBOL(ttm_bo_wait);
1677 1718
1678void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1679{
1680 atomic_set(&bo->reserved, 0);
1681 wake_up_all(&bo->event_queue);
1682}
1683
1684int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1685 bool no_wait)
1686{
1687 int ret;
1688
1689 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1690 if (no_wait)
1691 return -EBUSY;
1692 else if (interruptible) {
1693 ret = wait_event_interruptible
1694 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1695 if (unlikely(ret != 0))
1696 return -ERESTART;
1697 } else {
1698 wait_event(bo->event_queue,
1699 atomic_read(&bo->reserved) == 0);
1700 }
1701 }
1702 return 0;
1703}
1704
1705int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) 1719int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1706{ 1720{
1707 int ret = 0; 1721 int ret = 0;
1708 1722
1709 /* 1723 /*
1710 * Using ttm_bo_reserve instead of ttm_bo_block_reservation 1724 * Using ttm_bo_reserve makes sure the lru lists are updated.
1711 * makes sure the lru lists are updated.
1712 */ 1725 */
1713 1726
1714 ret = ttm_bo_reserve(bo, true, no_wait, false, 0); 1727 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
@@ -1722,12 +1735,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1722 ttm_bo_unreserve(bo); 1735 ttm_bo_unreserve(bo);
1723 return ret; 1736 return ret;
1724} 1737}
1738EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1725 1739
1726void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) 1740void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1727{ 1741{
1728 if (atomic_dec_and_test(&bo->cpu_writers)) 1742 if (atomic_dec_and_test(&bo->cpu_writers))
1729 wake_up_all(&bo->event_queue); 1743 wake_up_all(&bo->event_queue);
1730} 1744}
1745EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1731 1746
1732/** 1747/**
1733 * A buffer object shrink method that tries to swap out the first 1748 * A buffer object shrink method that tries to swap out the first
@@ -1808,6 +1823,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1808 * anyone tries to access a ttm page. 1823 * anyone tries to access a ttm page.
1809 */ 1824 */
1810 1825
1826 if (bo->bdev->driver->swap_notify)
1827 bo->bdev->driver->swap_notify(bo);
1828
1811 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage); 1829 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1812out: 1830out:
1813 1831
@@ -1828,3 +1846,4 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1828 while (ttm_bo_swapout(&bdev->glob->shrink) == 0) 1846 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1829 ; 1847 ;
1830} 1848}
1849EXPORT_SYMBOL(ttm_bo_swapout_all);
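
For drivers, the net effect of the ttm_bo.c rework is that validation, eviction and initialization now take a struct ttm_placement instead of a flag word, and interrupted waits return -ERESTARTSYS instead of the private -ERESTART code. A hedged driver-side sketch of the new call shape (not part of this commit; validate_into_vram() and the placement array are illustrative, and TTM_PL_FLAG_VRAM is assumed from ttm_placement.h):

    #include "ttm/ttm_bo_driver.h"
    #include "ttm/ttm_placement.h"

    /* Illustrative placement list: prefer cached VRAM, fall back to cached
     * system memory if the buffer still does not fit after eviction. */
    static const uint32_t vram_then_system[] = {
            TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
            TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
    };

    static int validate_into_vram(struct ttm_buffer_object *bo)
    {
            struct ttm_placement placement = {
                    .fpfn = 0,
                    .lpfn = 0,                      /* no page-range restriction */
                    .num_placement = 2,
                    .placement = vram_then_system,
                    .num_busy_placement = 2,
                    .busy_placement = vram_then_system,
            };

            /* bo must already be reserved: the new ttm_bo_validate() starts
             * with BUG_ON(!atomic_read(&bo->reserved)). */
            return ttm_bo_validate(bo, &placement, true /* interruptible */,
                                   false /* no_wait */);
    }

A caller that can be interrupted by a signal should treat an -ERESTARTSYS return as "restart the ioctl", not as a hard failure.
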
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c70927ecda21..d764e82e799b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -33,6 +33,7 @@
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/highmem.h> 34#include <linux/highmem.h>
35#include <linux/wait.h> 35#include <linux/wait.h>
36#include <linux/slab.h>
36#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
37#include <linux/module.h> 38#include <linux/module.h>
38 39
@@ -53,7 +54,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
53{ 54{
54 struct ttm_tt *ttm = bo->ttm; 55 struct ttm_tt *ttm = bo->ttm;
55 struct ttm_mem_reg *old_mem = &bo->mem; 56 struct ttm_mem_reg *old_mem = &bo->mem;
56 uint32_t save_flags = old_mem->placement;
57 int ret; 57 int ret;
58 58
59 if (old_mem->mem_type != TTM_PL_SYSTEM) { 59 if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +62,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
62 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, 62 ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
63 TTM_PL_MASK_MEM); 63 TTM_PL_MASK_MEM);
64 old_mem->mem_type = TTM_PL_SYSTEM; 64 old_mem->mem_type = TTM_PL_SYSTEM;
65 save_flags = old_mem->placement;
66 } 65 }
67 66
68 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); 67 ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,7 +76,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
77 76
78 *old_mem = *new_mem; 77 *old_mem = *new_mem;
79 new_mem->mm_node = NULL; 78 new_mem->mm_node = NULL;
80 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); 79
81 return 0; 80 return 0;
82} 81}
83EXPORT_SYMBOL(ttm_bo_move_ttm); 82EXPORT_SYMBOL(ttm_bo_move_ttm);
@@ -219,7 +218,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
219 void *old_iomap; 218 void *old_iomap;
220 void *new_iomap; 219 void *new_iomap;
221 int ret; 220 int ret;
222 uint32_t save_flags = old_mem->placement;
223 unsigned long i; 221 unsigned long i;
224 unsigned long page; 222 unsigned long page;
225 unsigned long add = 0; 223 unsigned long add = 0;
@@ -270,7 +268,6 @@ out2:
270 268
271 *old_mem = *new_mem; 269 *old_mem = *new_mem;
272 new_mem->mm_node = NULL; 270 new_mem->mm_node = NULL;
273 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
274 271
275 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { 272 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
276 ttm_tt_unbind(ttm); 273 ttm_tt_unbind(ttm);
@@ -369,6 +366,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
369#endif 366#endif
370 return tmp; 367 return tmp;
371} 368}
369EXPORT_SYMBOL(ttm_io_prot);
372 370
373static int ttm_bo_ioremap(struct ttm_buffer_object *bo, 371static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
374 unsigned long bus_base, 372 unsigned long bus_base,
@@ -427,7 +425,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
427 425
428 /* 426 /*
429 * We need to use vmap to get the desired page protection 427 * We need to use vmap to get the desired page protection
430 * or to make the buffer object look contigous. 428 * or to make the buffer object look contiguous.
431 */ 429 */
432 prot = (mem->placement & TTM_PL_FLAG_CACHED) ? 430 prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
433 PAGE_KERNEL : 431 PAGE_KERNEL :
@@ -536,7 +534,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
536 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; 534 struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
537 struct ttm_mem_reg *old_mem = &bo->mem; 535 struct ttm_mem_reg *old_mem = &bo->mem;
538 int ret; 536 int ret;
539 uint32_t save_flags = old_mem->placement;
540 struct ttm_buffer_object *ghost_obj; 537 struct ttm_buffer_object *ghost_obj;
541 void *tmp_obj = NULL; 538 void *tmp_obj = NULL;
542 539
@@ -597,7 +594,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
597 594
598 *old_mem = *new_mem; 595 *old_mem = *new_mem;
599 new_mem->mm_node = NULL; 596 new_mem->mm_node = NULL;
600 ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE); 597
601 return 0; 598 return 0;
602} 599}
603EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 600EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d040338..668dbe8b8dd3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
114 ret = ttm_bo_wait(bo, false, true, false); 114 ret = ttm_bo_wait(bo, false, true, false);
115 spin_unlock(&bo->lock); 115 spin_unlock(&bo->lock);
116 if (unlikely(ret != 0)) { 116 if (unlikely(ret != 0)) {
117 retval = (ret != -ERESTART) ? 117 retval = (ret != -ERESTARTSYS) ?
118 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; 118 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
119 goto out_unlock; 119 goto out_unlock;
120 } 120 }
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
320 return -EFAULT; 320 return -EFAULT;
321 321
322 driver = bo->bdev->driver; 322 driver = bo->bdev->driver;
323 if (unlikely(driver->verify_access)) { 323 if (unlikely(!driver->verify_access)) {
324 ret = -EPERM; 324 ret = -EPERM;
325 goto out_unref; 325 goto out_unref;
326 } 326 }
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
349 switch (ret) { 349 switch (ret) {
350 case 0: 350 case 0:
351 break; 351 break;
352 case -ERESTART:
353 ret = -EINTR;
354 goto out_unref;
355 case -EBUSY: 352 case -EBUSY:
356 ret = -EAGAIN; 353 ret = -EAGAIN;
357 goto out_unref; 354 goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
421 switch (ret) { 418 switch (ret) {
422 case 0: 419 case 0:
423 break; 420 break;
424 case -ERESTART:
425 return -EINTR;
426 case -EBUSY: 421 case -EBUSY:
427 return -EAGAIN; 422 return -EAGAIN;
428 default: 423 default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 000000000000..c285c2902d15
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "ttm/ttm_execbuf_util.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31#include <linux/wait.h>
32#include <linux/sched.h>
33#include <linux/module.h>
34
35void ttm_eu_backoff_reservation(struct list_head *list)
36{
37 struct ttm_validate_buffer *entry;
38
39 list_for_each_entry(entry, list, head) {
40 struct ttm_buffer_object *bo = entry->bo;
41 if (!entry->reserved)
42 continue;
43
44 entry->reserved = false;
45 ttm_bo_unreserve(bo);
46 }
47}
48EXPORT_SYMBOL(ttm_eu_backoff_reservation);
49
50/*
51 * Reserve buffers for validation.
52 *
53 * If a buffer in the list is marked for CPU access, we back off and
54 * wait for that buffer to become free for GPU access.
55 *
56 * If a buffer is reserved for another validation, the validator with
57 * the highest validation sequence backs off and waits for that buffer
58 * to become unreserved. This prevents deadlocks when validating multiple
59 * buffers in different orders.
60 */
61
62int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
63{
64 struct ttm_validate_buffer *entry;
65 int ret;
66
67retry:
68 list_for_each_entry(entry, list, head) {
69 struct ttm_buffer_object *bo = entry->bo;
70
71 entry->reserved = false;
72 ret = ttm_bo_reserve(bo, true, false, true, val_seq);
73 if (ret != 0) {
74 ttm_eu_backoff_reservation(list);
75 if (ret == -EAGAIN) {
76 ret = ttm_bo_wait_unreserved(bo, true);
77 if (unlikely(ret != 0))
78 return ret;
79 goto retry;
80 } else
81 return ret;
82 }
83
84 entry->reserved = true;
85 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
86 ttm_eu_backoff_reservation(list);
87 ret = ttm_bo_wait_cpu(bo, false);
88 if (ret)
89 return ret;
90 goto retry;
91 }
92 }
93 return 0;
94}
95EXPORT_SYMBOL(ttm_eu_reserve_buffers);
96
97void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
98{
99 struct ttm_validate_buffer *entry;
100
101 list_for_each_entry(entry, list, head) {
102 struct ttm_buffer_object *bo = entry->bo;
103 struct ttm_bo_driver *driver = bo->bdev->driver;
104 void *old_sync_obj;
105
106 spin_lock(&bo->lock);
107 old_sync_obj = bo->sync_obj;
108 bo->sync_obj = driver->sync_obj_ref(sync_obj);
109 bo->sync_obj_arg = entry->new_sync_obj_arg;
110 spin_unlock(&bo->lock);
111 ttm_bo_unreserve(bo);
112 entry->reserved = false;
113 if (old_sync_obj)
114 driver->sync_obj_unref(&old_sync_obj);
115 }
116}
117EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
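
A minimal driver-side sketch of how the three helpers above chain together for one command submission. It is illustrative only: my_driver_submit(), the relocation step and my_fence are hypothetical driver code, not part of this patch; only the ttm_eu_* calls and the ttm_validate_buffer fields come from the file above.

#include "ttm/ttm_execbuf_util.h"
#include <linux/list.h>

/* Illustrative only: reserve a list of buffers, emit commands, then
 * fence and unreserve them in one go. */
static int my_driver_submit(struct list_head *val_list, uint32_t val_seq,
			    void *my_fence)
{
	struct ttm_validate_buffer *entry;
	int ret;

	/* Reserve every buffer; contention between concurrent validators
	 * is resolved by the val_seq back-off described above. */
	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;

	/* Driver-specific work: validate placements, patch relocations,
	 * emit the command stream.  The per-buffer sync_obj_arg that
	 * ttm_eu_fence_buffer_objects() will install is chosen here. */
	list_for_each_entry(entry, val_list, head)
		entry->new_sync_obj_arg = NULL;	/* driver's choice */

	/* If the driver fails after this point it would instead call
	 * ttm_eu_backoff_reservation(val_list). */

	/* Attach the new fence to all buffers and drop the reservations. */
	ttm_eu_fence_buffer_objects(val_list, my_fence);
	return 0;
}
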
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 000000000000..de41e55a944a
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,310 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_lock.h"
32#include "ttm/ttm_module.h"
33#include <asm/atomic.h>
34#include <linux/errno.h>
35#include <linux/wait.h>
36#include <linux/sched.h>
37#include <linux/module.h>
38
39#define TTM_WRITE_LOCK_PENDING (1 << 0)
40#define TTM_VT_LOCK_PENDING (1 << 1)
41#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
42#define TTM_VT_LOCK (1 << 3)
43#define TTM_SUSPEND_LOCK (1 << 4)
44
45void ttm_lock_init(struct ttm_lock *lock)
46{
47 spin_lock_init(&lock->lock);
48 init_waitqueue_head(&lock->queue);
49 lock->rw = 0;
50 lock->flags = 0;
51 lock->kill_takers = false;
52 lock->signal = SIGKILL;
53}
54EXPORT_SYMBOL(ttm_lock_init);
55
56void ttm_read_unlock(struct ttm_lock *lock)
57{
58 spin_lock(&lock->lock);
59 if (--lock->rw == 0)
60 wake_up_all(&lock->queue);
61 spin_unlock(&lock->lock);
62}
63EXPORT_SYMBOL(ttm_read_unlock);
64
65static bool __ttm_read_lock(struct ttm_lock *lock)
66{
67 bool locked = false;
68
69 spin_lock(&lock->lock);
70 if (unlikely(lock->kill_takers)) {
71 send_sig(lock->signal, current, 0);
72 spin_unlock(&lock->lock);
73 return false;
74 }
75 if (lock->rw >= 0 && lock->flags == 0) {
76 ++lock->rw;
77 locked = true;
78 }
79 spin_unlock(&lock->lock);
80 return locked;
81}
82
83int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
84{
85 int ret = 0;
86
87 if (interruptible)
88 ret = wait_event_interruptible(lock->queue,
89 __ttm_read_lock(lock));
90 else
91 wait_event(lock->queue, __ttm_read_lock(lock));
92 return ret;
93}
94EXPORT_SYMBOL(ttm_read_lock);
95
96static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
97{
98 bool block = true;
99
100 *locked = false;
101
102 spin_lock(&lock->lock);
103 if (unlikely(lock->kill_takers)) {
104 send_sig(lock->signal, current, 0);
105 spin_unlock(&lock->lock);
106 return false;
107 }
108 if (lock->rw >= 0 && lock->flags == 0) {
109 ++lock->rw;
110 block = false;
111 *locked = true;
112 } else if (lock->flags == 0) {
113 block = false;
114 }
115 spin_unlock(&lock->lock);
116
117 return !block;
118}
119
120int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
121{
122 int ret = 0;
123 bool locked;
124
125 if (interruptible)
126 ret = wait_event_interruptible
127 (lock->queue, __ttm_read_trylock(lock, &locked));
128 else
129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
130
131 if (unlikely(ret != 0)) {
132 BUG_ON(locked);
133 return ret;
134 }
135
136 return (locked) ? 0 : -EBUSY;
137}
138
139void ttm_write_unlock(struct ttm_lock *lock)
140{
141 spin_lock(&lock->lock);
142 lock->rw = 0;
143 wake_up_all(&lock->queue);
144 spin_unlock(&lock->lock);
145}
146EXPORT_SYMBOL(ttm_write_unlock);
147
148static bool __ttm_write_lock(struct ttm_lock *lock)
149{
150 bool locked = false;
151
152 spin_lock(&lock->lock);
153 if (unlikely(lock->kill_takers)) {
154 send_sig(lock->signal, current, 0);
155 spin_unlock(&lock->lock);
156 return false;
157 }
158 if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
159 lock->rw = -1;
160 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
161 locked = true;
162 } else {
163 lock->flags |= TTM_WRITE_LOCK_PENDING;
164 }
165 spin_unlock(&lock->lock);
166 return locked;
167}
168
169int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
170{
171 int ret = 0;
172
173 if (interruptible) {
174 ret = wait_event_interruptible(lock->queue,
175 __ttm_write_lock(lock));
176 if (unlikely(ret != 0)) {
177 spin_lock(&lock->lock);
178 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
179 wake_up_all(&lock->queue);
180 spin_unlock(&lock->lock);
181 }
182 } else
183		wait_event(lock->queue, __ttm_write_lock(lock));
184
185 return ret;
186}
187EXPORT_SYMBOL(ttm_write_lock);
188
189void ttm_write_lock_downgrade(struct ttm_lock *lock)
190{
191 spin_lock(&lock->lock);
192 lock->rw = 1;
193 wake_up_all(&lock->queue);
194 spin_unlock(&lock->lock);
195}
196
197static int __ttm_vt_unlock(struct ttm_lock *lock)
198{
199 int ret = 0;
200
201 spin_lock(&lock->lock);
202 if (unlikely(!(lock->flags & TTM_VT_LOCK)))
203 ret = -EINVAL;
204 lock->flags &= ~TTM_VT_LOCK;
205 wake_up_all(&lock->queue);
206 spin_unlock(&lock->lock);
207
208 return ret;
209}
210
211static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
212{
213 struct ttm_base_object *base = *p_base;
214 struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
215 int ret;
216
217 *p_base = NULL;
218 ret = __ttm_vt_unlock(lock);
219 BUG_ON(ret != 0);
220}
221
222static bool __ttm_vt_lock(struct ttm_lock *lock)
223{
224 bool locked = false;
225
226 spin_lock(&lock->lock);
227 if (lock->rw == 0) {
228 lock->flags &= ~TTM_VT_LOCK_PENDING;
229 lock->flags |= TTM_VT_LOCK;
230 locked = true;
231 } else {
232 lock->flags |= TTM_VT_LOCK_PENDING;
233 }
234 spin_unlock(&lock->lock);
235 return locked;
236}
237
238int ttm_vt_lock(struct ttm_lock *lock,
239 bool interruptible,
240 struct ttm_object_file *tfile)
241{
242 int ret = 0;
243
244 if (interruptible) {
245 ret = wait_event_interruptible(lock->queue,
246 __ttm_vt_lock(lock));
247 if (unlikely(ret != 0)) {
248 spin_lock(&lock->lock);
249 lock->flags &= ~TTM_VT_LOCK_PENDING;
250 wake_up_all(&lock->queue);
251 spin_unlock(&lock->lock);
252 return ret;
253 }
254 } else
255 wait_event(lock->queue, __ttm_vt_lock(lock));
256
257 /*
258 * Add a base-object, the destructor of which will
259 * make sure the lock is released if the client dies
260 * while holding it.
261 */
262
263 ret = ttm_base_object_init(tfile, &lock->base, false,
264 ttm_lock_type, &ttm_vt_lock_remove, NULL);
265 if (ret)
266 (void)__ttm_vt_unlock(lock);
267 else
268 lock->vt_holder = tfile;
269
270 return ret;
271}
272EXPORT_SYMBOL(ttm_vt_lock);
273
274int ttm_vt_unlock(struct ttm_lock *lock)
275{
276 return ttm_ref_object_base_unref(lock->vt_holder,
277 lock->base.hash.key, TTM_REF_USAGE);
278}
279EXPORT_SYMBOL(ttm_vt_unlock);
280
281void ttm_suspend_unlock(struct ttm_lock *lock)
282{
283 spin_lock(&lock->lock);
284 lock->flags &= ~TTM_SUSPEND_LOCK;
285 wake_up_all(&lock->queue);
286 spin_unlock(&lock->lock);
287}
288EXPORT_SYMBOL(ttm_suspend_unlock);
289
290static bool __ttm_suspend_lock(struct ttm_lock *lock)
291{
292 bool locked = false;
293
294 spin_lock(&lock->lock);
295 if (lock->rw == 0) {
296 lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
297 lock->flags |= TTM_SUSPEND_LOCK;
298 locked = true;
299 } else {
300 lock->flags |= TTM_SUSPEND_LOCK_PENDING;
301 }
302 spin_unlock(&lock->lock);
303 return locked;
304}
305
306void ttm_suspend_lock(struct ttm_lock *lock)
307{
308 wait_event(lock->queue, __ttm_suspend_lock(lock));
309}
310EXPORT_SYMBOL(ttm_suspend_lock);
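
A short usage sketch for the read side of this lock, as a driver ioctl path would typically take it around hardware access. my_driver_ioctl() is hypothetical; only the ttm_read_lock()/ttm_read_unlock() calls come from the file above.

#include "ttm/ttm_lock.h"

/* Illustrative only: share the lock with other readers for the duration
 * of a submission; write, VT and suspend holders exclude us. */
static int my_driver_ioctl(struct ttm_lock *lock)
{
	int ret;

	/* Interruptible wait: returns -ERESTARTSYS if a signal arrives
	 * before the read lock is obtained. */
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		return ret;

	/* ... touch the hardware ... */

	ttm_read_unlock(lock);
	return 0;
}
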
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb5..801b702566e6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -32,6 +32,7 @@
32#include <linux/wait.h> 32#include <linux/wait.h>
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/slab.h>
35 36
36#define TTM_MEMORY_ALLOC_RETRIES 4 37#define TTM_MEMORY_ALLOC_RETRIES 4
37 38
@@ -152,7 +153,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
152 NULL 153 NULL
153}; 154};
154 155
155static struct sysfs_ops ttm_mem_zone_ops = { 156static const struct sysfs_ops ttm_mem_zone_ops = {
156 .show = &ttm_mem_zone_show, 157 .show = &ttm_mem_zone_show,
157 .store = &ttm_mem_zone_store 158 .store = &ttm_mem_zone_store
158}; 159};
@@ -260,8 +261,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
260 zone->used_mem = 0; 261 zone->used_mem = 0;
261 zone->glob = glob; 262 zone->glob = glob;
262 glob->zone_kernel = zone; 263 glob->zone_kernel = zone;
263 kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); 264 ret = kobject_init_and_add(
264 ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); 265 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
265 if (unlikely(ret != 0)) { 266 if (unlikely(ret != 0)) {
266 kobject_put(&zone->kobj); 267 kobject_put(&zone->kobj);
267 return ret; 268 return ret;
@@ -274,16 +275,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
274static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, 275static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
275 const struct sysinfo *si) 276 const struct sysinfo *si)
276{ 277{
277 struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); 278 struct ttm_mem_zone *zone;
278 uint64_t mem; 279 uint64_t mem;
279 int ret; 280 int ret;
280 281
281 if (unlikely(!zone))
282 return -ENOMEM;
283
284 if (si->totalhigh == 0) 282 if (si->totalhigh == 0)
285 return 0; 283 return 0;
286 284
285 zone = kzalloc(sizeof(*zone), GFP_KERNEL);
286 if (unlikely(!zone))
287 return -ENOMEM;
288
287 mem = si->totalram; 289 mem = si->totalram;
288 mem *= si->mem_unit; 290 mem *= si->mem_unit;
289 291
@@ -295,8 +297,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
295 zone->used_mem = 0; 297 zone->used_mem = 0;
296 zone->glob = glob; 298 zone->glob = glob;
297 glob->zone_highmem = zone; 299 glob->zone_highmem = zone;
298 kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); 300 ret = kobject_init_and_add(
299 ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); 301 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
300 if (unlikely(ret != 0)) { 302 if (unlikely(ret != 0)) {
301 kobject_put(&zone->kobj); 303 kobject_put(&zone->kobj);
302 return ret; 304 return ret;
@@ -322,8 +324,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
322 * No special dma32 zone needed. 324 * No special dma32 zone needed.
323 */ 325 */
324 326
325 if (mem <= ((uint64_t) 1ULL << 32)) 327 if (mem <= ((uint64_t) 1ULL << 32)) {
328 kfree(zone);
326 return 0; 329 return 0;
330 }
327 331
328 /* 332 /*
329 * Limit max dma32 memory to 4GB for now 333 * Limit max dma32 memory to 4GB for now
@@ -340,8 +344,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
340 zone->used_mem = 0; 344 zone->used_mem = 0;
341 zone->glob = glob; 345 zone->glob = glob;
342 glob->zone_dma32 = zone; 346 glob->zone_dma32 = zone;
343 kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type); 347 ret = kobject_init_and_add(
344 ret = kobject_add(&zone->kobj, &glob->kobj, zone->name); 348 &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
345 if (unlikely(ret != 0)) { 349 if (unlikely(ret != 0)) {
346 kobject_put(&zone->kobj); 350 kobject_put(&zone->kobj);
347 return ret; 351 return ret;
@@ -362,10 +366,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
362 glob->swap_queue = create_singlethread_workqueue("ttm_swap"); 366 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
363 INIT_WORK(&glob->work, ttm_shrink_work); 367 INIT_WORK(&glob->work, ttm_shrink_work);
364 init_waitqueue_head(&glob->queue); 368 init_waitqueue_head(&glob->queue);
365 kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type); 369 ret = kobject_init_and_add(
366 ret = kobject_add(&glob->kobj, 370 &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
367 ttm_get_kobj(),
368 "memory_accounting");
369 if (unlikely(ret != 0)) { 371 if (unlikely(ret != 0)) {
370 kobject_put(&glob->kobj); 372 kobject_put(&glob->kobj);
371 return ret; 373 return ret;
@@ -460,6 +462,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
460{ 462{
461 return ttm_mem_global_free_zone(glob, NULL, amount); 463 return ttm_mem_global_free_zone(glob, NULL, amount);
462} 464}
465EXPORT_SYMBOL(ttm_mem_global_free);
463 466
464static int ttm_mem_global_reserve(struct ttm_mem_global *glob, 467static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
465 struct ttm_mem_zone *single_zone, 468 struct ttm_mem_zone *single_zone,
@@ -533,6 +536,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
533 return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, 536 return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
534 interruptible); 537 interruptible);
535} 538}
539EXPORT_SYMBOL(ttm_mem_global_alloc);
536 540
537int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, 541int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
538 struct page *page, 542 struct page *page,
@@ -588,3 +592,4 @@ size_t ttm_round_pot(size_t size)
588 } 592 }
589 return 0; 593 return 0;
590} 594}
595EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 000000000000..75e9d6f86ba4
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/** @file ttm_ref_object.c
31 *
32 * Base- and reference object implementation for the various
33 * ttm objects. Implements reference counting, minimal security checks
34 * and release on file close.
35 */
36
37/**
38 * struct ttm_object_file
39 *
40 * @tdev: Pointer to the ttm_object_device.
41 *
42 * @lock: Lock that protects the ref_list list and the
43 * ref_hash hash tables.
44 *
45 * @ref_list: List of ttm_ref_objects to be destroyed at
46 * file release.
47 *
48 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
49 * for fast lookup of ref objects given a base object.
50 */
51
52#include "ttm/ttm_object.h"
53#include "ttm/ttm_module.h"
54#include <linux/list.h>
55#include <linux/spinlock.h>
56#include <linux/slab.h>
57#include <linux/module.h>
58#include <asm/atomic.h>
59
60struct ttm_object_file {
61 struct ttm_object_device *tdev;
62 rwlock_t lock;
63 struct list_head ref_list;
64 struct drm_open_hash ref_hash[TTM_REF_NUM];
65 struct kref refcount;
66};
67
68/**
69 * struct ttm_object_device
70 *
71 * @object_lock: lock that protects the object_hash hash table.
72 *
73 * @object_hash: hash table for fast lookup of object global names.
74 *
75 * @object_count: Per device object count.
76 *
77 * This is the per-device data structure needed for ttm object management.
78 */
79
80struct ttm_object_device {
81 rwlock_t object_lock;
82 struct drm_open_hash object_hash;
83 atomic_t object_count;
84 struct ttm_mem_global *mem_glob;
85};
86
87/**
88 * struct ttm_ref_object
89 *
90 * @hash: Hash entry for the per-file object reference hash.
91 *
92 * @head: List entry for the per-file list of ref-objects.
93 *
94 * @kref: Ref count.
95 *
96 * @obj: Base object this ref object is referencing.
97 *
98 * @ref_type: Type of ref object.
99 *
100 * This is similar to an idr object, but it also has a hash table entry
101 * that allows lookup with a pointer to the referenced object as a key. In
102 * that way, one can easily detect whether a base object is referenced by
103 * a particular ttm_object_file. It also carries a ref count to avoid creating
104 * multiple ref objects if a ttm_object_file references the same base
105 * object more than once.
106 */
107
108struct ttm_ref_object {
109 struct drm_hash_item hash;
110 struct list_head head;
111 struct kref kref;
112 enum ttm_ref_type ref_type;
113 struct ttm_base_object *obj;
114 struct ttm_object_file *tfile;
115};
116
117static inline struct ttm_object_file *
118ttm_object_file_ref(struct ttm_object_file *tfile)
119{
120 kref_get(&tfile->refcount);
121 return tfile;
122}
123
124static void ttm_object_file_destroy(struct kref *kref)
125{
126 struct ttm_object_file *tfile =
127 container_of(kref, struct ttm_object_file, refcount);
128
129 kfree(tfile);
130}
131
132
133static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
134{
135 struct ttm_object_file *tfile = *p_tfile;
136
137 *p_tfile = NULL;
138 kref_put(&tfile->refcount, ttm_object_file_destroy);
139}
140
141
142int ttm_base_object_init(struct ttm_object_file *tfile,
143 struct ttm_base_object *base,
144 bool shareable,
145 enum ttm_object_type object_type,
146 void (*refcount_release) (struct ttm_base_object **),
147 void (*ref_obj_release) (struct ttm_base_object *,
148 enum ttm_ref_type ref_type))
149{
150 struct ttm_object_device *tdev = tfile->tdev;
151 int ret;
152
153 base->shareable = shareable;
154 base->tfile = ttm_object_file_ref(tfile);
155 base->refcount_release = refcount_release;
156 base->ref_obj_release = ref_obj_release;
157 base->object_type = object_type;
158 write_lock(&tdev->object_lock);
159 kref_init(&base->refcount);
160 ret = drm_ht_just_insert_please(&tdev->object_hash,
161 &base->hash,
162 (unsigned long)base, 31, 0, 0);
163 write_unlock(&tdev->object_lock);
164 if (unlikely(ret != 0))
165 goto out_err0;
166
167 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
168 if (unlikely(ret != 0))
169 goto out_err1;
170
171 ttm_base_object_unref(&base);
172
173 return 0;
174out_err1:
175 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
176out_err0:
177 return ret;
178}
179EXPORT_SYMBOL(ttm_base_object_init);
180
181static void ttm_release_base(struct kref *kref)
182{
183 struct ttm_base_object *base =
184 container_of(kref, struct ttm_base_object, refcount);
185 struct ttm_object_device *tdev = base->tfile->tdev;
186
187 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
188 write_unlock(&tdev->object_lock);
189 if (base->refcount_release) {
190 ttm_object_file_unref(&base->tfile);
191 base->refcount_release(&base);
192 }
193 write_lock(&tdev->object_lock);
194}
195
196void ttm_base_object_unref(struct ttm_base_object **p_base)
197{
198 struct ttm_base_object *base = *p_base;
199 struct ttm_object_device *tdev = base->tfile->tdev;
200
201 *p_base = NULL;
202
203 /*
204 * Need to take the lock here to avoid racing with
205 * users trying to look up the object.
206 */
207
208 write_lock(&tdev->object_lock);
209 (void)kref_put(&base->refcount, &ttm_release_base);
210 write_unlock(&tdev->object_lock);
211}
212EXPORT_SYMBOL(ttm_base_object_unref);
213
214struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
215 uint32_t key)
216{
217 struct ttm_object_device *tdev = tfile->tdev;
218 struct ttm_base_object *base;
219 struct drm_hash_item *hash;
220 int ret;
221
222 read_lock(&tdev->object_lock);
223 ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
224
225 if (likely(ret == 0)) {
226 base = drm_hash_entry(hash, struct ttm_base_object, hash);
227 kref_get(&base->refcount);
228 }
229 read_unlock(&tdev->object_lock);
230
231 if (unlikely(ret != 0))
232 return NULL;
233
234 if (tfile != base->tfile && !base->shareable) {
235 printk(KERN_ERR TTM_PFX
236 "Attempted access of non-shareable object.\n");
237 ttm_base_object_unref(&base);
238 return NULL;
239 }
240
241 return base;
242}
243EXPORT_SYMBOL(ttm_base_object_lookup);
244
245int ttm_ref_object_add(struct ttm_object_file *tfile,
246 struct ttm_base_object *base,
247 enum ttm_ref_type ref_type, bool *existed)
248{
249 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
250 struct ttm_ref_object *ref;
251 struct drm_hash_item *hash;
252 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
253 int ret = -EINVAL;
254
255 if (existed != NULL)
256 *existed = true;
257
258 while (ret == -EINVAL) {
259 read_lock(&tfile->lock);
260 ret = drm_ht_find_item(ht, base->hash.key, &hash);
261
262 if (ret == 0) {
263 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
264 kref_get(&ref->kref);
265 read_unlock(&tfile->lock);
266 break;
267 }
268
269 read_unlock(&tfile->lock);
270 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
271 false, false);
272 if (unlikely(ret != 0))
273 return ret;
274 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
275 if (unlikely(ref == NULL)) {
276 ttm_mem_global_free(mem_glob, sizeof(*ref));
277 return -ENOMEM;
278 }
279
280 ref->hash.key = base->hash.key;
281 ref->obj = base;
282 ref->tfile = tfile;
283 ref->ref_type = ref_type;
284 kref_init(&ref->kref);
285
286 write_lock(&tfile->lock);
287 ret = drm_ht_insert_item(ht, &ref->hash);
288
289 if (likely(ret == 0)) {
290 list_add_tail(&ref->head, &tfile->ref_list);
291 kref_get(&base->refcount);
292 write_unlock(&tfile->lock);
293 if (existed != NULL)
294 *existed = false;
295 break;
296 }
297
298 write_unlock(&tfile->lock);
299 BUG_ON(ret != -EINVAL);
300
301 ttm_mem_global_free(mem_glob, sizeof(*ref));
302 kfree(ref);
303 }
304
305 return ret;
306}
307EXPORT_SYMBOL(ttm_ref_object_add);
308
309static void ttm_ref_object_release(struct kref *kref)
310{
311 struct ttm_ref_object *ref =
312 container_of(kref, struct ttm_ref_object, kref);
313 struct ttm_base_object *base = ref->obj;
314 struct ttm_object_file *tfile = ref->tfile;
315 struct drm_open_hash *ht;
316 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
317
318 ht = &tfile->ref_hash[ref->ref_type];
319 (void)drm_ht_remove_item(ht, &ref->hash);
320 list_del(&ref->head);
321 write_unlock(&tfile->lock);
322
323 if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
324 base->ref_obj_release(base, ref->ref_type);
325
326 ttm_base_object_unref(&ref->obj);
327 ttm_mem_global_free(mem_glob, sizeof(*ref));
328 kfree(ref);
329 write_lock(&tfile->lock);
330}
331
332int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
333 unsigned long key, enum ttm_ref_type ref_type)
334{
335 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
336 struct ttm_ref_object *ref;
337 struct drm_hash_item *hash;
338 int ret;
339
340 write_lock(&tfile->lock);
341 ret = drm_ht_find_item(ht, key, &hash);
342 if (unlikely(ret != 0)) {
343 write_unlock(&tfile->lock);
344 return -EINVAL;
345 }
346 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
347 kref_put(&ref->kref, ttm_ref_object_release);
348 write_unlock(&tfile->lock);
349 return 0;
350}
351EXPORT_SYMBOL(ttm_ref_object_base_unref);
352
353void ttm_object_file_release(struct ttm_object_file **p_tfile)
354{
355 struct ttm_ref_object *ref;
356 struct list_head *list;
357 unsigned int i;
358 struct ttm_object_file *tfile = *p_tfile;
359
360 *p_tfile = NULL;
361 write_lock(&tfile->lock);
362
363 /*
364 * Since we release the lock within the loop, we have to
365 * restart it from the beginning each time.
366 */
367
368 while (!list_empty(&tfile->ref_list)) {
369 list = tfile->ref_list.next;
370 ref = list_entry(list, struct ttm_ref_object, head);
371 ttm_ref_object_release(&ref->kref);
372 }
373
374 for (i = 0; i < TTM_REF_NUM; ++i)
375 drm_ht_remove(&tfile->ref_hash[i]);
376
377 write_unlock(&tfile->lock);
378 ttm_object_file_unref(&tfile);
379}
380EXPORT_SYMBOL(ttm_object_file_release);
381
382struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
383 unsigned int hash_order)
384{
385 struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
386 unsigned int i;
387 unsigned int j = 0;
388 int ret;
389
390 if (unlikely(tfile == NULL))
391 return NULL;
392
393 rwlock_init(&tfile->lock);
394 tfile->tdev = tdev;
395 kref_init(&tfile->refcount);
396 INIT_LIST_HEAD(&tfile->ref_list);
397
398 for (i = 0; i < TTM_REF_NUM; ++i) {
399 ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
400 if (ret) {
401 j = i;
402 goto out_err;
403 }
404 }
405
406 return tfile;
407out_err:
408 for (i = 0; i < j; ++i)
409 drm_ht_remove(&tfile->ref_hash[i]);
410
411 kfree(tfile);
412
413 return NULL;
414}
415EXPORT_SYMBOL(ttm_object_file_init);
416
417struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
418 *mem_glob,
419 unsigned int hash_order)
420{
421 struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
422 int ret;
423
424 if (unlikely(tdev == NULL))
425 return NULL;
426
427 tdev->mem_glob = mem_glob;
428 rwlock_init(&tdev->object_lock);
429 atomic_set(&tdev->object_count, 0);
430 ret = drm_ht_create(&tdev->object_hash, hash_order);
431
432 if (likely(ret == 0))
433 return tdev;
434
435 kfree(tdev);
436 return NULL;
437}
438EXPORT_SYMBOL(ttm_object_device_init);
439
440void ttm_object_device_release(struct ttm_object_device **p_tdev)
441{
442 struct ttm_object_device *tdev = *p_tdev;
443
444 *p_tdev = NULL;
445
446 write_lock(&tdev->object_lock);
447 drm_ht_remove(&tdev->object_hash);
448 write_unlock(&tdev->object_lock);
449
450 kfree(tdev);
451}
452EXPORT_SYMBOL(ttm_object_device_release);
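
How a driver would typically hook a user-visible object into this machinery: embed a ttm_base_object, publish it with ttm_base_object_init(), and hand the hash key to user space as the handle. struct my_obj, my_obj_release() and the helpers below are hypothetical, and ttm_driver_type0 is assumed to be one of the driver object-type slots declared in ttm_object.h; only the ttm_base_object_* calls come from the file above.

#include "ttm/ttm_object.h"
#include <linux/slab.h>

struct my_obj {
	struct ttm_base_object base;
	/* driver payload ... */
};

/* Called when the last reference (usually the creator's TTM_REF_USAGE
 * ref) goes away. */
static void my_obj_release(struct ttm_base_object **p_base)
{
	struct my_obj *obj = container_of(*p_base, struct my_obj, base);

	*p_base = NULL;
	kfree(obj);
}

static int my_obj_new(struct ttm_object_file *tfile, struct my_obj *obj,
		      uint32_t *handle)
{
	int ret;

	/* Publishes the object, gives tfile a TTM_REF_USAGE reference and
	 * drops the initial one; the hash key doubles as the handle. */
	ret = ttm_base_object_init(tfile, &obj->base, true /* shareable */,
				   ttm_driver_type0, &my_obj_release, NULL);
	if (likely(ret == 0))
		*handle = obj->base.hash.key;
	return ret;
}

/* Lookup by handle; the caller drops the returned reference with
 * ttm_base_object_unref(). */
static struct my_obj *my_obj_lookup(struct ttm_object_file *tfile,
				    uint32_t handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	return base ? container_of(base, struct my_obj, base) : NULL;
}
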
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce8..d5fd5b8faeb3 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,13 +28,14 @@
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30 30
31#include <linux/vmalloc.h>
32#include <linux/sched.h> 31#include <linux/sched.h>
33#include <linux/highmem.h> 32#include <linux/highmem.h>
34#include <linux/pagemap.h> 33#include <linux/pagemap.h>
35#include <linux/file.h> 34#include <linux/file.h>
36#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/slab.h>
37#include "drm_cache.h" 37#include "drm_cache.h"
38#include "drm_mem_util.h"
38#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
39#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
40#include "ttm/ttm_placement.h" 41#include "ttm/ttm_placement.h"
@@ -43,32 +44,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
43 44
44/** 45/**
45 * Allocates storage for pointers to the pages that back the ttm. 46 * Allocates storage for pointers to the pages that back the ttm.
46 *
47 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
48 */ 47 */
49static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm) 48static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
50{ 49{
51 unsigned long size = ttm->num_pages * sizeof(*ttm->pages); 50 ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
52 ttm->pages = NULL;
53
54 if (size <= PAGE_SIZE)
55 ttm->pages = kzalloc(size, GFP_KERNEL);
56
57 if (!ttm->pages) {
58 ttm->pages = vmalloc_user(size);
59 if (ttm->pages)
60 ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
61 }
62} 51}
63 52
64static void ttm_tt_free_page_directory(struct ttm_tt *ttm) 53static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
65{ 54{
66 if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) { 55 drm_free_large(ttm->pages);
67 vfree(ttm->pages);
68 ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
69 } else {
70 kfree(ttm->pages);
71 }
72 ttm->pages = NULL; 56 ttm->pages = NULL;
73} 57}
74 58
@@ -192,26 +176,38 @@ int ttm_tt_populate(struct ttm_tt *ttm)
192 ttm->state = tt_unbound; 176 ttm->state = tt_unbound;
193 return 0; 177 return 0;
194} 178}
179EXPORT_SYMBOL(ttm_tt_populate);
195 180
196#ifdef CONFIG_X86 181#ifdef CONFIG_X86
197static inline int ttm_tt_set_page_caching(struct page *p, 182static inline int ttm_tt_set_page_caching(struct page *p,
198 enum ttm_caching_state c_state) 183 enum ttm_caching_state c_old,
184 enum ttm_caching_state c_new)
199{ 185{
186 int ret = 0;
187
200 if (PageHighMem(p)) 188 if (PageHighMem(p))
201 return 0; 189 return 0;
202 190
203 switch (c_state) { 191 if (c_old != tt_cached) {
204 case tt_cached: 192 /* p isn't in the default caching state, set it to
205 return set_pages_wb(p, 1); 193 * writeback first to free its current memtype. */
206 case tt_wc: 194
207 return set_memory_wc((unsigned long) page_address(p), 1); 195 ret = set_pages_wb(p, 1);
208 default: 196 if (ret)
209 return set_pages_uc(p, 1); 197 return ret;
210 } 198 }
199
200 if (c_new == tt_wc)
201 ret = set_memory_wc((unsigned long) page_address(p), 1);
202 else if (c_new == tt_uncached)
203 ret = set_pages_uc(p, 1);
204
205 return ret;
211} 206}
212#else /* CONFIG_X86 */ 207#else /* CONFIG_X86 */
213static inline int ttm_tt_set_page_caching(struct page *p, 208static inline int ttm_tt_set_page_caching(struct page *p,
214 enum ttm_caching_state c_state) 209 enum ttm_caching_state c_old,
210 enum ttm_caching_state c_new)
215{ 211{
216 return 0; 212 return 0;
217} 213}
@@ -244,7 +240,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
244 for (i = 0; i < ttm->num_pages; ++i) { 240 for (i = 0; i < ttm->num_pages; ++i) {
245 cur_page = ttm->pages[i]; 241 cur_page = ttm->pages[i];
246 if (likely(cur_page != NULL)) { 242 if (likely(cur_page != NULL)) {
247 ret = ttm_tt_set_page_caching(cur_page, c_state); 243 ret = ttm_tt_set_page_caching(cur_page,
244 ttm->caching_state,
245 c_state);
248 if (unlikely(ret != 0)) 246 if (unlikely(ret != 0))
249 goto out_err; 247 goto out_err;
250 } 248 }
@@ -258,7 +256,7 @@ out_err:
258 for (j = 0; j < i; ++j) { 256 for (j = 0; j < i; ++j) {
259 cur_page = ttm->pages[j]; 257 cur_page = ttm->pages[j];
260 if (likely(cur_page != NULL)) { 258 if (likely(cur_page != NULL)) {
261 (void)ttm_tt_set_page_caching(cur_page, 259 (void)ttm_tt_set_page_caching(cur_page, c_state,
262 ttm->caching_state); 260 ttm->caching_state);
263 } 261 }
264 } 262 }
@@ -466,7 +464,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
466 void *from_virtual; 464 void *from_virtual;
467 void *to_virtual; 465 void *to_virtual;
468 int i; 466 int i;
469 int ret; 467 int ret = -ENOMEM;
470 468
471 if (ttm->page_flags & TTM_PAGE_FLAG_USER) { 469 if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
472 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, 470 ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -485,8 +483,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
485 483
486 for (i = 0; i < ttm->num_pages; ++i) { 484 for (i = 0; i < ttm->num_pages; ++i) {
487 from_page = read_mapping_page(swap_space, i, NULL); 485 from_page = read_mapping_page(swap_space, i, NULL);
488 if (IS_ERR(from_page)) 486 if (IS_ERR(from_page)) {
487 ret = PTR_ERR(from_page);
489 goto out_err; 488 goto out_err;
489 }
490 to_page = __ttm_tt_get_page(ttm, i); 490 to_page = __ttm_tt_get_page(ttm, i);
491 if (unlikely(to_page == NULL)) 491 if (unlikely(to_page == NULL))
492 goto out_err; 492 goto out_err;
@@ -509,7 +509,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
509 return 0; 509 return 0;
510out_err: 510out_err:
511 ttm_tt_free_alloced_pages(ttm); 511 ttm_tt_free_alloced_pages(ttm);
512 return -ENOMEM; 512 return ret;
513} 513}
514 514
515int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) 515int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -521,6 +521,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
521 void *from_virtual; 521 void *from_virtual;
522 void *to_virtual; 522 void *to_virtual;
523 int i; 523 int i;
524 int ret = -ENOMEM;
524 525
525 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); 526 BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
526 BUG_ON(ttm->caching_state != tt_cached); 527 BUG_ON(ttm->caching_state != tt_cached);
@@ -543,7 +544,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
543 0); 544 0);
544 if (unlikely(IS_ERR(swap_storage))) { 545 if (unlikely(IS_ERR(swap_storage))) {
545 printk(KERN_ERR "Failed allocating swap storage.\n"); 546 printk(KERN_ERR "Failed allocating swap storage.\n");
546 return -ENOMEM; 547 return PTR_ERR(swap_storage);
547 } 548 }
548 } else 549 } else
549 swap_storage = persistant_swap_storage; 550 swap_storage = persistant_swap_storage;
@@ -555,9 +556,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
555 if (unlikely(from_page == NULL)) 556 if (unlikely(from_page == NULL))
556 continue; 557 continue;
557 to_page = read_mapping_page(swap_space, i, NULL); 558 to_page = read_mapping_page(swap_space, i, NULL);
558 if (unlikely(to_page == NULL)) 559 if (unlikely(IS_ERR(to_page))) {
560 ret = PTR_ERR(to_page);
559 goto out_err; 561 goto out_err;
560 562 }
561 preempt_disable(); 563 preempt_disable();
562 from_virtual = kmap_atomic(from_page, KM_USER0); 564 from_virtual = kmap_atomic(from_page, KM_USER0);
563 to_virtual = kmap_atomic(to_page, KM_USER1); 565 to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -581,5 +583,5 @@ out_err:
581 if (!persistant_swap_storage) 583 if (!persistant_swap_storage)
582 fput(swap_storage); 584 fput(swap_storage);
583 585
584 return -ENOMEM; 586 return ret;
585} 587}
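
The two-step transition in ttm_tt_set_page_caching() above exists so that a page's previously requested memtype is released (by returning it to write-back) before the new one is set. A minimal sketch of a caller follows, assuming the ttm_tt_set_placement_caching() helper and TTM_PL_FLAG_* placement flags already present in this tree; the write-combine round trip shown is illustrative, the real caller being the buffer-move path.

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

/* Illustrative only: move a ttm's pages to write-combined and back. */
static int my_wc_round_trip(struct ttm_tt *ttm)
{
	int ret;

	ret = ttm_tt_set_placement_caching(ttm, TTM_PL_FLAG_WC);
	if (unlikely(ret != 0))
		return ret;

	/* ... use the write-combined mapping ... */

	/* Going back passes c_old == tt_wc down to
	 * ttm_tt_set_page_caching(), which first resets the pages to
	 * write-back and thereby releases the WC memtype. */
	return ttm_tt_set_placement_caching(ttm, TTM_PL_FLAG_CACHED);
}
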