author    Dave Airlie <airlied@redhat.com>    2010-11-21 22:24:40 -0500
committer Dave Airlie <airlied@redhat.com>    2010-11-21 22:24:40 -0500
commit    d6ea88865d3e5b0c62040531310c1f2c6a994f46 (patch)
tree      b80a7cbc6eeab003b412e3037fd335ce9d572f67 /drivers/gpu/drm/ttm/ttm_execbuf_util.c
parent    27641c3f003e7f3b6585c01d8a788883603eb262 (diff)
drm/ttm: Add a bo list reserve fastpath (v2)
Makes it possible to reserve a list of buffer objects with a single
spin lock / unlock if there is no contention.
Should improve cpu usage on SMP kernels.

v2: Initialize private list members on reserve and don't call
ttm_bo_list_ref_sub() with zero put_count.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
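For context, a caller-side sketch of how a driver might use this interface is shown below. It is illustrative only: the validate_bos() helper, its parameters and its error handling are assumptions, and so is the header path; only the bo/head members of struct ttm_validate_buffer and the ttm_eu_reserve_buffers() / ttm_eu_backoff_reservation() calls are taken from this file.

/* Hypothetical caller sketch (not part of this patch). */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/ttm/ttm_execbuf_util.h>	/* header path assumed */

static int validate_bos(struct ttm_buffer_object **bos, unsigned int n,
			uint32_t val_seq)
{
	struct ttm_validate_buffer *entries;
	struct list_head list;
	unsigned int i;
	int ret;

	entries = kcalloc(n, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* One ttm_validate_buffer per bo; the private list members
	 * (reserved, removed, put_count) are initialized by
	 * ttm_eu_reserve_buffers() itself as of v2 of this patch. */
	INIT_LIST_HEAD(&list);
	for (i = 0; i < n; i++) {
		entries[i].bo = bos[i];
		list_add_tail(&entries[i].head, &list);
	}

	/* Fastpath: with no contention the whole list is reserved under
	 * a single lru_lock acquisition. */
	ret = ttm_eu_reserve_buffers(&list, val_seq);
	if (ret == 0) {
		/* ... validate the buffers here; a real driver would fence
		 * them, this sketch just drops the reservations again ... */
		ttm_eu_backoff_reservation(&list);
	}

	kfree(entries);
	return ret;
}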
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	124
1 file changed, 113 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d15..201a71d111ec 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,6 +32,72 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+
+		}
+		entry->reserved = false;
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
+	}
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
+	}
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+					 struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int ret;
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ret = ttm_bo_wait_unreserved(bo, true);
+	spin_lock(&glob->lru_lock);
+	if (unlikely(ret != 0))
+		ttm_eu_backoff_reservation_locked(list);
+	return ret;
+}
+
+
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -61,35 +127,71 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
 {
+	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
 
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+
 retry:
+	spin_lock(&glob->lru_lock);
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		entry->reserved = false;
-		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-		if (ret != 0) {
-			ttm_eu_backoff_reservation(list);
-			if (ret == -EAGAIN) {
-				ret = ttm_bo_wait_unreserved(bo, true);
-				if (unlikely(ret != 0))
-					return ret;
-				goto retry;
-			} else
+retry_this_bo:
+		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ret = ttm_eu_wait_unreserved_locked(list, bo);
+			if (unlikely(ret != 0)) {
+				spin_unlock(&glob->lru_lock);
+				ttm_eu_list_ref_sub(list);
+				return ret;
+			}
+			goto retry_this_bo;
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_wait_unreserved(bo, true);
+			if (unlikely(ret != 0))
 				return ret;
+			goto retry;
+		default:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			return ret;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation(list);
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
 			ret = ttm_bo_wait_cpu(bo, false);
 			if (ret)
 				return ret;
 			goto retry;
 		}
 	}
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);