-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c      |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c   | 497
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h   |  22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c     |  17
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c      |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c      |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c      |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c      |   4
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c      |  22
10 files changed, 383 insertions(+), 205 deletions(-)
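This patch converts nouveau's private, kref-based fences to the kernel's cross-driver struct fence interface: each fence gains a global (context, seqno) identity, the exclusive fence is published in the buffer's reservation object, and signaling can be driven by a per-channel NVIF uevent notifier. For orientation, a minimal sketch of the fence_ops contract being implemented follows; it is illustrative only (all demo_* names are hypothetical), using the linux/fence.h API of this kernel generation (later renamed dma_fence):

#include <linux/fence.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static const char *demo_driver_name(struct fence *f)
{
	return "demo";
}

static const char *demo_timeline_name(struct fence *f)
{
	return "demo-chan";
}

static bool demo_enable_signaling(struct fence *f)
{
	/* Arm an interrupt or notifier here; returning true promises
	 * that fence_signal() will eventually be called on f. */
	return true;
}

static const struct fence_ops demo_fence_ops = {
	.get_driver_name   = demo_driver_name,
	.get_timeline_name = demo_timeline_name,
	.enable_signaling  = demo_enable_signaling,
	.wait              = fence_default_wait,
};

static void demo_emit(struct fence *f, unsigned context, unsigned seqno)
{
	/* One context per timeline (channel); seqno increases along it. */
	fence_init(f, &demo_fence_ops, &demo_lock, context, seqno);
}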
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 8d8e5f6340d0..2d026c81ca1b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -970,7 +970,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	}
 
 	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
-	ret = nouveau_fence_sync(bo->sync_obj, chan);
+	ret = nouveau_fence_sync(nouveau_bo(bo), chan);
 	if (ret == 0) {
 		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
 		if (ret == 0) {
@@ -1464,10 +1464,12 @@ nouveau_bo_fence_unref(void **sync_obj)
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
-	lockdep_assert_held(&nvbo->bo.resv->lock.base);
+	struct reservation_object *resv = nvbo->bo.resv;
 
 	nouveau_bo_fence_unref(&nvbo->bo.sync_obj);
 	nvbo->bo.sync_obj = nouveau_fence_ref(fence);
+
+	reservation_object_add_excl_fence(resv, &fence->base);
 }
 
 static void *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e6867b9ebb46..ec1960a9412c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -658,7 +658,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* Synchronize with the old framebuffer */
-	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	ret = nouveau_fence_sync(old_bo, chan);
 	if (ret)
 		goto fail;
 
@@ -722,7 +722,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		goto fail_unpin;
 
 	/* synchronise rendering channel with the kernel's channel */
-	ret = nouveau_fence_sync(new_bo->bo.sync_obj, chan);
+	ret = nouveau_fence_sync(new_bo, chan);
 	if (ret) {
 		ttm_bo_unreserve(&new_bo->bo);
 		goto fail_unpin;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 0a93114158cd..3beb3bf130e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,6 +28,7 @@
 
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
+#include <trace/events/fence.h>
 
 #include <nvif/notify.h>
 #include <nvif/event.h>
@@ -36,123 +37,209 @@
 #include "nouveau_dma.h"
 #include "nouveau_fence.h"
 
-struct fence_work {
-	struct work_struct base;
-	struct list_head head;
-	void (*func)(void *);
-	void *data;
-};
+static const struct fence_ops nouveau_fence_ops_uevent;
+static const struct fence_ops nouveau_fence_ops_legacy;
+
+static inline struct nouveau_fence *
+from_fence(struct fence *fence)
+{
+	return container_of(fence, struct nouveau_fence, base);
+}
+
+static inline struct nouveau_fence_chan *
+nouveau_fctx(struct nouveau_fence *fence)
+{
+	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
+}
 
 static void
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
-	struct fence_work *work, *temp;
+	fence_signal_locked(&fence->base);
+	list_del(&fence->head);
+
+	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
+		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-	list_for_each_entry_safe(work, temp, &fence->work, head) {
-		schedule_work(&work->base);
-		list_del(&work->head);
+		if (!--fctx->notify_ref)
+			nvif_notify_put(&fctx->notify);
 	}
 
-	fence->channel = NULL;
-	list_del(&fence->head);
+	fence_put(&fence->base);
+}
+
+static struct nouveau_fence *
+nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm) {
+	struct nouveau_fence_priv *priv = (void*)drm->fence;
+
+	if (fence->ops != &nouveau_fence_ops_legacy &&
+	    fence->ops != &nouveau_fence_ops_uevent)
+		return NULL;
+
+	if (fence->context < priv->context_base ||
+	    fence->context >= priv->context_base + priv->contexts)
+		return NULL;
+
+	return from_fence(fence);
 }
 
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
-	struct nouveau_fence *fence, *fnext;
-	spin_lock(&fctx->lock);
-	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+	struct nouveau_fence *fence;
+
+	nvif_notify_fini(&fctx->notify);
+
+	spin_lock_irq(&fctx->lock);
+	while (!list_empty(&fctx->pending)) {
+		fence = list_entry(fctx->pending.next, typeof(*fence), head);
+
 		nouveau_fence_signal(fence);
+		fence->channel = NULL;
 	}
-	spin_unlock(&fctx->lock);
+	spin_unlock_irq(&fctx->lock);
+}
+
+static void
+nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
+{
+	struct nouveau_fence *fence;
+
+	u32 seq = fctx->read(chan);
+
+	while (!list_empty(&fctx->pending)) {
+		fence = list_entry(fctx->pending.next, typeof(*fence), head);
+
+		if ((int)(seq - fence->base.seqno) < 0)
+			return;
+
+		nouveau_fence_signal(fence);
+	}
+}
+
+static int
+nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
+{
+	struct nouveau_fence_chan *fctx =
+		container_of(notify, typeof(*fctx), notify);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fctx->lock, flags);
+	if (!list_empty(&fctx->pending)) {
+		struct nouveau_fence *fence;
+
+		fence = list_entry(fctx->pending.next, typeof(*fence), head);
+		nouveau_fence_update(fence->channel, fctx);
+	}
+	spin_unlock_irqrestore(&fctx->lock, flags);
+
+	/* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
+	return NVIF_NOTIFY_KEEP;
 }
 
 void
-nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
+	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
+	int ret;
+
 	INIT_LIST_HEAD(&fctx->flip);
 	INIT_LIST_HEAD(&fctx->pending);
 	spin_lock_init(&fctx->lock);
+	fctx->context = priv->context_base + chan->chid;
+
+	if (!priv->uevent)
+		return;
+
+	ret = nvif_notify_init(chan->object, NULL,
+			       nouveau_fence_wait_uevent_handler, false,
+			       G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       &(struct nvif_notify_uevent_req) { },
+			       sizeof(struct nvif_notify_uevent_req),
+			       sizeof(struct nvif_notify_uevent_rep),
+			       &fctx->notify);
+
+	WARN_ON(ret);
 }
 
+struct nouveau_fence_work {
+	struct work_struct work;
+	struct fence_cb cb;
+	void (*func)(void *);
+	void *data;
+};
+
 static void
 nouveau_fence_work_handler(struct work_struct *kwork)
 {
-	struct fence_work *work = container_of(kwork, typeof(*work), base);
+	struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
 	work->func(work->data);
 	kfree(work);
 }
 
+static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
+{
+	struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);
+
+	schedule_work(&work->work);
+}
+
 void
 nouveau_fence_work(struct nouveau_fence *fence,
 		   void (*func)(void *), void *data)
 {
-	struct nouveau_channel *chan = fence->channel;
-	struct nouveau_fence_chan *fctx;
-	struct fence_work *work = NULL;
+	struct nouveau_fence_work *work;
 
-	if (nouveau_fence_done(fence)) {
-		func(data);
-		return;
-	}
+	if (fence_is_signaled(&fence->base))
+		goto err;
 
-	fctx = chan->fence;
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work) {
 		WARN_ON(nouveau_fence_wait(fence, false, false));
-		func(data);
-		return;
+		goto err;
 	}
 
-	spin_lock(&fctx->lock);
-	if (!fence->channel) {
-		spin_unlock(&fctx->lock);
-		kfree(work);
-		func(data);
-		return;
-	}
-
-	INIT_WORK(&work->base, nouveau_fence_work_handler);
+	INIT_WORK(&work->work, nouveau_fence_work_handler);
 	work->func = func;
 	work->data = data;
-	list_add(&work->head, &fence->work);
-	spin_unlock(&fctx->lock);
-}
-
-static void
-nouveau_fence_update(struct nouveau_channel *chan)
-{
-	struct nouveau_fence_chan *fctx = chan->fence;
-	struct nouveau_fence *fence, *fnext;
 
-	spin_lock(&fctx->lock);
-	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
-		if (fctx->read(chan) < fence->sequence)
-			break;
+	if (fence_add_callback(&fence->base, &work->cb, nouveau_fence_work_cb) < 0)
+		goto err_free;
+	return;
 
-		nouveau_fence_signal(fence);
-		nouveau_fence_unref(&fence);
-	}
-	spin_unlock(&fctx->lock);
+err_free:
+	kfree(work);
+err:
+	func(data);
 }
 
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
+	struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
 	int ret;
 
 	fence->channel = chan;
 	fence->timeout = jiffies + (15 * HZ);
-	fence->sequence = ++fctx->sequence;
 
+	if (priv->uevent)
+		fence_init(&fence->base, &nouveau_fence_ops_uevent,
+			   &fctx->lock,
+			   priv->context_base + chan->chid, ++fctx->sequence);
+	else
+		fence_init(&fence->base, &nouveau_fence_ops_legacy,
+			   &fctx->lock,
+			   priv->context_base + chan->chid, ++fctx->sequence);
+
+	trace_fence_emit(&fence->base);
 	ret = fctx->emit(fence);
 	if (!ret) {
-		kref_get(&fence->kref);
-		spin_lock(&fctx->lock);
+		fence_get(&fence->base);
+		spin_lock_irq(&fctx->lock);
+		nouveau_fence_update(chan, fctx);
 		list_add_tail(&fence->head, &fctx->pending);
-		spin_unlock(&fctx->lock);
+		spin_unlock_irq(&fctx->lock);
 	}
 
 	return ret;
@@ -161,114 +248,70 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 bool
 nouveau_fence_done(struct nouveau_fence *fence)
 {
-	if (fence->channel)
-		nouveau_fence_update(fence->channel);
-	return !fence->channel;
-}
+	if (fence->base.ops == &nouveau_fence_ops_legacy ||
+	    fence->base.ops == &nouveau_fence_ops_uevent) {
+		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+		unsigned long flags;
 
-struct nouveau_fence_wait {
-	struct nouveau_fence_priv *priv;
-	struct nvif_notify notify;
-};
+		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+			return true;
 
-static int
-nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
-{
-	struct nouveau_fence_wait *wait =
-		container_of(notify, typeof(*wait), notify);
-	wake_up_all(&wait->priv->waiting);
-	return NVIF_NOTIFY_KEEP;
+		spin_lock_irqsave(&fctx->lock, flags);
+		nouveau_fence_update(fence->channel, fctx);
+		spin_unlock_irqrestore(&fctx->lock, flags);
+	}
+	return fence_is_signaled(&fence->base);
 }
 
-static int
-nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
-
+static long
+nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
 {
-	struct nouveau_channel *chan = fence->channel;
-	struct nouveau_fence_priv *priv = chan->drm->fence;
-	struct nouveau_fence_wait wait = { .priv = priv };
-	int ret = 0;
+	struct nouveau_fence *fence = from_fence(f);
+	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+	unsigned long t = jiffies, timeout = t + wait;
 
-	ret = nvif_notify_init(chan->object, NULL,
-			       nouveau_fence_wait_uevent_handler, false,
-			       G82_CHANNEL_DMA_V0_NTFY_UEVENT,
-			       &(struct nvif_notify_uevent_req) {
-			       },
-			       sizeof(struct nvif_notify_uevent_req),
-			       sizeof(struct nvif_notify_uevent_rep),
-			       &wait.notify);
-	if (ret)
-		return ret;
+	while (!nouveau_fence_done(fence)) {
+		ktime_t kt;
 
-	nvif_notify_get(&wait.notify);
-
-	if (fence->timeout) {
-		unsigned long timeout = fence->timeout - jiffies;
-
-		if (time_before(jiffies, fence->timeout)) {
-			if (intr) {
-				ret = wait_event_interruptible_timeout(
-						priv->waiting,
-						nouveau_fence_done(fence),
-						timeout);
-			} else {
-				ret = wait_event_timeout(priv->waiting,
-						nouveau_fence_done(fence),
-						timeout);
-			}
-		}
+		t = jiffies;
 
-		if (ret >= 0) {
-			fence->timeout = jiffies + ret;
-			if (time_after_eq(jiffies, fence->timeout))
-				ret = -EBUSY;
-		}
-	} else {
-		if (intr) {
-			ret = wait_event_interruptible(priv->waiting,
-					nouveau_fence_done(fence));
-		} else {
-			wait_event(priv->waiting, nouveau_fence_done(fence));
+		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
+			__set_current_state(TASK_RUNNING);
+			return 0;
 		}
+
+		__set_current_state(intr ? TASK_INTERRUPTIBLE :
+					   TASK_UNINTERRUPTIBLE);
+
+		kt = ktime_set(0, sleep_time);
+		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
+		sleep_time *= 2;
+		if (sleep_time > NSEC_PER_MSEC)
+			sleep_time = NSEC_PER_MSEC;
+
+		if (intr && signal_pending(current))
+			return -ERESTARTSYS;
 	}
 
-	nvif_notify_fini(&wait.notify);
-	if (unlikely(ret < 0))
-		return ret;
+	__set_current_state(TASK_RUNNING);
 
-	return 0;
+	return timeout - t;
 }
 
-int
-nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
+static int
+nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
 {
-	struct nouveau_channel *chan = fence->channel;
-	struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
-	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
-	ktime_t t;
 	int ret = 0;
 
-	while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
-		ret = nouveau_fence_wait_uevent(fence, intr);
-		if (ret < 0)
-			return ret;
-	}
-
 	while (!nouveau_fence_done(fence)) {
-		if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
+		if (time_after_eq(jiffies, fence->timeout)) {
 			ret = -EBUSY;
 			break;
 		}
 
-		__set_current_state(intr ? TASK_INTERRUPTIBLE :
-					   TASK_UNINTERRUPTIBLE);
-		if (lazy) {
-			t = ktime_set(0, sleep_time);
-			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
-			sleep_time *= 2;
-			if (sleep_time > NSEC_PER_MSEC)
-				sleep_time = NSEC_PER_MSEC;
-		}
+		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);
 
 		if (intr && signal_pending(current)) {
 			ret = -ERESTARTSYS;
@@ -281,36 +324,77 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 }
 
 int
-nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
+{
+	long ret;
+
+	if (!lazy)
+		return nouveau_fence_wait_busy(fence, intr);
+
+	ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
+	if (ret < 0)
+		return ret;
+	else if (!ret)
+		return -EBUSY;
+	else
+		return 0;
+}
+
+int
+nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
 {
 	struct nouveau_fence_chan *fctx = chan->fence;
-	struct nouveau_channel *prev;
-	int ret = 0;
+	struct fence *fence = NULL;
+	struct reservation_object *resv = nvbo->bo.resv;
+	struct reservation_object_list *fobj;
+	int ret = 0, i;
+
+	fence = nvbo->bo.sync_obj;
+	if (fence && fence_is_signaled(fence)) {
+		nouveau_fence_unref((struct nouveau_fence **)
+				    &nvbo->bo.sync_obj);
+		fence = NULL;
+	}
+
+	if (fence) {
+		struct nouveau_fence *f = from_fence(fence);
+		struct nouveau_channel *prev = f->channel;
 
-	prev = fence ? fence->channel : NULL;
-	if (prev) {
-		if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
-			ret = fctx->sync(fence, prev, chan);
+		if (prev != chan) {
+			ret = fctx->sync(f, prev, chan);
 			if (unlikely(ret))
-				ret = nouveau_fence_wait(fence, true, false);
+				ret = nouveau_fence_wait(f, true, true);
 		}
 	}
 
-	return ret;
-}
+	if (ret)
+		return ret;
 
-static void
-nouveau_fence_del(struct kref *kref)
-{
-	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
-	kfree(fence);
+	fence = reservation_object_get_excl(resv);
+	if (fence && !nouveau_local_fence(fence, chan->drm))
+		ret = fence_wait(fence, true);
+
+	fobj = reservation_object_get_list(resv);
+	if (!fobj || ret)
+		return ret;
+
+	for (i = 0; i < fobj->shared_count && !ret; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+						  reservation_object_held(resv));
+
+		/* should always be true, for now */
+		if (!nouveau_local_fence(fence, chan->drm))
+			ret = fence_wait(fence, true);
+	}
+
+	return ret;
 }
 
 void
 nouveau_fence_unref(struct nouveau_fence **pfence)
 {
 	if (*pfence)
-		kref_put(&(*pfence)->kref, nouveau_fence_del);
+		fence_put(&(*pfence)->base);
 	*pfence = NULL;
 }
 
@@ -318,7 +402,7 @@ struct nouveau_fence *
 nouveau_fence_ref(struct nouveau_fence *fence)
 {
 	if (fence)
-		kref_get(&fence->kref);
+		fence_get(&fence->base);
 	return fence;
 }
 
@@ -336,9 +420,7 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	if (!fence)
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&fence->work);
 	fence->sysmem = sysmem;
-	kref_init(&fence->kref);
 
 	ret = nouveau_fence_emit(fence, chan);
 	if (ret)
@@ -347,3 +429,92 @@ nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
 	*pfence = fence;
 	return ret;
 }
+
+static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
+{
+	return "nouveau";
+}
+
+static const char *nouveau_fence_get_timeline_name(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+
+	return fence->channel ? fctx->name : "dead channel";
+}
+
+/*
+ * In an ideal world, read would not assume the channel context is still alive.
+ * This function may be called from another device, running into free memory as a
+ * result. The drm node should still be there, so we can derive the index from
+ * the fence context.
+ */
+static bool nouveau_fence_is_signaled(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+	struct nouveau_channel *chan = fence->channel;
+
+	return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+}
+
+static bool nouveau_fence_no_signaling(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+
+	/*
+	 * caller should have a reference on the fence,
+	 * else fence could get freed here
+	 */
+	WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);
+
+	/*
+	 * This needs uevents to work correctly, but fence_add_callback relies on
+	 * being able to enable signaling. It will still get signaled eventually,
+	 * just not right away.
+	 */
+	if (nouveau_fence_is_signaled(f)) {
+		list_del(&fence->head);
+
+		fence_put(&fence->base);
+		return false;
+	}
+
+	return true;
+}
+
+static const struct fence_ops nouveau_fence_ops_legacy = {
+	.get_driver_name = nouveau_fence_get_get_driver_name,
+	.get_timeline_name = nouveau_fence_get_timeline_name,
+	.enable_signaling = nouveau_fence_no_signaling,
+	.signaled = nouveau_fence_is_signaled,
+	.wait = nouveau_fence_wait_legacy,
+	.release = NULL
+};
+
+static bool nouveau_fence_enable_signaling(struct fence *f)
+{
+	struct nouveau_fence *fence = from_fence(f);
+	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+	bool ret;
+
+	if (!fctx->notify_ref++)
+		nvif_notify_get(&fctx->notify);
+
+	ret = nouveau_fence_no_signaling(f);
+	if (ret)
+		set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
+	else if (!--fctx->notify_ref)
+		nvif_notify_put(&fctx->notify);
+
+	return ret;
+}
+
+static const struct fence_ops nouveau_fence_ops_uevent = {
+	.get_driver_name = nouveau_fence_get_get_driver_name,
+	.get_timeline_name = nouveau_fence_get_timeline_name,
+	.enable_signaling = nouveau_fence_enable_signaling,
+	.signaled = nouveau_fence_is_signaled,
+	.wait = fence_default_wait,
+	.release = NULL
+};
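
A note on the (int)(seq - fence->base.seqno) < 0 test used by nouveau_fence_update() and nouveau_fence_is_signaled() above: casting the unsigned difference to a signed type keeps the comparison correct across 32-bit sequence wraparound, as long as the two values are less than 2^31 apart. A self-contained userspace illustration (not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Wraparound-safe "has the hardware passed this seqno?" check. */
static int seqno_passed(uint32_t hw_seq, uint32_t fence_seq)
{
	return (int32_t)(hw_seq - fence_seq) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));                    /* ordinary case */
	assert(!seqno_passed(3, 5));                   /* not reached yet */
	assert(seqno_passed(0x00000002, 0xfffffffe));  /* across the wrap */
	assert(!seqno_passed(0xfffffffe, 0x00000002)); /* wrapped, still pending */
	return 0;
}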
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c57bb61da58c..44efd8c7426c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,18 +1,21 @@
 #ifndef __NOUVEAU_FENCE_H__
 #define __NOUVEAU_FENCE_H__
 
+#include <linux/fence.h>
+#include <nvif/notify.h>
+
 struct nouveau_drm;
+struct nouveau_bo;
 
 struct nouveau_fence {
+	struct fence base;
+
 	struct list_head head;
-	struct list_head work;
-	struct kref kref;
 
 	bool sysmem;
 
 	struct nouveau_channel *channel;
 	unsigned long timeout;
-	u32 sequence;
 };
 
 int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
@@ -25,9 +28,10 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
 void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
-int  nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
+int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *);
 
 struct nouveau_fence_chan {
+	spinlock_t lock;
 	struct list_head pending;
 	struct list_head flip;
 
@@ -38,8 +42,12 @@ struct nouveau_fence_chan {
 	int (*emit32)(struct nouveau_channel *, u64, u32);
 	int (*sync32)(struct nouveau_channel *, u64, u32);
 
-	spinlock_t lock;
 	u32 sequence;
+	u32 context;
+	char name[24];
+
+	struct nvif_notify notify;
+	int notify_ref;
 };
 
 struct nouveau_fence_priv {
@@ -49,13 +57,13 @@ struct nouveau_fence_priv {
 	int  (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	wait_queue_head_t waiting;
+	u32 contexts, context_base;
 	bool uevent;
 };
 
 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
 
-void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_new(struct nouveau_channel *, struct nouveau_fence_chan *);
 void nouveau_fence_context_del(struct nouveau_fence_chan *);
 
 int nv04_fence_create(struct nouveau_drm *);
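
The from_fence() and nouveau_fctx() helpers introduced by this patch both lean on container_of(): struct fence is embedded in struct nouveau_fence, and the fence's lock pointer aims at the lock embedded in struct nouveau_fence_chan, so either pointer can be mapped back to its enclosing object. A stand-alone sketch of the pattern (illustrative names, plain C):

#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_fence {
	unsigned seqno;
};

struct driver_fence {
	struct base_fence base;	/* embedded, like struct fence in nouveau_fence */
	int channel;
};

static struct driver_fence *demo_from_fence(struct base_fence *f)
{
	return demo_container_of(f, struct driver_fence, base);
}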
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 1650c0bdb0fc..d68c9656e409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -426,18 +426,6 @@ retry:
 }
 
 static int
-validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
-{
-	struct nouveau_fence *fence = nvbo->bo.sync_obj;
-	int ret = 0;
-
-	if (fence)
-		ret = nouveau_fence_sync(fence, chan);
-
-	return ret;
-}
-
-static int
 validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
 	      uint64_t user_pbbo_ptr)
@@ -466,9 +454,10 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		ret = validate_sync(chan, nvbo);
+		ret = nouveau_fence_sync(nvbo, chan);
 		if (unlikely(ret)) {
-			NV_PRINTK(error, cli, "fail post-validate sync\n");
+			if (ret != -ERESTARTSYS)
+				NV_PRINTK(error, cli, "fail post-validate sync\n");
 			return ret;
 		}
 
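
The validate_list() hunk also changes error reporting: nouveau_fence_sync() may now block interruptibly, so -ERESTARTSYS is an expected, restartable outcome rather than a failure worth logging. A sketch of the caller pattern (hypothetical demo function; ERESTARTSYS is the kernel-internal value, defined here only for illustration):

#include <stdio.h>

#define ERESTARTSYS 512

/* Only log real failures; let -ERESTARTSYS propagate silently so the
 * syscall can be restarted after signal handling. */
static int demo_post_validate_sync(int sync_ret)
{
	if (sync_ret && sync_ret != -ERESTARTSYS)
		fprintf(stderr, "fail post-validate sync: %d\n", sync_ret);
	return sync_ret;
}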
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 239c2c5a9615..4484131d826a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -41,7 +41,7 @@ nv04_fence_emit(struct nouveau_fence *fence)
 	int ret = RING_SPACE(chan, 2);
 	if (ret == 0) {
 		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
-		OUT_RING (chan, fence->sequence);
+		OUT_RING (chan, fence->base.seqno);
 		FIRE_RING (chan);
 	}
 	return ret;
@@ -75,7 +75,7 @@ nv04_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
 	if (fctx) {
-		nouveau_fence_context_new(&fctx->base);
+		nouveau_fence_context_new(chan, &fctx->base);
 		fctx->base.emit = nv04_fence_emit;
 		fctx->base.sync = nv04_fence_sync;
 		fctx->base.read = nv04_fence_read;
@@ -105,5 +105,7 @@ nv04_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv04_fence_destroy;
 	priv->base.context_new = nv04_fence_context_new;
 	priv->base.context_del = nv04_fence_context_del;
+	priv->base.contexts = 15;
+	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	return 0;
 }
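
From here on, each *_fence_create() hunk reserves a block of global fence contexts, one per channel (15 here, 31 for nv10/nv17, 127 for nv50, pfifo->max + 1 for nv84). fence_context_alloc() hands out a contiguous range from a global counter, which is what lets nouveau_local_fence() recognize nouveau's own fences by range-checking fence->context. A simplified, non-atomic sketch of that allocator (the real one uses an atomic counter):

/* Simplified model of fence_context_alloc(); illustration only. */
static unsigned demo_context_counter = 1;

static unsigned demo_fence_context_alloc(unsigned num)
{
	unsigned base = demo_context_counter;

	demo_context_counter += num;
	return base;	/* contexts [base, base + num) belong to the caller */
}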
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 4faaf0acf5d7..737d066ffc60 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -33,7 +33,7 @@ nv10_fence_emit(struct nouveau_fence *fence)
 	int ret = RING_SPACE(chan, 2);
 	if (ret == 0) {
 		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		OUT_RING (chan, fence->sequence);
+		OUT_RING (chan, fence->base.seqno);
 		FIRE_RING (chan);
 	}
 	return ret;
@@ -75,7 +75,7 @@ nv10_fence_context_new(struct nouveau_channel *chan)
 	if (!fctx)
 		return -ENOMEM;
 
-	nouveau_fence_context_new(&fctx->base);
+	nouveau_fence_context_new(chan, &fctx->base);
 	fctx->base.emit = nv10_fence_emit;
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv10_fence_sync;
@@ -106,6 +106,8 @@ nv10_fence_create(struct nouveau_drm *drm)
 	priv->base.dtor = nv10_fence_destroy;
 	priv->base.context_new = nv10_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
+	priv->base.contexts = 31;
+	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index ca907479f92f..6f9a1f8e2d0f 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -84,7 +84,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 	if (!fctx)
 		return -ENOMEM;
 
-	nouveau_fence_context_new(&fctx->base);
+	nouveau_fence_context_new(chan, &fctx->base);
 	fctx->base.emit = nv10_fence_emit;
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
@@ -124,6 +124,8 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.resume = nv17_fence_resume;
 	priv->base.context_new = nv17_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
+	priv->base.contexts = 31;
+	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 195cf51a7c31..08fad3668a1c 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -46,7 +46,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 	if (!fctx)
 		return -ENOMEM;
 
-	nouveau_fence_context_new(&fctx->base);
+	nouveau_fence_context_new(chan, &fctx->base);
 	fctx->base.emit = nv10_fence_emit;
 	fctx->base.read = nv10_fence_read;
 	fctx->base.sync = nv17_fence_sync;
@@ -95,6 +95,8 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.resume = nv17_fence_resume;
 	priv->base.context_new = nv50_fence_context_new;
 	priv->base.context_del = nv10_fence_context_del;
+	priv->base.contexts = 127;
+	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);
 
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 933a779c93ab..a2f28082c272 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -82,7 +82,7 @@ nv84_fence_emit(struct nouveau_fence *fence)
 	else
 		addr += fctx->vma.offset;
 
-	return fctx->base.emit32(chan, addr, fence->sequence);
+	return fctx->base.emit32(chan, addr, fence->base.seqno);
 }
 
 static int
@@ -97,7 +97,7 @@ nv84_fence_sync(struct nouveau_fence *fence,
 	else
 		addr += fctx->vma.offset;
 
-	return fctx->base.sync32(chan, addr, fence->sequence);
+	return fctx->base.sync32(chan, addr, fence->base.seqno);
 }
 
 static u32
@@ -139,12 +139,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	if (!fctx)
 		return -ENOMEM;
 
-	nouveau_fence_context_new(&fctx->base);
+	nouveau_fence_context_new(chan, &fctx->base);
 	fctx->base.emit = nv84_fence_emit;
 	fctx->base.sync = nv84_fence_sync;
 	fctx->base.read = nv84_fence_read;
 	fctx->base.emit32 = nv84_fence_emit32;
 	fctx->base.sync32 = nv84_fence_sync32;
+	fctx->base.sequence = nv84_fence_read(chan);
 
 	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
@@ -168,13 +169,12 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 static bool
 nv84_fence_suspend(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
 	struct nv84_fence_priv *priv = drm->fence;
 	int i;
 
-	priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
+	priv->suspend = vmalloc(priv->base.contexts * sizeof(u32));
 	if (priv->suspend) {
-		for (i = 0; i <= pfifo->max; i++)
+		for (i = 0; i < priv->base.contexts; i++)
 			priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
 	}
 
@@ -184,12 +184,11 @@ nv84_fence_suspend(struct nouveau_drm *drm)
 static void
 nv84_fence_resume(struct nouveau_drm *drm)
 {
-	struct nouveau_fifo *pfifo = nvkm_fifo(&drm->device);
 	struct nv84_fence_priv *priv = drm->fence;
 	int i;
 
 	if (priv->suspend) {
-		for (i = 0; i <= pfifo->max; i++)
+		for (i = 0; i < priv->base.contexts; i++)
 			nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
 		vfree(priv->suspend);
 		priv->suspend = NULL;
@@ -229,10 +228,11 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.context_new = nv84_fence_context_new;
 	priv->base.context_del = nv84_fence_context_del;
 
-	init_waitqueue_head(&priv->base.waiting);
+	priv->base.contexts = pfifo->max + 1;
+	priv->base.context_base = fence_context_alloc(priv->base.contexts);
 	priv->base.uevent = true;
 
-	ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+	ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
 			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -246,7 +246,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	}
 
 	if (ret == 0)
-		ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+		ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
 				     TTM_PL_FLAG_TT, 0, 0, NULL,
 				     &priv->bo_gart);
 	if (ret == 0) {
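
The nv84 suspend/resume hunks size their bookkeeping by priv->base.contexts instead of the FIFO channel count: one 32-bit sequence slot per fence context, copied out of the fence buffer across suspend and written back on resume. A stand-alone model of that save/restore shape (plain C, illustrative only):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_fence_state {
	unsigned contexts;
	uint32_t *slots;	/* stands in for the VRAM fence buffer */
	uint32_t *suspend;	/* non-NULL only across a suspend cycle */
};

static int demo_suspend(struct demo_fence_state *s)
{
	s->suspend = malloc(s->contexts * sizeof(uint32_t));
	if (s->suspend)
		memcpy(s->suspend, s->slots, s->contexts * sizeof(uint32_t));
	return s->suspend != NULL;
}

static void demo_resume(struct demo_fence_state *s)
{
	if (s->suspend) {
		memcpy(s->slots, s->suspend, s->contexts * sizeof(uint32_t));
		free(s->suspend);
		s->suspend = NULL;
	}
}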