path: root/drivers/dma/ioat/dma_v3.c
Diffstat (limited to 'drivers/dma/ioat/dma_v3.c')
-rw-r--r--  drivers/dma/ioat/dma_v3.c  64
1 file changed, 42 insertions(+), 22 deletions(-)
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9908c9e94b2d..26febc56dab1 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		}
 	}
 	ioat->tail += i;
-	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
 	chan->last_completion = phys_complete;
-	if (ioat->head == ioat->tail) {
+
+	active = ioat2_ring_active(ioat);
+	if (active == 0) {
 		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
 			__func__);
 		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
 		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * active), IOAT_INTRDELAY_MASK),
+	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
 }
 
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+/* try to cleanup, but yield (via spin_trylock) to incoming submissions
+ * with the expectation that we will immediately poll again shortly
+ */
+static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
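Note: the writew() added above makes interrupt latency scale with queue depth, programming roughly 5 microseconds of delay per still-pending descriptor and clamping at the register's maximum. A minimal userspace sketch of that clamping arithmetic; the 5 us constant comes from the hunk, but the mask value below is an assumption for illustration (the real one is IOAT_INTRDELAY_MASK from the driver headers):

#include <stdio.h>
#include <stdint.h>

/* hypothetical mask for illustration only */
#define INTRDELAY_MASK 0x3fff

/* 5 us of interrupt delay per pending descriptor, clamped so the
 * value fits in the register's delay field */
static uint16_t intr_delay(unsigned int active)
{
	unsigned int delay = 5 * active;

	return delay < INTRDELAY_MASK ? delay : INTRDELAY_MASK;
}

int main(void)
{
	unsigned int depths[] = { 0, 1, 16, 10000 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("active=%u -> delay=%u us\n",
		       depths[i], intr_delay(depths[i]));
	return 0;
}

An idle ring (active == 0) thus programs zero delay, so the first descriptor after an idle period still completes promptly.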
@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&chan->cleanup_lock);
 }
 
-static void ioat3_cleanup_tasklet(unsigned long data)
+/* run cleanup now because we already delayed the interrupt via INTRDELAY */
+static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
+{
+	struct ioat_chan_common *chan = &ioat->base;
+	unsigned long phys_complete;
+
+	prefetch(chan->completion);
+
+	spin_lock_bh(&chan->cleanup_lock);
+	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	}
+	spin_lock_bh(&ioat->ring_lock);
+
+	__cleanup(ioat, phys_complete);
+
+	spin_unlock_bh(&ioat->ring_lock);
+	spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 
-	ioat3_cleanup(ioat);
-	writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
-	       ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	ioat3_cleanup_sync(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
 
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	unsigned long phys_complete;
-	u32 status;
-
-	status = ioat_chansts(chan);
-	if (is_ioat_active(status) || is_ioat_idle(status))
-		ioat_suspend(chan);
-	while (is_ioat_active(status) || is_ioat_idle(status)) {
-		status = ioat_chansts(chan);
-		cpu_relax();
-	}
 
+	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
 		__cleanup(ioat, phys_complete);
 
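Note: the split into ioat3_cleanup_poll() and ioat3_cleanup_sync() encodes two lock disciplines. The poll variant's body is not shown in this hunk, but per its header comment it backs off via spin_trylock so a concurrent submitter is never stalled, whereas the interrupt-driven sync path blocks because the INTRDELAY hardware already deferred the event. A toy pthread model of the two flavors, under the assumption that trylock-and-return is all the poll path does differently:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static int completed;

/* polled path: give up immediately if a submitter holds the lock;
 * the caller is expected to poll again shortly anyway */
static void cleanup_poll(void)
{
	if (pthread_mutex_trylock(&ring_lock) != 0)
		return;		/* yield to the submission path */
	completed++;		/* stand-in for reaping finished descriptors */
	pthread_mutex_unlock(&ring_lock);
}

/* interrupt path: the hardware already delayed this event,
 * so do the work now even if we must wait for the lock */
static void cleanup_sync(void)
{
	pthread_mutex_lock(&ring_lock);
	completed++;
	pthread_mutex_unlock(&ring_lock);
}

int main(void)
{
	cleanup_poll();
	cleanup_sync();
	printf("completed passes: %d\n", completed);
	return 0;
}

The ioat2_quiesce(chan, 0) call likewise replaces the open-coded suspend/poll loop with the shared helper, removing the duplicated chansts polling.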
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 
 static void ioat3_timer_event(unsigned long data)
 {
-	struct ioat2_dma_chan *ioat = (void *) data;
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
 	spin_lock_bh(&chan->cleanup_lock);
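Note: swapping the bare (void *) cast for to_ioat2_chan() matters once the callback data refers to the embedded common channel rather than the ioat2 wrapper itself: a raw cast is only correct while the embedded member happens to sit at offset zero. Presumably to_ioat2_chan() is a container_of()-style accessor; a standalone sketch of that pattern (struct and field names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* same idea as the kernel's container_of(): recover the outer struct
 * from a pointer to one of its members */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct chan_common { int id; };

struct ioat2_chan {
	const char *name;
	struct chan_common base;	/* deliberately not the first member */
};

int main(void)
{
	struct ioat2_chan ch = { .name = "chan0", .base = { .id = 7 } };
	struct chan_common *common = &ch.base;

	/* a plain cast of 'common' would point 'name'-many bytes too far;
	 * container_of() is correct regardless of member placement */
	struct ioat2_chan *outer =
		container_of(common, struct ioat2_chan, base);

	printf("%s id=%d\n", outer->name, outer->base.id);
	return 0;
}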
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;
 
-	ioat3_cleanup(ioat);
+	ioat3_cleanup_poll(ioat);
 
 	return ioat_is_complete(c, cookie, done, used);
 }
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
 	if (is_raid_device) {
 		dma->device_is_tx_complete = ioat3_is_complete;
-		device->cleanup_tasklet = ioat3_cleanup_tasklet;
+		device->cleanup_fn = ioat3_cleanup_event;
 		device->timer_fn = ioat3_timer_event;
 	} else {
-		dma->device_is_tx_complete = ioat2_is_complete;
-		device->cleanup_tasklet = ioat2_cleanup_tasklet;
+		dma->device_is_tx_complete = ioat_is_dma_complete;
+		device->cleanup_fn = ioat2_cleanup_event;
 		device->timer_fn = ioat2_timer_event;
 	}
 
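Note: the probe hunk completes the rename from cleanup_tasklet to the more neutral cleanup_fn: the callback pair is selected once per device at probe time, then invoked without further capability checks from the shared event paths. A toy model of that probe-time dispatch (the struct layout and function names below are illustrative, not the driver's):

#include <stdio.h>

/* pick cleanup/timer callbacks once, based on device capabilities,
 * instead of branching on every event */
struct ioatdma_device_model {
	void (*cleanup_fn)(unsigned long data);
	void (*timer_fn)(unsigned long data);
};

static void raid_cleanup(unsigned long data)  { printf("raid cleanup\n"); }
static void plain_cleanup(unsigned long data) { printf("plain cleanup\n"); }
static void timer_event(unsigned long data)   { printf("timer\n"); }

static void probe(struct ioatdma_device_model *dev, int is_raid_device)
{
	if (is_raid_device)
		dev->cleanup_fn = raid_cleanup;
	else
		dev->cleanup_fn = plain_cleanup;
	dev->timer_fn = timer_event;
}

int main(void)
{
	struct ioatdma_device_model dev;

	probe(&dev, 1);
	dev.cleanup_fn(0);	/* would be invoked from the event handler */
	return 0;
}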