Diffstat (limited to 'drivers/dma'):

 drivers/dma/ioat/dma.h    |   1 +
 drivers/dma/ioat/dma_v2.c | 113 ++++++++++++++++++++++++++---------------------
 drivers/dma/ioat/dma_v3.c | 111 ++++++++++++++++++++++++--------------------
 3 files changed, 128 insertions(+), 97 deletions(-)
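In brief, per the diff below: the IOAT_COMPLETION_PENDING channel flag is replaced by a new IOAT_CHAN_ACTIVE bit (now set on the submit path in ioat2_tx_submit_unlock()), the idle-ring shrink logic is factored out of the v2/v3 timer handlers into a new check_active() helper, and ioat2_timer_event()/ioat3_timer_event() are restructured so that channel status is inspected and cleanup runs on every timer expiration, with check_active() called under prep_lock once the ring has drained.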
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5e8fe01ba69d..1020598b80d3 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -97,6 +97,7 @@ struct ioat_chan_common {
 	#define IOAT_KOBJ_INIT_FAIL 3
 	#define IOAT_RESHAPE_PENDING 4
 	#define IOAT_RUN 5
+	#define IOAT_CHAN_ACTIVE 6
 	struct timer_list timer;
 	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
 	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index b9d667851445..e2b2c70f03db 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -269,61 +269,22 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-void ioat2_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
-	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-		u64 status;
-
-		status = ioat_chansts(chan);
-
-		/* when halted due to errors check for channel
-		 * programming errors before advancing the completion state
-		 */
-		if (is_ioat_halted(status)) {
-			u32 chanerr;
-
-			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-				__func__, chanerr);
-			if (test_bit(IOAT_RUN, &chan->state))
-				BUG_ON(is_ioat_bug(chanerr));
-			else /* we never got off the ground */
-				return;
-		}
-
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		if (ioat_cleanup_preamble(chan, &phys_complete)) {
-			__cleanup(ioat, phys_complete);
-		} else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-			spin_lock_bh(&ioat->prep_lock);
-			ioat2_restart_channel(ioat);
-			spin_unlock_bh(&ioat->prep_lock);
-		} else {
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&chan->cleanup_lock);
-	} else {
-		u16 active;
+	if (ioat2_ring_active(ioat)) {
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
 
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat->alloc_order > ioat_get_alloc_order()) {
 		/* if the ring is idle, empty, and oversized try to step
 		 * down the size
 		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		spin_lock_bh(&ioat->prep_lock);
-		active = ioat2_ring_active(ioat);
-		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
-			reshape_ring(ioat, ioat->alloc_order-1);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
+		reshape_ring(ioat, ioat->alloc_order - 1);
 
 		/* keep shrinking until we get back to our minimum
 		 * default size
@@ -331,6 +292,60 @@ void ioat2_timer_event(unsigned long data)
 		if (ioat->alloc_order > ioat_get_alloc_order())
 			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+}
+
+void ioat2_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&chan->cleanup_lock);
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+		spin_lock_bh(&ioat->prep_lock);
+		ioat2_restart_channel(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+
+	if (ioat2_ring_active(ioat))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat->prep_lock);
+		check_active(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static int ioat2_reset_hw(struct ioat_chan_common *chan)
@@ -404,7 +419,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
 
-	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
+	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &chan->state))
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 
 	/* make descriptor updates visible before advancing ioat->head,
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index e52cf1eb6839..570dd6320c32 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -342,61 +342,22 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 	__ioat2_restart_chan(ioat);
 }
 
-static void ioat3_timer_event(unsigned long data)
+static void check_active(struct ioat2_dma_chan *ioat)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
 	struct ioat_chan_common *chan = &ioat->base;
 
-	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		dma_addr_t phys_complete;
-		u64 status;
-
-		status = ioat_chansts(chan);
-
-		/* when halted due to errors check for channel
-		 * programming errors before advancing the completion state
-		 */
-		if (is_ioat_halted(status)) {
-			u32 chanerr;
-
-			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
-			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
-				__func__, chanerr);
-			if (test_bit(IOAT_RUN, &chan->state))
-				BUG_ON(is_ioat_bug(chanerr));
-			else /* we never got off the ground */
-				return;
-		}
-
-		/* if we haven't made progress and we have already
-		 * acknowledged a pending completion once, then be more
-		 * forceful with a restart
-		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		if (ioat_cleanup_preamble(chan, &phys_complete))
-			__cleanup(ioat, phys_complete);
-		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
-			spin_lock_bh(&ioat->prep_lock);
-			ioat3_restart_channel(ioat);
-			spin_unlock_bh(&ioat->prep_lock);
-		} else {
-			set_bit(IOAT_COMPLETION_ACK, &chan->state);
-			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
-		}
-		spin_unlock_bh(&chan->cleanup_lock);
-	} else {
-		u16 active;
+	if (ioat2_ring_active(ioat)) {
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+		return;
+	}
 
+	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
+		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
+	else if (ioat->alloc_order > ioat_get_alloc_order()) {
 		/* if the ring is idle, empty, and oversized try to step
 		 * down the size
 		 */
-		spin_lock_bh(&chan->cleanup_lock);
-		spin_lock_bh(&ioat->prep_lock);
-		active = ioat2_ring_active(ioat);
-		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
-			reshape_ring(ioat, ioat->alloc_order-1);
-		spin_unlock_bh(&ioat->prep_lock);
-		spin_unlock_bh(&chan->cleanup_lock);
+		reshape_ring(ioat, ioat->alloc_order - 1);
 
 		/* keep shrinking until we get back to our minimum
 		 * default size
@@ -404,6 +365,60 @@ static void ioat3_timer_event(unsigned long data)
 		if (ioat->alloc_order > ioat_get_alloc_order())
 			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+}
+
+void ioat3_timer_event(unsigned long data)
+{
+	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+	struct ioat_chan_common *chan = &ioat->base;
+	dma_addr_t phys_complete;
+	u64 status;
+
+	status = ioat_chansts(chan);
+
+	/* when halted due to errors check for channel
+	 * programming errors before advancing the completion state
+	 */
+	if (is_ioat_halted(status)) {
+		u32 chanerr;
+
+		chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+		dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+			__func__, chanerr);
+		if (test_bit(IOAT_RUN, &chan->state))
+			BUG_ON(is_ioat_bug(chanerr));
+		else /* we never got off the ground */
+			return;
+	}
+
+	/* if we haven't made progress and we have already
+	 * acknowledged a pending completion once, then be more
+	 * forceful with a restart
+	 */
+	spin_lock_bh(&chan->cleanup_lock);
+	if (ioat_cleanup_preamble(chan, &phys_complete))
+		__cleanup(ioat, phys_complete);
+	else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
+		spin_lock_bh(&ioat->prep_lock);
+		ioat3_restart_channel(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+		spin_unlock_bh(&chan->cleanup_lock);
+		return;
+	} else {
+		set_bit(IOAT_COMPLETION_ACK, &chan->state);
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	}
+
+
+	if (ioat2_ring_active(ioat))
+		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
+	else {
+		spin_lock_bh(&ioat->prep_lock);
+		check_active(ioat);
+		spin_unlock_bh(&ioat->prep_lock);
+	}
+	spin_unlock_bh(&chan->cleanup_lock);
 }
 
 static enum dma_status
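Note on the handler signature: both timer handlers take an unsigned long cookie and begin with to_ioat2_chan((void *) data), because the pre-4.15 kernel timer API passes back whatever value was stored in timer->data when the timer was set up. A minimal sketch of that pattern, with hypothetical names (my_chan, my_timer_event; the ioat driver's own timer setup is not part of this diff):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct my_chan {
		struct timer_list timer;
		/* ... channel state ... */
	};

	static void my_timer_event(unsigned long data)
	{
		/* recover the channel from the opaque cookie, as the
		 * ioat handlers do above */
		struct my_chan *chan = (struct my_chan *)data;

		/* inspect channel state, then optionally re-arm */
		mod_timer(&chan->timer, jiffies + msecs_to_jiffies(100));
	}

	static void my_chan_init(struct my_chan *chan)
	{
		/* pre-4.15 API: the third argument is stored in
		 * timer->data and passed to the callback verbatim */
		setup_timer(&chan->timer, my_timer_event, (unsigned long)chan);
	}

Re-arming with mod_timer() from inside the handler is exactly how ioat2_timer_event()/ioat3_timer_event() keep the completion poll alive while the ring is active, and how check_active() steps down to the slower IDLE_TIMEOUT cadence once it drains.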