author		Jan Nikitenko <jan.nikitenko@gmail.com>	2008-12-01 16:13:56 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-12-01 22:55:24 -0500
commit		4e253d23003b54c88d0919d6088be74f00eec3c7 (patch)
tree		418741f7add461e32f4c2801e8a693f9dbd0455b
parent		6a010b56e9bd2fdb32efd153e1a08305949b6b53 (diff)
spi: au1550_spi full duplex dma fix
Fix unsafe order in dma mapping operation: always flush data from the
cache *BEFORE* invalidating it, to allow full duplex transfers where the
same buffer may be used for both writes and reads. Tested with mmc-spi.
Signed-off-by: Jan Nikitenko <jan.nikitenko@gmail.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
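The ordering rule the patch enforces can be sketched in isolation. The snippet below is illustrative only and is not the driver's code; map_duplex_buffer(), dev, buf and len are hypothetical placeholders. Mapping the shared buffer for TX first writes any dirty cache lines back to memory; mapping it for RX afterwards invalidates those lines so the CPU later re-reads what the device DMA'd in.

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch only: map a buffer that is used for both
 * directions of a full duplex transfer.  The TX mapping (cache flush)
 * must come before the RX mapping (cache invalidate).
 */
static int map_duplex_buffer(struct device *dev, void *buf, size_t len,
			     dma_addr_t *tx_dma, dma_addr_t *rx_dma)
{
	*tx_dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *tx_dma))
		return -ENOMEM;	/* hypothetical error path */

	*rx_dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *rx_dma)) {
		dma_unmap_single(dev, *tx_dma, len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}

Reversing the two mappings would let the invalidate discard outgoing data that had not yet reached memory, which is the failure mode the commit message describes.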
-rw-r--r--	drivers/spi/au1550_spi.c	26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 87b73e0169c5..b02f25c702fd 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -369,10 +369,23 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
 	dma_rx_addr = t->rx_dma;
 
 	/*
-	 * check if buffers are already dma mapped, map them otherwise
+	 * check if buffers are already dma mapped, map them otherwise:
+	 * - first map the TX buffer, so cache data gets written to memory
+	 * - then map the RX buffer, so that cache entries (with
+	 *   soon-to-be-stale data) get removed
 	 * use rx buffer in place of tx if tx buffer was not provided
 	 * use temp rx buffer (preallocated or realloc to fit) for rx dma
 	 */
+	if (t->tx_buf) {
+		if (t->tx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
+			dma_tx_addr = dma_map_single(hw->dev,
+					(void *)t->tx_buf,
+					t->len, DMA_TO_DEVICE);
+			if (dma_mapping_error(hw->dev, dma_tx_addr))
+				dev_err(hw->dev, "tx dma map error\n");
+		}
+	}
+
 	if (t->rx_buf) {
 		if (t->rx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
 			dma_rx_addr = dma_map_single(hw->dev,
@@ -396,15 +409,8 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
 		dma_sync_single_for_device(hw->dev, dma_rx_addr,
 				t->len, DMA_FROM_DEVICE);
 	}
-	if (t->tx_buf) {
-		if (t->tx_dma == 0) {	/* if DMA_ADDR_INVALID, map it */
-			dma_tx_addr = dma_map_single(hw->dev,
-					(void *)t->tx_buf,
-					t->len, DMA_TO_DEVICE);
-			if (dma_mapping_error(hw->dev, dma_tx_addr))
-				dev_err(hw->dev, "tx dma map error\n");
-		}
-	} else {
+
+	if (!t->tx_buf) {
 		dma_sync_single_for_device(hw->dev, dma_rx_addr,
 				t->len, DMA_BIDIRECTIONAL);
 		hw->tx = hw->rx;