Diffstat (limited to 'drivers/spi/pxa2xx_spi.c')
-rw-r--r--  drivers/spi/pxa2xx_spi.c | 116
1 file changed, 91 insertions(+), 25 deletions(-)
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 34c7c9875681..d47d3636227f 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -47,9 +47,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
 
 #define MAX_BUSES 3
 
 #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
 #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
-#define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0)
+#define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0)
+#define MAX_DMA_LEN 8191
 
 /*
  * for testing SSCR1 changes that require SSP restart, basically
@@ -144,7 +145,6 @@ struct driver_data {
 	size_t tx_map_len;
 	u8 n_bytes;
 	u32 dma_width;
-	int cs_change;
 	int (*write)(struct driver_data *drv_data);
 	int (*read)(struct driver_data *drv_data);
 	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
@@ -406,8 +406,45 @@ static void giveback(struct driver_data *drv_data)
 					struct spi_transfer,
 					transfer_list);
 
+	/* Delay if requested before any change in chip select */
+	if (last_transfer->delay_usecs)
+		udelay(last_transfer->delay_usecs);
+
+	/* Drop chip select UNLESS cs_change is true or we are returning
+	 * a message with an error, or next message is for another chip
+	 */
 	if (!last_transfer->cs_change)
 		drv_data->cs_control(PXA2XX_CS_DEASSERT);
+	else {
+		struct spi_message *next_msg;
+
+		/* Holding of cs was hinted, but we need to make sure
+		 * the next message is for the same chip.  Don't waste
+		 * time with the following tests unless this was hinted.
+		 *
+		 * We cannot postpone this until pump_messages, because
+		 * after calling msg->complete (below) the driver that
+		 * sent the current message could be unloaded, which
+		 * could invalidate the cs_control() callback...
+		 */
+
+		/* get a pointer to the next message, if any */
+		spin_lock_irqsave(&drv_data->lock, flags);
+		if (list_empty(&drv_data->queue))
+			next_msg = NULL;
+		else
+			next_msg = list_entry(drv_data->queue.next,
+					struct spi_message, queue);
+		spin_unlock_irqrestore(&drv_data->lock, flags);
+
+		/* see if the next and current messages point
+		 * to the same chip
+		 */
+		if (next_msg && next_msg->spi != msg->spi)
+			next_msg = NULL;
+		if (!next_msg || msg->state == ERROR_STATE)
+			drv_data->cs_control(PXA2XX_CS_DEASSERT);
+	}
 
 	msg->state = NULL;
 	if (msg->complete)
@@ -490,10 +527,9 @@ static void dma_transfer_complete(struct driver_data *drv_data)
 	msg->actual_length += drv_data->len -
 				(drv_data->rx_end - drv_data->rx);
 
-	/* Release chip select if requested, transfer delays are
-	 * handled in pump_transfers */
-	if (drv_data->cs_change)
-		drv_data->cs_control(PXA2XX_CS_DEASSERT);
+	/* Transfer delays and chip select release are
+	 * handled in pump_transfers or giveback
+	 */
 
 	/* Move to next transfer */
 	msg->state = next_transfer(drv_data);
@@ -602,10 +638,9 @@ static void int_transfer_complete(struct driver_data *drv_data)
 	drv_data->cur_msg->actual_length += drv_data->len -
 				(drv_data->rx_end - drv_data->rx);
 
-	/* Release chip select if requested, transfer delays are
-	 * handled in pump_transfers */
-	if (drv_data->cs_change)
-		drv_data->cs_control(PXA2XX_CS_DEASSERT);
+	/* Transfer delays and chip select release are
+	 * handled in pump_transfers or giveback
+	 */
 
 	/* Move to next transfer */
 	drv_data->cur_msg->state = next_transfer(drv_data);
@@ -840,23 +875,40 @@ static void pump_transfers(unsigned long data)
 		return;
 	}
 
-	/* Delay if requested at end of transfer*/
+	/* Delay if requested at end of transfer before CS change */
 	if (message->state == RUNNING_STATE) {
 		previous = list_entry(transfer->transfer_list.prev,
 					struct spi_transfer,
 					transfer_list);
 		if (previous->delay_usecs)
 			udelay(previous->delay_usecs);
+
+		/* Drop chip select only if cs_change is requested */
+		if (previous->cs_change)
+			drv_data->cs_control(PXA2XX_CS_DEASSERT);
 	}
 
-	/* Check transfer length */
-	if (transfer->len > 8191)
-	{
-		dev_warn(&drv_data->pdev->dev, "pump_transfers: transfer "
-			"length greater than 8191\n");
-		message->status = -EINVAL;
-		giveback(drv_data);
-		return;
+	/* Check for transfers that need multiple DMA segments */
+	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
+
+		/* reject already-mapped transfers; PIO won't always work */
+		if (message->is_dma_mapped
+				|| transfer->rx_dma || transfer->tx_dma) {
+			dev_err(&drv_data->pdev->dev,
+				"pump_transfers: mapped transfer length "
+				"of %u is greater than %d\n",
+				transfer->len, MAX_DMA_LEN);
+			message->status = -EINVAL;
+			giveback(drv_data);
+			return;
+		}
+
+		/* warn ... we force this to PIO mode */
+		if (printk_ratelimit())
+			dev_warn(&message->spi->dev, "pump_transfers: "
+				"DMA disabled for transfer length %ld "
+				"greater than %d\n",
+				(long)drv_data->len, MAX_DMA_LEN);
 	}
 
 	/* Setup the transfer state based on the type of transfer */
@@ -878,7 +930,6 @@ static void pump_transfers(unsigned long data)
 	drv_data->len = transfer->len & DCMD_LENGTH;
 	drv_data->write = drv_data->tx ? chip->write : null_writer;
 	drv_data->read = drv_data->rx ? chip->read : null_reader;
-	drv_data->cs_change = transfer->cs_change;
 
 	/* Change speed and bit per word on a per transfer */
 	cr0 = chip->cr0;
@@ -925,7 +976,7 @@ static void pump_transfers(unsigned long data)
 						&dma_thresh))
 				if (printk_ratelimit())
 					dev_warn(&message->spi->dev,
-						"pump_transfer: "
+						"pump_transfers: "
 						"DMA burst size reduced to "
 						"match bits_per_word\n");
 		}
@@ -939,8 +990,23 @@ static void pump_transfers(unsigned long data)
 
 	message->state = RUNNING_STATE;
 
-	/* Try to map dma buffer and do a dma transfer if successful */
-	if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) {
+	/* Try to map dma buffer and do a dma transfer if successful, but
+	 * only if the length is non-zero and less than MAX_DMA_LEN.
+	 *
+	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
+	 * of PIO instead.  Care is needed above because the transfer may
+	 * have been passed with buffers that are already dma mapped.
+	 * A zero-length transfer in PIO mode will not try to write/read
+	 * to/from the buffers.
+	 *
+	 * REVISIT large transfers are exactly where we most want to be
+	 * using DMA.  If this happens much, split those transfers into
+	 * multiple DMA segments rather than forcing PIO.
+	 */
+	drv_data->dma_mapped = 0;
+	if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
+		drv_data->dma_mapped = map_dma_buffers(drv_data);
+	if (drv_data->dma_mapped) {
 
 		/* Ensure we have the correct interrupt handler */
 		drv_data->transfer_handler = dma_transfer;