diff options
-rw-r--r-- | drivers/spi/spi-atmel.c | 587 |
1 files changed, 566 insertions, 21 deletions
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 3625951e5df5..787bd2c22bca 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -15,11 +15,13 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
18 | #include <linux/dmaengine.h> | ||
18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
20 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <linux/platform_data/atmel.h> | 23 | #include <linux/platform_data/atmel.h> |
24 | #include <linux/platform_data/dma-atmel.h> | ||
23 | #include <linux/of.h> | 25 | #include <linux/of.h> |
24 | 26 | ||
25 | #include <linux/io.h> | 27 | #include <linux/io.h> |
@@ -182,6 +184,22 @@ | |||
182 | #define spi_writel(port,reg,value) \ | 184 | #define spi_writel(port,reg,value) \ |
183 | __raw_writel((value), (port)->regs + SPI_##reg) | 185 | __raw_writel((value), (port)->regs + SPI_##reg) |
184 | 186 | ||
187 | /* use PIO for small transfers, avoiding DMA setup/teardown overhead and | ||
188 | * cache operations; better heuristics might also consider wordsize and bitrate. | ||
189 | */ | ||
190 | #define DMA_MIN_BYTES 16 | ||
191 | |||
192 | struct atmel_spi_dma { | ||
193 | struct dma_chan *chan_rx; | ||
194 | struct dma_chan *chan_tx; | ||
195 | struct scatterlist sgrx; | ||
196 | struct scatterlist sgtx; | ||
197 | struct dma_async_tx_descriptor *data_desc_rx; | ||
198 | struct dma_async_tx_descriptor *data_desc_tx; | ||
199 | |||
200 | struct at_dma_slave dma_slave; | ||
201 | }; | ||
202 | |||
185 | struct atmel_spi_caps { | 203 | struct atmel_spi_caps { |
186 | bool is_spi2; | 204 | bool is_spi2; |
187 | bool has_wdrbt; | 205 | bool has_wdrbt; |
@@ -206,16 +224,23 @@ struct atmel_spi { | |||
206 | 224 | ||
207 | u8 stopping; | 225 | u8 stopping; |
208 | struct list_head queue; | 226 | struct list_head queue; |
227 | struct tasklet_struct tasklet; | ||
209 | struct spi_transfer *current_transfer; | 228 | struct spi_transfer *current_transfer; |
210 | unsigned long current_remaining_bytes; | 229 | unsigned long current_remaining_bytes; |
211 | struct spi_transfer *next_transfer; | 230 | struct spi_transfer *next_transfer; |
212 | unsigned long next_remaining_bytes; | 231 | unsigned long next_remaining_bytes; |
213 | int done_status; | 232 | int done_status; |
214 | 233 | ||
234 | /* scratch buffer */ | ||
215 | void *buffer; | 235 | void *buffer; |
216 | dma_addr_t buffer_dma; | 236 | dma_addr_t buffer_dma; |
217 | 237 | ||
218 | struct atmel_spi_caps caps; | 238 | struct atmel_spi_caps caps; |
239 | |||
240 | bool use_dma; | ||
241 | bool use_pdc; | ||
242 | /* dmaengine data */ | ||
243 | struct atmel_spi_dma dma; | ||
219 | }; | 244 | }; |
220 | 245 | ||
221 | /* Controller-specific per-slave state */ | 246 | /* Controller-specific per-slave state */ |
@@ -284,6 +309,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) | |||
284 | | SPI_BIT(MODFDIS) | 309 | | SPI_BIT(MODFDIS) |
285 | | SPI_BIT(MSTR)); | 310 | | SPI_BIT(MSTR)); |
286 | } | 311 | } |
312 | |||
287 | mr = spi_readl(as, MR); | 313 | mr = spi_readl(as, MR); |
288 | gpio_set_value(asd->npcs_pin, active); | 314 | gpio_set_value(asd->npcs_pin, active); |
289 | } else { | 315 | } else { |
@@ -344,6 +370,12 @@ static void atmel_spi_unlock(struct atmel_spi *as) | |||
344 | spin_unlock_irqrestore(&as->lock, as->flags); | 370 | spin_unlock_irqrestore(&as->lock, as->flags); |
345 | } | 371 | } |
346 | 372 | ||
373 | static inline bool atmel_spi_use_dma(struct atmel_spi *as, | ||
374 | struct spi_transfer *xfer) | ||
375 | { | ||
376 | return as->use_dma && xfer->len >= DMA_MIN_BYTES; | ||
377 | } | ||
378 | |||
347 | static inline int atmel_spi_xfer_is_last(struct spi_message *msg, | 379 | static inline int atmel_spi_xfer_is_last(struct spi_message *msg, |
348 | struct spi_transfer *xfer) | 380 | struct spi_transfer *xfer) |
349 | { | 381 | { |
@@ -355,6 +387,265 @@ static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer) | |||
355 | return xfer->delay_usecs == 0 && !xfer->cs_change; | 387 | return xfer->delay_usecs == 0 && !xfer->cs_change; |
356 | } | 388 | } |
357 | 389 | ||
390 | static int atmel_spi_dma_slave_config(struct atmel_spi *as, | ||
391 | struct dma_slave_config *slave_config, | ||
392 | u8 bits_per_word) | ||
393 | { | ||
394 | int err = 0; | ||
395 | |||
396 | if (bits_per_word > 8) { | ||
397 | slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
398 | slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
399 | } else { | ||
400 | slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
401 | slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
402 | } | ||
403 | |||
404 | slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR; | ||
405 | slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR; | ||
406 | slave_config->src_maxburst = 1; | ||
407 | slave_config->dst_maxburst = 1; | ||
408 | slave_config->device_fc = false; | ||
409 | |||
410 | slave_config->direction = DMA_MEM_TO_DEV; | ||
411 | if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) { | ||
412 | dev_err(&as->pdev->dev, | ||
413 | "failed to configure tx dma channel\n"); | ||
414 | err = -EINVAL; | ||
415 | } | ||
416 | |||
417 | slave_config->direction = DMA_DEV_TO_MEM; | ||
418 | if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) { | ||
419 | dev_err(&as->pdev->dev, | ||
420 | "failed to configure rx dma channel\n"); | ||
421 | err = -EINVAL; | ||
422 | } | ||
423 | |||
424 | return err; | ||
425 | } | ||
426 | |||
427 | static bool filter(struct dma_chan *chan, void *slave) | ||
428 | { | ||
429 | struct at_dma_slave *sl = slave; | ||
430 | |||
431 | if (sl->dma_dev == chan->device->dev) { | ||
432 | chan->private = sl; | ||
433 | return true; | ||
434 | } else { | ||
435 | return false; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | static int atmel_spi_configure_dma(struct atmel_spi *as) | ||
440 | { | ||
441 | struct at_dma_slave *sdata = &as->dma.dma_slave; | ||
442 | struct dma_slave_config slave_config; | ||
443 | int err; | ||
444 | |||
445 | if (sdata && sdata->dma_dev) { | ||
446 | dma_cap_mask_t mask; | ||
447 | |||
448 | /* Try to grab two DMA channels */ | ||
449 | dma_cap_zero(mask); | ||
450 | dma_cap_set(DMA_SLAVE, mask); | ||
451 | as->dma.chan_tx = dma_request_channel(mask, filter, sdata); | ||
452 | if (as->dma.chan_tx) | ||
453 | as->dma.chan_rx = | ||
454 | dma_request_channel(mask, filter, sdata); | ||
455 | } | ||
456 | if (!as->dma.chan_rx || !as->dma.chan_tx) { | ||
457 | dev_err(&as->pdev->dev, | ||
458 | "DMA channel not available, SPI unable to use DMA\n"); | ||
459 | err = -EBUSY; | ||
460 | goto error; | ||
461 | } | ||
462 | |||
463 | err = atmel_spi_dma_slave_config(as, &slave_config, 8); | ||
464 | if (err) | ||
465 | goto error; | ||
466 | |||
467 | dev_info(&as->pdev->dev, | ||
468 | "Using %s (tx) and %s (rx) for DMA transfers\n", | ||
469 | dma_chan_name(as->dma.chan_tx), | ||
470 | dma_chan_name(as->dma.chan_rx)); | ||
471 | return 0; | ||
472 | error: | ||
473 | if (as->dma.chan_rx) | ||
474 | dma_release_channel(as->dma.chan_rx); | ||
475 | if (as->dma.chan_tx) | ||
476 | dma_release_channel(as->dma.chan_tx); | ||
477 | return err; | ||
478 | } | ||
479 | |||
480 | static void atmel_spi_stop_dma(struct atmel_spi *as) | ||
481 | { | ||
482 | if (as->dma.chan_rx) | ||
483 | as->dma.chan_rx->device->device_control(as->dma.chan_rx, | ||
484 | DMA_TERMINATE_ALL, 0); | ||
485 | if (as->dma.chan_tx) | ||
486 | as->dma.chan_tx->device->device_control(as->dma.chan_tx, | ||
487 | DMA_TERMINATE_ALL, 0); | ||
488 | } | ||
489 | |||
490 | static void atmel_spi_release_dma(struct atmel_spi *as) | ||
491 | { | ||
492 | if (as->dma.chan_rx) | ||
493 | dma_release_channel(as->dma.chan_rx); | ||
494 | if (as->dma.chan_tx) | ||
495 | dma_release_channel(as->dma.chan_tx); | ||
496 | } | ||
497 | |||
498 | /* This function is called by the DMA driver from tasklet context */ | ||
499 | static void dma_callback(void *data) | ||
500 | { | ||
501 | struct spi_master *master = data; | ||
502 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
503 | |||
504 | /* trigger SPI tasklet */ | ||
505 | tasklet_schedule(&as->tasklet); | ||
506 | } | ||
507 | |||
508 | /* | ||
509 | * Next transfer using PIO. | ||
510 | * lock is held, spi tasklet is blocked | ||
511 | */ | ||
512 | static void atmel_spi_next_xfer_pio(struct spi_master *master, | ||
513 | struct spi_transfer *xfer) | ||
514 | { | ||
515 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
516 | |||
517 | dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n"); | ||
518 | |||
519 | as->current_remaining_bytes = xfer->len; | ||
520 | |||
521 | /* Make sure data is not remaining in RDR */ | ||
522 | spi_readl(as, RDR); | ||
523 | while (spi_readl(as, SR) & SPI_BIT(RDRF)) { | ||
524 | spi_readl(as, RDR); | ||
525 | cpu_relax(); | ||
526 | } | ||
527 | |||
528 | if (xfer->tx_buf) | ||
529 | spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); | ||
530 | else | ||
531 | spi_writel(as, TDR, 0); | ||
532 | |||
533 | dev_dbg(master->dev.parent, | ||
534 | " start pio xfer %p: len %u tx %p rx %p\n", | ||
535 | xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); | ||
536 | |||
537 | /* Enable relevant interrupts */ | ||
538 | spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * Submit next transfer for DMA. | ||
543 | * lock is held, spi tasklet is blocked | ||
544 | */ | ||
545 | static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, | ||
546 | struct spi_transfer *xfer, | ||
547 | u32 *plen) | ||
548 | { | ||
549 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
550 | struct dma_chan *rxchan = as->dma.chan_rx; | ||
551 | struct dma_chan *txchan = as->dma.chan_tx; | ||
552 | struct dma_async_tx_descriptor *rxdesc; | ||
553 | struct dma_async_tx_descriptor *txdesc; | ||
554 | struct dma_slave_config slave_config; | ||
555 | dma_cookie_t cookie; | ||
556 | u32 len = *plen; | ||
557 | |||
558 | dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n"); | ||
559 | |||
560 | /* Check that the channels are available */ | ||
561 | if (!rxchan || !txchan) | ||
562 | return -ENODEV; | ||
563 | |||
564 | /* release lock for DMA operations */ | ||
565 | atmel_spi_unlock(as); | ||
566 | |||
567 | /* prepare the RX dma transfer */ | ||
568 | sg_init_table(&as->dma.sgrx, 1); | ||
569 | if (xfer->rx_buf) { | ||
570 | as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen; | ||
571 | } else { | ||
572 | as->dma.sgrx.dma_address = as->buffer_dma; | ||
573 | if (len > BUFFER_SIZE) | ||
574 | len = BUFFER_SIZE; | ||
575 | } | ||
576 | |||
577 | /* prepare the TX dma transfer */ | ||
578 | sg_init_table(&as->dma.sgtx, 1); | ||
579 | if (xfer->tx_buf) { | ||
580 | as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen; | ||
581 | } else { | ||
582 | as->dma.sgtx.dma_address = as->buffer_dma; | ||
583 | if (len > BUFFER_SIZE) | ||
584 | len = BUFFER_SIZE; | ||
585 | memset(as->buffer, 0, len); | ||
586 | } | ||
587 | |||
588 | sg_dma_len(&as->dma.sgtx) = len; | ||
589 | sg_dma_len(&as->dma.sgrx) = len; | ||
590 | |||
591 | *plen = len; | ||
592 | |||
593 | if (atmel_spi_dma_slave_config(as, &slave_config, 8)) | ||
594 | goto err_exit; | ||
595 | |||
596 | /* Send both scatterlists */ | ||
597 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | ||
598 | &as->dma.sgrx, | ||
599 | 1, | ||
600 | DMA_FROM_DEVICE, | ||
601 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK, | ||
602 | NULL); | ||
603 | if (!rxdesc) | ||
604 | goto err_dma; | ||
605 | |||
606 | txdesc = txchan->device->device_prep_slave_sg(txchan, | ||
607 | &as->dma.sgtx, | ||
608 | 1, | ||
609 | DMA_TO_DEVICE, | ||
610 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK, | ||
611 | NULL); | ||
612 | if (!txdesc) | ||
613 | goto err_dma; | ||
614 | |||
615 | dev_dbg(master->dev.parent, | ||
616 | " start dma xfer %p: len %u tx %p/%08x rx %p/%08x\n", | ||
617 | xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, | ||
618 | xfer->rx_buf, xfer->rx_dma); | ||
619 | |||
620 | /* Enable relevant interrupts */ | ||
621 | spi_writel(as, IER, SPI_BIT(OVRES)); | ||
622 | |||
623 | /* Put the callback on the RX transfer only, that should finish last */ | ||
624 | rxdesc->callback = dma_callback; | ||
625 | rxdesc->callback_param = master; | ||
626 | |||
627 | /* Submit and fire RX and TX with TX last so we're ready to read! */ | ||
628 | cookie = rxdesc->tx_submit(rxdesc); | ||
629 | if (dma_submit_error(cookie)) | ||
630 | goto err_dma; | ||
631 | cookie = txdesc->tx_submit(txdesc); | ||
632 | if (dma_submit_error(cookie)) | ||
633 | goto err_dma; | ||
634 | rxchan->device->device_issue_pending(rxchan); | ||
635 | txchan->device->device_issue_pending(txchan); | ||
636 | |||
637 | /* take back lock */ | ||
638 | atmel_spi_lock(as); | ||
639 | return 0; | ||
640 | |||
641 | err_dma: | ||
642 | spi_writel(as, IDR, SPI_BIT(OVRES)); | ||
643 | atmel_spi_stop_dma(as); | ||
644 | err_exit: | ||
645 | atmel_spi_lock(as); | ||
646 | return -ENOMEM; | ||
647 | } | ||
648 | |||
358 | static void atmel_spi_next_xfer_data(struct spi_master *master, | 649 | static void atmel_spi_next_xfer_data(struct spi_master *master, |
359 | struct spi_transfer *xfer, | 650 | struct spi_transfer *xfer, |
360 | dma_addr_t *tx_dma, | 651 | dma_addr_t *tx_dma, |
@@ -372,6 +663,7 @@ static void atmel_spi_next_xfer_data(struct spi_master *master, | |||
372 | if (len > BUFFER_SIZE) | 663 | if (len > BUFFER_SIZE) |
373 | len = BUFFER_SIZE; | 664 | len = BUFFER_SIZE; |
374 | } | 665 | } |
666 | |||
375 | if (xfer->tx_buf) | 667 | if (xfer->tx_buf) |
376 | *tx_dma = xfer->tx_dma + xfer->len - *plen; | 668 | *tx_dma = xfer->tx_dma + xfer->len - *plen; |
377 | else { | 669 | else { |
@@ -387,10 +679,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master, | |||
387 | } | 679 | } |
388 | 680 | ||
389 | /* | 681 | /* |
390 | * Submit next transfer for DMA. | 682 | * Submit next transfer for PDC. |
391 | * lock is held, spi irq is blocked | 683 | * lock is held, spi irq is blocked |
392 | */ | 684 | */ |
393 | static void atmel_spi_next_xfer(struct spi_master *master, | 685 | static void atmel_spi_pdc_next_xfer(struct spi_master *master, |
394 | struct spi_message *msg) | 686 | struct spi_message *msg) |
395 | { | 687 | { |
396 | struct atmel_spi *as = spi_master_get_devdata(master); | 688 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -487,6 +779,48 @@ static void atmel_spi_next_xfer(struct spi_master *master, | |||
487 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 779 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
488 | } | 780 | } |
489 | 781 | ||
782 | /* | ||
783 | * Choose way to submit next transfer and start it. | ||
784 | * lock is held, spi tasklet is blocked | ||
785 | */ | ||
786 | static void atmel_spi_dma_next_xfer(struct spi_master *master, | ||
787 | struct spi_message *msg) | ||
788 | { | ||
789 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
790 | struct spi_transfer *xfer; | ||
791 | u32 remaining, len; | ||
792 | |||
793 | remaining = as->current_remaining_bytes; | ||
794 | if (remaining) { | ||
795 | xfer = as->current_transfer; | ||
796 | len = remaining; | ||
797 | } else { | ||
798 | if (!as->current_transfer) | ||
799 | xfer = list_entry(msg->transfers.next, | ||
800 | struct spi_transfer, transfer_list); | ||
801 | else | ||
802 | xfer = list_entry( | ||
803 | as->current_transfer->transfer_list.next, | ||
804 | struct spi_transfer, transfer_list); | ||
805 | |||
806 | as->current_transfer = xfer; | ||
807 | len = xfer->len; | ||
808 | } | ||
809 | |||
810 | if (atmel_spi_use_dma(as, xfer)) { | ||
811 | u32 total = len; | ||
812 | if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) { | ||
813 | as->current_remaining_bytes = total - len; | ||
814 | return; | ||
815 | } else { | ||
816 | dev_err(&msg->spi->dev, "unable to use DMA, fallback to PIO\n"); | ||
817 | } | ||
818 | } | ||
819 | |||
820 | /* use PIO if an error happened using DMA */ | ||
821 | atmel_spi_next_xfer_pio(master, xfer); | ||
822 | } | ||
823 | |||
490 | static void atmel_spi_next_message(struct spi_master *master) | 824 | static void atmel_spi_next_message(struct spi_master *master) |
491 | { | 825 | { |
492 | struct atmel_spi *as = spi_master_get_devdata(master); | 826 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -511,7 +845,10 @@ static void atmel_spi_next_message(struct spi_master *master) | |||
511 | } else | 845 | } else |
512 | cs_activate(as, spi); | 846 | cs_activate(as, spi); |
513 | 847 | ||
514 | atmel_spi_next_xfer(master, msg); | 848 | if (as->use_pdc) |
849 | atmel_spi_pdc_next_xfer(master, msg); | ||
850 | else | ||
851 | atmel_spi_dma_next_xfer(master, msg); | ||
515 | } | 852 | } |
516 | 853 | ||
517 | /* | 854 | /* |
@@ -564,6 +901,11 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master, | |||
564 | xfer->len, DMA_FROM_DEVICE); | 901 | xfer->len, DMA_FROM_DEVICE); |
565 | } | 902 | } |
566 | 903 | ||
904 | static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as) | ||
905 | { | ||
906 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | ||
907 | } | ||
908 | |||
567 | static void | 909 | static void |
568 | atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, | 910 | atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, |
569 | struct spi_message *msg, int stay) | 911 | struct spi_message *msg, int stay) |
@@ -589,14 +931,183 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, | |||
589 | as->done_status = 0; | 931 | as->done_status = 0; |
590 | 932 | ||
591 | /* continue if needed */ | 933 | /* continue if needed */ |
592 | if (list_empty(&as->queue) || as->stopping) | 934 | if (list_empty(&as->queue) || as->stopping) { |
593 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | 935 | if (as->use_pdc) |
594 | else | 936 | atmel_spi_disable_pdc_transfer(as); |
937 | } else { | ||
595 | atmel_spi_next_message(master); | 938 | atmel_spi_next_message(master); |
939 | } | ||
940 | } | ||
941 | |||
942 | /* Called from IRQ | ||
943 | * lock is held | ||
944 | * | ||
945 | * Must update "current_remaining_bytes" to keep track of data | ||
946 | * to transfer. | ||
947 | */ | ||
948 | static void | ||
949 | atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) | ||
950 | { | ||
951 | u8 *txp; | ||
952 | u8 *rxp; | ||
953 | unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; | ||
954 | |||
955 | if (xfer->rx_buf) { | ||
956 | rxp = ((u8 *)xfer->rx_buf) + xfer_pos; | ||
957 | *rxp = spi_readl(as, RDR); | ||
958 | } else { | ||
959 | spi_readl(as, RDR); | ||
960 | } | ||
961 | |||
962 | as->current_remaining_bytes--; | ||
963 | |||
964 | if (as->current_remaining_bytes) { | ||
965 | if (xfer->tx_buf) { | ||
966 | txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; | ||
967 | spi_writel(as, TDR, *txp); | ||
968 | } else { | ||
969 | spi_writel(as, TDR, 0); | ||
970 | } | ||
971 | } | ||
972 | } | ||
973 | |||
974 | /* Tasklet | ||
975 | * Called from DMA callback + pio transfer and overrun IRQ. | ||
976 | */ | ||
977 | static void atmel_spi_tasklet_func(unsigned long data) | ||
978 | { | ||
979 | struct spi_master *master = (struct spi_master *)data; | ||
980 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
981 | struct spi_message *msg; | ||
982 | struct spi_transfer *xfer; | ||
983 | |||
984 | dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n"); | ||
985 | |||
986 | atmel_spi_lock(as); | ||
987 | |||
988 | xfer = as->current_transfer; | ||
989 | |||
990 | if (xfer == NULL) | ||
991 | /* already been there */ | ||
992 | goto tasklet_out; | ||
993 | |||
994 | msg = list_entry(as->queue.next, struct spi_message, queue); | ||
995 | |||
996 | if (as->current_remaining_bytes == 0) { | ||
997 | if (as->done_status < 0) { | ||
998 | /* error happened (overrun) */ | ||
999 | if (atmel_spi_use_dma(as, xfer)) | ||
1000 | atmel_spi_stop_dma(as); | ||
1001 | } else { | ||
1002 | /* only update length if no error */ | ||
1003 | msg->actual_length += xfer->len; | ||
1004 | } | ||
1005 | |||
1006 | if (atmel_spi_use_dma(as, xfer)) | ||
1007 | if (!msg->is_dma_mapped) | ||
1008 | atmel_spi_dma_unmap_xfer(master, xfer); | ||
1009 | |||
1010 | if (xfer->delay_usecs) | ||
1011 | udelay(xfer->delay_usecs); | ||
1012 | |||
1013 | if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) { | ||
1014 | /* report completed (or erroneous) message */ | ||
1015 | atmel_spi_msg_done(master, as, msg, xfer->cs_change); | ||
1016 | } else { | ||
1017 | if (xfer->cs_change) { | ||
1018 | cs_deactivate(as, msg->spi); | ||
1019 | udelay(1); | ||
1020 | cs_activate(as, msg->spi); | ||
1021 | } | ||
1022 | |||
1023 | /* | ||
1024 | * Not done yet. Submit the next transfer. | ||
1025 | * | ||
1026 | * FIXME handle protocol options for xfer | ||
1027 | */ | ||
1028 | atmel_spi_dma_next_xfer(master, msg); | ||
1029 | } | ||
1030 | } else { | ||
1031 | /* | ||
1032 | * Keep going, we still have data to send in | ||
1033 | * the current transfer. | ||
1034 | */ | ||
1035 | atmel_spi_dma_next_xfer(master, msg); | ||
1036 | } | ||
1037 | |||
1038 | tasklet_out: | ||
1039 | atmel_spi_unlock(as); | ||
1040 | } | ||
1041 | |||
1042 | /* Interrupt | ||
1043 | * | ||
1044 | * No need for locking in this Interrupt handler: done_status is the | ||
1045 | * only information modified. What we need is the update of this field | ||
1046 | * before tasklet runs. This is ensured by using barrier. | ||
1047 | */ | ||
1048 | static irqreturn_t | ||
1049 | atmel_spi_pio_interrupt(int irq, void *dev_id) | ||
1050 | { | ||
1051 | struct spi_master *master = dev_id; | ||
1052 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
1053 | u32 status, pending, imr; | ||
1054 | struct spi_transfer *xfer; | ||
1055 | int ret = IRQ_NONE; | ||
1056 | |||
1057 | imr = spi_readl(as, IMR); | ||
1058 | status = spi_readl(as, SR); | ||
1059 | pending = status & imr; | ||
1060 | |||
1061 | if (pending & SPI_BIT(OVRES)) { | ||
1062 | ret = IRQ_HANDLED; | ||
1063 | spi_writel(as, IDR, SPI_BIT(OVRES)); | ||
1064 | dev_warn(master->dev.parent, "overrun\n"); | ||
1065 | |||
1066 | /* | ||
1067 | * When we get an overrun, we disregard the current | ||
1068 | * transfer. Data will not be copied back from any | ||
1069 | * bounce buffer and msg->actual_len will not be | ||
1070 | * updated with the last xfer. | ||
1071 | * | ||
1072 | * We will also not process any remaining transfers in | ||
1073 | * the message. | ||
1074 | * | ||
1075 | * All actions are done in tasklet with done_status indication | ||
1076 | */ | ||
1077 | as->done_status = -EIO; | ||
1078 | smp_wmb(); | ||
1079 | |||
1080 | /* Clear any overrun happening while cleaning up */ | ||
1081 | spi_readl(as, SR); | ||
1082 | |||
1083 | tasklet_schedule(&as->tasklet); | ||
1084 | |||
1085 | } else if (pending & SPI_BIT(RDRF)) { | ||
1086 | atmel_spi_lock(as); | ||
1087 | |||
1088 | if (as->current_remaining_bytes) { | ||
1089 | ret = IRQ_HANDLED; | ||
1090 | xfer = as->current_transfer; | ||
1091 | atmel_spi_pump_pio_data(as, xfer); | ||
1092 | if (!as->current_remaining_bytes) { | ||
1093 | /* no more data to xfer, kick tasklet */ | ||
1094 | spi_writel(as, IDR, pending); | ||
1095 | tasklet_schedule(&as->tasklet); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | atmel_spi_unlock(as); | ||
1100 | } else { | ||
1101 | WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending); | ||
1102 | ret = IRQ_HANDLED; | ||
1103 | spi_writel(as, IDR, pending); | ||
1104 | } | ||
1105 | |||
1106 | return ret; | ||
596 | } | 1107 | } |
597 | 1108 | ||
598 | static irqreturn_t | 1109 | static irqreturn_t |
599 | atmel_spi_interrupt(int irq, void *dev_id) | 1110 | atmel_spi_pdc_interrupt(int irq, void *dev_id) |
600 | { | 1111 | { |
601 | struct spi_master *master = dev_id; | 1112 | struct spi_master *master = dev_id; |
602 | struct atmel_spi *as = spi_master_get_devdata(master); | 1113 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -697,14 +1208,14 @@ atmel_spi_interrupt(int irq, void *dev_id) | |||
697 | * | 1208 | * |
698 | * FIXME handle protocol options for xfer | 1209 | * FIXME handle protocol options for xfer |
699 | */ | 1210 | */ |
700 | atmel_spi_next_xfer(master, msg); | 1211 | atmel_spi_pdc_next_xfer(master, msg); |
701 | } | 1212 | } |
702 | } else { | 1213 | } else { |
703 | /* | 1214 | /* |
704 | * Keep going, we still have data to send in | 1215 | * Keep going, we still have data to send in |
705 | * the current transfer. | 1216 | * the current transfer. |
706 | */ | 1217 | */ |
707 | atmel_spi_next_xfer(master, msg); | 1218 | atmel_spi_pdc_next_xfer(master, msg); |
708 | } | 1219 | } |
709 | } | 1220 | } |
710 | 1221 | ||
@@ -875,13 +1386,10 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
875 | 1386 | ||
876 | /* | 1387 | /* |
877 | * DMA map early, for performance (empties dcache ASAP) and | 1388 | * DMA map early, for performance (empties dcache ASAP) and |
878 | * better fault reporting. This is a DMA-only driver. | 1389 | * better fault reporting. |
879 | * | ||
880 | * NOTE that if dma_unmap_single() ever starts to do work on | ||
881 | * platforms supported by this driver, we would need to clean | ||
882 | * up mappings for previously-mapped transfers. | ||
883 | */ | 1390 | */ |
884 | if (!msg->is_dma_mapped) { | 1391 | if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer) |
1392 | || as->use_pdc)) { | ||
885 | if (atmel_spi_dma_map_xfer(as, xfer) < 0) | 1393 | if (atmel_spi_dma_map_xfer(as, xfer) < 0) |
886 | return -ENOMEM; | 1394 | return -ENOMEM; |
887 | } | 1395 | } |
@@ -1000,6 +1508,7 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
1000 | 1508 | ||
1001 | spin_lock_init(&as->lock); | 1509 | spin_lock_init(&as->lock); |
1002 | INIT_LIST_HEAD(&as->queue); | 1510 | INIT_LIST_HEAD(&as->queue); |
1511 | |||
1003 | as->pdev = pdev; | 1512 | as->pdev = pdev; |
1004 | as->regs = ioremap(regs->start, resource_size(regs)); | 1513 | as->regs = ioremap(regs->start, resource_size(regs)); |
1005 | if (!as->regs) | 1514 | if (!as->regs) |
@@ -1010,8 +1519,28 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
1010 | 1519 | ||
1011 | atmel_get_caps(as); | 1520 | atmel_get_caps(as); |
1012 | 1521 | ||
1013 | ret = request_irq(irq, atmel_spi_interrupt, 0, | 1522 | as->use_dma = false; |
1014 | dev_name(&pdev->dev), master); | 1523 | as->use_pdc = false; |
1524 | if (as->caps.has_dma_support) { | ||
1525 | if (atmel_spi_configure_dma(as) == 0) | ||
1526 | as->use_dma = true; | ||
1527 | } else { | ||
1528 | as->use_pdc = true; | ||
1529 | } | ||
1530 | |||
1531 | if (as->caps.has_dma_support && !as->use_dma) | ||
1532 | dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n"); | ||
1533 | |||
1534 | if (as->use_pdc) { | ||
1535 | ret = request_irq(irq, atmel_spi_pdc_interrupt, 0, | ||
1536 | dev_name(&pdev->dev), master); | ||
1537 | } else { | ||
1538 | tasklet_init(&as->tasklet, atmel_spi_tasklet_func, | ||
1539 | (unsigned long)master); | ||
1540 | |||
1541 | ret = request_irq(irq, atmel_spi_pio_interrupt, 0, | ||
1542 | dev_name(&pdev->dev), master); | ||
1543 | } | ||
1015 | if (ret) | 1544 | if (ret) |
1016 | goto out_unmap_regs; | 1545 | goto out_unmap_regs; |
1017 | 1546 | ||
@@ -1025,7 +1554,9 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
1025 | } else { | 1554 | } else { |
1026 | spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); | 1555 | spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); |
1027 | } | 1556 | } |
1028 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | 1557 | |
1558 | if (as->use_pdc) | ||
1559 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | ||
1029 | spi_writel(as, CR, SPI_BIT(SPIEN)); | 1560 | spi_writel(as, CR, SPI_BIT(SPIEN)); |
1030 | 1561 | ||
1031 | /* go! */ | 1562 | /* go! */ |
@@ -1034,11 +1565,14 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
1034 | 1565 | ||
1035 | ret = spi_register_master(master); | 1566 | ret = spi_register_master(master); |
1036 | if (ret) | 1567 | if (ret) |
1037 | goto out_reset_hw; | 1568 | goto out_free_dma; |
1038 | 1569 | ||
1039 | return 0; | 1570 | return 0; |
1040 | 1571 | ||
1041 | out_reset_hw: | 1572 | out_free_dma: |
1573 | if (as->use_dma) | ||
1574 | atmel_spi_release_dma(as); | ||
1575 | |||
1042 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1576 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1043 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1577 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1044 | clk_disable(clk); | 1578 | clk_disable(clk); |
@@ -1046,6 +1580,8 @@ out_reset_hw: | |||
1046 | out_unmap_regs: | 1580 | out_unmap_regs: |
1047 | iounmap(as->regs); | 1581 | iounmap(as->regs); |
1048 | out_free_buffer: | 1582 | out_free_buffer: |
1583 | if (!as->use_pdc) | ||
1584 | tasklet_kill(&as->tasklet); | ||
1049 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, | 1585 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, |
1050 | as->buffer_dma); | 1586 | as->buffer_dma); |
1051 | out_free: | 1587 | out_free: |
@@ -1064,6 +1600,11 @@ static int atmel_spi_remove(struct platform_device *pdev) | |||
1064 | /* reset the hardware and block queue progress */ | 1600 | /* reset the hardware and block queue progress */ |
1065 | spin_lock_irq(&as->lock); | 1601 | spin_lock_irq(&as->lock); |
1066 | as->stopping = 1; | 1602 | as->stopping = 1; |
1603 | if (as->use_dma) { | ||
1604 | atmel_spi_stop_dma(as); | ||
1605 | atmel_spi_release_dma(as); | ||
1606 | } | ||
1607 | |||
1067 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1608 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1068 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1609 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1069 | spi_readl(as, SR); | 1610 | spi_readl(as, SR); |
@@ -1072,13 +1613,17 @@ static int atmel_spi_remove(struct platform_device *pdev) | |||
1072 | /* Terminate remaining queued transfers */ | 1613 | /* Terminate remaining queued transfers */ |
1073 | list_for_each_entry(msg, &as->queue, queue) { | 1614 | list_for_each_entry(msg, &as->queue, queue) { |
1074 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 1615 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1075 | if (!msg->is_dma_mapped) | 1616 | if (!msg->is_dma_mapped |
1617 | && (atmel_spi_use_dma(as, xfer) | ||
1618 | || as->use_pdc)) | ||
1076 | atmel_spi_dma_unmap_xfer(master, xfer); | 1619 | atmel_spi_dma_unmap_xfer(master, xfer); |
1077 | } | 1620 | } |
1078 | msg->status = -ESHUTDOWN; | 1621 | msg->status = -ESHUTDOWN; |
1079 | msg->complete(msg->context); | 1622 | msg->complete(msg->context); |
1080 | } | 1623 | } |
1081 | 1624 | ||
1625 | if (!as->use_pdc) | ||
1626 | tasklet_kill(&as->tasklet); | ||
1082 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, | 1627 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, |
1083 | as->buffer_dma); | 1628 | as->buffer_dma); |
1084 | 1629 | ||