diff options
| -rw-r--r-- | drivers/spi/amba-pl022.c | 516 | ||||
| -rw-r--r-- | include/linux/amba/pl022.h | 6 |
2 files changed, 434 insertions, 88 deletions
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index 8cdddc97325c..db0c67908d2b 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c | |||
| @@ -27,7 +27,6 @@ | |||
| 27 | /* | 27 | /* |
| 28 | * TODO: | 28 | * TODO: |
| 29 | * - add timeout on polled transfers | 29 | * - add timeout on polled transfers |
| 30 | * - add generic DMA framework support | ||
| 31 | */ | 30 | */ |
| 32 | 31 | ||
| 33 | #include <linux/init.h> | 32 | #include <linux/init.h> |
| @@ -45,6 +44,9 @@ | |||
| 45 | #include <linux/amba/pl022.h> | 44 | #include <linux/amba/pl022.h> |
| 46 | #include <linux/io.h> | 45 | #include <linux/io.h> |
| 47 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
| 47 | #include <linux/dmaengine.h> | ||
| 48 | #include <linux/dma-mapping.h> | ||
| 49 | #include <linux/scatterlist.h> | ||
| 48 | 50 | ||
| 49 | /* | 51 | /* |
| 50 | * This macro is used to define some register default values. | 52 | * This macro is used to define some register default values. |
| @@ -381,6 +383,14 @@ struct pl022 { | |||
| 381 | enum ssp_reading read; | 383 | enum ssp_reading read; |
| 382 | enum ssp_writing write; | 384 | enum ssp_writing write; |
| 383 | u32 exp_fifo_level; | 385 | u32 exp_fifo_level; |
| 386 | /* DMA settings */ | ||
| 387 | #ifdef CONFIG_DMA_ENGINE | ||
| 388 | struct dma_chan *dma_rx_channel; | ||
| 389 | struct dma_chan *dma_tx_channel; | ||
| 390 | struct sg_table sgt_rx; | ||
| 391 | struct sg_table sgt_tx; | ||
| 392 | char *dummypage; | ||
| 393 | #endif | ||
| 384 | }; | 394 | }; |
| 385 | 395 | ||
| 386 | /** | 396 | /** |
| @@ -406,7 +416,7 @@ struct chip_data { | |||
| 406 | u16 dmacr; | 416 | u16 dmacr; |
| 407 | u16 cpsr; | 417 | u16 cpsr; |
| 408 | u8 n_bytes; | 418 | u8 n_bytes; |
| 409 | u8 enable_dma:1; | 419 | bool enable_dma; |
| 410 | enum ssp_reading read; | 420 | enum ssp_reading read; |
| 411 | enum ssp_writing write; | 421 | enum ssp_writing write; |
| 412 | void (*cs_control) (u32 command); | 422 | void (*cs_control) (u32 command); |
| @@ -763,6 +773,371 @@ static void *next_transfer(struct pl022 *pl022) | |||
| 763 | } | 773 | } |
| 764 | return STATE_DONE; | 774 | return STATE_DONE; |
| 765 | } | 775 | } |
| 776 | |||
| 777 | /* | ||
| 778 | * This DMA functionality is only compiled in if we have | ||
| 779 | * access to the generic DMA devices/DMA engine. | ||
| 780 | */ | ||
| 781 | #ifdef CONFIG_DMA_ENGINE | ||
| 782 | static void unmap_free_dma_scatter(struct pl022 *pl022) | ||
| 783 | { | ||
| 784 | /* Unmap and free the SG tables */ | ||
| 785 | dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, | ||
| 786 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
| 787 | dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, | ||
| 788 | pl022->sgt_rx.nents, DMA_FROM_DEVICE); | ||
| 789 | sg_free_table(&pl022->sgt_rx); | ||
| 790 | sg_free_table(&pl022->sgt_tx); | ||
| 791 | } | ||
| 792 | |||
| 793 | static void dma_callback(void *data) | ||
| 794 | { | ||
| 795 | struct pl022 *pl022 = data; | ||
| 796 | struct spi_message *msg = pl022->cur_msg; | ||
| 797 | |||
| 798 | BUG_ON(!pl022->sgt_rx.sgl); | ||
| 799 | |||
| 800 | #ifdef VERBOSE_DEBUG | ||
| 801 | /* | ||
| 802 | * Optionally dump out buffers to inspect contents, this is | ||
| 803 | * good if you want to convince yourself that the loopback | ||
| 804 | * read/write contents are the same, when adopting to a new | ||
| 805 | * DMA engine. | ||
| 806 | */ | ||
| 807 | { | ||
| 808 | struct scatterlist *sg; | ||
| 809 | unsigned int i; | ||
| 810 | |||
| 811 | dma_sync_sg_for_cpu(&pl022->adev->dev, | ||
| 812 | pl022->sgt_rx.sgl, | ||
| 813 | pl022->sgt_rx.nents, | ||
| 814 | DMA_FROM_DEVICE); | ||
| 815 | |||
| 816 | for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { | ||
| 817 | dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); | ||
| 818 | print_hex_dump(KERN_ERR, "SPI RX: ", | ||
| 819 | DUMP_PREFIX_OFFSET, | ||
| 820 | 16, | ||
| 821 | 1, | ||
| 822 | sg_virt(sg), | ||
| 823 | sg_dma_len(sg), | ||
| 824 | 1); | ||
| 825 | } | ||
| 826 | for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { | ||
| 827 | dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); | ||
| 828 | print_hex_dump(KERN_ERR, "SPI TX: ", | ||
| 829 | DUMP_PREFIX_OFFSET, | ||
| 830 | 16, | ||
| 831 | 1, | ||
| 832 | sg_virt(sg), | ||
| 833 | sg_dma_len(sg), | ||
| 834 | 1); | ||
| 835 | } | ||
| 836 | } | ||
| 837 | #endif | ||
| 838 | |||
| 839 | unmap_free_dma_scatter(pl022); | ||
| 840 | |||
| 841 | /* Update total bytes transferred */ | ||
| 842 | msg->actual_length += pl022->cur_transfer->len; | ||
| 843 | if (pl022->cur_transfer->cs_change) | ||
| 844 | pl022->cur_chip-> | ||
| 845 | cs_control(SSP_CHIP_DESELECT); | ||
| 846 | |||
| 847 | /* Move to next transfer */ | ||
| 848 | msg->state = next_transfer(pl022); | ||
| 849 | tasklet_schedule(&pl022->pump_transfers); | ||
| 850 | } | ||
| 851 | |||
| 852 | static void setup_dma_scatter(struct pl022 *pl022, | ||
| 853 | void *buffer, | ||
| 854 | unsigned int length, | ||
| 855 | struct sg_table *sgtab) | ||
| 856 | { | ||
| 857 | struct scatterlist *sg; | ||
| 858 | int bytesleft = length; | ||
| 859 | void *bufp = buffer; | ||
| 860 | int mapbytes; | ||
| 861 | int i; | ||
| 862 | |||
| 863 | if (buffer) { | ||
| 864 | for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { | ||
| 865 | /* | ||
| 866 | * If there are less bytes left than what fits | ||
| 867 | * in the current page (plus page alignment offset) | ||
| 868 | * we just feed in this, else we stuff in as much | ||
| 869 | * as we can. | ||
| 870 | */ | ||
| 871 | if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) | ||
| 872 | mapbytes = bytesleft; | ||
| 873 | else | ||
| 874 | mapbytes = PAGE_SIZE - offset_in_page(bufp); | ||
| 875 | sg_set_page(sg, virt_to_page(bufp), | ||
| 876 | mapbytes, offset_in_page(bufp)); | ||
| 877 | bufp += mapbytes; | ||
| 878 | bytesleft -= mapbytes; | ||
| 879 | dev_dbg(&pl022->adev->dev, | ||
| 880 | "set RX/TX target page @ %p, %d bytes, %d left\n", | ||
| 881 | bufp, mapbytes, bytesleft); | ||
| 882 | } | ||
| 883 | } else { | ||
| 884 | /* Map the dummy buffer on every page */ | ||
| 885 | for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { | ||
| 886 | if (bytesleft < PAGE_SIZE) | ||
| 887 | mapbytes = bytesleft; | ||
| 888 | else | ||
| 889 | mapbytes = PAGE_SIZE; | ||
| 890 | sg_set_page(sg, virt_to_page(pl022->dummypage), | ||
| 891 | mapbytes, 0); | ||
| 892 | bytesleft -= mapbytes; | ||
| 893 | dev_dbg(&pl022->adev->dev, | ||
| 894 | "set RX/TX to dummy page %d bytes, %d left\n", | ||
| 895 | mapbytes, bytesleft); | ||
| 896 | |||
| 897 | } | ||
| 898 | } | ||
| 899 | BUG_ON(bytesleft); | ||
| 900 | } | ||
| 901 | |||
| 902 | /** | ||
| 903 | * configure_dma - configures the channels for the next transfer | ||
| 904 | * @pl022: SSP driver's private data structure | ||
| 905 | */ | ||
| 906 | static int configure_dma(struct pl022 *pl022) | ||
| 907 | { | ||
| 908 | struct dma_slave_config rx_conf = { | ||
| 909 | .src_addr = SSP_DR(pl022->phybase), | ||
| 910 | .direction = DMA_FROM_DEVICE, | ||
| 911 | .src_maxburst = pl022->vendor->fifodepth >> 1, | ||
| 912 | }; | ||
| 913 | struct dma_slave_config tx_conf = { | ||
| 914 | .dst_addr = SSP_DR(pl022->phybase), | ||
| 915 | .direction = DMA_TO_DEVICE, | ||
| 916 | .dst_maxburst = pl022->vendor->fifodepth >> 1, | ||
| 917 | }; | ||
| 918 | unsigned int pages; | ||
| 919 | int ret; | ||
| 920 | int sglen; | ||
| 921 | struct dma_chan *rxchan = pl022->dma_rx_channel; | ||
| 922 | struct dma_chan *txchan = pl022->dma_tx_channel; | ||
| 923 | struct dma_async_tx_descriptor *rxdesc; | ||
| 924 | struct dma_async_tx_descriptor *txdesc; | ||
| 925 | dma_cookie_t cookie; | ||
| 926 | |||
| 927 | /* Check that the channels are available */ | ||
| 928 | if (!rxchan || !txchan) | ||
| 929 | return -ENODEV; | ||
| 930 | |||
| 931 | switch (pl022->read) { | ||
| 932 | case READING_NULL: | ||
| 933 | /* Use the same as for writing */ | ||
| 934 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
| 935 | break; | ||
| 936 | case READING_U8: | ||
| 937 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
| 938 | break; | ||
| 939 | case READING_U16: | ||
| 940 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
| 941 | break; | ||
| 942 | case READING_U32: | ||
| 943 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 944 | break; | ||
| 945 | } | ||
| 946 | |||
| 947 | switch (pl022->write) { | ||
| 948 | case WRITING_NULL: | ||
| 949 | /* Use the same as for reading */ | ||
| 950 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
| 951 | break; | ||
| 952 | case WRITING_U8: | ||
| 953 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
| 954 | break; | ||
| 955 | case WRITING_U16: | ||
| 956 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
| 957 | break; | ||
| 958 | case WRITING_U32: | ||
| 959 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 960 | break; | ||
| 961 | } | ||
| 962 | |||
| 963 | /* SPI peculiarity: we need to read and write the same width */ | ||
| 964 | if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | ||
| 965 | rx_conf.src_addr_width = tx_conf.dst_addr_width; | ||
| 966 | if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | ||
| 967 | tx_conf.dst_addr_width = rx_conf.src_addr_width; | ||
| 968 | BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); | ||
| 969 | |||
| 970 | rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, | ||
| 971 | (unsigned long) &rx_conf); | ||
| 972 | txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, | ||
| 973 | (unsigned long) &tx_conf); | ||
| 974 | |||
| 975 | /* Create sglists for the transfers */ | ||
| 976 | pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; | ||
| 977 | dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); | ||
| 978 | |||
| 979 | ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); | ||
| 980 | if (ret) | ||
| 981 | goto err_alloc_rx_sg; | ||
| 982 | |||
| 983 | ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); | ||
| 984 | if (ret) | ||
| 985 | goto err_alloc_tx_sg; | ||
| 986 | |||
| 987 | /* Fill in the scatterlists for the RX+TX buffers */ | ||
| 988 | setup_dma_scatter(pl022, pl022->rx, | ||
| 989 | pl022->cur_transfer->len, &pl022->sgt_rx); | ||
| 990 | setup_dma_scatter(pl022, pl022->tx, | ||
| 991 | pl022->cur_transfer->len, &pl022->sgt_tx); | ||
| 992 | |||
| 993 | /* Map DMA buffers */ | ||
| 994 | sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, | ||
| 995 | pl022->sgt_rx.nents, DMA_FROM_DEVICE); | ||
| 996 | if (!sglen) | ||
| 997 | goto err_rx_sgmap; | ||
| 998 | |||
| 999 | sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, | ||
| 1000 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
| 1001 | if (!sglen) | ||
| 1002 | goto err_tx_sgmap; | ||
| 1003 | |||
| 1004 | /* Send both scatterlists */ | ||
| 1005 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | ||
| 1006 | pl022->sgt_rx.sgl, | ||
| 1007 | pl022->sgt_rx.nents, | ||
| 1008 | DMA_FROM_DEVICE, | ||
| 1009 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 1010 | if (!rxdesc) | ||
| 1011 | goto err_rxdesc; | ||
| 1012 | |||
| 1013 | txdesc = txchan->device->device_prep_slave_sg(txchan, | ||
| 1014 | pl022->sgt_tx.sgl, | ||
| 1015 | pl022->sgt_tx.nents, | ||
| 1016 | DMA_TO_DEVICE, | ||
| 1017 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
| 1018 | if (!txdesc) | ||
| 1019 | goto err_txdesc; | ||
| 1020 | |||
| 1021 | /* Put the callback on the RX transfer only, that should finish last */ | ||
| 1022 | rxdesc->callback = dma_callback; | ||
| 1023 | rxdesc->callback_param = pl022; | ||
| 1024 | |||
| 1025 | /* Submit and fire RX and TX with TX last so we're ready to read! */ | ||
| 1026 | cookie = rxdesc->tx_submit(rxdesc); | ||
| 1027 | if (dma_submit_error(cookie)) | ||
| 1028 | goto err_submit_rx; | ||
| 1029 | cookie = txdesc->tx_submit(txdesc); | ||
| 1030 | if (dma_submit_error(cookie)) | ||
| 1031 | goto err_submit_tx; | ||
| 1032 | rxchan->device->device_issue_pending(rxchan); | ||
| 1033 | txchan->device->device_issue_pending(txchan); | ||
| 1034 | |||
| 1035 | return 0; | ||
| 1036 | |||
| 1037 | err_submit_tx: | ||
| 1038 | err_submit_rx: | ||
| 1039 | err_txdesc: | ||
| 1040 | txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); | ||
| 1041 | err_rxdesc: | ||
| 1042 | rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); | ||
| 1043 | dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, | ||
| 1044 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
| 1045 | err_tx_sgmap: | ||
| 1046 | dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, | ||
| 1047 | pl022->sgt_tx.nents, DMA_FROM_DEVICE); | ||
| 1048 | err_rx_sgmap: | ||
| 1049 | sg_free_table(&pl022->sgt_tx); | ||
| 1050 | err_alloc_tx_sg: | ||
| 1051 | sg_free_table(&pl022->sgt_rx); | ||
| 1052 | err_alloc_rx_sg: | ||
| 1053 | return -ENOMEM; | ||
| 1054 | } | ||
| 1055 | |||
| 1056 | static int __init pl022_dma_probe(struct pl022 *pl022) | ||
| 1057 | { | ||
| 1058 | dma_cap_mask_t mask; | ||
| 1059 | |||
| 1060 | /* Try to acquire a generic DMA engine slave channel */ | ||
| 1061 | dma_cap_zero(mask); | ||
| 1062 | dma_cap_set(DMA_SLAVE, mask); | ||
| 1063 | /* | ||
| 1064 | * We need both RX and TX channels to do DMA, else do none | ||
| 1065 | * of them. | ||
| 1066 | */ | ||
| 1067 | pl022->dma_rx_channel = dma_request_channel(mask, | ||
| 1068 | pl022->master_info->dma_filter, | ||
| 1069 | pl022->master_info->dma_rx_param); | ||
| 1070 | if (!pl022->dma_rx_channel) { | ||
| 1071 | dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); | ||
| 1072 | goto err_no_rxchan; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | pl022->dma_tx_channel = dma_request_channel(mask, | ||
| 1076 | pl022->master_info->dma_filter, | ||
| 1077 | pl022->master_info->dma_tx_param); | ||
| 1078 | if (!pl022->dma_tx_channel) { | ||
| 1079 | dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); | ||
| 1080 | goto err_no_txchan; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
| 1084 | if (!pl022->dummypage) { | ||
| 1085 | dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); | ||
| 1086 | goto err_no_dummypage; | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", | ||
| 1090 | dma_chan_name(pl022->dma_rx_channel), | ||
| 1091 | dma_chan_name(pl022->dma_tx_channel)); | ||
| 1092 | |||
| 1093 | return 0; | ||
| 1094 | |||
| 1095 | err_no_dummypage: | ||
| 1096 | dma_release_channel(pl022->dma_tx_channel); | ||
| 1097 | err_no_txchan: | ||
| 1098 | dma_release_channel(pl022->dma_rx_channel); | ||
| 1099 | pl022->dma_rx_channel = NULL; | ||
| 1100 | err_no_rxchan: | ||
| 1101 | return -ENODEV; | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | static void terminate_dma(struct pl022 *pl022) | ||
| 1105 | { | ||
| 1106 | struct dma_chan *rxchan = pl022->dma_rx_channel; | ||
| 1107 | struct dma_chan *txchan = pl022->dma_tx_channel; | ||
| 1108 | |||
| 1109 | rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); | ||
| 1110 | txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); | ||
| 1111 | unmap_free_dma_scatter(pl022); | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | static void pl022_dma_remove(struct pl022 *pl022) | ||
| 1115 | { | ||
| 1116 | if (pl022->busy) | ||
| 1117 | terminate_dma(pl022); | ||
| 1118 | if (pl022->dma_tx_channel) | ||
| 1119 | dma_release_channel(pl022->dma_tx_channel); | ||
| 1120 | if (pl022->dma_rx_channel) | ||
| 1121 | dma_release_channel(pl022->dma_rx_channel); | ||
| 1122 | kfree(pl022->dummypage); | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | #else | ||
| 1126 | static inline int configure_dma(struct pl022 *pl022) | ||
| 1127 | { | ||
| 1128 | return -ENODEV; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | static inline int pl022_dma_probe(struct pl022 *pl022) | ||
| 1132 | { | ||
| 1133 | return 0; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | static inline void pl022_dma_remove(struct pl022 *pl022) | ||
| 1137 | { | ||
| 1138 | } | ||
| 1139 | #endif | ||
| 1140 | |||
| 766 | /** | 1141 | /** |
| 767 | * pl022_interrupt_handler - Interrupt handler for SSP controller | 1142 | * pl022_interrupt_handler - Interrupt handler for SSP controller |
| 768 | * | 1143 | * |
| @@ -794,14 +1169,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
| 794 | if (unlikely(!irq_status)) | 1169 | if (unlikely(!irq_status)) |
| 795 | return IRQ_NONE; | 1170 | return IRQ_NONE; |
| 796 | 1171 | ||
| 797 | /* This handles the error code interrupts */ | 1172 | /* |
| 1173 | * This handles the FIFO interrupts, the timeout | ||
| 1174 | * interrupts are flatly ignored, they cannot be | ||
| 1175 | * trusted. | ||
| 1176 | */ | ||
| 798 | if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { | 1177 | if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { |
| 799 | /* | 1178 | /* |
| 800 | * Overrun interrupt - bail out since our Data has been | 1179 | * Overrun interrupt - bail out since our Data has been |
| 801 | * corrupted | 1180 | * corrupted |
| 802 | */ | 1181 | */ |
| 803 | dev_err(&pl022->adev->dev, | 1182 | dev_err(&pl022->adev->dev, "FIFO overrun\n"); |
| 804 | "FIFO overrun\n"); | ||
| 805 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) | 1183 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) |
| 806 | dev_err(&pl022->adev->dev, | 1184 | dev_err(&pl022->adev->dev, |
| 807 | "RXFIFO is full\n"); | 1185 | "RXFIFO is full\n"); |
| @@ -896,8 +1274,8 @@ static int set_up_next_transfer(struct pl022 *pl022, | |||
| 896 | } | 1274 | } |
| 897 | 1275 | ||
| 898 | /** | 1276 | /** |
| 899 | * pump_transfers - Tasklet function which schedules next interrupt transfer | 1277 | * pump_transfers - Tasklet function which schedules next transfer |
| 900 | * when running in interrupt transfer mode. | 1278 | * when running in interrupt or DMA transfer mode. |
| 901 | * @data: SSP driver private data structure | 1279 | * @data: SSP driver private data structure |
| 902 | * | 1280 | * |
| 903 | */ | 1281 | */ |
| @@ -954,65 +1332,23 @@ static void pump_transfers(unsigned long data) | |||
| 954 | } | 1332 | } |
| 955 | /* Flush the FIFOs and let's go! */ | 1333 | /* Flush the FIFOs and let's go! */ |
| 956 | flush(pl022); | 1334 | flush(pl022); |
| 957 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | ||
| 958 | } | ||
| 959 | |||
| 960 | /** | ||
| 961 | * NOT IMPLEMENTED | ||
| 962 | * configure_dma - It configures the DMA pipes for DMA transfers | ||
| 963 | * @data: SSP driver's private data structure | ||
| 964 | * | ||
| 965 | */ | ||
| 966 | static int configure_dma(void *data) | ||
| 967 | { | ||
| 968 | struct pl022 *pl022 = data; | ||
| 969 | dev_dbg(&pl022->adev->dev, "configure DMA\n"); | ||
| 970 | return -ENOTSUPP; | ||
| 971 | } | ||
| 972 | |||
| 973 | /** | ||
| 974 | * do_dma_transfer - It handles transfers of the current message | ||
| 975 | * if it is DMA xfer. | ||
| 976 | * NOT FULLY IMPLEMENTED | ||
| 977 | * @data: SSP driver's private data structure | ||
| 978 | */ | ||
| 979 | static void do_dma_transfer(void *data) | ||
| 980 | { | ||
| 981 | struct pl022 *pl022 = data; | ||
| 982 | |||
| 983 | if (configure_dma(data)) { | ||
| 984 | dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); | ||
| 985 | goto err_config_dma; | ||
| 986 | } | ||
| 987 | 1335 | ||
| 988 | /* TODO: Implememt DMA setup of pipes here */ | 1336 | if (pl022->cur_chip->enable_dma) { |
| 989 | 1337 | if (configure_dma(pl022)) { | |
| 990 | /* Enable target chip, set up transfer */ | 1338 | dev_dbg(&pl022->adev->dev, |
| 991 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | 1339 | "configuration of DMA failed, fall back to interrupt mode\n"); |
| 992 | if (set_up_next_transfer(pl022, pl022->cur_transfer)) { | 1340 | goto err_config_dma; |
| 993 | /* Error path */ | 1341 | } |
| 994 | pl022->cur_msg->state = STATE_ERROR; | ||
| 995 | pl022->cur_msg->status = -EIO; | ||
| 996 | giveback(pl022); | ||
| 997 | return; | 1342 | return; |
| 998 | } | 1343 | } |
| 999 | /* Enable SSP */ | ||
| 1000 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), | ||
| 1001 | SSP_CR1(pl022->virtbase)); | ||
| 1002 | |||
| 1003 | /* TODO: Enable the DMA transfer here */ | ||
| 1004 | return; | ||
| 1005 | 1344 | ||
| 1006 | err_config_dma: | 1345 | err_config_dma: |
| 1007 | pl022->cur_msg->state = STATE_ERROR; | 1346 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); |
| 1008 | pl022->cur_msg->status = -EIO; | ||
| 1009 | giveback(pl022); | ||
| 1010 | return; | ||
| 1011 | } | 1347 | } |
| 1012 | 1348 | ||
| 1013 | static void do_interrupt_transfer(void *data) | 1349 | static void do_interrupt_dma_transfer(struct pl022 *pl022) |
| 1014 | { | 1350 | { |
| 1015 | struct pl022 *pl022 = data; | 1351 | u32 irqflags = ENABLE_ALL_INTERRUPTS; |
| 1016 | 1352 | ||
| 1017 | /* Enable target chip */ | 1353 | /* Enable target chip */ |
| 1018 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | 1354 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); |
| @@ -1023,15 +1359,26 @@ static void do_interrupt_transfer(void *data) | |||
| 1023 | giveback(pl022); | 1359 | giveback(pl022); |
| 1024 | return; | 1360 | return; |
| 1025 | } | 1361 | } |
| 1362 | /* If we're using DMA, set up DMA here */ | ||
| 1363 | if (pl022->cur_chip->enable_dma) { | ||
| 1364 | /* Configure DMA transfer */ | ||
| 1365 | if (configure_dma(pl022)) { | ||
| 1366 | dev_dbg(&pl022->adev->dev, | ||
| 1367 | "configuration of DMA failed, fall back to interrupt mode\n"); | ||
| 1368 | goto err_config_dma; | ||
| 1369 | } | ||
| 1370 | /* Disable interrupts in DMA mode, IRQ from DMA controller */ | ||
| 1371 | irqflags = DISABLE_ALL_INTERRUPTS; | ||
| 1372 | } | ||
| 1373 | err_config_dma: | ||
| 1026 | /* Enable SSP, turn on interrupts */ | 1374 | /* Enable SSP, turn on interrupts */ |
| 1027 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), | 1375 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), |
| 1028 | SSP_CR1(pl022->virtbase)); | 1376 | SSP_CR1(pl022->virtbase)); |
| 1029 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | 1377 | writew(irqflags, SSP_IMSC(pl022->virtbase)); |
| 1030 | } | 1378 | } |
| 1031 | 1379 | ||
| 1032 | static void do_polling_transfer(void *data) | 1380 | static void do_polling_transfer(struct pl022 *pl022) |
| 1033 | { | 1381 | { |
| 1034 | struct pl022 *pl022 = data; | ||
| 1035 | struct spi_message *message = NULL; | 1382 | struct spi_message *message = NULL; |
| 1036 | struct spi_transfer *transfer = NULL; | 1383 | struct spi_transfer *transfer = NULL; |
| 1037 | struct spi_transfer *previous = NULL; | 1384 | struct spi_transfer *previous = NULL; |
| @@ -1101,7 +1448,7 @@ static void do_polling_transfer(void *data) | |||
| 1101 | * | 1448 | * |
| 1102 | * This function checks if there is any spi message in the queue that | 1449 | * This function checks if there is any spi message in the queue that |
| 1103 | * needs processing and delegate control to appropriate function | 1450 | * needs processing and delegate control to appropriate function |
| 1104 | * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() | 1451 | * do_polling_transfer()/do_interrupt_dma_transfer() |
| 1105 | * based on the kind of the transfer | 1452 | * based on the kind of the transfer |
| 1106 | * | 1453 | * |
| 1107 | */ | 1454 | */ |
| @@ -1150,10 +1497,8 @@ static void pump_messages(struct work_struct *work) | |||
| 1150 | 1497 | ||
| 1151 | if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) | 1498 | if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) |
| 1152 | do_polling_transfer(pl022); | 1499 | do_polling_transfer(pl022); |
| 1153 | else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) | ||
| 1154 | do_interrupt_transfer(pl022); | ||
| 1155 | else | 1500 | else |
| 1156 | do_dma_transfer(pl022); | 1501 | do_interrupt_dma_transfer(pl022); |
| 1157 | } | 1502 | } |
| 1158 | 1503 | ||
| 1159 | 1504 | ||
| @@ -1469,23 +1814,6 @@ static int calculate_effective_freq(struct pl022 *pl022, | |||
| 1469 | } | 1814 | } |
| 1470 | 1815 | ||
| 1471 | /** | 1816 | /** |
| 1472 | * NOT IMPLEMENTED | ||
| 1473 | * process_dma_info - Processes the DMA info provided by client drivers | ||
| 1474 | * @chip_info: chip info provided by client device | ||
| 1475 | * @chip: Runtime state maintained by the SSP controller for each spi device | ||
| 1476 | * | ||
| 1477 | * This function processes and stores DMA config provided by client driver | ||
| 1478 | * into the runtime state maintained by the SSP controller driver | ||
| 1479 | */ | ||
| 1480 | static int process_dma_info(struct pl022_config_chip *chip_info, | ||
| 1481 | struct chip_data *chip) | ||
| 1482 | { | ||
| 1483 | dev_err(chip_info->dev, | ||
| 1484 | "cannot process DMA info, DMA not implemented!\n"); | ||
| 1485 | return -ENOTSUPP; | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | /** | ||
| 1489 | * pl022_setup - setup function registered to SPI master framework | 1817 | * pl022_setup - setup function registered to SPI master framework |
| 1490 | * @spi: spi device which is requesting setup | 1818 | * @spi: spi device which is requesting setup |
| 1491 | * | 1819 | * |
| @@ -1552,8 +1880,6 @@ static int pl022_setup(struct spi_device *spi) | |||
| 1552 | 1880 | ||
| 1553 | dev_dbg(&spi->dev, "allocated memory for controller data\n"); | 1881 | dev_dbg(&spi->dev, "allocated memory for controller data\n"); |
| 1554 | 1882 | ||
| 1555 | /* Pointer back to the SPI device */ | ||
| 1556 | chip_info->dev = &spi->dev; | ||
| 1557 | /* | 1883 | /* |
| 1558 | * Set controller data default values: | 1884 | * Set controller data default values: |
| 1559 | * Polling is supported by default | 1885 | * Polling is supported by default |
| @@ -1579,6 +1905,9 @@ static int pl022_setup(struct spi_device *spi) | |||
| 1579 | "using user supplied controller_data settings\n"); | 1905 | "using user supplied controller_data settings\n"); |
| 1580 | } | 1906 | } |
| 1581 | 1907 | ||
| 1908 | /* Pointer back to the SPI device */ | ||
| 1909 | chip_info->dev = &spi->dev; | ||
| 1910 | |||
| 1582 | /* | 1911 | /* |
| 1583 | * We can override with custom divisors, else we use the board | 1912 | * We can override with custom divisors, else we use the board |
| 1584 | * frequency setting | 1913 | * frequency setting |
| @@ -1637,9 +1966,8 @@ static int pl022_setup(struct spi_device *spi) | |||
| 1637 | chip->cpsr = 0; | 1966 | chip->cpsr = 0; |
| 1638 | if ((chip_info->com_mode == DMA_TRANSFER) | 1967 | if ((chip_info->com_mode == DMA_TRANSFER) |
| 1639 | && ((pl022->master_info)->enable_dma)) { | 1968 | && ((pl022->master_info)->enable_dma)) { |
| 1640 | chip->enable_dma = 1; | 1969 | chip->enable_dma = true; |
| 1641 | dev_dbg(&spi->dev, "DMA mode set in controller state\n"); | 1970 | dev_dbg(&spi->dev, "DMA mode set in controller state\n"); |
| 1642 | status = process_dma_info(chip_info, chip); | ||
| 1643 | if (status < 0) | 1971 | if (status < 0) |
| 1644 | goto err_config_params; | 1972 | goto err_config_params; |
| 1645 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, | 1973 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, |
| @@ -1647,7 +1975,7 @@ static int pl022_setup(struct spi_device *spi) | |||
| 1647 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, | 1975 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, |
| 1648 | SSP_DMACR_MASK_TXDMAE, 1); | 1976 | SSP_DMACR_MASK_TXDMAE, 1); |
| 1649 | } else { | 1977 | } else { |
| 1650 | chip->enable_dma = 0; | 1978 | chip->enable_dma = false; |
| 1651 | dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); | 1979 | dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); |
| 1652 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, | 1980 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, |
| 1653 | SSP_DMACR_MASK_RXDMAE, 0); | 1981 | SSP_DMACR_MASK_RXDMAE, 0); |
| @@ -1773,6 +2101,7 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
| 1773 | if (status) | 2101 | if (status) |
| 1774 | goto err_no_ioregion; | 2102 | goto err_no_ioregion; |
| 1775 | 2103 | ||
| 2104 | pl022->phybase = adev->res.start; | ||
| 1776 | pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); | 2105 | pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); |
| 1777 | if (pl022->virtbase == NULL) { | 2106 | if (pl022->virtbase == NULL) { |
| 1778 | status = -ENOMEM; | 2107 | status = -ENOMEM; |
| @@ -1799,6 +2128,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
| 1799 | dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); | 2128 | dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); |
| 1800 | goto err_no_irq; | 2129 | goto err_no_irq; |
| 1801 | } | 2130 | } |
| 2131 | |||
| 2132 | /* Get DMA channels */ | ||
| 2133 | if (platform_info->enable_dma) { | ||
| 2134 | status = pl022_dma_probe(pl022); | ||
| 2135 | if (status != 0) | ||
| 2136 | goto err_no_dma; | ||
| 2137 | } | ||
| 2138 | |||
| 1802 | /* Initialize and start queue */ | 2139 | /* Initialize and start queue */ |
| 1803 | status = init_queue(pl022); | 2140 | status = init_queue(pl022); |
| 1804 | if (status != 0) { | 2141 | if (status != 0) { |
| @@ -1827,6 +2164,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
| 1827 | err_start_queue: | 2164 | err_start_queue: |
| 1828 | err_init_queue: | 2165 | err_init_queue: |
| 1829 | destroy_queue(pl022); | 2166 | destroy_queue(pl022); |
| 2167 | pl022_dma_remove(pl022); | ||
| 2168 | err_no_dma: | ||
| 1830 | free_irq(adev->irq[0], pl022); | 2169 | free_irq(adev->irq[0], pl022); |
| 1831 | err_no_irq: | 2170 | err_no_irq: |
| 1832 | clk_put(pl022->clk); | 2171 | clk_put(pl022->clk); |
| @@ -1857,6 +2196,7 @@ pl022_remove(struct amba_device *adev) | |||
| 1857 | return status; | 2196 | return status; |
| 1858 | } | 2197 | } |
| 1859 | load_ssp_default_config(pl022); | 2198 | load_ssp_default_config(pl022); |
| 2199 | pl022_dma_remove(pl022); | ||
| 1860 | free_irq(adev->irq[0], pl022); | 2200 | free_irq(adev->irq[0], pl022); |
| 1861 | clk_disable(pl022->clk); | 2201 | clk_disable(pl022->clk); |
| 1862 | clk_put(pl022->clk); | 2202 | clk_put(pl022->clk); |
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index abf26cc47a2b..db6a191ddcf7 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h | |||
| @@ -228,6 +228,7 @@ enum ssp_chip_select { | |||
| 228 | }; | 228 | }; |
| 229 | 229 | ||
| 230 | 230 | ||
| 231 | struct dma_chan; | ||
| 231 | /** | 232 | /** |
| 232 | * struct pl022_ssp_master - device.platform_data for SPI controller devices. | 233 | * struct pl022_ssp_master - device.platform_data for SPI controller devices. |
| 233 | * @num_chipselect: chipselects are used to distinguish individual | 234 | * @num_chipselect: chipselects are used to distinguish individual |
| @@ -235,11 +236,16 @@ enum ssp_chip_select { | |||
| 235 | * each slave has a chipselect signal, but it's common that not | 236 | * each slave has a chipselect signal, but it's common that not |
| 236 | * every chipselect is connected to a slave. | 237 | * every chipselect is connected to a slave. |
| 237 | * @enable_dma: if true enables DMA driven transfers. | 238 | * @enable_dma: if true enables DMA driven transfers. |
| 239 | * @dma_rx_param: parameter to locate an RX DMA channel. | ||
| 240 | * @dma_tx_param: parameter to locate a TX DMA channel. | ||
| 238 | */ | 241 | */ |
| 239 | struct pl022_ssp_controller { | 242 | struct pl022_ssp_controller { |
| 240 | u16 bus_id; | 243 | u16 bus_id; |
| 241 | u8 num_chipselect; | 244 | u8 num_chipselect; |
| 242 | u8 enable_dma:1; | 245 | u8 enable_dma:1; |
| 246 | bool (*dma_filter)(struct dma_chan *chan, void *filter_param); | ||
| 247 | void *dma_rx_param; | ||
| 248 | void *dma_tx_param; | ||
| 243 | }; | 249 | }; |
| 244 | 250 | ||
| 245 | /** | 251 | /** |
