 81 files changed, 5770 insertions(+), 775 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
new file mode 100644
index 000000000000..d3058768b23d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -0,0 +1,47 @@
+Applied Micro X-Gene SoC DMA nodes
+
+DMA nodes are defined to describe on-chip DMA interfaces in
+APM X-Gene SoC.
+
+Required properties for DMA interfaces:
+- compatible: Should be "apm,xgene-storm-dma".
+- device_type: set to "dma".
+- reg: Address and length of the register set for the device.
+  It contains the information of registers in the following order:
+  1st - DMA control and status register address space.
+  2nd - Descriptor ring control and status register address space.
+  3rd - Descriptor ring command register address space.
+  4th - SoC efuse register address space.
+- interrupts: The DMA engine has 5 interrupt sources. The 1st interrupt is
+  the DMA error reporting interrupt. The 2nd, 3rd, 4th and 5th interrupts
+  are completion interrupts, one per DMA channel.
+- clocks: Reference to the clock entry.
+
+Optional properties:
+- dma-coherent : Present if DMA operations are coherent
+
+Example:
+	dmaclk: dmaclk@1f27c000 {
+		compatible = "apm,xgene-device-clock";
+		#clock-cells = <1>;
+		clocks = <&socplldiv2 0>;
+		reg = <0x0 0x1f27c000 0x0 0x1000>;
+		reg-names = "csr-reg";
+		clock-output-names = "dmaclk";
+	};
+
+	dma: dma@1f270000 {
+		compatible = "apm,xgene-storm-dma";
+		device_type = "dma";
+		reg = <0x0 0x1f270000 0x0 0x10000>,
+		      <0x0 0x1f200000 0x0 0x10000>,
+		      <0x0 0x1b008000 0x0 0x2000>,
+		      <0x0 0x1054a000 0x0 0x100>;
+		interrupts = <0x0 0x82 0x4>,
+			     <0x0 0xb8 0x4>,
+			     <0x0 0xb9 0x4>,
+			     <0x0 0xba 0x4>,
+			     <0x0 0xbb 0x4>;
+		dma-coherent;
+		clocks = <&dmaclk 0>;
+	};
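Note: the ordering of the reg and interrupts entries above is significant, since the driver fetches them by index. A minimal sketch of how a probe routine could pick them up, assuming the usual platform-device helpers (illustrative only, not the actual xgene-dma probe code):

	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static int demo_probe(struct platform_device *pdev)
	{
		struct resource *csr, *ring_csr, *ring_cmd, *efuse;
		int err_irq;

		/* Indices follow the binding's documented reg order. */
		csr      = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		ring_csr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		ring_cmd = platform_get_resource(pdev, IORESOURCE_MEM, 2);
		efuse    = platform_get_resource(pdev, IORESOURCE_MEM, 3);
		if (!csr || !ring_csr || !ring_cmd || !efuse)
			return -EINVAL;

		/* The first interrupt is the error interrupt, per the binding. */
		err_irq = platform_get_irq(pdev, 0);
		if (err_irq < 0)
			return err_irq;

		return 0;
	}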
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
new file mode 100644
index 000000000000..f25feee62b15
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -0,0 +1,56 @@
+* Ingenic JZ4780 DMA Controller
+
+Required properties:
+
+- compatible: Should be "ingenic,jz4780-dma"
+- reg: Should contain the DMA controller registers location and length.
+- interrupts: Should contain the interrupt specifier of the DMA controller.
+- interrupt-parent: Should be the phandle of the interrupt controller that serves the DMA controller.
+- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
+- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
+  DMA clients (see below).
+
+Optional properties:
+
+- ingenic,reserved-channels: Bitmask of channels to reserve for devices that
+  need a specific channel. These channels will only be assigned when explicitly
+  requested by a client. The primary use for this is channels 0 and 1, which
+  can be configured to have special behaviour for NAND/BCH when using
+  programmable firmware.
+
+Example:
+
+dma: dma@13420000 {
+	compatible = "ingenic,jz4780-dma";
+	reg = <0x13420000 0x10000>;
+
+	interrupt-parent = <&intc>;
+	interrupts = <10>;
+
+	clocks = <&cgu JZ4780_CLK_PDMA>;
+
+	#dma-cells = <2>;
+
+	ingenic,reserved-channels = <0x3>;
+};
+
+DMA clients must use the format described in dma.txt, giving a phandle to the
+DMA controller plus the following 2 integer cells:
+
+1. Request type: The DMA request type for transfers to/from the device on
+   the allocated channel, as defined in the SoC documentation.
+
+2. Channel: If set to 0xffffffff, any available channel will be allocated for
+   the client. Otherwise, the exact channel specified will be used. The channel
+   should be reserved on the DMA controller using the ingenic,reserved-channels
+   property.
+
+Example:
+
+uart0: serial@10030000 {
+	...
+	dmas = <&dma 0x14 0xffffffff
+		&dma 0x15 0xffffffff>;
+	dma-names = "tx", "rx";
+	...
+};
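Note: on the consuming side, a client driver normally never parses these cells itself; it resolves the dmas/dma-names pair through the generic dmaengine helpers. A minimal sketch, assuming a client node like the uart0 example above (error handling trimmed):

	#include <linux/dmaengine.h>

	static int demo_request_dma(struct device *dev)
	{
		struct dma_chan *chan;

		/* Resolves the "tx" entry, i.e. <&dma 0x14 0xffffffff>. */
		chan = dma_request_slave_channel(dev, "tx");
		if (!chan)
			return -ENODEV;

		/* ... configure, prepare and submit descriptors ... */

		dma_release_channel(chan);
		return 0;
	}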
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index f8c3311b7153..1c9d48ea4914 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -4,6 +4,7 @@ Required properties:
 - compatible: must be one of the following:
  * "qcom,bam-v1.4.0" for MSM8974, APQ8074 and APQ8084
  * "qcom,bam-v1.3.0" for APQ8064, IPQ8064 and MSM8960
+ * "qcom,bam-v1.7.0" for MSM8916
 - reg: Address range for DMA registers
 - interrupts: Should contain the one interrupt shared by all channels
 - #dma-cells: must be <1>, the cell in the dmas property of the client device
diff --git a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
deleted file mode 100644
index 61bca509d7b9..000000000000
--- a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* R-Car Audio DMAC peri peri Device Tree bindings
-
-Required properties:
-- compatible: should be "renesas,rcar-audmapp"
-- #dma-cells: should be <1>, see "dmas" property below
-
-Example:
-audmapp: audio-dma-pp@0xec740000 {
-	compatible = "renesas,rcar-audmapp";
-	#dma-cells = <1>;
-
-	reg = <0 0xec740000 0 0x200>;
-};
-
-
-* DMA client
-
-Required properties:
-- dmas: a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs,
-	where SRS/DRS are specified in the SoC manual.
-	It will be written into PDMACHCR as high 16-bit parts.
-- dma-names: a list of DMA channel names, one per "dmas" entry
-
-Example:
-
-	dmas = <&audmapp 0x2d00
-		&audmapp 0x3700>;
-	dma-names = "src0_ssiu0",
-		    "dvc0_ssiu0";
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
new file mode 100644
index 000000000000..040f365954cc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -0,0 +1,37 @@
+* Renesas USB DMA Controller Device Tree bindings
+
+Required Properties:
+- compatible: must contain "renesas,usb-dmac"
+- reg: base address and length of the registers block for the DMAC
+- interrupts: interrupt specifiers for the DMAC, one for each entry in
+  interrupt-names.
+- interrupt-names: one entry per channel, named "ch%u", where %u is the
+  channel number ranging from zero to the number of channels minus one.
+- clocks: a list of phandle + clock-specifier pairs.
+- #dma-cells: must be <1>, the cell specifies the channel number of the DMAC
+  port connected to the DMA client.
+- dma-channels: number of DMA channels
+
+Example: R8A7790 (R-Car H2) USB-DMACs
+
+	usb_dmac0: dma-controller@e65a0000 {
+		compatible = "renesas,usb-dmac";
+		reg = <0 0xe65a0000 0 0x100>;
+		interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
+			      0 109 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "ch0", "ch1";
+		clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
+		#dma-cells = <1>;
+		dma-channels = <2>;
+	};
+
+	usb_dmac1: dma-controller@e65b0000 {
+		compatible = "renesas,usb-dmac";
+		reg = <0 0xe65b0000 0 0x100>;
+		interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
+			      0 110 IRQ_TYPE_LEVEL_HIGH>;
+		interrupt-names = "ch0", "ch1";
+		clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
+		#dma-cells = <1>;
+		dma-channels = <2>;
+	};
diff --git a/MAINTAINERS b/MAINTAINERS
index ea0001760035..62cd43e7f56f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5009,6 +5009,11 @@ W:	http://industrypack.sourceforge.net
 S:	Maintained
 F:	drivers/ipack/
 
+INGENIC JZ4780 DMA Driver
+M:	Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com>
+S:	Maintained
+F:	drivers/dma/dma-jz4780.c
+
 INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
 M:	Mimi Zohar <zohar@linux.vnet.ibm.com>
 M:	Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 36aaeb12e1a5..bf37e3c532f6 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -754,12 +754,12 @@ static struct platform_device vcc_sdhi1 = {
 };
 
 /* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi0_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+static struct tmio_mmc_data sdhi0_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
 			  MMC_CAP_POWER_OFF_CARD,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
+	.flags		= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
 	.cd_gpio	= 167,
 };
 
@@ -796,12 +796,12 @@ static struct platform_device sdhi0_device = {
 };
 
 /* SDHI1 */
-static struct sh_mobile_sdhi_info sdhi1_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+static struct tmio_mmc_data sdhi1_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI1_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI1_RX,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
 			  MMC_CAP_POWER_OFF_CARD,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
+	.flags		= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD,
 	/* Port72 cannot generate IRQs, will be used in polling mode. */
 	.cd_gpio	= 72,
 };
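Note: this conversion (repeated across the board files below) moves the shdma slave IDs from sh_mobile_sdhi_info's dma_slave_tx/rx fields into tmio_mmc_data's generic chan_priv_tx/rx void pointers, which the tmio core hands to the DMA filter function when requesting a channel. Roughly, the consuming side looks like this sketch, assuming the standard dmaengine filter mechanism and the shdma_chan_filter() helper:

	#include <linux/dmaengine.h>
	#include <linux/mfd/tmio.h>
	#include <linux/shdma-base.h>

	static struct dma_chan *demo_request_tx_chan(struct tmio_mmc_data *pdata)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* shdma_chan_filter() matches channels against the slave ID. */
		return dma_request_channel(mask, shdma_chan_filter,
					   pdata->chan_priv_tx);
	}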
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index f27b5a833bf0..25558d1f417f 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -201,12 +201,12 @@ static struct rcar_phy_platform_data usb_phy_platform_data __initdata =
 
 
 /* SDHI */
-static struct sh_mobile_sdhi_info sdhi0_info __initdata = {
-	.dma_slave_tx	= HPBDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= HPBDMA_SLAVE_SDHI0_RX,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
-	.tmio_ocr_mask	= MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT,
+static struct tmio_mmc_data sdhi0_info __initdata = {
+	.chan_priv_tx	= (void *)HPBDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)HPBDMA_SLAVE_SDHI0_RX,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED,
+	.ocr_mask	= MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34,
+	.flags		= TMIO_MMC_HAS_IDLE_WAIT,
 };
 
 static struct resource sdhi0_resources[] __initdata = {
@@ -683,7 +683,7 @@ static void __init bockw_init(void)
 		platform_device_register_resndata(
 			NULL, "sh_mobile_sdhi", 0,
 			sdhi0_resources, ARRAY_SIZE(sdhi0_resources),
-			&sdhi0_info, sizeof(struct sh_mobile_sdhi_info));
+			&sdhi0_info, sizeof(struct tmio_mmc_data));
 	}
 
 	/* for Audio */
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index 7c9b63bdde9f..260d8319fd82 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -442,11 +442,11 @@ static struct platform_device vcc_sdhi2 = {
 };
 
 /* SDHI */
-static struct sh_mobile_sdhi_info sdhi0_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+static struct tmio_mmc_data sdhi0_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
+	.flags		= TMIO_MMC_HAS_IDLE_WAIT,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
 			  MMC_CAP_POWER_OFF_CARD,
 };
 
@@ -484,13 +484,13 @@ static struct platform_device sdhi0_device = {
 };
 
 /* Micro SD */
-static struct sh_mobile_sdhi_info sdhi2_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI2_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI2_RX,
-	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT |
+static struct tmio_mmc_data sdhi2_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI2_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI2_RX,
+	.flags		= TMIO_MMC_HAS_IDLE_WAIT |
 			  TMIO_MMC_USE_GPIO_CD |
 			  TMIO_MMC_WRPROTECT_DISABLE,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_POWER_OFF_CARD,
 	.cd_gpio	= 13,
 };
 
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 598f704f76ae..51db288f192a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -122,11 +122,11 @@ static struct resource sdhi0_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sdhi0_platform_data = {
-	.dma_slave_tx	= HPBDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= HPBDMA_SLAVE_SDHI0_RX,
-	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
+static struct tmio_mmc_data sdhi0_platform_data = {
+	.chan_priv_tx	= (void *)HPBDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)HPBDMA_SLAVE_SDHI0_RX,
+	.flags		= TMIO_MMC_WRPROTECT_DISABLE | TMIO_MMC_HAS_IDLE_WAIT,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED,
 };
 
 static struct platform_device sdhi0_device = {
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index e74f6e0a208c..c8d3e0e86678 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -102,6 +102,7 @@
 		#address-cells = <2>;
 		#size-cells = <2>;
 		ranges;
+		dma-ranges = <0x0 0x0 0x0 0x0 0x400 0x0>;
 
 		clocks {
 			#address-cells = <2>;
@@ -362,6 +363,15 @@
 				reg-names = "csr-reg";
 				clock-output-names = "pcie4clk";
 			};
+
+			dmaclk: dmaclk@1f27c000 {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&socplldiv2 0>;
+				reg = <0x0 0x1f27c000 0x0 0x1000>;
+				reg-names = "csr-reg";
+				clock-output-names = "dmaclk";
+			};
 		};
 
 		pcie0: pcie@1f2b0000 {
@@ -684,5 +694,21 @@
 			interrupts = <0x0 0x41 0x4>;
 			clocks = <&rngpkaclk 0>;
 		};
+
+		dma: dma@1f270000 {
+			compatible = "apm,xgene-storm-dma";
+			device_type = "dma";
+			reg = <0x0 0x1f270000 0x0 0x10000>,
+			      <0x0 0x1f200000 0x0 0x10000>,
+			      <0x0 0x1b008000 0x0 0x2000>,
+			      <0x0 0x1054a000 0x0 0x100>;
+			interrupts = <0x0 0x82 0x4>,
+				     <0x0 0xb8 0x4>,
+				     <0x0 0xb9 0x4>,
+				     <0x0 0xba 0x4>,
+				     <0x0 0xbb 0x4>;
+			dma-coherent;
+			clocks = <&dmaclk 0>;
+		};
 	};
 };
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 669df51a82e3..324599bfad14 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -17,6 +17,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/io.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
@@ -243,10 +244,10 @@ static struct platform_device sh_mmcif_device = {
 };
 
 /* SDHI0 */
-static struct sh_mobile_sdhi_info sdhi_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI_RX,
-	.tmio_caps	= MMC_CAP_SD_HIGHSPEED,
+static struct tmio_mmc_data sdhi_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI_RX,
+	.capabilities	= MMC_CAP_SD_HIGHSPEED,
 };
 
 static struct resource sdhi_resources[] = {
diff --git a/arch/sh/boards/mach-ap325rxa/setup.c b/arch/sh/boards/mach-ap325rxa/setup.c
index d4b01d4cc102..cbd2a9f02a91 100644
--- a/arch/sh/boards/mach-ap325rxa/setup.c
+++ b/arch/sh/boards/mach-ap325rxa/setup.c
@@ -18,6 +18,7 @@
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/sh_flctl.h>
+#include <linux/mfd/tmio.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
 #include <linux/regulator/fixed.h>
@@ -447,8 +448,8 @@ static struct resource sdhi0_cn3_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sdhi0_cn3_data = {
-	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sdhi0_cn3_data = {
+	.capabilities	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi0_cn3_device = {
@@ -474,8 +475,8 @@ static struct resource sdhi1_cn7_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sdhi1_cn7_data = {
-	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sdhi1_cn7_data = {
+	.capabilities	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi1_cn7_device = {
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 0d3049244cd3..d531791f06ff 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -601,12 +601,12 @@ static struct platform_device sdhi0_power = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sdhi0_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_caps	= MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
+static struct tmio_mmc_data sdhi0_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
+	.capabilities	= MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
 			  MMC_CAP_NEEDS_POLL,
-	.tmio_flags	= TMIO_MMC_USE_GPIO_CD,
+	.flags		= TMIO_MMC_USE_GPIO_CD,
 	.cd_gpio	= GPIO_PTY7,
 };
 
@@ -635,12 +635,12 @@ static struct platform_device sdhi0_device = {
 
 #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
 /* SDHI1 */
-static struct sh_mobile_sdhi_info sdhi1_info = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
-	.tmio_caps	= MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
+static struct tmio_mmc_data sdhi1_info = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI1_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI1_RX,
+	.capabilities	= MMC_CAP_SDIO_IRQ | MMC_CAP_POWER_OFF_CARD |
 			  MMC_CAP_NEEDS_POLL,
-	.tmio_flags	= TMIO_MMC_USE_GPIO_CD,
+	.flags		= TMIO_MMC_USE_GPIO_CD,
 	.cd_gpio	= GPIO_PTW7,
 };
 
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index 1df4398f8375..7d997cec09c5 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -373,11 +373,11 @@ static struct resource kfr2r09_sh_sdhi0_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_flags	= TMIO_MMC_WRPROTECT_DISABLE,
-	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi0_data = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
+	.flags		= TMIO_MMC_WRPROTECT_DISABLE,
+	.capabilities	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device kfr2r09_sh_sdhi0_device = {
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 8b73194ed2ce..29b7c0dcfc51 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -15,6 +15,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mtd/physmap.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mtd/nand.h>
 #include <linux/i2c.h>
 #include <linux/regulator/fixed.h>
@@ -408,10 +409,10 @@ static struct resource sdhi_cn9_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sh7724_sdhi_data = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi_data = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
+	.capabilities	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi_cn9_device = {
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 1162bc6945a3..4f6635a075f2 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
+#include <linux/mfd/tmio.h>
 #include <linux/mtd/physmap.h>
 #include <linux/delay.h>
 #include <linux/regulator/fixed.h>
@@ -468,10 +469,10 @@ static struct resource sdhi0_cn7_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sh7724_sdhi0_data = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI0_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI0_RX,
-	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi0_data = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI0_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI0_RX,
+	.capabilities	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi0_cn7_device = {
@@ -497,10 +498,10 @@ static struct resource sdhi1_cn8_resources[] = {
 	},
 };
 
-static struct sh_mobile_sdhi_info sh7724_sdhi1_data = {
-	.dma_slave_tx	= SHDMA_SLAVE_SDHI1_TX,
-	.dma_slave_rx	= SHDMA_SLAVE_SDHI1_RX,
-	.tmio_caps	= MMC_CAP_SDIO_IRQ,
+static struct tmio_mmc_data sh7724_sdhi1_data = {
+	.chan_priv_tx	= (void *)SHDMA_SLAVE_SDHI1_TX,
+	.chan_priv_rx	= (void *)SHDMA_SLAVE_SDHI1_RX,
+	.capabilities	= MMC_CAP_SDIO_IRQ,
 };
 
 static struct platform_device sdhi1_cn8_device = {
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 91eced044321..fd7ac13f2574 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -112,6 +112,17 @@ config FSL_DMA
 	  EloPlus is on mpc85xx and mpc86xx and Pxxx parts, and the Elo3 is on
 	  some Txxx and Bxxx parts.
 
+config FSL_RAID
+	tristate "Freescale RAID engine Support"
+	depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	---help---
+	  Enable support for the Freescale RAID Engine. The RAID Engine is
+	  available on some QorIQ SoCs (like the P5020/P5040). It can
+	  offload memcpy, XOR and PQ computation for RAID5/6.
+
 source "drivers/dma/hsu/Kconfig"
 
 config MPC512X_DMA
@@ -347,6 +358,16 @@ config DMA_JZ4740
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 
+config DMA_JZ4780
+	tristate "JZ4780 DMA support"
+	depends on MACH_JZ4780
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  This selects support for the DMA controller in Ingenic JZ4780 SoCs.
+	  If you have a board based on such a SoC and wish to use DMA for
+	  devices which can use the DMA controller, say Y or M here.
+
 config K3_DMA
 	tristate "Hisilicon K3 DMA support"
 	depends on ARCH_HI3xxx
@@ -414,6 +435,14 @@ config IMG_MDC_DMA
 	help
 	  Enable support for the IMG multi-threaded DMA controller (MDC).
 
+config XGENE_DMA
+	tristate "APM X-Gene DMA support"
+	select DMA_ENGINE
+	select DMA_ENGINE_RAID
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Enable support for the APM X-Gene SoC DMA engine.
+
 config DMA_ENGINE
 	bool
 
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7e8301cb489d..69f77d5ba53b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -41,9 +41,11 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
 obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_DMA_JZ4780) += dma-jz4780.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_RAID) += fsl_raid.o
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
@@ -51,3 +53,4 @@ obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 83aa55d6fa5d..49d396ec06e5 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -15,10 +15,6 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- *
  * The full GNU General Public License is in this distribution in the file
  * called COPYING.
  *
@@ -1195,11 +1191,6 @@ static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
 /*
  * The DMA ENGINE API
  */
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
-{
-	return 0;
-}
-
 static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
 	/* Ensure all queued descriptors are freed */
@@ -2066,7 +2057,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	/* Initialize memcpy engine */
 	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
 	pl08x->memcpy.dev = &adev->dev;
-	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
 	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
 	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
 	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
@@ -2085,7 +2075,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
 	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
 	dma_cap_set(DMA_CYCLIC, pl08x->slave.cap_mask);
 	pl08x->slave.dev = &adev->dev;
-	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
 	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
 	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
 	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
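Note: dropping these empty stubs (here and in dma-jz4740.c below) relies on a dmaengine core change from the same series that made the channel allocation callbacks optional. The core's channel-get path now only invokes the hook when it is set, conceptually along these lines (a paraphrase of the idea, not the exact upstream code):

	#include <linux/dmaengine.h>

	static int demo_chan_get(struct dma_chan *chan)
	{
		/* Treat a missing callback as "nothing to allocate". */
		if (chan->device->device_alloc_chan_resources)
			return chan->device->device_alloc_chan_resources(chan);

		return 0;
	}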
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 0b4fc6fb48ce..57b2141ddddc 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -65,6 +65,21 @@ static void atc_issue_pending(struct dma_chan *chan);
 
 /*----------------------------------------------------------------------*/
 
+static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
+						size_t len)
+{
+	unsigned int width;
+
+	if (!((src | dst | len) & 3))
+		width = 2;
+	else if (!((src | dst | len) & 1))
+		width = 1;
+	else
+		width = 0;
+
+	return width;
+}
+
 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
 {
 	return list_first_entry(&atchan->active_list,
@@ -659,16 +674,10 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	 * We can be a lot more clever here, but this should take care
 	 * of the most common optimization.
	 */
-	if (!((src | dest | len) & 3)) {
-		ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
-		src_width = dst_width = 2;
-	} else if (!((src | dest | len) & 1)) {
-		ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
-		src_width = dst_width = 1;
-	} else {
-		ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
-		src_width = dst_width = 0;
-	}
+	src_width = dst_width = atc_get_xfer_width(src, dest, len);
+
+	ctrla = ATC_SRC_WIDTH(src_width) |
+		ATC_DST_WIDTH(dst_width);
 
 	for (offset = 0; offset < len; offset += xfer_count << src_width) {
 		xfer_count = min_t(size_t, (len - offset) >> src_width,
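Note: atc_get_xfer_width() picks the widest transfer width whose alignment every operand satisfies; OR-ing source address, destination address and length together means a single bit test covers all three at once. Tracing the helper with a few hypothetical operands:

	/* All three are multiples of 4, so (src | dst | len) & 3 == 0: */
	atc_get_xfer_width(0x1000, 0x2004, 64);	/* -> 2 (32-bit words) */
	/* All even, but not all word-aligned: */
	atc_get_xfer_width(0x1002, 0x2004, 64);	/* -> 1 (16-bit halfwords) */
	/* An odd source address forces byte transfers: */
	atc_get_xfer_width(0x1001, 0x2004, 64);	/* -> 0 (8-bit bytes) */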
@@ -862,6 +871,144 @@ err:
 }
 
 /**
+ * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
+ * @chan: the channel to prepare operation on
+ * @dst_sg: destination scatterlist
+ * @dst_nents: number of destination scatterlist entries
+ * @src_sg: source scatterlist
+ * @src_nents: number of source scatterlist entries
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_sg(struct dma_chan *chan,
+		struct scatterlist *dst_sg, unsigned int dst_nents,
+		struct scatterlist *src_sg, unsigned int src_nents,
+		unsigned long flags)
+{
+	struct at_dma_chan *atchan = to_at_dma_chan(chan);
+	struct at_desc *desc = NULL;
+	struct at_desc *first = NULL;
+	struct at_desc *prev = NULL;
+	unsigned int src_width;
+	unsigned int dst_width;
+	size_t xfer_count;
+	u32 ctrla;
+	u32 ctrlb;
+	size_t dst_len = 0, src_len = 0;
+	dma_addr_t dst = 0, src = 0;
+	size_t len = 0, total_len = 0;
+
+	if (unlikely(dst_nents == 0 || src_nents == 0))
+		return NULL;
+
+	if (unlikely(dst_sg == NULL || src_sg == NULL))
+		return NULL;
+
+	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+		| ATC_SRC_ADDR_MODE_INCR
+		| ATC_DST_ADDR_MODE_INCR
+		| ATC_FC_MEM2MEM;
+
+	/*
+	 * loop until there is either no more source or no more destination
+	 * scatterlist entry
+	 */
+	while (true) {
+
+		/* prepare the next transfer */
+		if (dst_len == 0) {
+
+			/* no more destination scatterlist entries */
+			if (!dst_sg || !dst_nents)
+				break;
+
+			dst = sg_dma_address(dst_sg);
+			dst_len = sg_dma_len(dst_sg);
+
+			dst_sg = sg_next(dst_sg);
+			dst_nents--;
+		}
+
+		if (src_len == 0) {
+
+			/* no more source scatterlist entries */
+			if (!src_sg || !src_nents)
+				break;
+
+			src = sg_dma_address(src_sg);
+			src_len = sg_dma_len(src_sg);
+
+			src_sg = sg_next(src_sg);
+			src_nents--;
+		}
+
+		len = min_t(size_t, src_len, dst_len);
+		if (len == 0)
+			continue;
+
+		/* take care for the alignment */
+		src_width = dst_width = atc_get_xfer_width(src, dst, len);
+
+		ctrla = ATC_SRC_WIDTH(src_width) |
+			ATC_DST_WIDTH(dst_width);
+
+		/*
+		 * The number of transfers to set up refers to the source
+		 * width, which depends on the alignment.
+		 */
+		xfer_count = len >> src_width;
+		if (xfer_count > ATC_BTSIZE_MAX) {
+			xfer_count = ATC_BTSIZE_MAX;
+			len = ATC_BTSIZE_MAX << src_width;
+		}
+
+		/* create the transfer */
+		desc = atc_desc_get(atchan);
+		if (!desc)
+			goto err_desc_get;
+
+		desc->lli.saddr = src;
+		desc->lli.daddr = dst;
+		desc->lli.ctrla = ctrla | xfer_count;
+		desc->lli.ctrlb = ctrlb;
+
+		desc->txd.cookie = 0;
+		desc->len = len;
+
+		/*
+		 * Although we only need the transfer width for the first and
+		 * the last descriptor, it's easier to set it for all
+		 * descriptors.
+		 */
+		desc->tx_width = src_width;
+
+		atc_desc_chain(&first, &prev, desc);
+
+		/* update the lengths and addresses for the next loop cycle */
+		dst_len -= len;
+		src_len -= len;
+		dst += len;
+		src += len;
+
+		total_len += len;
+	}
+
+	/* First descriptor of the chain embeds additional information */
+	first->txd.cookie = -EBUSY;
+	first->total_len = total_len;
+
+	/* set end-of-link to the last link descriptor of the list */
+	set_desc_eol(desc);
+
+	first->txd.flags = flags; /* client is in control of this ack */
+
+	return &first->txd;
+
+err_desc_get:
+	atc_desc_put(atchan, first);
+	return NULL;
+}
+
+/**
  * atc_dma_cyclic_check_values
  * Check for too big/unaligned periods and unaligned DMA buffer
  */
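Note: since DMA_SG is an ordinary dmaengine capability, a memory-to-memory client could exercise the new callback roughly as follows (a minimal sketch; the scatterlists are assumed to be DMA-mapped already, and completion handling is omitted):

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	static int demo_sg_copy(struct dma_chan *chan,
				struct scatterlist *dst_sg, unsigned int dst_nents,
				struct scatterlist *src_sg, unsigned int src_nents)
	{
		struct dma_async_tx_descriptor *tx;
		dma_cookie_t cookie;

		tx = chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
						      src_sg, src_nents,
						      DMA_PREP_INTERRUPT);
		if (!tx)
			return -ENOMEM;

		cookie = dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return dma_submit_error(cookie);
	}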
@@ -1461,8 +1608,10 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	/* setup platform data for each SoC */
 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
 	/* get DMA parameters from controller type */
 	plat_dat = at_dma_get_driver_data(pdev);
@@ -1582,11 +1731,15 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
+	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
+		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
+
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
+	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
 	  plat_dat->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index d9891d3461f6..933e4b338459 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1154,8 +1154,10 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s\n", __func__);
 
 	spin_lock_bh(&atchan->lock);
-	if (!at_xdmac_chan_is_paused(atchan))
+	if (!at_xdmac_chan_is_paused(atchan)) {
+		spin_unlock_bh(&atchan->lock);
 		return 0;
+	}
 
 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
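Note: this hunk fixes a classic early-return lock leak: at_xdmac_device_resume() bailed out with atchan->lock still held whenever the channel was not paused. The corrected shape, sketched generically with hypothetical demo_* names:

	#include <linux/spinlock.h>

	struct demo_chan {		/* hypothetical stand-in for at_xdmac_chan */
		spinlock_t lock;
		bool paused;
	};

	static int demo_resume(struct demo_chan *chan)
	{
		spin_lock_bh(&chan->lock);
		if (!chan->paused) {
			/* the fix: unlock on the early-return path too */
			spin_unlock_bh(&chan->lock);
			return 0;
		}

		chan->paused = false;	/* stands in for the real resume work */
		spin_unlock_bh(&chan->lock);
		return 0;
	}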
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index fa378d88f6c8..180fedb418cc 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -30,7 +30,7 @@
 #define DRIVER_NAME "bestcomm-core"
 
 /* MPC5200 device tree match tables */
-static struct of_device_id mpc52xx_sram_ids[] = {
+static const struct of_device_id mpc52xx_sram_ids[] = {
 	{ .compatible = "fsl,mpc5200-sram", },
 	{ .compatible = "mpc5200-sram", },
 	{}
@@ -481,7 +481,7 @@ static int mpc52xx_bcom_remove(struct platform_device *op)
 	return 0;
 }
 
-static struct of_device_id mpc52xx_bcom_of_match[] = {
+static const struct of_device_id mpc52xx_bcom_of_match[] = {
 	{ .compatible = "fsl,mpc5200-bestcomm", },
 	{ .compatible = "mpc5200-bestcomm", },
 	{},
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index 84884418fd30..7638b24ce8d0 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -7,10 +7,6 @@
  * Free Software Foundation; either version 2 of the License, or (at your
  * option) any later version.
  *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
  */
 
 #include <linux/dmaengine.h>
@@ -343,7 +339,7 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan)
 {
 	spin_lock(&chan->vchan.lock);
 	if (chan->desc) {
-		if (chan->desc && chan->desc->cyclic) {
+		if (chan->desc->cyclic) {
 			vchan_cyclic_callback(&chan->desc->vdesc);
 		} else {
 			if (chan->next_sg == chan->desc->num_sgs) {
@@ -496,11 +492,6 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
 	return status;
 }
 
-static int jz4740_dma_alloc_chan_resources(struct dma_chan *c)
-{
-	return 0;
-}
-
 static void jz4740_dma_free_chan_resources(struct dma_chan *c)
 {
 	vchan_free_chan_resources(to_virt_chan(c));
@@ -543,7 +534,6 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
-	dd->device_alloc_chan_resources = jz4740_dma_alloc_chan_resources;
 	dd->device_free_chan_resources = jz4740_dma_free_chan_resources;
 	dd->device_tx_status = jz4740_dma_tx_status;
 	dd->device_issue_pending = jz4740_dma_issue_pending;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c new file mode 100644 index 000000000000..26d2f0e09ea3 --- /dev/null +++ b/drivers/dma/dma-jz4780.c | |||
| @@ -0,0 +1,877 @@ | |||
| 1 | /* | ||
| 2 | * Ingenic JZ4780 DMA controller | ||
| 3 | * | ||
| 4 | * Copyright (c) 2015 Imagination Technologies | ||
| 5 | * Author: Alex Smith <alex@alex-smith.me.uk> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify it | ||
| 8 | * under the terms of the GNU General Public License as published by the | ||
| 9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 10 | * option) any later version. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/clk.h> | ||
| 14 | #include <linux/dmapool.h> | ||
| 15 | #include <linux/init.h> | ||
| 16 | #include <linux/interrupt.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/of.h> | ||
| 19 | #include <linux/of_dma.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/slab.h> | ||
| 22 | |||
| 23 | #include "dmaengine.h" | ||
| 24 | #include "virt-dma.h" | ||
| 25 | |||
| 26 | #define JZ_DMA_NR_CHANNELS 32 | ||
| 27 | |||
| 28 | /* Global registers. */ | ||
| 29 | #define JZ_DMA_REG_DMAC 0x1000 | ||
| 30 | #define JZ_DMA_REG_DIRQP 0x1004 | ||
| 31 | #define JZ_DMA_REG_DDR 0x1008 | ||
| 32 | #define JZ_DMA_REG_DDRS 0x100c | ||
| 33 | #define JZ_DMA_REG_DMACP 0x101c | ||
| 34 | #define JZ_DMA_REG_DSIRQP 0x1020 | ||
| 35 | #define JZ_DMA_REG_DSIRQM 0x1024 | ||
| 36 | #define JZ_DMA_REG_DCIRQP 0x1028 | ||
| 37 | #define JZ_DMA_REG_DCIRQM 0x102c | ||
| 38 | |||
| 39 | /* Per-channel registers. */ | ||
| 40 | #define JZ_DMA_REG_CHAN(n) (n * 0x20) | ||
| 41 | #define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n)) | ||
| 42 | #define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n)) | ||
| 43 | #define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n)) | ||
| 44 | #define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n)) | ||
| 45 | #define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n)) | ||
| 46 | #define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n)) | ||
| 47 | #define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n)) | ||
| 48 | #define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n)) | ||
| 49 | |||
| 50 | #define JZ_DMA_DMAC_DMAE BIT(0) | ||
| 51 | #define JZ_DMA_DMAC_AR BIT(2) | ||
| 52 | #define JZ_DMA_DMAC_HLT BIT(3) | ||
| 53 | #define JZ_DMA_DMAC_FMSC BIT(31) | ||
| 54 | |||
| 55 | #define JZ_DMA_DRT_AUTO 0x8 | ||
| 56 | |||
| 57 | #define JZ_DMA_DCS_CTE BIT(0) | ||
| 58 | #define JZ_DMA_DCS_HLT BIT(2) | ||
| 59 | #define JZ_DMA_DCS_TT BIT(3) | ||
| 60 | #define JZ_DMA_DCS_AR BIT(4) | ||
| 61 | #define JZ_DMA_DCS_DES8 BIT(30) | ||
| 62 | |||
| 63 | #define JZ_DMA_DCM_LINK BIT(0) | ||
| 64 | #define JZ_DMA_DCM_TIE BIT(1) | ||
| 65 | #define JZ_DMA_DCM_STDE BIT(2) | ||
| 66 | #define JZ_DMA_DCM_TSZ_SHIFT 8 | ||
| 67 | #define JZ_DMA_DCM_TSZ_MASK (0x7 << JZ_DMA_DCM_TSZ_SHIFT) | ||
| 68 | #define JZ_DMA_DCM_DP_SHIFT 12 | ||
| 69 | #define JZ_DMA_DCM_SP_SHIFT 14 | ||
| 70 | #define JZ_DMA_DCM_DAI BIT(22) | ||
| 71 | #define JZ_DMA_DCM_SAI BIT(23) | ||
| 72 | |||
| 73 | #define JZ_DMA_SIZE_4_BYTE 0x0 | ||
| 74 | #define JZ_DMA_SIZE_1_BYTE 0x1 | ||
| 75 | #define JZ_DMA_SIZE_2_BYTE 0x2 | ||
| 76 | #define JZ_DMA_SIZE_16_BYTE 0x3 | ||
| 77 | #define JZ_DMA_SIZE_32_BYTE 0x4 | ||
| 78 | #define JZ_DMA_SIZE_64_BYTE 0x5 | ||
| 79 | #define JZ_DMA_SIZE_128_BYTE 0x6 | ||
| 80 | |||
| 81 | #define JZ_DMA_WIDTH_32_BIT 0x0 | ||
| 82 | #define JZ_DMA_WIDTH_8_BIT 0x1 | ||
| 83 | #define JZ_DMA_WIDTH_16_BIT 0x2 | ||
| 84 | |||
| 85 | #define JZ_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | ||
| 86 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | ||
| 87 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
| 88 | |||
| 89 | /** | ||
| 90 | * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller. | ||
| 91 | * @dcm: value for the DCM (channel command) register | ||
| 92 | * @dsa: source address | ||
| 93 | * @dta: target address | ||
| 94 | * @dtc: transfer count (number of blocks of the transfer size specified in DCM | ||
| 95 | * to transfer) in the low 24 bits, offset of the next descriptor from the | ||
| 96 | * descriptor base address in the upper 8 bits. | ||
| 97 | * @sd: target/source stride difference (in stride transfer mode). | ||
| 98 | * @drt: request type | ||
| 99 | */ | ||
| 100 | struct jz4780_dma_hwdesc { | ||
| 101 | uint32_t dcm; | ||
| 102 | uint32_t dsa; | ||
| 103 | uint32_t dta; | ||
| 104 | uint32_t dtc; | ||
| 105 | uint32_t sd; | ||
| 106 | uint32_t drt; | ||
| 107 | uint32_t reserved[2]; | ||
| 108 | }; | ||
| 109 | |||
| 110 | /* Size of allocations for hardware descriptor blocks. */ | ||
| 111 | #define JZ_DMA_DESC_BLOCK_SIZE PAGE_SIZE | ||
| 112 | #define JZ_DMA_MAX_DESC \ | ||
| 113 | (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc)) | ||
| 114 | |||
| 115 | struct jz4780_dma_desc { | ||
| 116 | struct virt_dma_desc vdesc; | ||
| 117 | |||
| 118 | struct jz4780_dma_hwdesc *desc; | ||
| 119 | dma_addr_t desc_phys; | ||
| 120 | unsigned int count; | ||
| 121 | enum dma_transaction_type type; | ||
| 122 | uint32_t status; | ||
| 123 | }; | ||
| 124 | |||
| 125 | struct jz4780_dma_chan { | ||
| 126 | struct virt_dma_chan vchan; | ||
| 127 | unsigned int id; | ||
| 128 | struct dma_pool *desc_pool; | ||
| 129 | |||
| 130 | uint32_t transfer_type; | ||
| 131 | uint32_t transfer_shift; | ||
| 132 | struct dma_slave_config config; | ||
| 133 | |||
| 134 | struct jz4780_dma_desc *desc; | ||
| 135 | unsigned int curr_hwdesc; | ||
| 136 | }; | ||
| 137 | |||
| 138 | struct jz4780_dma_dev { | ||
| 139 | struct dma_device dma_device; | ||
| 140 | void __iomem *base; | ||
| 141 | struct clk *clk; | ||
| 142 | unsigned int irq; | ||
| 143 | |||
| 144 | uint32_t chan_reserved; | ||
| 145 | struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS]; | ||
| 146 | }; | ||
| 147 | |||
| 148 | struct jz4780_dma_data { | ||
| 149 | uint32_t transfer_type; | ||
| 150 | int channel; | ||
| 151 | }; | ||
| 152 | |||
| 153 | static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan) | ||
| 154 | { | ||
| 155 | return container_of(chan, struct jz4780_dma_chan, vchan.chan); | ||
| 156 | } | ||
| 157 | |||
| 158 | static inline struct jz4780_dma_desc *to_jz4780_dma_desc( | ||
| 159 | struct virt_dma_desc *vdesc) | ||
| 160 | { | ||
| 161 | return container_of(vdesc, struct jz4780_dma_desc, vdesc); | ||
| 162 | } | ||
| 163 | |||
| 164 | static inline struct jz4780_dma_dev *jz4780_dma_chan_parent( | ||
| 165 | struct jz4780_dma_chan *jzchan) | ||
| 166 | { | ||
| 167 | return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev, | ||
| 168 | dma_device); | ||
| 169 | } | ||
| 170 | |||
| 171 | static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma, | ||
| 172 | unsigned int reg) | ||
| 173 | { | ||
| 174 | return readl(jzdma->base + reg); | ||
| 175 | } | ||
| 176 | |||
| 177 | static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma, | ||
| 178 | unsigned int reg, uint32_t val) | ||
| 179 | { | ||
| 180 | writel(val, jzdma->base + reg); | ||
| 181 | } | ||
| 182 | |||
| 183 | static struct jz4780_dma_desc *jz4780_dma_desc_alloc( | ||
| 184 | struct jz4780_dma_chan *jzchan, unsigned int count, | ||
| 185 | enum dma_transaction_type type) | ||
| 186 | { | ||
| 187 | struct jz4780_dma_desc *desc; | ||
| 188 | |||
| 189 | if (count > JZ_DMA_MAX_DESC) | ||
| 190 | return NULL; | ||
| 191 | |||
| 192 | desc = kzalloc(sizeof(*desc), GFP_NOWAIT); | ||
| 193 | if (!desc) | ||
| 194 | return NULL; | ||
| 195 | |||
| 196 | desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT, | ||
| 197 | &desc->desc_phys); | ||
| 198 | if (!desc->desc) { | ||
| 199 | kfree(desc); | ||
| 200 | return NULL; | ||
| 201 | } | ||
| 202 | |||
| 203 | desc->count = count; | ||
| 204 | desc->type = type; | ||
| 205 | return desc; | ||
| 206 | } | ||
| 207 | |||
| 208 | static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc) | ||
| 209 | { | ||
| 210 | struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc); | ||
| 211 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan); | ||
| 212 | |||
| 213 | dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys); | ||
| 214 | kfree(desc); | ||
| 215 | } | ||
| 216 | |||
| 217 | static int jz4780_dma_transfer_size(unsigned long val, int *ord) | ||
| 218 | { | ||
| 219 | *ord = ffs(val) - 1; | ||
| 220 | |||
| 221 | switch (*ord) { | ||
| 222 | case 0: | ||
| 223 | return JZ_DMA_SIZE_1_BYTE; | ||
| 224 | case 1: | ||
| 225 | return JZ_DMA_SIZE_2_BYTE; | ||
| 226 | case 2: | ||
| 227 | return JZ_DMA_SIZE_4_BYTE; | ||
| 228 | case 4: | ||
| 229 | return JZ_DMA_SIZE_16_BYTE; | ||
| 230 | case 5: | ||
| 231 | return JZ_DMA_SIZE_32_BYTE; | ||
| 232 | case 6: | ||
| 233 | return JZ_DMA_SIZE_64_BYTE; | ||
| 234 | case 7: | ||
| 235 | return JZ_DMA_SIZE_128_BYTE; | ||
| 236 | default: | ||
| 237 | return -EINVAL; | ||
| 238 | } | ||
| 239 | } | ||
| 240 | |||
| 241 | static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, | ||
| 242 | struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len, | ||
| 243 | enum dma_transfer_direction direction) | ||
| 244 | { | ||
| 245 | struct dma_slave_config *config = &jzchan->config; | ||
| 246 | uint32_t width, maxburst; | ||
| 247 | int tsz, ord; | ||
| 248 | |||
| 249 | if (direction == DMA_MEM_TO_DEV) { | ||
| 250 | desc->dcm = JZ_DMA_DCM_SAI; | ||
| 251 | desc->dsa = addr; | ||
| 252 | desc->dta = config->dst_addr; | ||
| 253 | desc->drt = jzchan->transfer_type; | ||
| 254 | |||
| 255 | width = config->dst_addr_width; | ||
| 256 | maxburst = config->dst_maxburst; | ||
| 257 | } else { | ||
| 258 | desc->dcm = JZ_DMA_DCM_DAI; | ||
| 259 | desc->dsa = config->src_addr; | ||
| 260 | desc->dta = addr; | ||
| 261 | desc->drt = jzchan->transfer_type; | ||
| 262 | |||
| 263 | width = config->src_addr_width; | ||
| 264 | maxburst = config->src_maxburst; | ||
| 265 | } | ||
| 266 | |||
| 267 | /* | ||
| 268 | * This calculates the maximum transfer size that can be used with the | ||
| 269 | * given address, length, width and maximum burst size. The address | ||
| 270 | * must be aligned to the transfer size, the total length must be | ||
| 271 | * divisible by the transfer size, and we must not use more than the | ||
| 272 | * maximum burst specified by the user. | ||
| 273 | */ | ||
| 274 | tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord); | ||
| | if (tsz < 0) | ||
| | return tsz; | ||
| 275 | jzchan->transfer_shift = ord; | ||
| 276 | |||
| 277 | switch (width) { | ||
| 278 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 279 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
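| | /* | ||
| | * The 1- and 2-byte dmaengine values already match the hardware | ||
| | * encodings (JZ_DMA_WIDTH_8_BIT and JZ_DMA_WIDTH_16_BIT), so no | ||
| | * conversion is needed. | ||
| | */ | ||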
| 280 | break; | ||
| 281 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 282 | width = JZ_DMA_WIDTH_32_BIT; | ||
| 283 | break; | ||
| 284 | default: | ||
| 285 | return -EINVAL; | ||
| 286 | } | ||
| 287 | |||
| 288 | desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT; | ||
| 289 | desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT; | ||
| 290 | desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT; | ||
| 291 | |||
| 292 | desc->dtc = len >> ord; | ||
| | return 0; | ||
| 293 | } | ||
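A worked example of the transfer-size calculation in the comment above (illustrative values only):

/* addr = 0x1000, len = 0x40, width = 4, maxburst = 8:
 *   addr | len | (width * maxburst) = 0x1000 | 0x40 | 0x20 = 0x1060
 *   the lowest set bit is 0x20, so ord = ffs(0x1060) - 1 = 5
 *   -> 32-byte transfers (JZ_DMA_SIZE_32_BYTE), dtc = 0x40 >> 5 = 2
 *      blocks, and 32 bytes never exceeds width * maxburst = 32.
 */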
| 294 | |||
| 295 | static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg( | ||
| 296 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | ||
| 297 | enum dma_transfer_direction direction, unsigned long flags) | ||
| 298 | { | ||
| 299 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 300 | struct jz4780_dma_desc *desc; | ||
| 301 | unsigned int i; | ||
| 302 | int err; | ||
| 303 | |||
| 304 | desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE); | ||
| 305 | if (!desc) | ||
| 306 | return NULL; | ||
| 307 | |||
| 308 | for (i = 0; i < sg_len; i++) { | ||
| 309 | err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], | ||
| 310 | sg_dma_address(&sgl[i]), | ||
| 311 | sg_dma_len(&sgl[i]), | ||
| 312 | direction); | ||
| 313 | if (err < 0) { | ||
| | /* free the partially-initialised descriptor to avoid a leak */ | ||
| | dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys); | ||
| | kfree(desc); | ||
| 314 | return ERR_PTR(err); | ||
| | } | ||
| 315 | |||
| 317 | desc->desc[i].dcm |= JZ_DMA_DCM_TIE; | ||
| 318 | |||
| 319 | if (i != (sg_len - 1)) { | ||
| 320 | /* Automatically proceed to the next descriptor. */ | ||
| 321 | desc->desc[i].dcm |= JZ_DMA_DCM_LINK; | ||
| 322 | |||
| 323 | /* | ||
| 324 | * The upper 8 bits of the DTC field in the descriptor | ||
| 325 | * must be set to (offset from descriptor base of next | ||
| 326 | * descriptor >> 4). | ||
| 327 | */ | ||
| 328 | desc->desc[i].dtc |= | ||
| 329 | (((i + 1) * sizeof(*desc->desc)) >> 4) << 24; | ||
| 330 | } | ||
| 331 | } | ||
| 332 | |||
| 333 | return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); | ||
| 334 | } | ||
| 335 | |||
| 336 | static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic( | ||
| 337 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
| 338 | size_t period_len, enum dma_transfer_direction direction, | ||
| 339 | unsigned long flags) | ||
| 340 | { | ||
| 341 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 342 | struct jz4780_dma_desc *desc; | ||
| 343 | unsigned int periods, i; | ||
| 344 | int err; | ||
| 345 | |||
| 346 | if (buf_len % period_len) | ||
| 347 | return NULL; | ||
| 348 | |||
| 349 | periods = buf_len / period_len; | ||
| 350 | |||
| 351 | desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC); | ||
| 352 | if (!desc) | ||
| 353 | return NULL; | ||
| 354 | |||
| 355 | for (i = 0; i < periods; i++) { | ||
| 356 | err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr, | ||
| 357 | period_len, direction); | ||
| 358 | if (err < 0) { | ||
| | /* free the partially-initialised descriptor to avoid a leak */ | ||
| | dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys); | ||
| | kfree(desc); | ||
| 359 | return ERR_PTR(err); | ||
| | } | ||
| 360 | |||
| 361 | buf_addr += period_len; | ||
| 362 | |||
| 363 | /* | ||
| 364 | * Set the link bit to indicate that the controller should | ||
| 365 | * automatically proceed to the next descriptor. In | ||
| 366 | * jz4780_dma_begin(), this will be cleared if we need to issue | ||
| 367 | * an interrupt after each period. | ||
| 368 | */ | ||
| 369 | desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK; | ||
| 370 | |||
| 371 | /* | ||
| 372 | * The upper 8 bits of the DTC field in the descriptor must be | ||
| 373 | * set to (offset from descriptor base of next descriptor >> 4). | ||
| 374 | * If this is the last descriptor, link it back to the first, | ||
| 375 | * i.e. leave offset set to 0, otherwise point to the next one. | ||
| 376 | */ | ||
| 377 | if (i != (periods - 1)) { | ||
| 378 | desc->desc[i].dtc |= | ||
| 379 | (((i + 1) * sizeof(*desc->desc)) >> 4) << 24; | ||
| 380 | } | ||
| 381 | } | ||
| 382 | |||
| 383 | return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); | ||
| 384 | } | ||
| 385 | |||
| 386 | static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy( | ||
| 387 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
| 388 | size_t len, unsigned long flags) | ||
| 389 | { | ||
| 390 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 391 | struct jz4780_dma_desc *desc; | ||
| 392 | int tsz; | ||
| 393 | int ord; | ||
| 394 | |||
| 395 | desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY); | ||
| 396 | if (!desc) | ||
| 397 | return NULL; | ||
| 398 | |||
| 399 | tsz = jz4780_dma_transfer_size(dest | src | len, &ord); | ||
| 400 | if (tsz < 0) { | ||
| | dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys); | ||
| | kfree(desc); | ||
| 401 | return ERR_PTR(tsz); | ||
| | } | ||
| 402 | |||
| 403 | desc->desc[0].dsa = src; | ||
| 404 | desc->desc[0].dta = dest; | ||
| 405 | desc->desc[0].drt = JZ_DMA_DRT_AUTO; | ||
| 406 | desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI | | ||
| 407 | tsz << JZ_DMA_DCM_TSZ_SHIFT | | ||
| 408 | JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT | | ||
| 409 | JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT; | ||
| 410 | desc->desc[0].dtc = len >> ord; | ||
| 411 | |||
| 412 | return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); | ||
| 413 | } | ||
| 414 | |||
| 415 | static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan) | ||
| 416 | { | ||
| 417 | struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); | ||
| 418 | struct virt_dma_desc *vdesc; | ||
| 419 | unsigned int i; | ||
| 420 | dma_addr_t desc_phys; | ||
| 421 | |||
| 422 | if (!jzchan->desc) { | ||
| 423 | vdesc = vchan_next_desc(&jzchan->vchan); | ||
| 424 | if (!vdesc) | ||
| 425 | return; | ||
| 426 | |||
| 427 | list_del(&vdesc->node); | ||
| 428 | |||
| 429 | jzchan->desc = to_jz4780_dma_desc(vdesc); | ||
| 430 | jzchan->curr_hwdesc = 0; | ||
| 431 | |||
| 432 | if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) { | ||
| 433 | /* | ||
| 434 | * The DMA controller doesn't support triggering an | ||
| 435 | * interrupt after processing each descriptor, only | ||
| 436 | * after processing an entire terminated list of | ||
| 437 | * descriptors. For a cyclic DMA setup the list of | ||
| 438 | * descriptors is not terminated so we can never get an | ||
| 439 | * interrupt. | ||
| 440 | * | ||
| 441 | * If the user requested a callback for a cyclic DMA | ||
| 442 | * setup then we work around this hardware limitation | ||
| 443 | * here by degrading to a set of unlinked descriptors | ||
| 444 | * which we will submit in sequence in response to the | ||
| 445 | * completion of processing the previous descriptor. | ||
| 446 | */ | ||
| 447 | for (i = 0; i < jzchan->desc->count; i++) | ||
| 448 | jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK; | ||
| 449 | } | ||
| 450 | } else { | ||
| 451 | /* | ||
| 452 | * There is an existing transfer, therefore this must be one | ||
| 453 | * for which we unlinked the descriptors above. Advance to the | ||
| 454 | * next one in the list. | ||
| 455 | */ | ||
| 456 | jzchan->curr_hwdesc = | ||
| 457 | (jzchan->curr_hwdesc + 1) % jzchan->desc->count; | ||
| 458 | } | ||
| 459 | |||
| 460 | /* Use 8-word descriptors. */ | ||
| 461 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8); | ||
| 462 | |||
| 463 | /* Write descriptor address and initiate descriptor fetch. */ | ||
| 464 | desc_phys = jzchan->desc->desc_phys + | ||
| 465 | (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc)); | ||
| 466 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys); | ||
| 467 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id)); | ||
| 468 | |||
| 469 | /* Enable the channel. */ | ||
| 470 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), | ||
| 471 | JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE); | ||
| 472 | } | ||
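To make the unlinking workaround above concrete, here is a sketched timeline (illustrative, not driver output) for a 3-period cyclic transfer with a callback; each hwdesc is 32 bytes:

/* desc[0..2] have LINK cleared, so hardware stops after each one:
 *   IRQ: vchan_cyclic_callback(), begin() -> curr_hwdesc = 1, DDA = base + 32
 *   IRQ: vchan_cyclic_callback(), begin() -> curr_hwdesc = 2, DDA = base + 64
 *   IRQ: vchan_cyclic_callback(), begin() -> curr_hwdesc = 0, DDA = base
 * ...and so on, until the transfer is terminated.
 */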
| 473 | |||
| 474 | static void jz4780_dma_issue_pending(struct dma_chan *chan) | ||
| 475 | { | ||
| 476 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 477 | unsigned long flags; | ||
| 478 | |||
| 479 | spin_lock_irqsave(&jzchan->vchan.lock, flags); | ||
| 480 | |||
| 481 | if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc) | ||
| 482 | jz4780_dma_begin(jzchan); | ||
| 483 | |||
| 484 | spin_unlock_irqrestore(&jzchan->vchan.lock, flags); | ||
| 485 | } | ||
| 486 | |||
| 487 | static int jz4780_dma_terminate_all(struct dma_chan *chan) | ||
| 488 | { | ||
| | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 489 | struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); | ||
| 490 | unsigned long flags; | ||
| 491 | LIST_HEAD(head); | ||
| 492 | |||
| 493 | spin_lock_irqsave(&jzchan->vchan.lock, flags); | ||
| 494 | |||
| 495 | /* Clear the DMA status and stop the transfer. */ | ||
| 496 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0); | ||
| 497 | if (jzchan->desc) { | ||
| 498 | jz4780_dma_desc_free(&jzchan->desc->vdesc); | ||
| 499 | jzchan->desc = NULL; | ||
| 500 | } | ||
| 501 | |||
| 502 | vchan_get_all_descriptors(&jzchan->vchan, &head); | ||
| 503 | |||
| 504 | spin_unlock_irqrestore(&jzchan->vchan.lock, flags); | ||
| 505 | |||
| 506 | vchan_dma_desc_free_list(&jzchan->vchan, &head); | ||
| 507 | return 0; | ||
| 508 | } | ||
| 509 | |||
| 510 | static int jz4780_dma_slave_config(struct dma_chan *chan, | ||
| 511 | const struct dma_slave_config *config) | ||
| 512 | { | ||
| | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| | |||
| 513 | if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
| 514 | || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)) | ||
| 515 | return -EINVAL; | ||
| 516 | |||
| 517 | /* Copy the rest of the slave configuration; it is used later. */ | ||
| 518 | memcpy(&jzchan->config, config, sizeof(jzchan->config)); | ||
| 519 | |||
| 520 | return 0; | ||
| 521 | } | ||
| 522 | |||
| 523 | static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan, | ||
| 524 | struct jz4780_dma_desc *desc, unsigned int next_sg) | ||
| 525 | { | ||
| 526 | struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); | ||
| 527 | unsigned int residue, count; | ||
| 528 | unsigned int i; | ||
| 529 | |||
| 530 | residue = 0; | ||
| 531 | |||
| 532 | for (i = next_sg; i < desc->count; i++) | ||
| 533 | residue += desc->desc[i].dtc << jzchan->transfer_shift; | ||
| 534 | |||
| 535 | if (next_sg != 0) { | ||
| 536 | count = jz4780_dma_readl(jzdma, | ||
| 537 | JZ_DMA_REG_DTC(jzchan->id)); | ||
| 538 | residue += count << jzchan->transfer_shift; | ||
| 539 | } | ||
| 540 | |||
| 541 | return residue; | ||
| 542 | } | ||
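A worked example of the residue computation above (made-up numbers):

/* transfer_shift = 5 (32-byte blocks), the descriptors from next_sg
 * onwards have dtc counts {2, 4}, and the hardware DTC register reads
 * 1 for the in-flight descriptor:
 *   residue = ((2 + 4) << 5) + (1 << 5) = 192 + 32 = 224 bytes
 */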
| 543 | |||
| 544 | static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan, | ||
| 545 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
| 546 | { | ||
| 547 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 548 | struct virt_dma_desc *vdesc; | ||
| 549 | enum dma_status status; | ||
| 550 | unsigned long flags; | ||
| 551 | |||
| 552 | status = dma_cookie_status(chan, cookie, txstate); | ||
| 553 | if ((status == DMA_COMPLETE) || (txstate == NULL)) | ||
| 554 | return status; | ||
| 555 | |||
| 556 | spin_lock_irqsave(&jzchan->vchan.lock, flags); | ||
| 557 | |||
| 558 | vdesc = vchan_find_desc(&jzchan->vchan, cookie); | ||
| 559 | if (vdesc) { | ||
| 560 | /* On the issued list, so hasn't been processed yet */ | ||
| 561 | txstate->residue = jz4780_dma_desc_residue(jzchan, | ||
| 562 | to_jz4780_dma_desc(vdesc), 0); | ||
| 563 | } else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) { | ||
| 564 | txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc, | ||
| 565 | (jzchan->curr_hwdesc + 1) % jzchan->desc->count); | ||
| 566 | } else { | ||
| 567 | txstate->residue = 0; | ||
| | } | ||
| 568 | |||
| 569 | if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc | ||
| 570 | && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) | ||
| 571 | status = DMA_ERROR; | ||
| 572 | |||
| 573 | spin_unlock_irqrestore(&jzchan->vchan.lock, flags); | ||
| 574 | return status; | ||
| 575 | } | ||
| 576 | |||
| 577 | static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma, | ||
| 578 | struct jz4780_dma_chan *jzchan) | ||
| 579 | { | ||
| 580 | uint32_t dcs; | ||
| 581 | |||
| 582 | spin_lock(&jzchan->vchan.lock); | ||
| 583 | |||
| 584 | dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id)); | ||
| 585 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0); | ||
| 586 | |||
| 587 | if (dcs & JZ_DMA_DCS_AR) { | ||
| 588 | dev_warn(&jzchan->vchan.chan.dev->device, | ||
| 589 | "address error (DCS=0x%x)\n", dcs); | ||
| 590 | } | ||
| 591 | |||
| 592 | if (dcs & JZ_DMA_DCS_HLT) { | ||
| 593 | dev_warn(&jzchan->vchan.chan.dev->device, | ||
| 594 | "channel halt (DCS=0x%x)\n", dcs); | ||
| 595 | } | ||
| 596 | |||
| 597 | if (jzchan->desc) { | ||
| 598 | jzchan->desc->status = dcs; | ||
| 599 | |||
| 600 | if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) { | ||
| 601 | if (jzchan->desc->type == DMA_CYCLIC) { | ||
| 602 | vchan_cyclic_callback(&jzchan->desc->vdesc); | ||
| 603 | } else { | ||
| 604 | vchan_cookie_complete(&jzchan->desc->vdesc); | ||
| 605 | jzchan->desc = NULL; | ||
| 606 | } | ||
| 607 | |||
| 608 | jz4780_dma_begin(jzchan); | ||
| 609 | } | ||
| 610 | } else { | ||
| 611 | dev_err(&jzchan->vchan.chan.dev->device, | ||
| 612 | "channel IRQ with no active transfer\n"); | ||
| 613 | } | ||
| 614 | |||
| 615 | spin_unlock(&jzchan->vchan.lock); | ||
| 616 | } | ||
| 617 | |||
| 618 | static irqreturn_t jz4780_dma_irq_handler(int irq, void *data) | ||
| 619 | { | ||
| 620 | struct jz4780_dma_dev *jzdma = data; | ||
| 621 | uint32_t pending, dmac; | ||
| 622 | int i; | ||
| 623 | |||
| 624 | pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP); | ||
| 625 | |||
| 626 | for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) { | ||
| 627 | if (!(pending & (1<<i))) | ||
| 628 | continue; | ||
| 629 | |||
| 630 | jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]); | ||
| 631 | } | ||
| 632 | |||
| 633 | /* Clear halt and address error status of all channels. */ | ||
| 634 | dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC); | ||
| 635 | dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR); | ||
| 636 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac); | ||
| 637 | |||
| 638 | /* Clear interrupt pending status. */ | ||
| 639 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0); | ||
| 640 | |||
| 641 | return IRQ_HANDLED; | ||
| 642 | } | ||
| 643 | |||
| 644 | static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 645 | { | ||
| 646 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 647 | |||
| 648 | jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device), | ||
| 649 | chan->device->dev, | ||
| 650 | JZ_DMA_DESC_BLOCK_SIZE, | ||
| 651 | PAGE_SIZE, 0); | ||
| 652 | if (!jzchan->desc_pool) { | ||
| 653 | dev_err(&chan->dev->device, | ||
| 654 | "failed to allocate descriptor pool\n"); | ||
| 655 | return -ENOMEM; | ||
| 656 | } | ||
| 657 | |||
| 658 | return 0; | ||
| 659 | } | ||
| 660 | |||
| 661 | static void jz4780_dma_free_chan_resources(struct dma_chan *chan) | ||
| 662 | { | ||
| 663 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 664 | |||
| 665 | vchan_free_chan_resources(&jzchan->vchan); | ||
| 666 | dma_pool_destroy(jzchan->desc_pool); | ||
| 667 | jzchan->desc_pool = NULL; | ||
| 668 | } | ||
| 669 | |||
| 670 | static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param) | ||
| 671 | { | ||
| 672 | struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan); | ||
| 673 | struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan); | ||
| 674 | struct jz4780_dma_data *data = param; | ||
| 675 | |||
| 676 | if (data->channel > -1) { | ||
| 677 | if (data->channel != jzchan->id) | ||
| 678 | return false; | ||
| 679 | } else if (jzdma->chan_reserved & BIT(jzchan->id)) { | ||
| 680 | return false; | ||
| 681 | } | ||
| 682 | |||
| 683 | jzchan->transfer_type = data->transfer_type; | ||
| 684 | |||
| 685 | return true; | ||
| 686 | } | ||
| 687 | |||
| 688 | static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec, | ||
| 689 | struct of_dma *ofdma) | ||
| 690 | { | ||
| 691 | struct jz4780_dma_dev *jzdma = ofdma->of_dma_data; | ||
| 692 | dma_cap_mask_t mask = jzdma->dma_device.cap_mask; | ||
| 693 | struct jz4780_dma_data data; | ||
| 694 | |||
| 695 | if (dma_spec->args_count != 2) | ||
| 696 | return NULL; | ||
| 697 | |||
| 698 | data.transfer_type = dma_spec->args[0]; | ||
| 699 | data.channel = dma_spec->args[1]; | ||
| 700 | |||
| 701 | if (data.channel > -1) { | ||
| 702 | if (data.channel >= JZ_DMA_NR_CHANNELS) { | ||
| 703 | dev_err(jzdma->dma_device.dev, | ||
| 704 | "device requested non-existent channel %u\n", | ||
| 705 | data.channel); | ||
| 706 | return NULL; | ||
| 707 | } | ||
| 708 | |||
| 709 | /* Can only select a channel marked as reserved. */ | ||
| 710 | if (!(jzdma->chan_reserved & BIT(data.channel))) { | ||
| 711 | dev_err(jzdma->dma_device.dev, | ||
| 712 | "device requested unreserved channel %u\n", | ||
| 713 | data.channel); | ||
| 714 | return NULL; | ||
| 715 | } | ||
| 716 | } | ||
| 717 | |||
| 718 | return dma_request_channel(mask, jz4780_dma_filter_fn, &data); | ||
| 719 | } | ||
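For context, a hedged sketch of the consumer side (the "rx" request name and the platform device are hypothetical): clients do not call the filter directly; they go through the generic OF helpers, which land in jz4780_of_dma_xlate() above. The second dmas cell selects a specific channel, or 0xffffffff (read back as -1) for any non-reserved channel.

/* Hypothetical client usage, assuming a dmas/dma-names pair in the
 * client's device tree node:
 */
struct dma_chan *chan = dma_request_slave_channel(&pdev->dev, "rx");
if (!chan)
	dev_warn(&pdev->dev, "no DMA channel, falling back to PIO\n");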
| 720 | |||
| 721 | static int jz4780_dma_probe(struct platform_device *pdev) | ||
| 722 | { | ||
| 723 | struct device *dev = &pdev->dev; | ||
| 724 | struct jz4780_dma_dev *jzdma; | ||
| 725 | struct jz4780_dma_chan *jzchan; | ||
| 726 | struct dma_device *dd; | ||
| 727 | struct resource *res; | ||
| 728 | int i, ret; | ||
| 729 | |||
| 730 | jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL); | ||
| 731 | if (!jzdma) | ||
| 732 | return -ENOMEM; | ||
| 733 | |||
| 734 | platform_set_drvdata(pdev, jzdma); | ||
| 735 | |||
| 736 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 737 | if (!res) { | ||
| 738 | dev_err(dev, "failed to get I/O memory\n"); | ||
| 739 | return -EINVAL; | ||
| 740 | } | ||
| 741 | |||
| 742 | jzdma->base = devm_ioremap_resource(dev, res); | ||
| 743 | if (IS_ERR(jzdma->base)) | ||
| 744 | return PTR_ERR(jzdma->base); | ||
| 745 | |||
| 746 | ret = platform_get_irq(pdev, 0); | ||
| 747 | if (ret < 0) { | ||
| 748 | dev_err(dev, "failed to get IRQ: %d\n", ret); | ||
| 749 | return ret; | ||
| 750 | } | ||
| | jzdma->irq = ret; | ||
| 751 | |||
| 752 | ret = devm_request_irq(dev, jzdma->irq, jz4780_dma_irq_handler, 0, | ||
| 753 | dev_name(dev), jzdma); | ||
| 754 | if (ret) { | ||
| 755 | dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq); | ||
| 756 | return ret; | ||
| 757 | } | ||
| 758 | |||
| 759 | jzdma->clk = devm_clk_get(dev, NULL); | ||
| 760 | if (IS_ERR(jzdma->clk)) { | ||
| 761 | dev_err(dev, "failed to get clock\n"); | ||
| 762 | return PTR_ERR(jzdma->clk); | ||
| 763 | } | ||
| 764 | |||
| 765 | ret = clk_prepare_enable(jzdma->clk); | ||
| | if (ret) { | ||
| | dev_err(dev, "failed to enable clock\n"); | ||
| | return ret; | ||
| | } | ||
| 766 | |||
| 767 | /* The property is optional; if it doesn't exist the value will remain 0. */ | ||
| 768 | of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels", | ||
| 769 | 0, &jzdma->chan_reserved); | ||
| 770 | |||
| 771 | dd = &jzdma->dma_device; | ||
| 772 | |||
| 773 | dma_cap_set(DMA_MEMCPY, dd->cap_mask); | ||
| 774 | dma_cap_set(DMA_SLAVE, dd->cap_mask); | ||
| 775 | dma_cap_set(DMA_CYCLIC, dd->cap_mask); | ||
| 776 | |||
| 777 | dd->dev = dev; | ||
| 778 | dd->copy_align = 2; /* 2^2 = 4 byte alignment */ | ||
| 779 | dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources; | ||
| 780 | dd->device_free_chan_resources = jz4780_dma_free_chan_resources; | ||
| 781 | dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg; | ||
| 782 | dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic; | ||
| 783 | dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy; | ||
| 784 | dd->device_config = jz4780_dma_slave_config; | ||
| 785 | dd->device_terminate_all = jz4780_dma_terminate_all; | ||
| 786 | dd->device_tx_status = jz4780_dma_tx_status; | ||
| 787 | dd->device_issue_pending = jz4780_dma_issue_pending; | ||
| 788 | dd->src_addr_widths = JZ_DMA_BUSWIDTHS; | ||
| 789 | dd->dst_addr_widths = JZ_DMA_BUSWIDTHS; | ||
| 790 | dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
| 791 | dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
| 792 | |||
| 794 | /* | ||
| 795 | * Enable DMA controller, mark all channels as not programmable. | ||
| 796 | * Also set the FMSC bit - it increases MSC performance, so it makes | ||
| 797 | * little sense not to enable it. | ||
| 798 | */ | ||
| 799 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, | ||
| 800 | JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC); | ||
| 801 | jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0); | ||
| 802 | |||
| 803 | INIT_LIST_HEAD(&dd->channels); | ||
| 804 | |||
| 805 | for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) { | ||
| 806 | jzchan = &jzdma->chan[i]; | ||
| 807 | jzchan->id = i; | ||
| 808 | |||
| 809 | vchan_init(&jzchan->vchan, dd); | ||
| 810 | jzchan->vchan.desc_free = jz4780_dma_desc_free; | ||
| 811 | } | ||
| 812 | |||
| 813 | ret = dma_async_device_register(dd); | ||
| 814 | if (ret) { | ||
| 815 | dev_err(dev, "failed to register device\n"); | ||
| 816 | goto err_disable_clk; | ||
| 817 | } | ||
| 818 | |||
| 819 | /* Register with OF DMA helpers. */ | ||
| 820 | ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate, | ||
| 821 | jzdma); | ||
| 822 | if (ret) { | ||
| 823 | dev_err(dev, "failed to register OF DMA controller\n"); | ||
| 824 | goto err_unregister_dev; | ||
| 825 | } | ||
| 826 | |||
| 827 | dev_info(dev, "JZ4780 DMA controller initialised\n"); | ||
| 828 | return 0; | ||
| 829 | |||
| 830 | err_unregister_dev: | ||
| 831 | dma_async_device_unregister(dd); | ||
| 832 | |||
| 833 | err_disable_clk: | ||
| 834 | clk_disable_unprepare(jzdma->clk); | ||
| 835 | return ret; | ||
| 836 | } | ||
| 837 | |||
| 838 | static int jz4780_dma_remove(struct platform_device *pdev) | ||
| 839 | { | ||
| 840 | struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev); | ||
| 841 | |||
| 842 | of_dma_controller_free(pdev->dev.of_node); | ||
| 843 | devm_free_irq(&pdev->dev, jzdma->irq, jzdma); | ||
| 844 | dma_async_device_unregister(&jzdma->dma_device); | ||
| | clk_disable_unprepare(jzdma->clk); | ||
| 845 | return 0; | ||
| 846 | } | ||
| 847 | |||
| 848 | static const struct of_device_id jz4780_dma_dt_match[] = { | ||
| 849 | { .compatible = "ingenic,jz4780-dma", .data = NULL }, | ||
| 850 | {}, | ||
| 851 | }; | ||
| 852 | MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); | ||
| 853 | |||
| 854 | static struct platform_driver jz4780_dma_driver = { | ||
| 855 | .probe = jz4780_dma_probe, | ||
| 856 | .remove = jz4780_dma_remove, | ||
| 857 | .driver = { | ||
| 858 | .name = "jz4780-dma", | ||
| 859 | .of_match_table = of_match_ptr(jz4780_dma_dt_match), | ||
| 860 | }, | ||
| 861 | }; | ||
| 862 | |||
| 863 | static int __init jz4780_dma_init(void) | ||
| 864 | { | ||
| 865 | return platform_driver_register(&jz4780_dma_driver); | ||
| 866 | } | ||
| 867 | subsys_initcall(jz4780_dma_init); | ||
| 868 | |||
| 869 | static void __exit jz4780_dma_exit(void) | ||
| 870 | { | ||
| 871 | platform_driver_unregister(&jz4780_dma_driver); | ||
| 872 | } | ||
| 873 | module_exit(jz4780_dma_exit); | ||
| 874 | |||
| 875 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); | ||
| 876 | MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver"); | ||
| 877 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index ac336a961dea..0e035a8cf401 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | 14 | * The full GNU General Public License is included in this distribution in the |
| 19 | * file called COPYING. | 15 | * file called COPYING. |
| 20 | */ | 16 | */ |
| @@ -355,20 +351,6 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | |||
| 355 | } | 351 | } |
| 356 | EXPORT_SYMBOL(dma_find_channel); | 352 | EXPORT_SYMBOL(dma_find_channel); |
| 357 | 353 | ||
| 358 | /* | ||
| 359 | * net_dma_find_channel - find a channel for net_dma | ||
| 360 | * net_dma has alignment requirements | ||
| 361 | */ | ||
| 362 | struct dma_chan *net_dma_find_channel(void) | ||
| 363 | { | ||
| 364 | struct dma_chan *chan = dma_find_channel(DMA_MEMCPY); | ||
| 365 | if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1)) | ||
| 366 | return NULL; | ||
| 367 | |||
| 368 | return chan; | ||
| 369 | } | ||
| 370 | EXPORT_SYMBOL(net_dma_find_channel); | ||
| 371 | |||
| 372 | /** | 354 | /** |
| 373 | * dma_issue_pending_all - flush all pending operations across all channels | 355 | * dma_issue_pending_all - flush all pending operations across all channels |
| 374 | */ | 356 | */ |
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig index dcfe964cc8dc..36e02f0f645e 100644 --- a/drivers/dma/dw/Kconfig +++ b/drivers/dma/dw/Kconfig | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | # | 3 | # |
| 4 | 4 | ||
| 5 | config DW_DMAC_CORE | 5 | config DW_DMAC_CORE |
| 6 | tristate "Synopsys DesignWare AHB DMA support" | 6 | tristate |
| 7 | select DMA_ENGINE | 7 | select DMA_ENGINE |
| 8 | 8 | ||
| 9 | config DW_DMAC | 9 | config DW_DMAC |
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index a8ad05291b27..1022c2e1a2b0 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c | |||
| @@ -230,7 +230,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | |||
| 230 | /* ASSERT: channel is idle */ | 230 | /* ASSERT: channel is idle */ |
| 231 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 231 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
| 232 | dev_err(chan2dev(&dwc->chan), | 232 | dev_err(chan2dev(&dwc->chan), |
| 233 | "BUG: Attempted to start non-idle channel\n"); | 233 | "%s: BUG: Attempted to start non-idle channel\n", |
| 234 | __func__); | ||
| 234 | dwc_dump_chan_regs(dwc); | 235 | dwc_dump_chan_regs(dwc); |
| 235 | 236 | ||
| 236 | /* The tasklet will hopefully advance the queue... */ | 237 | /* The tasklet will hopefully advance the queue... */ |
| @@ -814,11 +815,8 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 814 | 815 | ||
| 815 | slave_sg_todev_fill_desc: | 816 | slave_sg_todev_fill_desc: |
| 816 | desc = dwc_desc_get(dwc); | 817 | desc = dwc_desc_get(dwc); |
| 817 | if (!desc) { | 818 | if (!desc) |
| 818 | dev_err(chan2dev(chan), | ||
| 819 | "not enough descriptors available\n"); | ||
| 820 | goto err_desc_get; | 819 | goto err_desc_get; |
| 821 | } | ||
| 822 | 820 | ||
| 823 | desc->lli.sar = mem; | 821 | desc->lli.sar = mem; |
| 824 | desc->lli.dar = reg; | 822 | desc->lli.dar = reg; |
| @@ -874,11 +872,8 @@ slave_sg_todev_fill_desc: | |||
| 874 | 872 | ||
| 875 | slave_sg_fromdev_fill_desc: | 873 | slave_sg_fromdev_fill_desc: |
| 876 | desc = dwc_desc_get(dwc); | 874 | desc = dwc_desc_get(dwc); |
| 877 | if (!desc) { | 875 | if (!desc) |
| 878 | dev_err(chan2dev(chan), | ||
| 879 | "not enough descriptors available\n"); | ||
| 880 | goto err_desc_get; | 876 | goto err_desc_get; |
| 881 | } | ||
| 882 | 877 | ||
| 883 | desc->lli.sar = reg; | 878 | desc->lli.sar = reg; |
| 884 | desc->lli.dar = mem; | 879 | desc->lli.dar = mem; |
| @@ -922,6 +917,8 @@ slave_sg_fromdev_fill_desc: | |||
| 922 | return &first->txd; | 917 | return &first->txd; |
| 923 | 918 | ||
| 924 | err_desc_get: | 919 | err_desc_get: |
| 920 | dev_err(chan2dev(chan), | ||
| 921 | "not enough descriptors available. Direction %d\n", direction); | ||
| 925 | dwc_desc_put(dwc, first); | 922 | dwc_desc_put(dwc, first); |
| 926 | return NULL; | 923 | return NULL; |
| 927 | } | 924 | } |
| @@ -1261,7 +1258,8 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
| 1261 | /* Assert channel is idle */ | 1258 | /* Assert channel is idle */ |
| 1262 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1259 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
| 1263 | dev_err(chan2dev(&dwc->chan), | 1260 | dev_err(chan2dev(&dwc->chan), |
| 1264 | "BUG: Attempted to start non-idle channel\n"); | 1261 | "%s: BUG: Attempted to start non-idle channel\n", |
| 1262 | __func__); | ||
| 1265 | dwc_dump_chan_regs(dwc); | 1263 | dwc_dump_chan_regs(dwc); |
| 1266 | spin_unlock_irqrestore(&dwc->lock, flags); | 1264 | spin_unlock_irqrestore(&dwc->lock, flags); |
| 1267 | return -EBUSY; | 1265 | return -EBUSY; |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 53dbd3b3384c..bf09db7ca9ee 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -812,7 +812,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) | |||
| 812 | LIST_HEAD(descs); | 812 | LIST_HEAD(descs); |
| 813 | 813 | ||
| 814 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | 814 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, |
| 815 | chan, EVENTQ_DEFAULT); | 815 | echan, EVENTQ_DEFAULT); |
| 816 | 816 | ||
| 817 | if (a_ch_num < 0) { | 817 | if (a_ch_num < 0) { |
| 818 | ret = -ENODEV; | 818 | ret = -ENODEV; |
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c new file mode 100644 index 000000000000..4d9470f16552 --- /dev/null +++ b/drivers/dma/fsl_raid.c | |||
| @@ -0,0 +1,904 @@ | |||
| 1 | /* | ||
| 2 | * drivers/dma/fsl_raid.c | ||
| 3 | * | ||
| 4 | * Freescale RAID Engine device driver | ||
| 5 | * | ||
| 6 | * Author: | ||
| 7 | * Harninder Rai <harninder.rai@freescale.com> | ||
| 8 | * Naveen Burmi <naveenburmi@freescale.com> | ||
| 9 | * | ||
| 10 | * Rewrite: | ||
| 11 | * Xuelin Shi <xuelin.shi@freescale.com> | ||
| 12 | * | ||
| 13 | * Copyright (c) 2010-2014 Freescale Semiconductor, Inc. | ||
| 14 | * | ||
| 15 | * Redistribution and use in source and binary forms, with or without | ||
| 16 | * modification, are permitted provided that the following conditions are met: | ||
| 17 | * * Redistributions of source code must retain the above copyright | ||
| 18 | * notice, this list of conditions and the following disclaimer. | ||
| 19 | * * Redistributions in binary form must reproduce the above copyright | ||
| 20 | * notice, this list of conditions and the following disclaimer in the | ||
| 21 | * documentation and/or other materials provided with the distribution. | ||
| 22 | * * Neither the name of Freescale Semiconductor nor the | ||
| 23 | * names of its contributors may be used to endorse or promote products | ||
| 24 | * derived from this software without specific prior written permission. | ||
| 25 | * | ||
| 26 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
| 27 | * GNU General Public License ("GPL") as published by the Free Software | ||
| 28 | * Foundation, either version 2 of that License or (at your option) any | ||
| 29 | * later version. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
| 32 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
| 33 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 34 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
| 35 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
| 36 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 37 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
| 38 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 39 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 40 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | * | ||
| 42 | * Theory of operation: | ||
| 43 | * | ||
| 44 | * General capabilities: | ||
| 45 | * RAID Engine (RE) block is capable of offloading XOR, memcpy and P/Q | ||
| 46 | * calculations required in RAID5 and RAID6 operations. RE driver | ||
| 47 | * registers with Linux's ASYNC layer as dma driver. RE hardware | ||
| 48 | * maintains strict ordering of the requests through chained | ||
| 49 | * command queueing. | ||
| 50 | * | ||
| 51 | * Data flow: | ||
| 52 | * Software RAID layer of Linux (MD layer) maintains RAID partitions, | ||
| 53 | * strips, stripes etc. It sends requests to the underlying ASYNC layer, | ||
| 54 | * which further passes them to the RE driver. The ASYNC layer decides | ||
| 55 | * which request goes to which job ring of the RE hardware. For every | ||
| 56 | * request processed by the RAID Engine, the driver gets an interrupt | ||
| 57 | * unless coalescing is set. The per-job-ring interrupt handler checks | ||
| 58 | * the status register for errors, clears the interrupt and leaves the | ||
| 59 | * post-interrupt processing to a tasklet. | ||
| 60 | */ | ||
| 61 | #include <linux/interrupt.h> | ||
| 62 | #include <linux/module.h> | ||
| 63 | #include <linux/of_irq.h> | ||
| 64 | #include <linux/of_address.h> | ||
| 65 | #include <linux/of_platform.h> | ||
| 66 | #include <linux/dma-mapping.h> | ||
| 67 | #include <linux/dmapool.h> | ||
| 68 | #include <linux/dmaengine.h> | ||
| 69 | #include <linux/io.h> | ||
| 70 | #include <linux/spinlock.h> | ||
| 71 | #include <linux/slab.h> | ||
| 72 | |||
| 73 | #include "dmaengine.h" | ||
| 74 | #include "fsl_raid.h" | ||
| 75 | |||
| 76 | #define FSL_RE_MAX_XOR_SRCS 16 | ||
| 77 | #define FSL_RE_MAX_PQ_SRCS 16 | ||
| 78 | #define FSL_RE_MIN_DESCS 256 | ||
| 79 | #define FSL_RE_MAX_DESCS (4 * FSL_RE_MIN_DESCS) | ||
| 80 | #define FSL_RE_FRAME_FORMAT 0x1 | ||
| 81 | #define FSL_RE_MAX_DATA_LEN (1024*1024) | ||
| 82 | |||
| 83 | #define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx) | ||
| 84 | |||
| 85 | /* Add descriptors into per chan software queue - submit_q */ | ||
| 86 | static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 87 | { | ||
| 88 | struct fsl_re_desc *desc; | ||
| 89 | struct fsl_re_chan *re_chan; | ||
| 90 | dma_cookie_t cookie; | ||
| 91 | unsigned long flags; | ||
| 92 | |||
| 93 | desc = to_fsl_re_dma_desc(tx); | ||
| 94 | re_chan = container_of(tx->chan, struct fsl_re_chan, chan); | ||
| 95 | |||
| 96 | spin_lock_irqsave(&re_chan->desc_lock, flags); | ||
| 97 | cookie = dma_cookie_assign(tx); | ||
| 98 | list_add_tail(&desc->node, &re_chan->submit_q); | ||
| 99 | spin_unlock_irqrestore(&re_chan->desc_lock, flags); | ||
| 100 | |||
| 101 | return cookie; | ||
| 102 | } | ||
| 103 | |||
| 104 | /* Copy descriptor from per chan software queue into hardware job ring */ | ||
| 105 | static void fsl_re_issue_pending(struct dma_chan *chan) | ||
| 106 | { | ||
| 107 | struct fsl_re_chan *re_chan; | ||
| 108 | int avail; | ||
| 109 | struct fsl_re_desc *desc, *_desc; | ||
| 110 | unsigned long flags; | ||
| 111 | |||
| 112 | re_chan = container_of(chan, struct fsl_re_chan, chan); | ||
| 113 | |||
| 114 | spin_lock_irqsave(&re_chan->desc_lock, flags); | ||
| 115 | avail = FSL_RE_SLOT_AVAIL( | ||
| 116 | in_be32(&re_chan->jrregs->inbring_slot_avail)); | ||
| 117 | |||
| 118 | list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) { | ||
| 119 | if (!avail) | ||
| 120 | break; | ||
| 121 | |||
| 122 | list_move_tail(&desc->node, &re_chan->active_q); | ||
| 123 | |||
| 124 | memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count], | ||
| 125 | &desc->hwdesc, sizeof(struct fsl_re_hw_desc)); | ||
| 126 | |||
| 127 | re_chan->inb_count = (re_chan->inb_count + 1) & | ||
| 128 | FSL_RE_RING_SIZE_MASK; | ||
| 129 | out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1)); | ||
| 130 | avail--; | ||
| 131 | } | ||
| 132 | spin_unlock_irqrestore(&re_chan->desc_lock, flags); | ||
| 133 | } | ||
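The ring index arithmetic above relies on the ring size being a power of two; FSL_RE_RING_SIZE_MASK comes from fsl_raid.h (not shown), so the value below is assumed for illustration:

/* With a hypothetical 1024-entry ring, FSL_RE_RING_SIZE_MASK == 0x3ff:
 *   (1022 + 1) & 0x3ff == 1023
 *   (1023 + 1) & 0x3ff == 0      (wraps back to the start)
 */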
| 134 | |||
| 135 | static void fsl_re_desc_done(struct fsl_re_desc *desc) | ||
| 136 | { | ||
| 137 | dma_async_tx_callback callback; | ||
| 138 | void *callback_param; | ||
| 139 | |||
| 140 | dma_cookie_complete(&desc->async_tx); | ||
| 141 | |||
| 142 | callback = desc->async_tx.callback; | ||
| 143 | callback_param = desc->async_tx.callback_param; | ||
| 144 | if (callback) | ||
| 145 | callback(callback_param); | ||
| 146 | |||
| 147 | dma_descriptor_unmap(&desc->async_tx); | ||
| 148 | } | ||
| 149 | |||
| 150 | static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan) | ||
| 151 | { | ||
| 152 | struct fsl_re_desc *desc, *_desc; | ||
| 153 | unsigned long flags; | ||
| 154 | |||
| 155 | spin_lock_irqsave(&re_chan->desc_lock, flags); | ||
| 156 | list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) { | ||
| 157 | if (async_tx_test_ack(&desc->async_tx)) | ||
| 158 | list_move_tail(&desc->node, &re_chan->free_q); | ||
| 159 | } | ||
| 160 | spin_unlock_irqrestore(&re_chan->desc_lock, flags); | ||
| 161 | |||
| 162 | fsl_re_issue_pending(&re_chan->chan); | ||
| 163 | } | ||
| 164 | |||
| 165 | static void fsl_re_dequeue(unsigned long data) | ||
| 166 | { | ||
| 167 | struct fsl_re_chan *re_chan; | ||
| 168 | struct fsl_re_desc *desc, *_desc; | ||
| 169 | struct fsl_re_hw_desc *hwdesc; | ||
| 170 | unsigned long flags; | ||
| 171 | unsigned int count, oub_count; | ||
| 172 | int found; | ||
| 173 | |||
| 174 | re_chan = dev_get_drvdata((struct device *)data); | ||
| 175 | |||
| 176 | fsl_re_cleanup_descs(re_chan); | ||
| 177 | |||
| 178 | spin_lock_irqsave(&re_chan->desc_lock, flags); | ||
| 179 | count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full)); | ||
| 180 | while (count--) { | ||
| 181 | found = 0; | ||
| 182 | hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count]; | ||
| 183 | list_for_each_entry_safe(desc, _desc, &re_chan->active_q, | ||
| 184 | node) { | ||
| 185 | /* compare the hw dma addr to find the completed */ | ||
| 186 | if (desc->hwdesc.lbea32 == hwdesc->lbea32 && | ||
| 187 | desc->hwdesc.addr_low == hwdesc->addr_low) { | ||
| 188 | found = 1; | ||
| 189 | break; | ||
| 190 | } | ||
| 191 | } | ||
| 192 | |||
| 193 | if (found) { | ||
| 194 | fsl_re_desc_done(desc); | ||
| 195 | list_move_tail(&desc->node, &re_chan->ack_q); | ||
| 196 | } else { | ||
| 197 | dev_err(re_chan->dev, | ||
| 198 | "found hwdesc not in sw queue, discard it\n"); | ||
| 199 | } | ||
| 200 | |||
| 201 | oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK; | ||
| 202 | re_chan->oub_count = oub_count; | ||
| 203 | |||
| 204 | out_be32(&re_chan->jrregs->oubring_job_rmvd, | ||
| 205 | FSL_RE_RMVD_JOB(1)); | ||
| 206 | } | ||
| 207 | spin_unlock_irqrestore(&re_chan->desc_lock, flags); | ||
| 208 | } | ||
| 209 | |||
| 210 | /* Per Job Ring interrupt handler */ | ||
| 211 | static irqreturn_t fsl_re_isr(int irq, void *data) | ||
| 212 | { | ||
| 213 | struct fsl_re_chan *re_chan; | ||
| 214 | u32 irqstate, status; | ||
| 215 | |||
| 216 | re_chan = dev_get_drvdata((struct device *)data); | ||
| 217 | |||
| 218 | irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status); | ||
| 219 | if (!irqstate) | ||
| 220 | return IRQ_NONE; | ||
| 221 | |||
| 222 | /* | ||
| 223 | * There's no way for the upper layer (read: the MD layer) to recover | ||
| 224 | * from error conditions except to restart everything. In the long term | ||
| 225 | * we need to do something more than just crash. | ||
| 226 | */ | ||
| 227 | if (irqstate & FSL_RE_ERROR) { | ||
| 228 | status = in_be32(&re_chan->jrregs->jr_status); | ||
| 229 | dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n", | ||
| 230 | irqstate, status); | ||
| 231 | } | ||
| 232 | |||
| 233 | /* Clear interrupt */ | ||
| 234 | out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR); | ||
| 235 | |||
| 236 | tasklet_schedule(&re_chan->irqtask); | ||
| 237 | |||
| 238 | return IRQ_HANDLED; | ||
| 239 | } | ||
| 240 | |||
| 241 | static enum dma_status fsl_re_tx_status(struct dma_chan *chan, | ||
| 242 | dma_cookie_t cookie, | ||
| 243 | struct dma_tx_state *txstate) | ||
| 244 | { | ||
| 245 | return dma_cookie_status(chan, cookie, txstate); | ||
| 246 | } | ||
| 247 | |||
| 248 | static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index, | ||
| 249 | size_t length, dma_addr_t addr, bool final) | ||
| 250 | { | ||
| 251 | u32 efrl = length & FSL_RE_CF_LENGTH_MASK; | ||
| 252 | |||
| 253 | efrl |= final << FSL_RE_CF_FINAL_SHIFT; | ||
| 254 | cf[index].efrl32 = efrl; | ||
| 255 | cf[index].addr_high = upper_32_bits(addr); | ||
| 256 | cf[index].addr_low = lower_32_bits(addr); | ||
| 257 | } | ||
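For illustration, a sketch of reading back a frame built by fill_cfd_frame(); FSL_RE_CF_LENGTH_MASK and FSL_RE_CF_FINAL_SHIFT are defined in fsl_raid.h, which is not part of this hunk, so only the structure of the decode is shown:

/* Hypothetical decode helper, mirroring fill_cfd_frame() above. */
static void fsl_re_cf_decode(const struct fsl_re_cmpnd_frame *cf, u8 index,
			     size_t *length, bool *final, u64 *addr)
{
	u32 efrl = cf[index].efrl32;

	*length = efrl & FSL_RE_CF_LENGTH_MASK;
	*final = (efrl >> FSL_RE_CF_FINAL_SHIFT) & 1;
	*addr = ((u64)cf[index].addr_high << 32) | cf[index].addr_low;
}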
| 258 | |||
| 259 | static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan, | ||
| 260 | struct fsl_re_desc *desc, | ||
| 261 | void *cf, dma_addr_t paddr) | ||
| 262 | { | ||
| 263 | desc->re_chan = re_chan; | ||
| 264 | desc->async_tx.tx_submit = fsl_re_tx_submit; | ||
| 265 | dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); | ||
| 266 | INIT_LIST_HEAD(&desc->node); | ||
| 267 | |||
| 268 | desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT; | ||
| 269 | desc->hwdesc.lbea32 = upper_32_bits(paddr); | ||
| 270 | desc->hwdesc.addr_low = lower_32_bits(paddr); | ||
| 271 | desc->cf_addr = cf; | ||
| 272 | desc->cf_paddr = paddr; | ||
| 273 | |||
| 274 | desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE); | ||
| 275 | desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE; | ||
| 276 | |||
| 277 | return desc; | ||
| 278 | } | ||
| 279 | |||
| 280 | static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan, | ||
| 281 | unsigned long flags) | ||
| 282 | { | ||
| 283 | struct fsl_re_desc *desc = NULL; | ||
| 284 | void *cf; | ||
| 285 | dma_addr_t paddr; | ||
| 286 | unsigned long lock_flag; | ||
| 287 | |||
| 288 | fsl_re_cleanup_descs(re_chan); | ||
| 289 | |||
| 290 | spin_lock_irqsave(&re_chan->desc_lock, lock_flag); | ||
| 291 | if (!list_empty(&re_chan->free_q)) { | ||
| 292 | /* take one desc from free_q */ | ||
| 293 | desc = list_first_entry(&re_chan->free_q, | ||
| 294 | struct fsl_re_desc, node); | ||
| 295 | list_del(&desc->node); | ||
| 296 | |||
| 297 | desc->async_tx.flags = flags; | ||
| 298 | } | ||
| 299 | spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag); | ||
| 300 | |||
| 301 | if (!desc) { | ||
| 302 | desc = kzalloc(sizeof(*desc), GFP_NOWAIT); | ||
| 303 | if (!desc) | ||
| 304 | return NULL; | ||
| 305 | |||
| 306 | cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT, | ||
| 307 | &paddr); | ||
| 308 | if (!cf) { | ||
| 309 | kfree(desc); | ||
| 310 | return NULL; | ||
| 311 | } | ||
| 312 | |||
| 313 | desc = fsl_re_init_desc(re_chan, desc, cf, paddr); | ||
| 314 | desc->async_tx.flags = flags; | ||
| 315 | |||
| 316 | spin_lock_irqsave(&re_chan->desc_lock, lock_flag); | ||
| 317 | re_chan->alloc_count++; | ||
| 318 | spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag); | ||
| 319 | } | ||
| 320 | |||
| 321 | return desc; | ||
| 322 | } | ||
| 323 | |||
| 324 | static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq( | ||
| 325 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
| 326 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
| 327 | unsigned long flags) | ||
| 328 | { | ||
| 329 | struct fsl_re_chan *re_chan; | ||
| 330 | struct fsl_re_desc *desc; | ||
| 331 | struct fsl_re_xor_cdb *xor; | ||
| 332 | struct fsl_re_cmpnd_frame *cf; | ||
| 333 | u32 cdb; | ||
| 334 | unsigned int i, j; | ||
| 335 | unsigned int save_src_cnt = src_cnt; | ||
| 336 | int cont_q = 0; | ||
| 337 | |||
| 338 | re_chan = container_of(chan, struct fsl_re_chan, chan); | ||
| 339 | if (len > FSL_RE_MAX_DATA_LEN) { | ||
| 340 | dev_err(re_chan->dev, "genq tx length %lu, max length %d\n", | ||
| 341 | len, FSL_RE_MAX_DATA_LEN); | ||
| 342 | return NULL; | ||
| 343 | } | ||
| 344 | |||
| 345 | desc = fsl_re_chan_alloc_desc(re_chan, flags); | ||
| 346 | if (!desc) | ||
| 347 | return NULL; | ||
| 348 | |||
| 349 | if (scf && (flags & DMA_PREP_CONTINUE)) { | ||
| 350 | cont_q = 1; | ||
| 351 | src_cnt += 1; | ||
| 352 | } | ||
| 353 | |||
| 354 | /* Filling xor CDB */ | ||
| 355 | cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT; | ||
| 356 | cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT; | ||
| 357 | cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT; | ||
| 358 | cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT; | ||
| 359 | cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT; | ||
| 360 | xor = desc->cdb_addr; | ||
| 361 | xor->cdb32 = cdb; | ||
| 362 | |||
| 363 | if (scf) { | ||
| 364 | /* compute q = src0*coef0^src1*coef1^..., * is GF(8) mult */ | ||
| 365 | for (i = 0; i < save_src_cnt; i++) | ||
| 366 | xor->gfm[i] = scf[i]; | ||
| 367 | if (cont_q) | ||
| 368 | xor->gfm[i++] = 1; | ||
| 369 | } else { | ||
| 370 | /* compute P, that is XOR all srcs */ | ||
| 371 | for (i = 0; i < src_cnt; i++) | ||
| 372 | xor->gfm[i] = 1; | ||
| 373 | } | ||
| 374 | |||
| 375 | /* Filling frame 0 of compound frame descriptor with CDB */ | ||
| 376 | cf = desc->cf_addr; | ||
| 377 | fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0); | ||
| 378 | |||
| 379 | /* Fill CFD's 1st frame with dest buffer */ | ||
| 380 | fill_cfd_frame(cf, 1, len, dest, 0); | ||
| 381 | |||
| 382 | /* Fill CFD's rest of the frames with source buffers */ | ||
| 383 | for (i = 2, j = 0; j < save_src_cnt; i++, j++) | ||
| 384 | fill_cfd_frame(cf, i, len, src[j], 0); | ||
| 385 | |||
| 386 | if (cont_q) | ||
| 387 | fill_cfd_frame(cf, i++, len, dest, 0); | ||
| 388 | |||
| 389 | /* Setting the final bit in the last source buffer frame in CFD */ | ||
| 390 | cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT; | ||
| 391 | |||
| 392 | return &desc->async_tx; | ||
| 393 | } | ||
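The gfm[] bytes above are GF(2^8) coefficients. As background only (the RAID Engine computes this in hardware), a minimal sketch of GF(2^8) multiplication, assuming the usual RAID6 generator polynomial 0x11d:

/* Minimal GF(2^8) multiply, assuming the RAID6 polynomial 0x11d.
 * For illustration only; not part of the driver.
 */
static u8 gf256_mul(u8 a, u8 b)
{
	u8 p = 0;

	while (b) {
		if (b & 1)
			p ^= a;
		/* multiply a by x, reducing modulo x^8+x^4+x^3+x^2+1 */
		a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
		b >>= 1;
	}
	return p;
}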
| 394 | |||
| 395 | /* | ||
| 396 | * Prep function for P parity calculation. In RAID Engine terminology, | ||
| 397 | * XOR calculation is called GenQ and is done through the GenQ command. | ||
| 398 | */ | ||
| 399 | static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor( | ||
| 400 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | ||
| 401 | unsigned int src_cnt, size_t len, unsigned long flags) | ||
| 402 | { | ||
| 403 | /* NULL let genq take all coef as 1 */ | ||
| 404 | return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags); | ||
| 405 | } | ||
| 406 | |||
| 407 | /* | ||
| 408 | * Prep function for P/Q parity calculation. In RAID Engine terminology, | ||
| 409 | * P/Q calculation is called GenQQ and is done through the GenQQ command. | ||
| 410 | */ | ||
| 411 | static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq( | ||
| 412 | struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src, | ||
| 413 | unsigned int src_cnt, const unsigned char *scf, size_t len, | ||
| 414 | unsigned long flags) | ||
| 415 | { | ||
| 416 | struct fsl_re_chan *re_chan; | ||
| 417 | struct fsl_re_desc *desc; | ||
| 418 | struct fsl_re_pq_cdb *pq; | ||
| 419 | struct fsl_re_cmpnd_frame *cf; | ||
| 420 | u32 cdb; | ||
| 421 | u8 *p; | ||
| 422 | int gfmq_len, i, j; | ||
| 423 | unsigned int save_src_cnt = src_cnt; | ||
| 424 | |||
| 425 | re_chan = container_of(chan, struct fsl_re_chan, chan); | ||
| 426 | if (len > FSL_RE_MAX_DATA_LEN) { | ||
| 427 | dev_err(re_chan->dev, "pq tx length is %lu, max length is %d\n", | ||
| 428 | len, FSL_RE_MAX_DATA_LEN); | ||
| 429 | return NULL; | ||
| 430 | } | ||
| 431 | |||
| 432 | /* | ||
| 433 | * RE requires at least 2 sources; if given only one source, we pass | ||
| 434 | * the first source again as the second one. | ||
| 435 | * With only one source, generating P is meaningless, only generate Q. | ||
| 436 | */ | ||
| 437 | if (src_cnt == 1) { | ||
| 438 | struct dma_async_tx_descriptor *tx; | ||
| 439 | dma_addr_t dma_src[2]; | ||
| 440 | unsigned char coef[2]; | ||
| 441 | |||
| 442 | dma_src[0] = *src; | ||
| 443 | coef[0] = *scf; | ||
| 444 | dma_src[1] = *src; | ||
| 445 | coef[1] = 0; | ||
| 446 | tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len, | ||
| 447 | flags); | ||
| 450 | |||
| 451 | return tx; | ||
| 452 | } | ||
| 453 | |||
| 454 | /* | ||
| 455 | * During RAID6 array creation, Linux's MD layer gets P and Q | ||
| 456 | * calculated separately in two steps. But our RAID Engine has | ||
| 457 | * the capability to calculate both P and Q with a single command. | ||
| 458 | * Hence, to merge well with the MD layer, we provide a hook here | ||
| 459 | * and call the fsl_re_prep_dma_genq() function. | ||
| 460 | */ | ||
| 461 | |||
| 462 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
| 463 | return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt, | ||
| 464 | scf, len, flags); | ||
| 465 | |||
| 466 | if (flags & DMA_PREP_CONTINUE) | ||
| 467 | src_cnt += 3; | ||
| 468 | |||
| 469 | desc = fsl_re_chan_alloc_desc(re_chan, flags); | ||
| 470 | if (!desc) | ||
| 471 | return NULL; | ||
| 472 | |||
| 473 | /* Filling GenQQ CDB */ | ||
| 474 | cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT; | ||
| 475 | cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT; | ||
| 476 | cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT; | ||
| 477 | cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT; | ||
| 478 | cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT; | ||
| 479 | |||
| 480 | pq = desc->cdb_addr; | ||
| 481 | pq->cdb32 = cdb; | ||
| 482 | |||
| 483 | p = pq->gfm_q1; | ||
| 484 | /* Init gfm_q1[] */ | ||
| 485 | for (i = 0; i < src_cnt; i++) | ||
| 486 | p[i] = 1; | ||
| 487 | |||
| 488 | /* Align gfm[] to 32bit */ | ||
| 489 | gfmq_len = ALIGN(src_cnt, 4); | ||
| 490 | |||
| 491 | /* Init gfm_q2[] */ | ||
| 492 | p += gfmq_len; | ||
| 493 | for (i = 0; i < src_cnt; i++) | ||
| 494 | p[i] = scf[i]; | ||
| 495 | |||
| 496 | /* Filling frame 0 of compound frame descriptor with CDB */ | ||
| 497 | cf = desc->cf_addr; | ||
| 498 | fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0); | ||
| 499 | |||
| 500 | /* Fill CFD's 1st & 2nd frame with dest buffers */ | ||
| 501 | for (i = 1, j = 0; i < 3; i++, j++) | ||
| 502 | fill_cfd_frame(cf, i, len, dest[j], 0); | ||
| 503 | |||
| 504 | /* Fill CFD's rest of the frames with source buffers */ | ||
| 505 | for (i = 3, j = 0; j < save_src_cnt; i++, j++) | ||
| 506 | fill_cfd_frame(cf, i, len, src[j], 0); | ||
| 507 | |||
| 508 | /* PQ computation continuation */ | ||
| 509 | if (flags & DMA_PREP_CONTINUE) { | ||
| 510 | if (src_cnt - save_src_cnt == 3) { | ||
| 511 | p[save_src_cnt] = 0; | ||
| 512 | p[save_src_cnt + 1] = 0; | ||
| 513 | p[save_src_cnt + 2] = 1; | ||
| 514 | fill_cfd_frame(cf, i++, len, dest[0], 0); | ||
| 515 | fill_cfd_frame(cf, i++, len, dest[1], 0); | ||
| 516 | fill_cfd_frame(cf, i++, len, dest[1], 0); | ||
| 517 | } else { | ||
| 518 | dev_err(re_chan->dev, "PQ tx continuation error!\n"); | ||
| 519 | return NULL; | ||
| 520 | } | ||
| 521 | } | ||
| 522 | |||
| 523 | /* Setting the final bit in the last source buffer frame in CFD */ | ||
| 524 | cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT; | ||
| 525 | |||
| 526 | return &desc->async_tx; | ||
| 527 | } | ||
| 528 | |||
| 529 | /* | ||
| 530 | * Prep function for memcpy. In RAID Engine, memcpy is done through MOVE | ||
| 531 | * command. Logic of this function will need to be modified once multipage | ||
| 532 | * support is added in Linux's MD/ASYNC Layer | ||
| 533 | */ | ||
| 534 | static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy( | ||
| 535 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
| 536 | size_t len, unsigned long flags) | ||
| 537 | { | ||
| 538 | struct fsl_re_chan *re_chan; | ||
| 539 | struct fsl_re_desc *desc; | ||
| 540 | size_t length; | ||
| 541 | struct fsl_re_cmpnd_frame *cf; | ||
| 542 | struct fsl_re_move_cdb *move; | ||
| 543 | u32 cdb; | ||
| 544 | |||
| 545 | re_chan = container_of(chan, struct fsl_re_chan, chan); | ||
| 546 | |||
| 547 | if (len > FSL_RE_MAX_DATA_LEN) { | ||
| 548 | dev_err(re_chan->dev, "cp tx length is %lu, max length is %d\n", | ||
| 549 | len, FSL_RE_MAX_DATA_LEN); | ||
| 550 | return NULL; | ||
| 551 | } | ||
| 552 | |||
| 553 | desc = fsl_re_chan_alloc_desc(re_chan, flags); | ||
| 554 | if (!desc) | ||
| 555 | return NULL; | ||
| 556 | |||
| 557 | /* Filling move CDB */ | ||
| 558 | cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT; | ||
| 559 | cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT; | ||
| 560 | cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT; | ||
| 561 | cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT; | ||
| 562 | |||
| 563 | move = desc->cdb_addr; | ||
| 564 | move->cdb32 = cdb; | ||
| 565 | |||
| 566 | /* Filling frame 0 of CFD with move CDB */ | ||
| 567 | cf = desc->cf_addr; | ||
| 568 | fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0); | ||
| 569 | |||
| 570 | length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN); | ||
| 571 | |||
| 572 | /* Fill CFD's 1st frame with dest buffer */ | ||
| 573 | fill_cfd_frame(cf, 1, length, dest, 0); | ||
| 574 | |||
| 575 | /* Fill CFD's 2nd frame with src buffer */ | ||
| 576 | fill_cfd_frame(cf, 2, length, src, 1); | ||
| 577 | |||
| 578 | return &desc->async_tx; | ||
| 579 | } | ||
| 580 | |||
| 581 | static int fsl_re_alloc_chan_resources(struct dma_chan *chan) | ||
| 582 | { | ||
| 583 | struct fsl_re_chan *re_chan; | ||
| 584 | struct fsl_re_desc *desc; | ||
| 585 | void *cf; | ||
| 586 | dma_addr_t paddr; | ||
| 587 | int i; | ||
| 588 | |||
| 589 | re_chan = container_of(chan, struct fsl_re_chan, chan); | ||
| 590 | for (i = 0; i < FSL_RE_MIN_DESCS; i++) { | ||
| 591 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | ||
| 592 | if (!desc) | ||
| 593 | break; | ||
| 594 | |||
| 595 | cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL, | ||
| 596 | &paddr); | ||
| 597 | if (!cf) { | ||
| 598 | kfree(desc); | ||
| 599 | break; | ||
| 600 | } | ||
| 601 | |||
| 602 | INIT_LIST_HEAD(&desc->node); | ||
| 603 | fsl_re_init_desc(re_chan, desc, cf, paddr); | ||
| 604 | |||
| 605 | list_add_tail(&desc->node, &re_chan->free_q); | ||
| 606 | re_chan->alloc_count++; | ||
| 607 | } | ||
| 608 | return re_chan->alloc_count; | ||
| 609 | } | ||
| 610 | |||
| 611 | static void fsl_re_free_chan_resources(struct dma_chan *chan) | ||
| 612 | { | ||
| 613 | struct fsl_re_chan *re_chan; | ||
| 614 | struct fsl_re_desc *desc; | ||
| 615 | |||
| 616 | re_chan = container_of(chan, struct fsl_re_chan, chan); | ||
| 617 | for (; re_chan->alloc_count; re_chan->alloc_count--) { | ||
| 618 | desc = list_first_entry(&re_chan->free_q, | ||
| 619 | struct fsl_re_desc, | ||
| 620 | node); | ||
| 621 | |||
| 622 | list_del(&desc->node); | ||
| 623 | dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr, | ||
| 624 | desc->cf_paddr); | ||
| 625 | kfree(desc); | ||
| 626 | } | ||
| 627 | |||
| 628 | if (!list_empty(&re_chan->free_q)) | ||
| 629 | dev_err(re_chan->dev, "chan resource cannot be cleaned!\n"); | ||
| 630 | } | ||
| 631 | |||
| 632 | static int fsl_re_chan_probe(struct platform_device *ofdev, | ||
| 633 | struct device_node *np, u8 q, u32 off) | ||
| 634 | { | ||
| 635 | struct device *dev, *chandev; | ||
| 636 | struct fsl_re_drv_private *re_priv; | ||
| 637 | struct fsl_re_chan *chan; | ||
| 638 | struct dma_device *dma_dev; | ||
| 639 | u32 ptr; | ||
| 640 | u32 status; | ||
| 641 | int ret = 0, rc; | ||
| 642 | struct platform_device *chan_ofdev; | ||
| 643 | |||
| 644 | dev = &ofdev->dev; | ||
| 645 | re_priv = dev_get_drvdata(dev); | ||
| 646 | dma_dev = &re_priv->dma_dev; | ||
| 647 | |||
| 648 | chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL); | ||
| 649 | if (!chan) | ||
| 650 | return -ENOMEM; | ||
| 651 | |||
| 652 | /* create platform device for chan node */ | ||
| 653 | chan_ofdev = of_platform_device_create(np, NULL, dev); | ||
| 654 | if (!chan_ofdev) { | ||
| 655 | dev_err(dev, "Unable to create ofdev for jr %d\n", q); | ||
| 656 | ret = -EINVAL; | ||
| 657 | goto err_free; | ||
| 658 | } | ||
| 659 | |||
| 660 | /* read reg property from dts */ | ||
| 661 | rc = of_property_read_u32(np, "reg", &ptr); | ||
| 662 | if (rc) { | ||
| 663 | dev_err(dev, "Reg property not found in jr %d\n", q); | ||
| 664 | ret = -ENODEV; | ||
| 665 | goto err_free; | ||
| 666 | } | ||
| 667 | |||
| 668 | chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs + | ||
| 669 | off + ptr); | ||
| 670 | |||
| 671 | /* read irq property from dts */ | ||
| 672 | chan->irq = irq_of_parse_and_map(np, 0); | ||
| 673 | if (chan->irq == NO_IRQ) { | ||
| 674 | dev_err(dev, "No IRQ defined for JR %d\n", q); | ||
| 675 | ret = -ENODEV; | ||
| 676 | goto err_free; | ||
| 677 | } | ||
| 678 | |||
| 679 | snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q); | ||
| 680 | |||
| 681 | chandev = &chan_ofdev->dev; | ||
| 682 | tasklet_init(&chan->irqtask, fsl_re_dequeue, (unsigned long)chandev); | ||
| 683 | |||
| 684 | ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev); | ||
| 685 | if (ret) { | ||
| 686 | dev_err(dev, "Unable to register interrupt for JR %d\n", q); | ||
| 687 | ret = -EINVAL; | ||
| 688 | goto err_free; | ||
| 689 | } | ||
| 690 | |||
| 691 | re_priv->re_jrs[q] = chan; | ||
| 692 | chan->chan.device = dma_dev; | ||
| 693 | chan->chan.private = chan; | ||
| 694 | chan->dev = chandev; | ||
| 695 | chan->re_dev = re_priv; | ||
| 696 | |||
| 697 | spin_lock_init(&chan->desc_lock); | ||
| 698 | INIT_LIST_HEAD(&chan->ack_q); | ||
| 699 | INIT_LIST_HEAD(&chan->active_q); | ||
| 700 | INIT_LIST_HEAD(&chan->submit_q); | ||
| 701 | INIT_LIST_HEAD(&chan->free_q); | ||
| 702 | |||
| 703 | chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool, | ||
| 704 | GFP_KERNEL, &chan->inb_phys_addr); | ||
| 705 | if (!chan->inb_ring_virt_addr) { | ||
| 706 | dev_err(dev, "No dma memory for inb_ring_virt_addr\n"); | ||
| 707 | ret = -ENOMEM; | ||
| 708 | goto err_free; | ||
| 709 | } | ||
| 710 | |||
| 711 | chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool, | ||
| 712 | GFP_KERNEL, &chan->oub_phys_addr); | ||
| 713 | if (!chan->oub_ring_virt_addr) { | ||
| 714 | dev_err(dev, "No dma memory for oub_ring_virt_addr\n"); | ||
| 715 | ret = -ENOMEM; | ||
| 716 | goto err_free_1; | ||
| 717 | } | ||
| 718 | |||
| 719 | /* Program the Inbound/Outbound ring base addresses and size */ | ||
| 720 | out_be32(&chan->jrregs->inbring_base_h, | ||
| 721 | chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK); | ||
| 722 | out_be32(&chan->jrregs->oubring_base_h, | ||
| 723 | chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK); | ||
| 724 | out_be32(&chan->jrregs->inbring_base_l, | ||
| 725 | chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT); | ||
| 726 | out_be32(&chan->jrregs->oubring_base_l, | ||
| 727 | chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT); | ||
| 728 | out_be32(&chan->jrregs->inbring_size, | ||
| 729 | FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT); | ||
| 730 | out_be32(&chan->jrregs->oubring_size, | ||
| 731 | FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT); | ||
| 732 | |||
| 733 | /* Read the LIODN value programmed by U-Boot */ | ||
| 734 | status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK; | ||
| 735 | |||
| 736 | /* Program the CFG reg */ | ||
| 737 | out_be32(&chan->jrregs->jr_config_1, | ||
| 738 | FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status); | ||
| 739 | |||
| 740 | dev_set_drvdata(chandev, chan); | ||
| 741 | |||
| 742 | /* Enable RE/CHAN */ | ||
| 743 | out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE); | ||
| 744 | |||
| 745 | return 0; | ||
| 746 | |||
| 747 | err_free_1: | ||
| 748 | dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, | ||
| 749 | chan->inb_phys_addr); | ||
| 750 | err_free: | ||
| 751 | return ret; | ||
| 752 | } | ||
| 753 | |||
| 754 | /* Probe function for RAID Engine */ | ||
| 755 | static int fsl_re_probe(struct platform_device *ofdev) | ||
| 756 | { | ||
| 757 | struct fsl_re_drv_private *re_priv; | ||
| 758 | struct device_node *np; | ||
| 759 | struct device_node *child; | ||
| 760 | u32 off; | ||
| 761 | u8 ridx = 0; | ||
| 762 | struct dma_device *dma_dev; | ||
| 763 | struct resource *res; | ||
| 764 | int rc; | ||
| 765 | struct device *dev = &ofdev->dev; | ||
| 766 | |||
| 767 | re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL); | ||
| 768 | if (!re_priv) | ||
| 769 | return -ENOMEM; | ||
| 770 | |||
| 771 | res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); | ||
| 772 | if (!res) | ||
| 773 | return -ENODEV; | ||
| 774 | |||
| 775 | /* IOMAP the entire RAID Engine region */ | ||
| 776 | re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res)); | ||
| 777 | if (!re_priv->re_regs) | ||
| 778 | return -EBUSY; | ||
| 779 | |||
| 780 | /* Program the RE mode */ | ||
| 781 | out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE); | ||
| 782 | |||
| 783 | /* Program Galois Field polynomial */ | ||
| 784 | out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY); | ||
| 785 | |||
| 786 | dev_info(dev, "version %x, mode %x, gfp %x\n", | ||
| 787 | in_be32(&re_priv->re_regs->re_version_id), | ||
| 788 | in_be32(&re_priv->re_regs->global_config), | ||
| 789 | in_be32(&re_priv->re_regs->galois_field_config)); | ||
| 790 | |||
| 791 | dma_dev = &re_priv->dma_dev; | ||
| 792 | dma_dev->dev = dev; | ||
| 793 | INIT_LIST_HEAD(&dma_dev->channels); | ||
| 794 | dma_set_mask(dev, DMA_BIT_MASK(40)); | ||
| 795 | |||
| 796 | dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources; | ||
| 797 | dma_dev->device_tx_status = fsl_re_tx_status; | ||
| 798 | dma_dev->device_issue_pending = fsl_re_issue_pending; | ||
| 799 | |||
| 800 | dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS; | ||
| 801 | dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor; | ||
| 802 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | ||
| 803 | |||
| 804 | dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS; | ||
| 805 | dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq; | ||
| 806 | dma_cap_set(DMA_PQ, dma_dev->cap_mask); | ||
| 807 | |||
| 808 | dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy; | ||
| 809 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
| 810 | |||
| 811 | dma_dev->device_free_chan_resources = fsl_re_free_chan_resources; | ||
| 812 | |||
| 813 | re_priv->total_chans = 0; | ||
| 814 | |||
| 815 | re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev, | ||
| 816 | FSL_RE_CF_CDB_SIZE, | ||
| 817 | FSL_RE_CF_CDB_ALIGN, 0); | ||
| 818 | |||
| 819 | if (!re_priv->cf_desc_pool) { | ||
| 820 | dev_err(dev, "No memory for fsl re_cf desc pool\n"); | ||
| 821 | return -ENOMEM; | ||
| 822 | } | ||
| 823 | |||
| 824 | re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev, | ||
| 825 | sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE, | ||
| 826 | FSL_RE_FRAME_ALIGN, 0); | ||
| 827 | if (!re_priv->hw_desc_pool) { | ||
| 828 | dev_err(dev, "No memory for fsl re_hw desc pool\n"); | ||
| 829 | return -ENOMEM; | ||
| 830 | } | ||
| 831 | |||
| 832 | dev_set_drvdata(dev, re_priv); | ||
| 833 | |||
| 834 | /* Parse Device tree to find out the total number of JQs present */ | ||
| 835 | for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") { | ||
| 836 | rc = of_property_read_u32(np, "reg", &off); | ||
| 837 | if (rc) { | ||
| 838 | dev_err(dev, "Reg property not found in JQ node\n"); | ||
| 839 | return -ENODEV; | ||
| 840 | } | ||
| 841 | /* Find out the Job Rings present under each JQ */ | ||
| 842 | for_each_child_of_node(np, child) { | ||
| 843 | rc = of_device_is_compatible(child, | ||
| 844 | "fsl,raideng-v1.0-job-ring"); | ||
| 845 | if (rc) { | ||
| 846 | fsl_re_chan_probe(ofdev, child, ridx++, off); | ||
| 847 | re_priv->total_chans++; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | } | ||
| 851 | |||
| 852 | dma_async_device_register(dma_dev); | ||
| 853 | |||
| 854 | return 0; | ||
| 855 | } | ||
| 856 | |||
| 857 | static void fsl_re_remove_chan(struct fsl_re_chan *chan) | ||
| 858 | { | ||
| 859 | dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr, | ||
| 860 | chan->inb_phys_addr); | ||
| 861 | |||
| 862 | dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr, | ||
| 863 | chan->oub_phys_addr); | ||
| 864 | } | ||
| 865 | |||
| 866 | static int fsl_re_remove(struct platform_device *ofdev) | ||
| 867 | { | ||
| 868 | struct fsl_re_drv_private *re_priv; | ||
| 869 | struct device *dev; | ||
| 870 | int i; | ||
| 871 | |||
| 872 | dev = &ofdev->dev; | ||
| 873 | re_priv = dev_get_drvdata(dev); | ||
| 874 | |||
| 875 | /* Cleanup chan related memory areas */ | ||
| 876 | for (i = 0; i < re_priv->total_chans; i++) | ||
| 877 | fsl_re_remove_chan(re_priv->re_jrs[i]); | ||
| 878 | |||
| 879 | /* Unregister the driver */ | ||
| 880 | dma_async_device_unregister(&re_priv->dma_dev); | ||
| 881 | |||
| 882 | return 0; | ||
| 883 | } | ||
| 884 | |||
| 885 | static const struct of_device_id fsl_re_ids[] = { | ||
| 886 | { .compatible = "fsl,raideng-v1.0", }, | ||
| 887 | {} | ||
| 888 | }; | ||
| 889 | |||
| 890 | static struct platform_driver fsl_re_driver = { | ||
| 891 | .driver = { | ||
| 892 | .name = "fsl-raideng", | ||
| 893 | .owner = THIS_MODULE, | ||
| 894 | .of_match_table = fsl_re_ids, | ||
| 895 | }, | ||
| 896 | .probe = fsl_re_probe, | ||
| 897 | .remove = fsl_re_remove, | ||
| 898 | }; | ||
| 899 | |||
| 900 | module_platform_driver(fsl_re_driver); | ||
| 901 | |||
| 902 | MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>"); | ||
| 903 | MODULE_LICENSE("GPL v2"); | ||
| 904 | MODULE_DESCRIPTION("Freescale RAID Engine Device Driver"); | ||
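Note: with DMA_PQ, DMA_XOR and DMA_MEMCPY set in cap_mask, the channels registered above are driven through the standard dmaengine/async_tx API. A minimal sketch of a raw client exercising the PQ hook follows; the dest/src/scf buffers are illustrative and assumed to be DMA-mapped already, and in practice the MD layer reaches this path via async_gen_syndrome() rather than calling the hook directly.

	/* minimal sketch: dest[2], src[src_cnt] and scf[src_cnt] are
	 * illustrative, already DMA-mapped buffers */
	struct dma_chan *chan = dma_find_channel(DMA_PQ);
	struct dma_async_tx_descriptor *tx;

	if (chan) {
		tx = chan->device->device_prep_dma_pq(chan, dest, src,
						      src_cnt, scf, len,
						      DMA_PREP_INTERRUPT);
		if (tx) {
			dmaengine_submit(tx);
			dma_async_issue_pending(chan);
		}
	}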
diff --git a/drivers/dma/fsl_raid.h b/drivers/dma/fsl_raid.h new file mode 100644 index 000000000000..69d743c04973 --- /dev/null +++ b/drivers/dma/fsl_raid.h | |||
| @@ -0,0 +1,306 @@ | |||
| 1 | /* | ||
| 2 | * drivers/dma/fsl_raid.h | ||
| 3 | * | ||
| 4 | * Freescale RAID Engine device driver | ||
| 5 | * | ||
| 6 | * Author: | ||
| 7 | * Harninder Rai <harninder.rai@freescale.com> | ||
| 8 | * Naveen Burmi <naveenburmi@freescale.com> | ||
| 9 | * | ||
| 10 | * Rewrite: | ||
| 11 | * Xuelin Shi <xuelin.shi@freescale.com> | ||
| 12 | |||
| 13 | * Copyright (c) 2010-2012 Freescale Semiconductor, Inc. | ||
| 14 | * | ||
| 15 | * Redistribution and use in source and binary forms, with or without | ||
| 16 | * modification, are permitted provided that the following conditions are met: | ||
| 17 | * * Redistributions of source code must retain the above copyright | ||
| 18 | * notice, this list of conditions and the following disclaimer. | ||
| 19 | * * Redistributions in binary form must reproduce the above copyright | ||
| 20 | * notice, this list of conditions and the following disclaimer in the | ||
| 21 | * documentation and/or other materials provided with the distribution. | ||
| 22 | * * Neither the name of Freescale Semiconductor nor the | ||
| 23 | * names of its contributors may be used to endorse or promote products | ||
| 24 | * derived from this software without specific prior written permission. | ||
| 25 | * | ||
| 26 | * ALTERNATIVELY, this software may be distributed under the terms of the | ||
| 27 | * GNU General Public License ("GPL") as published by the Free Software | ||
| 28 | * Foundation, either version 2 of that License or (at your option) any | ||
| 29 | * later version. | ||
| 30 | * | ||
| 31 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY | ||
| 32 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | ||
| 33 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
| 34 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY | ||
| 35 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | ||
| 36 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | ||
| 37 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | ||
| 38 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
| 39 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
| 40 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
| 41 | * | ||
| 42 | */ | ||
| 43 | |||
| 44 | #define FSL_RE_MAX_CHANS 4 | ||
| 45 | #define FSL_RE_DPAA_MODE BIT(30) | ||
| 46 | #define FSL_RE_NON_DPAA_MODE BIT(31) | ||
| 47 | #define FSL_RE_GFM_POLY 0x1d000000 | ||
| 48 | #define FSL_RE_ADD_JOB(x) ((x) << 16) | ||
| 49 | #define FSL_RE_RMVD_JOB(x) ((x) << 16) | ||
| 50 | #define FSL_RE_CFG1_CBSI 0x08000000 | ||
| 51 | #define FSL_RE_CFG1_CBS0 0x00080000 | ||
| 52 | #define FSL_RE_SLOT_FULL_SHIFT 8 | ||
| 53 | #define FSL_RE_SLOT_FULL(x) ((x) >> FSL_RE_SLOT_FULL_SHIFT) | ||
| 54 | #define FSL_RE_SLOT_AVAIL_SHIFT 8 | ||
| 55 | #define FSL_RE_SLOT_AVAIL(x) ((x) >> FSL_RE_SLOT_AVAIL_SHIFT) | ||
| 56 | #define FSL_RE_PQ_OPCODE 0x1B | ||
| 57 | #define FSL_RE_XOR_OPCODE 0x1A | ||
| 58 | #define FSL_RE_MOVE_OPCODE 0x8 | ||
| 59 | #define FSL_RE_FRAME_ALIGN 16 | ||
| 60 | #define FSL_RE_BLOCK_SIZE 0x3 /* 4096 bytes */ | ||
| 61 | #define FSL_RE_CACHEABLE_IO 0x0 | ||
| 62 | #define FSL_RE_BUFFER_OUTPUT 0x0 | ||
| 63 | #define FSL_RE_INTR_ON_ERROR 0x1 | ||
| 64 | #define FSL_RE_DATA_DEP 0x1 | ||
| 65 | #define FSL_RE_ENABLE_DPI 0x0 | ||
| 66 | #define FSL_RE_RING_SIZE 0x400 | ||
| 67 | #define FSL_RE_RING_SIZE_MASK (FSL_RE_RING_SIZE - 1) | ||
| 68 | #define FSL_RE_RING_SIZE_SHIFT 8 | ||
| 69 | #define FSL_RE_ADDR_BIT_SHIFT 4 | ||
| 70 | #define FSL_RE_ADDR_BIT_MASK (BIT(FSL_RE_ADDR_BIT_SHIFT) - 1) | ||
| 71 | #define FSL_RE_ERROR 0x40000000 | ||
| 72 | #define FSL_RE_INTR 0x80000000 | ||
| 73 | #define FSL_RE_CLR_INTR 0x80000000 | ||
| 74 | #define FSL_RE_PAUSE 0x80000000 | ||
| 75 | #define FSL_RE_ENABLE 0x80000000 | ||
| 76 | #define FSL_RE_REG_LIODN_MASK 0x00000FFF | ||
| 77 | |||
| 78 | #define FSL_RE_CDB_OPCODE_MASK 0xF8000000 | ||
| 79 | #define FSL_RE_CDB_OPCODE_SHIFT 27 | ||
| 80 | #define FSL_RE_CDB_EXCLEN_MASK 0x03000000 | ||
| 81 | #define FSL_RE_CDB_EXCLEN_SHIFT 24 | ||
| 82 | #define FSL_RE_CDB_EXCLQ1_MASK 0x00F00000 | ||
| 83 | #define FSL_RE_CDB_EXCLQ1_SHIFT 20 | ||
| 84 | #define FSL_RE_CDB_EXCLQ2_MASK 0x000F0000 | ||
| 85 | #define FSL_RE_CDB_EXCLQ2_SHIFT 16 | ||
| 86 | #define FSL_RE_CDB_BLKSIZE_MASK 0x0000C000 | ||
| 87 | #define FSL_RE_CDB_BLKSIZE_SHIFT 14 | ||
| 88 | #define FSL_RE_CDB_CACHE_MASK 0x00003000 | ||
| 89 | #define FSL_RE_CDB_CACHE_SHIFT 12 | ||
| 90 | #define FSL_RE_CDB_BUFFER_MASK 0x00000800 | ||
| 91 | #define FSL_RE_CDB_BUFFER_SHIFT 11 | ||
| 92 | #define FSL_RE_CDB_ERROR_MASK 0x00000400 | ||
| 93 | #define FSL_RE_CDB_ERROR_SHIFT 10 | ||
| 94 | #define FSL_RE_CDB_NRCS_MASK 0x0000003C | ||
| 95 | #define FSL_RE_CDB_NRCS_SHIFT 6 | ||
| 96 | #define FSL_RE_CDB_DEPEND_MASK 0x00000008 | ||
| 97 | #define FSL_RE_CDB_DEPEND_SHIFT 3 | ||
| 98 | #define FSL_RE_CDB_DPI_MASK 0x00000004 | ||
| 99 | #define FSL_RE_CDB_DPI_SHIFT 2 | ||
| 100 | |||
| 101 | /* | ||
| 102 | * The largest CF block is 19 * sizeof(struct cmpnd_frame) = 304 bytes, | ||
| 103 | * where 19 = 1 (CDB) + 2 (dest) + 16 (src); aligned to 64 bytes, 320 bytes. | ||
| 104 | * The largest CDB block is struct pq_cdb at 180 bytes; added to the CF | ||
| 105 | * block, 320 + 180 = 500 bytes, which aligns up to 64 bytes as 512 bytes. | ||
| 106 | */ | ||
| 107 | #define FSL_RE_CF_DESC_SIZE 320 | ||
| 108 | #define FSL_RE_CF_CDB_SIZE 512 | ||
| 109 | #define FSL_RE_CF_CDB_ALIGN 64 | ||
| 110 | |||
| 111 | struct fsl_re_ctrl { | ||
| 112 | /* General Configuration Registers */ | ||
| 113 | __be32 global_config; /* Global Configuration Register */ | ||
| 114 | u8 rsvd1[4]; | ||
| 115 | __be32 galois_field_config; /* Galois Field Configuration Register */ | ||
| 116 | u8 rsvd2[4]; | ||
| 117 | __be32 jq_wrr_config; /* WRR Configuration register */ | ||
| 118 | u8 rsvd3[4]; | ||
| 119 | __be32 crc_config; /* CRC Configuration register */ | ||
| 120 | u8 rsvd4[228]; | ||
| 121 | __be32 system_reset; /* System Reset Register */ | ||
| 122 | u8 rsvd5[252]; | ||
| 123 | __be32 global_status; /* Global Status Register */ | ||
| 124 | u8 rsvd6[832]; | ||
| 125 | __be32 re_liodn_base; /* LIODN Base Register */ | ||
| 126 | u8 rsvd7[1712]; | ||
| 127 | __be32 re_version_id; /* Version ID register of RE */ | ||
| 128 | __be32 re_version_id_2; /* Version ID 2 register of RE */ | ||
| 129 | u8 rsvd8[512]; | ||
| 130 | __be32 host_config; /* Host I/F Configuration Register */ | ||
| 131 | }; | ||
| 132 | |||
| 133 | struct fsl_re_chan_cfg { | ||
| 134 | /* Registers for JR interface */ | ||
| 135 | __be32 jr_config_0; /* Job Queue Configuration 0 Register */ | ||
| 136 | __be32 jr_config_1; /* Job Queue Configuration 1 Register */ | ||
| 137 | __be32 jr_interrupt_status; /* Job Queue Interrupt Status Register */ | ||
| 138 | u8 rsvd1[4]; | ||
| 139 | __be32 jr_command; /* Job Queue Command Register */ | ||
| 140 | u8 rsvd2[4]; | ||
| 141 | __be32 jr_status; /* Job Queue Status Register */ | ||
| 142 | u8 rsvd3[228]; | ||
| 143 | |||
| 144 | /* Input Ring */ | ||
| 145 | __be32 inbring_base_h; /* Inbound Ring Base Address Register - High */ | ||
| 146 | __be32 inbring_base_l; /* Inbound Ring Base Address Register - Low */ | ||
| 147 | __be32 inbring_size; /* Inbound Ring Size Register */ | ||
| 148 | u8 rsvd4[4]; | ||
| 149 | __be32 inbring_slot_avail; /* Inbound Ring Slot Available Register */ | ||
| 150 | u8 rsvd5[4]; | ||
| 151 | __be32 inbring_add_job; /* Inbound Ring Add Job Register */ | ||
| 152 | u8 rsvd6[4]; | ||
| 153 | __be32 inbring_cnsmr_indx; /* Inbound Ring Consumer Index Register */ | ||
| 154 | u8 rsvd7[220]; | ||
| 155 | |||
| 156 | /* Output Ring */ | ||
| 157 | __be32 oubring_base_h; /* Outbound Ring Base Address Register - High */ | ||
| 158 | __be32 oubring_base_l; /* Outbound Ring Base Address Register - Low */ | ||
| 159 | __be32 oubring_size; /* Outbound Ring Size Register */ | ||
| 160 | u8 rsvd8[4]; | ||
| 161 | __be32 oubring_job_rmvd; /* Outbound Ring Job Removed Register */ | ||
| 162 | u8 rsvd9[4]; | ||
| 163 | __be32 oubring_slot_full; /* Outbound Ring Slot Full Register */ | ||
| 164 | u8 rsvd10[4]; | ||
| 165 | __be32 oubring_prdcr_indx; /* Outbound Ring Producer Index */ | ||
| 166 | }; | ||
| 167 | |||
| 168 | /* | ||
| 169 | * Command Descriptor Block (CDB) for the unicast MOVE command. | ||
| 170 | * In RAID Engine terms, memcpy is done through the MOVE command | ||
| 171 | */ | ||
| 172 | struct fsl_re_move_cdb { | ||
| 173 | __be32 cdb32; | ||
| 174 | }; | ||
| 175 | |||
| 176 | /* Data protection/integrity related fields */ | ||
| 177 | #define FSL_RE_DPI_APPS_MASK 0xC0000000 | ||
| 178 | #define FSL_RE_DPI_APPS_SHIFT 30 | ||
| 179 | #define FSL_RE_DPI_REF_MASK 0x30000000 | ||
| 180 | #define FSL_RE_DPI_REF_SHIFT 28 | ||
| 181 | #define FSL_RE_DPI_GUARD_MASK 0x0C000000 | ||
| 182 | #define FSL_RE_DPI_GUARD_SHIFT 26 | ||
| 183 | #define FSL_RE_DPI_ATTR_MASK 0x03000000 | ||
| 184 | #define FSL_RE_DPI_ATTR_SHIFT 24 | ||
| 185 | #define FSL_RE_DPI_META_MASK 0x0000FFFF | ||
| 186 | |||
| 187 | struct fsl_re_dpi { | ||
| 188 | __be32 dpi32; | ||
| 189 | __be32 ref; | ||
| 190 | }; | ||
| 191 | |||
| 192 | /* | ||
| 193 | * CDB for the GenQ command. In RAID Engine terminology, XOR is | ||
| 194 | * done through this command | ||
| 195 | */ | ||
| 196 | struct fsl_re_xor_cdb { | ||
| 197 | __be32 cdb32; | ||
| 198 | u8 gfm[16]; | ||
| 199 | struct fsl_re_dpi dpi_dest_spec; | ||
| 200 | struct fsl_re_dpi dpi_src_spec[16]; | ||
| 201 | }; | ||
| 202 | |||
| 203 | /* CDB for no-op command */ | ||
| 204 | struct fsl_re_noop_cdb { | ||
| 205 | __be32 cdb32; | ||
| 206 | }; | ||
| 207 | |||
| 208 | /* | ||
| 209 | * CDB for the GenQQ command. In RAID Engine terminology, P/Q is | ||
| 210 | * done through this command | ||
| 211 | */ | ||
| 212 | struct fsl_re_pq_cdb { | ||
| 213 | __be32 cdb32; | ||
| 214 | u8 gfm_q1[16]; | ||
| 215 | u8 gfm_q2[16]; | ||
| 216 | struct fsl_re_dpi dpi_dest_spec[2]; | ||
| 217 | struct fsl_re_dpi dpi_src_spec[16]; | ||
| 218 | }; | ||
| 219 | |||
| 220 | /* Compound frame */ | ||
| 221 | #define FSL_RE_CF_ADDR_HIGH_MASK 0x000000FF | ||
| 222 | #define FSL_RE_CF_EXT_MASK 0x80000000 | ||
| 223 | #define FSL_RE_CF_EXT_SHIFT 31 | ||
| 224 | #define FSL_RE_CF_FINAL_MASK 0x40000000 | ||
| 225 | #define FSL_RE_CF_FINAL_SHIFT 30 | ||
| 226 | #define FSL_RE_CF_LENGTH_MASK 0x000FFFFF | ||
| 227 | #define FSL_RE_CF_BPID_MASK 0x00FF0000 | ||
| 228 | #define FSL_RE_CF_BPID_SHIFT 16 | ||
| 229 | #define FSL_RE_CF_OFFSET_MASK 0x00001FFF | ||
| 230 | |||
| 231 | struct fsl_re_cmpnd_frame { | ||
| 232 | __be32 addr_high; | ||
| 233 | __be32 addr_low; | ||
| 234 | __be32 efrl32; | ||
| 235 | __be32 rbro32; | ||
| 236 | }; | ||
| 237 | |||
| 238 | /* Frame descriptor */ | ||
| 239 | #define FSL_RE_HWDESC_LIODN_MASK 0x3F000000 | ||
| 240 | #define FSL_RE_HWDESC_LIODN_SHIFT 24 | ||
| 241 | #define FSL_RE_HWDESC_BPID_MASK 0x00FF0000 | ||
| 242 | #define FSL_RE_HWDESC_BPID_SHIFT 16 | ||
| 243 | #define FSL_RE_HWDESC_ELIODN_MASK 0x0000F000 | ||
| 244 | #define FSL_RE_HWDESC_ELIODN_SHIFT 12 | ||
| 245 | #define FSL_RE_HWDESC_FMT_SHIFT 29 | ||
| 246 | #define FSL_RE_HWDESC_FMT_MASK (0x3 << FSL_RE_HWDESC_FMT_SHIFT) | ||
| 247 | |||
| 248 | struct fsl_re_hw_desc { | ||
| 249 | __be32 lbea32; | ||
| 250 | __be32 addr_low; | ||
| 251 | __be32 fmt32; | ||
| 252 | __be32 status; | ||
| 253 | }; | ||
| 254 | |||
| 255 | /* Raid Engine device private data */ | ||
| 256 | struct fsl_re_drv_private { | ||
| 257 | u8 total_chans; | ||
| 258 | struct dma_device dma_dev; | ||
| 259 | struct fsl_re_ctrl *re_regs; | ||
| 260 | struct fsl_re_chan *re_jrs[FSL_RE_MAX_CHANS]; | ||
| 261 | struct dma_pool *cf_desc_pool; | ||
| 262 | struct dma_pool *hw_desc_pool; | ||
| 263 | }; | ||
| 264 | |||
| 265 | /* Per job ring data structure */ | ||
| 266 | struct fsl_re_chan { | ||
| 267 | char name[16]; | ||
| 268 | spinlock_t desc_lock; /* queue lock */ | ||
| 269 | struct list_head ack_q; /* descriptors waiting to be acked */ | ||
| 270 | struct list_head active_q; /* already issued on hw, not completed */ | ||
| 271 | struct list_head submit_q; | ||
| 272 | struct list_head free_q; /* free descriptors available for alloc */ | ||
| 273 | struct device *dev; | ||
| 274 | struct fsl_re_drv_private *re_dev; | ||
| 275 | struct dma_chan chan; | ||
| 276 | struct fsl_re_chan_cfg *jrregs; | ||
| 277 | int irq; | ||
| 278 | struct tasklet_struct irqtask; | ||
| 279 | u32 alloc_count; | ||
| 280 | |||
| 281 | /* hw descriptor ring for inbound queue */ | ||
| 282 | dma_addr_t inb_phys_addr; | ||
| 283 | struct fsl_re_hw_desc *inb_ring_virt_addr; | ||
| 284 | u32 inb_count; | ||
| 285 | |||
| 286 | /* hw descriptor ring for outbound queue */ | ||
| 287 | dma_addr_t oub_phys_addr; | ||
| 288 | struct fsl_re_hw_desc *oub_ring_virt_addr; | ||
| 289 | u32 oub_count; | ||
| 290 | }; | ||
| 291 | |||
| 292 | /* Async transaction descriptor */ | ||
| 293 | struct fsl_re_desc { | ||
| 294 | struct dma_async_tx_descriptor async_tx; | ||
| 295 | struct list_head node; | ||
| 296 | struct fsl_re_hw_desc hwdesc; | ||
| 297 | struct fsl_re_chan *re_chan; | ||
| 298 | |||
| 299 | /* hwdesc will point to cf_addr */ | ||
| 300 | void *cf_addr; | ||
| 301 | dma_addr_t cf_paddr; | ||
| 302 | |||
| 303 | void *cdb_addr; | ||
| 304 | dma_addr_t cdb_paddr; | ||
| 305 | int status; | ||
| 306 | }; | ||
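Note: fill_cfd_frame(), used by all the prep hooks above, is defined earlier in fsl_raid.c. An illustrative reconstruction from the FSL_RE_CF_* masks, just to show how a compound-frame entry is packed (byte-order conversion to the __be32 fields is elided here):

	/* illustrative sketch only; the real helper lives in fsl_raid.c */
	static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
				   size_t length, dma_addr_t addr, bool final)
	{
		u32 efrl = length & FSL_RE_CF_LENGTH_MASK;

		efrl |= final << FSL_RE_CF_FINAL_SHIFT;
		cf[index].efrl32 = efrl;
		cf[index].addr_high = upper_32_bits(addr) &
				      FSL_RE_CF_ADDR_HIGH_MASK;
		cf[index].addr_low = lower_32_bits(addr);
	}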
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c index ed045a9ad634..9ca56830cc63 100644 --- a/drivers/dma/img-mdc-dma.c +++ b/drivers/dma/img-mdc-dma.c | |||
| @@ -689,11 +689,6 @@ static int mdc_slave_config(struct dma_chan *chan, | |||
| 689 | return 0; | 689 | return 0; |
| 690 | } | 690 | } |
| 691 | 691 | ||
| 692 | static int mdc_alloc_chan_resources(struct dma_chan *chan) | ||
| 693 | { | ||
| 694 | return 0; | ||
| 695 | } | ||
| 696 | |||
| 697 | static void mdc_free_chan_resources(struct dma_chan *chan) | 692 | static void mdc_free_chan_resources(struct dma_chan *chan) |
| 698 | { | 693 | { |
| 699 | struct mdc_chan *mchan = to_mdc_chan(chan); | 694 | struct mdc_chan *mchan = to_mdc_chan(chan); |
| @@ -910,7 +905,6 @@ static int mdc_dma_probe(struct platform_device *pdev) | |||
| 910 | mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg; | 905 | mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg; |
| 911 | mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic; | 906 | mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic; |
| 912 | mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy; | 907 | mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy; |
| 913 | mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources; | ||
| 914 | mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources; | 908 | mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources; |
| 915 | mdma->dma_dev.device_tx_status = mdc_tx_status; | 909 | mdma->dma_dev.device_tx_status = mdc_tx_status; |
| 916 | mdma->dma_dev.device_issue_pending = mdc_issue_pending; | 910 | mdma->dma_dev.device_issue_pending = mdc_issue_pending; |
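Note: this deletion (and the matching ones in k3dma and s3c24xx-dma below) is safe because the dmaengine core now treats device_alloc_chan_resources as optional, so stubs that only return 0 are dead code. Roughly, the core-side call site looks like this (paraphrased from dma_chan_get() in drivers/dma/dmaengine.c):

	/* paraphrased: the callback is only invoked when the driver
	 * actually provides one */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}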
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 66a0efb9651d..62bbd79338e0 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
| @@ -1260,6 +1260,7 @@ static void sdma_issue_pending(struct dma_chan *chan) | |||
| 1260 | 1260 | ||
| 1261 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 | 1261 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 |
| 1262 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 | 1262 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 |
| 1263 | #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41 | ||
| 1263 | 1264 | ||
| 1264 | static void sdma_add_scripts(struct sdma_engine *sdma, | 1265 | static void sdma_add_scripts(struct sdma_engine *sdma, |
| 1265 | const struct sdma_script_start_addrs *addr) | 1266 | const struct sdma_script_start_addrs *addr) |
| @@ -1306,6 +1307,9 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) | |||
| 1306 | case 2: | 1307 | case 2: |
| 1307 | sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; | 1308 | sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; |
| 1308 | break; | 1309 | break; |
| 1310 | case 3: | ||
| 1311 | sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3; | ||
| 1312 | break; | ||
| 1309 | default: | 1313 | default: |
| 1310 | dev_err(sdma->dev, "unknown firmware version\n"); | 1314 | dev_err(sdma->dev, "unknown firmware version\n"); |
| 1311 | goto err_firmware; | 1315 | goto err_firmware; |
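Note: script_number only bounds how many entries of the firmware's start-address table get merged into sdma->script_addrs, so bumping the count to 41 for version 3 firmware is all that is needed to pick up the new scripts. The consuming loop in sdma_add_scripts() is roughly:

	/* sketch of sdma_add_scripts(): merge only known, valid entries */
	s32 *addr_arr = (s32 *)addr;
	s32 *saddr_arr = (s32 *)sdma->script_addrs;
	int i;

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];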
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index 3b55bb8d969a..ea1e107ae884 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | 14 | * The full GNU General Public License is included in this distribution in |
| 19 | * the file called "COPYING". | 15 | * the file called "COPYING". |
| 20 | * | 16 | * |
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 940c1502a8b5..ee0aa9f4ccfa 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | 14 | * The full GNU General Public License is included in this distribution in |
| 19 | * the file called "COPYING". | 15 | * the file called "COPYING". |
| 20 | * | 16 | * |
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index d63f68b1aa35..30f5c7eede16 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | 14 | * The full GNU General Public License is included in this distribution in the |
| 19 | * file called COPYING. | 15 | * file called COPYING. |
| 20 | */ | 16 | */ |
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 695483e6be32..69c7dfcad023 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | 14 | * The full GNU General Public License is included in this distribution in |
| 19 | * the file called "COPYING". | 15 | * the file called "COPYING". |
| 20 | * | 16 | * |
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 470292767e68..bf24ebe874b0 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | 14 | * The full GNU General Public License is included in this distribution in the |
| 19 | * file called COPYING. | 15 | * file called COPYING. |
| 20 | */ | 16 | */ |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 194ec20c9408..64790a45ef5d 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
| @@ -15,10 +15,6 @@ | |||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 16 | * more details. | 16 | * more details. |
| 17 | * | 17 | * |
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 20 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 21 | * | ||
| 22 | * The full GNU General Public License is included in this distribution in | 18 | * The full GNU General Public License is included in this distribution in |
| 23 | * the file called "COPYING". | 19 | * the file called "COPYING". |
| 24 | * | 20 | * |
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 02177ecf09f8..a3e731edce57 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | 14 | * The full GNU General Public License is included in this distribution in the |
| 19 | * file called COPYING. | 15 | * file called COPYING. |
| 20 | */ | 16 | */ |
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 5501eb072d69..76f0dc688a19 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in | 14 | * The full GNU General Public License is included in this distribution in |
| 19 | * the file called "COPYING". | 15 | * the file called "COPYING". |
| 20 | * | 16 | * |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 2f1cfa0f1f47..909352f74c89 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | 14 | * The full GNU General Public License is included in this distribution in the |
| 19 | * file called COPYING. | 15 | * file called COPYING. |
| 20 | */ | 16 | */ |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 263d9f6a207e..998826854fdd 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | * | ||
| 18 | */ | 14 | */ |
| 19 | 15 | ||
| 20 | /* | 16 | /* |
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 6f7f43529ccb..647e362f01fd 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c | |||
| @@ -313,11 +313,6 @@ static void k3_dma_tasklet(unsigned long arg) | |||
| 313 | } | 313 | } |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static int k3_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 317 | { | ||
| 318 | return 0; | ||
| 319 | } | ||
| 320 | |||
| 321 | static void k3_dma_free_chan_resources(struct dma_chan *chan) | 316 | static void k3_dma_free_chan_resources(struct dma_chan *chan) |
| 322 | { | 317 | { |
| 323 | struct k3_dma_chan *c = to_k3_chan(chan); | 318 | struct k3_dma_chan *c = to_k3_chan(chan); |
| @@ -654,7 +649,7 @@ static void k3_dma_free_desc(struct virt_dma_desc *vd) | |||
| 654 | kfree(ds); | 649 | kfree(ds); |
| 655 | } | 650 | } |
| 656 | 651 | ||
| 657 | static struct of_device_id k3_pdma_dt_ids[] = { | 652 | static const struct of_device_id k3_pdma_dt_ids[] = { |
| 658 | { .compatible = "hisilicon,k3-dma-1.0", }, | 653 | { .compatible = "hisilicon,k3-dma-1.0", }, |
| 659 | {} | 654 | {} |
| 660 | }; | 655 | }; |
| @@ -728,7 +723,6 @@ static int k3_dma_probe(struct platform_device *op) | |||
| 728 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); | 723 | dma_cap_set(DMA_SLAVE, d->slave.cap_mask); |
| 729 | dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); | 724 | dma_cap_set(DMA_MEMCPY, d->slave.cap_mask); |
| 730 | d->slave.dev = &op->dev; | 725 | d->slave.dev = &op->dev; |
| 731 | d->slave.device_alloc_chan_resources = k3_dma_alloc_chan_resources; | ||
| 732 | d->slave.device_free_chan_resources = k3_dma_free_chan_resources; | 726 | d->slave.device_free_chan_resources = k3_dma_free_chan_resources; |
| 733 | d->slave.device_tx_status = k3_dma_tx_status; | 727 | d->slave.device_tx_status = k3_dma_tx_status; |
| 734 | d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; | 728 | d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; |
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index eb410044e1af..462a0229a743 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
| @@ -973,7 +973,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) | |||
| 973 | return 0; | 973 | return 0; |
| 974 | } | 974 | } |
| 975 | 975 | ||
| 976 | static struct of_device_id mmp_pdma_dt_ids[] = { | 976 | static const struct of_device_id mmp_pdma_dt_ids[] = { |
| 977 | { .compatible = "marvell,pdma-1.0", }, | 977 | { .compatible = "marvell,pdma-1.0", }, |
| 978 | {} | 978 | {} |
| 979 | }; | 979 | }; |
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index b6f4e1fc9c78..449e785def17 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c | |||
| @@ -613,7 +613,7 @@ struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec, | |||
| 613 | return dma_request_channel(mask, mmp_tdma_filter_fn, ¶m); | 613 | return dma_request_channel(mask, mmp_tdma_filter_fn, ¶m); |
| 614 | } | 614 | } |
| 615 | 615 | ||
| 616 | static struct of_device_id mmp_tdma_dt_ids[] = { | 616 | static const struct of_device_id mmp_tdma_dt_ids[] = { |
| 617 | { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, | 617 | { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, |
| 618 | { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, | 618 | { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, |
| 619 | {} | 619 | {} |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 57d2457545f3..e6281e7aa46e 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
| @@ -21,10 +21,6 @@ | |||
| 21 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 21 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 22 | * more details. | 22 | * more details. |
| 23 | * | 23 | * |
| 24 | * You should have received a copy of the GNU General Public License along with | ||
| 25 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 26 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 27 | * | ||
| 28 | * The full GNU General Public License is included in this distribution in the | 24 | * The full GNU General Public License is included in this distribution in the |
| 29 | * file called COPYING. | 25 | * file called COPYING. |
| 30 | */ | 26 | */ |
| @@ -1072,7 +1068,7 @@ static int mpc_dma_remove(struct platform_device *op) | |||
| 1072 | return 0; | 1068 | return 0; |
| 1073 | } | 1069 | } |
| 1074 | 1070 | ||
| 1075 | static struct of_device_id mpc_dma_match[] = { | 1071 | static const struct of_device_id mpc_dma_match[] = { |
| 1076 | { .compatible = "fsl,mpc5121-dma", }, | 1072 | { .compatible = "fsl,mpc5121-dma", }, |
| 1077 | { .compatible = "fsl,mpc8308-dma", }, | 1073 | { .compatible = "fsl,mpc8308-dma", }, |
| 1078 | {}, | 1074 | {}, |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index b03e8137b918..1c56001df676 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
| @@ -10,10 +10,6 @@ | |||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
| 16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 17 | */ | 13 | */ |
| 18 | 14 | ||
| 19 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| @@ -1249,7 +1245,7 @@ static int mv_xor_remove(struct platform_device *pdev) | |||
| 1249 | } | 1245 | } |
| 1250 | 1246 | ||
| 1251 | #ifdef CONFIG_OF | 1247 | #ifdef CONFIG_OF |
| 1252 | static struct of_device_id mv_xor_dt_ids[] = { | 1248 | static const struct of_device_id mv_xor_dt_ids[] = { |
| 1253 | { .compatible = "marvell,orion-xor", }, | 1249 | { .compatible = "marvell,orion-xor", }, |
| 1254 | {}, | 1250 | {}, |
| 1255 | }; | 1251 | }; |
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 78edc7e44569..91958dba39a2 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h | |||
| @@ -9,10 +9,6 @@ | |||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 11 | * for more details. | 11 | * for more details. |
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software Foundation, | ||
| 15 | * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
| 16 | */ | 12 | */ |
| 17 | 13 | ||
| 18 | #ifndef MV_XOR_H | 14 | #ifndef MV_XOR_H |
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 35c143cb88da..b859792dde95 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
| @@ -949,6 +949,7 @@ err_free_res: | |||
| 949 | err_disable_pdev: | 949 | err_disable_pdev: |
| 950 | pci_disable_device(pdev); | 950 | pci_disable_device(pdev); |
| 951 | err_free_mem: | 951 | err_free_mem: |
| 952 | kfree(pd); | ||
| 952 | return err; | 953 | return err; |
| 953 | } | 954 | } |
| 954 | 955 | ||
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 0e1f56772855..a7d9d3029b14 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
| @@ -556,7 +556,7 @@ static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], | |||
| 556 | 556 | ||
| 557 | buf[0] = CMD_DMAADDH; | 557 | buf[0] = CMD_DMAADDH; |
| 558 | buf[0] |= (da << 1); | 558 | buf[0] |= (da << 1); |
| 559 | *((u16 *)&buf[1]) = val; | 559 | *((__le16 *)&buf[1]) = cpu_to_le16(val); |
| 560 | 560 | ||
| 561 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", | 561 | PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", |
| 562 | da == 1 ? "DA" : "SA", val); | 562 | da == 1 ? "DA" : "SA", val); |
| @@ -710,7 +710,7 @@ static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | |||
| 710 | 710 | ||
| 711 | buf[0] = CMD_DMAMOV; | 711 | buf[0] = CMD_DMAMOV; |
| 712 | buf[1] = dst; | 712 | buf[1] = dst; |
| 713 | *((u32 *)&buf[2]) = val; | 713 | *((__le32 *)&buf[2]) = cpu_to_le32(val); |
| 714 | 714 | ||
| 715 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | 715 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", |
| 716 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | 716 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); |
| @@ -888,7 +888,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | |||
| 888 | 888 | ||
| 889 | buf[1] = chan & 0x7; | 889 | buf[1] = chan & 0x7; |
| 890 | 890 | ||
| 891 | *((u32 *)&buf[2]) = addr; | 891 | *((__le32 *)&buf[2]) = cpu_to_le32(addr); |
| 892 | 892 | ||
| 893 | return SZ_DMAGO; | 893 | return SZ_DMAGO; |
| 894 | } | 894 | } |
| @@ -928,7 +928,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, | |||
| 928 | } | 928 | } |
| 929 | writel(val, regs + DBGINST0); | 929 | writel(val, regs + DBGINST0); |
| 930 | 930 | ||
| 931 | val = *((u32 *)&insn[2]); | 931 | val = le32_to_cpu(*((__le32 *)&insn[2])); |
| 932 | writel(val, regs + DBGINST1); | 932 | writel(val, regs + DBGINST1); |
| 933 | 933 | ||
| 934 | /* If timed out due to halted state-machine */ | 934 | /* If timed out due to halted state-machine */ |
| @@ -2162,7 +2162,7 @@ static int pl330_terminate_all(struct dma_chan *chan) | |||
| 2162 | * DMA transfer again. This pause feature was implemented to | 2162 | * DMA transfer again. This pause feature was implemented to |
| 2163 | * allow safely read residue before channel termination. | 2163 | * allow safely read residue before channel termination. |
| 2164 | */ | 2164 | */ |
| 2165 | int pl330_pause(struct dma_chan *chan) | 2165 | static int pl330_pause(struct dma_chan *chan) |
| 2166 | { | 2166 | { |
| 2167 | struct dma_pl330_chan *pch = to_pchan(chan); | 2167 | struct dma_pl330_chan *pch = to_pchan(chan); |
| 2168 | struct pl330_dmac *pl330 = pch->dmac; | 2168 | struct pl330_dmac *pl330 = pch->dmac; |
| @@ -2203,8 +2203,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan) | |||
| 2203 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); | 2203 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); |
| 2204 | } | 2204 | } |
| 2205 | 2205 | ||
| 2206 | int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, | 2206 | static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, |
| 2207 | struct dma_pl330_desc *desc) | 2207 | struct dma_pl330_desc *desc) |
| 2208 | { | 2208 | { |
| 2209 | struct pl330_thread *thrd = pch->thread; | 2209 | struct pl330_thread *thrd = pch->thread; |
| 2210 | struct pl330_dmac *pl330 = pch->dmac; | 2210 | struct pl330_dmac *pl330 = pch->dmac; |
| @@ -2259,7 +2259,17 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 2259 | transferred = 0; | 2259 | transferred = 0; |
| 2260 | residual += desc->bytes_requested - transferred; | 2260 | residual += desc->bytes_requested - transferred; |
| 2261 | if (desc->txd.cookie == cookie) { | 2261 | if (desc->txd.cookie == cookie) { |
| 2262 | ret = desc->status; | 2262 | switch (desc->status) { |
| 2263 | case DONE: | ||
| 2264 | ret = DMA_COMPLETE; | ||
| 2265 | break; | ||
| 2266 | case PREP: | ||
| 2267 | case BUSY: | ||
| 2268 | ret = DMA_IN_PROGRESS; | ||
| 2269 | break; | ||
| 2270 | default: | ||
| 2271 | WARN_ON(1); | ||
| 2272 | } | ||
| 2263 | break; | 2273 | break; |
| 2264 | } | 2274 | } |
| 2265 | if (desc->last) | 2275 | if (desc->last) |
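Note: the __le16/__le32 annotations matter on big-endian hosts: the PL330 consumes its microcode stream little-endian, so a plain integer store would emit the immediate bytes reversed. For example:

	/* on a big-endian CPU, cpu_to_le32() swaps 0x12345678 so the
	 * microcode bytes still read 78 56 34 12, as the DMAC expects */
	*((__le32 *)&buf[2]) = cpu_to_le32(val);

The pl330_tx_status hunk is a related cleanup: desc->status holds driver-internal states (PREP/BUSY/DONE), which must be mapped onto enum dma_status rather than returned as-is.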
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index fa764a39cd36..9217f893b0d1 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c | |||
| @@ -16,10 +16,6 @@ | |||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 17 | * more details. | 17 | * more details. |
| 18 | * | 18 | * |
| 19 | * You should have received a copy of the GNU General Public License along with | ||
| 20 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 21 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 22 | * | ||
| 23 | * The full GNU General Public License is included in this distribution in the | 19 | * The full GNU General Public License is included in this distribution in the |
| 24 | * file called COPYING. | 20 | * file called COPYING. |
| 25 | */ | 21 | */ |
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 9c914d625906..5a250cdc8376 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c | |||
| @@ -171,6 +171,35 @@ static const struct reg_offset_data bam_v1_4_reg_info[] = { | |||
| 171 | [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, | 171 | [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 }, |
| 172 | }; | 172 | }; |
| 173 | 173 | ||
| 174 | static const struct reg_offset_data bam_v1_7_reg_info[] = { | ||
| 175 | [BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 }, | ||
| 176 | [BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 }, | ||
| 177 | [BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 }, | ||
| 178 | [BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 }, | ||
| 179 | [BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 }, | ||
| 180 | [BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 }, | ||
| 181 | [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 }, | ||
| 182 | [BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 }, | ||
| 183 | [BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 }, | ||
| 184 | [BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 }, | ||
| 185 | [BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 }, | ||
| 186 | [BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 }, | ||
| 187 | [BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 }, | ||
| 188 | [BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 }, | ||
| 189 | [BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 }, | ||
| 190 | [BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 }, | ||
| 191 | [BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 }, | ||
| 192 | [BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 }, | ||
| 193 | [BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 }, | ||
| 194 | [BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 }, | ||
| 195 | [BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 }, | ||
| 196 | [BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 }, | ||
| 197 | [BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 }, | ||
| 198 | [BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 }, | ||
| 199 | [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 }, | ||
| 200 | [BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 }, | ||
| 201 | }; | ||
| 202 | |||
| 174 | /* BAM CTRL */ | 203 | /* BAM CTRL */ |
| 175 | #define BAM_SW_RST BIT(0) | 204 | #define BAM_SW_RST BIT(0) |
| 176 | #define BAM_EN BIT(1) | 205 | #define BAM_EN BIT(1) |
| @@ -1051,6 +1080,7 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan, | |||
| 1051 | static const struct of_device_id bam_of_match[] = { | 1080 | static const struct of_device_id bam_of_match[] = { |
| 1052 | { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, | 1081 | { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info }, |
| 1053 | { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, | 1082 | { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info }, |
| 1083 | { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info }, | ||
| 1054 | {} | 1084 | {} |
| 1055 | }; | 1085 | }; |
| 1056 | 1086 | ||
| @@ -1113,7 +1143,7 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1113 | 1143 | ||
| 1114 | if (!bdev->channels) { | 1144 | if (!bdev->channels) { |
| 1115 | ret = -ENOMEM; | 1145 | ret = -ENOMEM; |
| 1116 | goto err_disable_clk; | 1146 | goto err_tasklet_kill; |
| 1117 | } | 1147 | } |
| 1118 | 1148 | ||
| 1119 | /* allocate and initialize channels */ | 1149 | /* allocate and initialize channels */ |
| @@ -1125,7 +1155,7 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1125 | ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, | 1155 | ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq, |
| 1126 | IRQF_TRIGGER_HIGH, "bam_dma", bdev); | 1156 | IRQF_TRIGGER_HIGH, "bam_dma", bdev); |
| 1127 | if (ret) | 1157 | if (ret) |
| 1128 | goto err_disable_clk; | 1158 | goto err_bam_channel_exit; |
| 1129 | 1159 | ||
| 1130 | /* set max dma segment size */ | 1160 | /* set max dma segment size */ |
| 1131 | bdev->common.dev = bdev->dev; | 1161 | bdev->common.dev = bdev->dev; |
| @@ -1133,7 +1163,7 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1133 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); | 1163 | ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE); |
| 1134 | if (ret) { | 1164 | if (ret) { |
| 1135 | dev_err(bdev->dev, "cannot set maximum segment size\n"); | 1165 | dev_err(bdev->dev, "cannot set maximum segment size\n"); |
| 1136 | goto err_disable_clk; | 1166 | goto err_bam_channel_exit; |
| 1137 | } | 1167 | } |
| 1138 | 1168 | ||
| 1139 | platform_set_drvdata(pdev, bdev); | 1169 | platform_set_drvdata(pdev, bdev); |
| @@ -1161,7 +1191,7 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1161 | ret = dma_async_device_register(&bdev->common); | 1191 | ret = dma_async_device_register(&bdev->common); |
| 1162 | if (ret) { | 1192 | if (ret) { |
| 1163 | dev_err(bdev->dev, "failed to register dma async device\n"); | 1193 | dev_err(bdev->dev, "failed to register dma async device\n"); |
| 1164 | goto err_disable_clk; | 1194 | goto err_bam_channel_exit; |
| 1165 | } | 1195 | } |
| 1166 | 1196 | ||
| 1167 | ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate, | 1197 | ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate, |
| @@ -1173,8 +1203,14 @@ static int bam_dma_probe(struct platform_device *pdev) | |||
| 1173 | 1203 | ||
| 1174 | err_unregister_dma: | 1204 | err_unregister_dma: |
| 1175 | dma_async_device_unregister(&bdev->common); | 1205 | dma_async_device_unregister(&bdev->common); |
| 1206 | err_bam_channel_exit: | ||
| 1207 | for (i = 0; i < bdev->num_channels; i++) | ||
| 1208 | tasklet_kill(&bdev->channels[i].vc.task); | ||
| 1209 | err_tasklet_kill: | ||
| 1210 | tasklet_kill(&bdev->task); | ||
| 1176 | err_disable_clk: | 1211 | err_disable_clk: |
| 1177 | clk_disable_unprepare(bdev->bamclk); | 1212 | clk_disable_unprepare(bdev->bamclk); |
| 1213 | |||
| 1178 | return ret; | 1214 | return ret; |
| 1179 | } | 1215 | } |
| 1180 | 1216 | ||
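The reworked error path above follows the usual kernel unwind ladder: each failure point jumps to a label that tears down only what has already been set up, in reverse order, so the per-channel tasklets and the device tasklet are killed before the clock is disabled. A minimal sketch of the pattern, using hypothetical "foo" names rather than the bam_dma code itself:

    #include <linux/clk.h>
    #include <linux/errno.h>
    #include <linux/interrupt.h>

    struct foo_dev {
            struct clk *clk;
            struct tasklet_struct task;             /* device-level tasklet */
            unsigned int num_channels;
            struct tasklet_struct *chan_tasks;      /* one per channel */
    };

    static void foo_poll(unsigned long data) { /* completion work */ }

    /* assumed helper; stands in for the IRQ request/registration steps */
    static int foo_register(struct foo_dev *fd) { return 0; }

    static int foo_setup(struct foo_dev *fd)
    {
            unsigned int i;
            int ret;

            ret = clk_prepare_enable(fd->clk);
            if (ret)
                    return ret;

            tasklet_init(&fd->task, foo_poll, (unsigned long)fd);

            if (!fd->chan_tasks) {
                    ret = -ENOMEM;
                    goto err_tasklet_kill;          /* only the device tasklet exists */
            }
            for (i = 0; i < fd->num_channels; i++)
                    tasklet_init(&fd->chan_tasks[i], foo_poll, i);

            ret = foo_register(fd);
            if (ret)
                    goto err_channel_exit;          /* channel tasklets exist too */

            return 0;

    err_channel_exit:
            for (i = 0; i < fd->num_channels; i++)
                    tasklet_kill(&fd->chan_tasks[i]);
    err_tasklet_kill:
            tasklet_kill(&fd->task);
            clk_disable_unprepare(fd->clk);
            return ret;
    }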
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 2f91da3db836..01dcaf21b988 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c | |||
| @@ -749,11 +749,6 @@ unlock: | |||
| 749 | return ret; | 749 | return ret; |
| 750 | } | 750 | } |
| 751 | 751 | ||
| 752 | static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 753 | { | ||
| 754 | return 0; | ||
| 755 | } | ||
| 756 | |||
| 757 | static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) | 752 | static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) |
| 758 | { | 753 | { |
| 759 | /* Ensure all queued descriptors are freed */ | 754 | /* Ensure all queued descriptors are freed */ |
| @@ -1238,7 +1233,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
| 1238 | if (!s3cdma->phy_chans) | 1233 | if (!s3cdma->phy_chans) |
| 1239 | return -ENOMEM; | 1234 | return -ENOMEM; |
| 1240 | 1235 | ||
| 1241 | /* aquire irqs and clocks for all physical channels */ | 1236 | /* acquire irqs and clocks for all physical channels */ |
| 1242 | for (i = 0; i < pdata->num_phy_channels; i++) { | 1237 | for (i = 0; i < pdata->num_phy_channels; i++) { |
| 1243 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | 1238 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; |
| 1244 | char clk_name[6]; | 1239 | char clk_name[6]; |
| @@ -1266,7 +1261,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
| 1266 | sprintf(clk_name, "dma.%d", i); | 1261 | sprintf(clk_name, "dma.%d", i); |
| 1267 | phy->clk = devm_clk_get(&pdev->dev, clk_name); | 1262 | phy->clk = devm_clk_get(&pdev->dev, clk_name); |
| 1268 | if (IS_ERR(phy->clk) && sdata->has_clocks) { | 1263 | if (IS_ERR(phy->clk) && sdata->has_clocks) { |
| 1269 | dev_err(&pdev->dev, "unable to aquire clock for channel %d, error %lu", | 1264 | dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n", |
| 1270 | i, PTR_ERR(phy->clk)); | 1265 | i, PTR_ERR(phy->clk)); |
| 1271 | continue; | 1266 | continue; |
| 1272 | } | 1267 | } |
| @@ -1290,8 +1285,6 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
| 1290 | dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); | 1285 | dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); |
| 1291 | dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); | 1286 | dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); |
| 1292 | s3cdma->memcpy.dev = &pdev->dev; | 1287 | s3cdma->memcpy.dev = &pdev->dev; |
| 1293 | s3cdma->memcpy.device_alloc_chan_resources = | ||
| 1294 | s3c24xx_dma_alloc_chan_resources; | ||
| 1295 | s3cdma->memcpy.device_free_chan_resources = | 1288 | s3cdma->memcpy.device_free_chan_resources = |
| 1296 | s3c24xx_dma_free_chan_resources; | 1289 | s3c24xx_dma_free_chan_resources; |
| 1297 | s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; | 1290 | s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; |
| @@ -1305,8 +1298,6 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) | |||
| 1305 | dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); | 1298 | dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask); |
| 1306 | dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); | 1299 | dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); |
| 1307 | s3cdma->slave.dev = &pdev->dev; | 1300 | s3cdma->slave.dev = &pdev->dev; |
| 1308 | s3cdma->slave.device_alloc_chan_resources = | ||
| 1309 | s3c24xx_dma_alloc_chan_resources; | ||
| 1310 | s3cdma->slave.device_free_chan_resources = | 1301 | s3cdma->slave.device_free_chan_resources = |
| 1311 | s3c24xx_dma_free_chan_resources; | 1302 | s3c24xx_dma_free_chan_resources; |
| 1312 | s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; | 1303 | s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; |
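This hunk (and the sa11x0 one below) deletes empty device_alloc_chan_resources() stubs. That is safe because the dmaengine core treats the callback as optional and only invokes it when it is set; a rough sketch of the core-side guard, not the exact upstream code:

    #include <linux/dmaengine.h>

    /* Sketch: how the core can call the now-optional callback safely. */
    static int chan_get_resources_sketch(struct dma_chan *chan)
    {
            if (chan->device->device_alloc_chan_resources)
                    return chan->device->device_alloc_chan_resources(chan);
            return 0;       /* no callback: nothing to allocate */
    }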
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 5adf5407a8cb..43db255050d2 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c | |||
| @@ -389,11 +389,6 @@ static void sa11x0_dma_tasklet(unsigned long arg) | |||
| 389 | } | 389 | } |
| 390 | 390 | ||
| 391 | 391 | ||
| 392 | static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 393 | { | ||
| 394 | return 0; | ||
| 395 | } | ||
| 396 | |||
| 397 | static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) | 392 | static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) |
| 398 | { | 393 | { |
| 399 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); | 394 | struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); |
| @@ -835,7 +830,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, | |||
| 835 | 830 | ||
| 836 | INIT_LIST_HEAD(&dmadev->channels); | 831 | INIT_LIST_HEAD(&dmadev->channels); |
| 837 | dmadev->dev = dev; | 832 | dmadev->dev = dev; |
| 838 | dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; | ||
| 839 | dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; | 833 | dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; |
| 840 | dmadev->device_config = sa11x0_dma_device_config; | 834 | dmadev->device_config = sa11x0_dma_device_config; |
| 841 | dmadev->device_pause = sa11x0_dma_device_pause; | 835 | dmadev->device_pause = sa11x0_dma_device_pause; |
| @@ -948,6 +942,12 @@ static int sa11x0_dma_probe(struct platform_device *pdev) | |||
| 948 | dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); | 942 | dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); |
| 949 | d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; | 943 | d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; |
| 950 | d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; | 944 | d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; |
| 945 | d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | ||
| 946 | d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
| 947 | d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
| 948 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES); | ||
| 949 | d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | | ||
| 950 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES); | ||
| 951 | ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); | 951 | ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); |
| 952 | if (ret) { | 952 | if (ret) { |
| 953 | dev_warn(d->slave.dev, "failed to register slave async device: %d\n", | 953 | dev_warn(d->slave.dev, "failed to register slave async device: %d\n", |
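With .directions, .residue_granularity, and the address-width masks now filled in, generic clients can discover what the sa11x0 channels support instead of hard-coding it. A hedged consumer-side sketch using the standard capability query:

    #include <linux/dmaengine.h>

    /* Sketch: check the capabilities advertised above before configuring
     * a DEV_TO_MEM transfer with a 16-bit register width. */
    static bool can_do_dev_to_mem_16bit(struct dma_chan *chan)
    {
            struct dma_slave_caps caps;

            if (dma_get_slave_caps(chan, &caps) < 0)
                    return false;

            return (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
                   (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_2_BYTES));
    }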
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 8190ad225a1b..0f371524a4d9 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig | |||
| @@ -51,12 +51,6 @@ config RCAR_HPB_DMAE | |||
| 51 | help | 51 | help |
| 52 | Enable support for the Renesas R-Car series DMA controllers. | 52 | Enable support for the Renesas R-Car series DMA controllers. |
| 53 | 53 | ||
| 54 | config RCAR_AUDMAC_PP | ||
| 55 | tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support" | ||
| 56 | depends on SH_DMAE_BASE | ||
| 57 | help | ||
| 58 | Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. | ||
| 59 | |||
| 60 | config RCAR_DMAC | 54 | config RCAR_DMAC |
| 61 | tristate "Renesas R-Car Gen2 DMA Controller" | 55 | tristate "Renesas R-Car Gen2 DMA Controller" |
| 62 | depends on ARCH_SHMOBILE || COMPILE_TEST | 56 | depends on ARCH_SHMOBILE || COMPILE_TEST |
| @@ -64,3 +58,12 @@ config RCAR_DMAC | |||
| 64 | help | 58 | help |
| 65 | This driver supports the general purpose DMA controller found in the | 59 | This driver supports the general purpose DMA controller found in the |
| 66 | Renesas R-Car second generation SoCs. | 60 | Renesas R-Car second generation SoCs. |
| 61 | |||
| 62 | config RENESAS_USB_DMAC | ||
| 63 | tristate "Renesas USB-DMA Controller" | ||
| 64 | depends on ARCH_SHMOBILE || COMPILE_TEST | ||
| 65 | select RENESAS_DMA | ||
| 66 | select DMA_VIRTUAL_CHANNELS | ||
| 67 | help | ||
| 68 | This driver supports the USB-DMA controller found in Renesas | ||
| 69 | SoCs. | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 2852f9db61a4..b8a598066ce2 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
| @@ -15,5 +15,5 @@ obj-$(CONFIG_SH_DMAE) += shdma.o | |||
| 15 | 15 | ||
| 16 | obj-$(CONFIG_SUDMAC) += sudmac.o | 16 | obj-$(CONFIG_SUDMAC) += sudmac.o |
| 17 | obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o | 17 | obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o |
| 18 | obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o | ||
| 19 | obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o | 18 | obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o |
| 19 | obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o | ||
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c deleted file mode 100644 index d95bbdd721f4..000000000000 --- a/drivers/dma/sh/rcar-audmapp.c +++ /dev/null | |||
| @@ -1,376 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * This is for Renesas R-Car Audio-DMAC-peri-peri. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2014 Renesas Electronics Corporation | ||
| 5 | * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | ||
| 6 | * | ||
| 7 | * based on the drivers/dma/sh/shdma.c | ||
| 8 | * | ||
| 9 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
| 10 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
| 11 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
| 12 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
| 13 | * | ||
| 14 | * This is free software; you can redistribute it and/or modify | ||
| 15 | * it under the terms of the GNU General Public License as published by | ||
| 16 | * the Free Software Foundation; either version 2 of the License, or | ||
| 17 | * (at your option) any later version. | ||
| 18 | * | ||
| 19 | */ | ||
| 20 | #include <linux/delay.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/dmaengine.h> | ||
| 25 | #include <linux/of_dma.h> | ||
| 26 | #include <linux/platform_data/dma-rcar-audmapp.h> | ||
| 27 | #include <linux/platform_device.h> | ||
| 28 | #include <linux/shdma-base.h> | ||
| 29 | |||
| 30 | /* | ||
| 31 | * DMA register | ||
| 32 | */ | ||
| 33 | #define PDMASAR 0x00 | ||
| 34 | #define PDMADAR 0x04 | ||
| 35 | #define PDMACHCR 0x0c | ||
| 36 | |||
| 37 | /* PDMACHCR */ | ||
| 38 | #define PDMACHCR_DE (1 << 0) | ||
| 39 | |||
| 40 | #define AUDMAPP_MAX_CHANNELS 29 | ||
| 41 | |||
| 42 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | ||
| 43 | #define LOG2_DEFAULT_XFER_SIZE 2 | ||
| 44 | #define AUDMAPP_SLAVE_NUMBER 256 | ||
| 45 | #define AUDMAPP_LEN_MAX (16 * 1024 * 1024) | ||
| 46 | |||
| 47 | struct audmapp_chan { | ||
| 48 | struct shdma_chan shdma_chan; | ||
| 49 | void __iomem *base; | ||
| 50 | dma_addr_t slave_addr; | ||
| 51 | u32 chcr; | ||
| 52 | }; | ||
| 53 | |||
| 54 | struct audmapp_device { | ||
| 55 | struct shdma_dev shdma_dev; | ||
| 56 | struct audmapp_pdata *pdata; | ||
| 57 | struct device *dev; | ||
| 58 | void __iomem *chan_reg; | ||
| 59 | }; | ||
| 60 | |||
| 61 | struct audmapp_desc { | ||
| 62 | struct shdma_desc shdma_desc; | ||
| 63 | dma_addr_t src; | ||
| 64 | dma_addr_t dst; | ||
| 65 | }; | ||
| 66 | |||
| 67 | #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) | ||
| 68 | |||
| 69 | #define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) | ||
| 70 | #define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc) | ||
| 71 | #define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \ | ||
| 72 | struct audmapp_device, shdma_dev.dma_dev) | ||
| 73 | |||
| 74 | static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg) | ||
| 75 | { | ||
| 76 | struct audmapp_device *audev = to_dev(auchan); | ||
| 77 | struct device *dev = audev->dev; | ||
| 78 | |||
| 79 | dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data); | ||
| 80 | |||
| 81 | iowrite32(data, auchan->base + reg); | ||
| 82 | } | ||
| 83 | |||
| 84 | static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg) | ||
| 85 | { | ||
| 86 | return ioread32(auchan->base + reg); | ||
| 87 | } | ||
| 88 | |||
| 89 | static void audmapp_halt(struct shdma_chan *schan) | ||
| 90 | { | ||
| 91 | struct audmapp_chan *auchan = to_chan(schan); | ||
| 92 | int i; | ||
| 93 | |||
| 94 | audmapp_write(auchan, 0, PDMACHCR); | ||
| 95 | |||
| 96 | for (i = 0; i < 1024; i++) { | ||
| 97 | if (0 == audmapp_read(auchan, PDMACHCR)) | ||
| 98 | return; | ||
| 99 | udelay(1); | ||
| 100 | } | ||
| 101 | } | ||
| 102 | |||
| 103 | static void audmapp_start_xfer(struct shdma_chan *schan, | ||
| 104 | struct shdma_desc *sdesc) | ||
| 105 | { | ||
| 106 | struct audmapp_chan *auchan = to_chan(schan); | ||
| 107 | struct audmapp_device *audev = to_dev(auchan); | ||
| 108 | struct audmapp_desc *desc = to_desc(sdesc); | ||
| 109 | struct device *dev = audev->dev; | ||
| 110 | u32 chcr = auchan->chcr | PDMACHCR_DE; | ||
| 111 | |||
| 112 | dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n", | ||
| 113 | &desc->src, &desc->dst, chcr); | ||
| 114 | |||
| 115 | audmapp_write(auchan, desc->src, PDMASAR); | ||
| 116 | audmapp_write(auchan, desc->dst, PDMADAR); | ||
| 117 | audmapp_write(auchan, chcr, PDMACHCR); | ||
| 118 | } | ||
| 119 | |||
| 120 | static int audmapp_get_config(struct audmapp_chan *auchan, int slave_id, | ||
| 121 | u32 *chcr, dma_addr_t *dst) | ||
| 122 | { | ||
| 123 | struct audmapp_device *audev = to_dev(auchan); | ||
| 124 | struct audmapp_pdata *pdata = audev->pdata; | ||
| 125 | struct audmapp_slave_config *cfg; | ||
| 126 | int i; | ||
| 127 | |||
| 128 | *chcr = 0; | ||
| 129 | *dst = 0; | ||
| 130 | |||
| 131 | if (!pdata) { /* DT */ | ||
| 132 | *chcr = ((u32)slave_id) << 16; | ||
| 133 | auchan->shdma_chan.slave_id = (slave_id) >> 8; | ||
| 134 | return 0; | ||
| 135 | } | ||
| 136 | |||
| 137 | /* non-DT */ | ||
| 138 | |||
| 139 | if (slave_id >= AUDMAPP_SLAVE_NUMBER) | ||
| 140 | return -ENXIO; | ||
| 141 | |||
| 142 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | ||
| 143 | if (cfg->slave_id == slave_id) { | ||
| 144 | *chcr = cfg->chcr; | ||
| 145 | *dst = cfg->dst; | ||
| 146 | return 0; | ||
| 147 | } | ||
| 148 | |||
| 149 | return -ENXIO; | ||
| 150 | } | ||
| 151 | |||
| 152 | static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, | ||
| 153 | dma_addr_t slave_addr, bool try) | ||
| 154 | { | ||
| 155 | struct audmapp_chan *auchan = to_chan(schan); | ||
| 156 | u32 chcr; | ||
| 157 | dma_addr_t dst; | ||
| 158 | int ret; | ||
| 159 | |||
| 160 | ret = audmapp_get_config(auchan, slave_id, &chcr, &dst); | ||
| 161 | if (ret < 0) | ||
| 162 | return ret; | ||
| 163 | |||
| 164 | if (try) | ||
| 165 | return 0; | ||
| 166 | |||
| 167 | auchan->chcr = chcr; | ||
| 168 | auchan->slave_addr = slave_addr ? : dst; | ||
| 169 | |||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | static int audmapp_desc_setup(struct shdma_chan *schan, | ||
| 174 | struct shdma_desc *sdesc, | ||
| 175 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
| 176 | { | ||
| 177 | struct audmapp_desc *desc = to_desc(sdesc); | ||
| 178 | |||
| 179 | if (*len > (size_t)AUDMAPP_LEN_MAX) | ||
| 180 | *len = (size_t)AUDMAPP_LEN_MAX; | ||
| 181 | |||
| 182 | desc->src = src; | ||
| 183 | desc->dst = dst; | ||
| 184 | |||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | |||
| 188 | static void audmapp_setup_xfer(struct shdma_chan *schan, | ||
| 189 | int slave_id) | ||
| 190 | { | ||
| 191 | } | ||
| 192 | |||
| 193 | static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan) | ||
| 194 | { | ||
| 195 | struct audmapp_chan *auchan = to_chan(schan); | ||
| 196 | |||
| 197 | return auchan->slave_addr; | ||
| 198 | } | ||
| 199 | |||
| 200 | static bool audmapp_channel_busy(struct shdma_chan *schan) | ||
| 201 | { | ||
| 202 | struct audmapp_chan *auchan = to_chan(schan); | ||
| 203 | u32 chcr = audmapp_read(auchan, PDMACHCR); | ||
| 204 | |||
| 205 | return chcr & ~PDMACHCR_DE; | ||
| 206 | } | ||
| 207 | |||
| 208 | static bool audmapp_desc_completed(struct shdma_chan *schan, | ||
| 209 | struct shdma_desc *sdesc) | ||
| 210 | { | ||
| 211 | return true; | ||
| 212 | } | ||
| 213 | |||
| 214 | static struct shdma_desc *audmapp_embedded_desc(void *buf, int i) | ||
| 215 | { | ||
| 216 | return &((struct audmapp_desc *)buf)[i].shdma_desc; | ||
| 217 | } | ||
| 218 | |||
| 219 | static const struct shdma_ops audmapp_shdma_ops = { | ||
| 220 | .halt_channel = audmapp_halt, | ||
| 221 | .desc_setup = audmapp_desc_setup, | ||
| 222 | .set_slave = audmapp_set_slave, | ||
| 223 | .start_xfer = audmapp_start_xfer, | ||
| 224 | .embedded_desc = audmapp_embedded_desc, | ||
| 225 | .setup_xfer = audmapp_setup_xfer, | ||
| 226 | .slave_addr = audmapp_slave_addr, | ||
| 227 | .channel_busy = audmapp_channel_busy, | ||
| 228 | .desc_completed = audmapp_desc_completed, | ||
| 229 | }; | ||
| 230 | |||
| 231 | static int audmapp_chan_probe(struct platform_device *pdev, | ||
| 232 | struct audmapp_device *audev, int id) | ||
| 233 | { | ||
| 234 | struct shdma_dev *sdev = &audev->shdma_dev; | ||
| 235 | struct audmapp_chan *auchan; | ||
| 236 | struct shdma_chan *schan; | ||
| 237 | struct device *dev = audev->dev; | ||
| 238 | |||
| 239 | auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL); | ||
| 240 | if (!auchan) | ||
| 241 | return -ENOMEM; | ||
| 242 | |||
| 243 | schan = &auchan->shdma_chan; | ||
| 244 | schan->max_xfer_len = AUDMAPP_LEN_MAX; | ||
| 245 | |||
| 246 | shdma_chan_probe(sdev, schan, id); | ||
| 247 | |||
| 248 | auchan->base = audev->chan_reg + 0x20 + (0x10 * id); | ||
| 249 | dev_dbg(dev, "%02d : %p / %p", id, auchan->base, audev->chan_reg); | ||
| 250 | |||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | static void audmapp_chan_remove(struct audmapp_device *audev) | ||
| 255 | { | ||
| 256 | struct shdma_chan *schan; | ||
| 257 | int i; | ||
| 258 | |||
| 259 | shdma_for_each_chan(schan, &audev->shdma_dev, i) { | ||
| 260 | BUG_ON(!schan); | ||
| 261 | shdma_chan_remove(schan); | ||
| 262 | } | ||
| 263 | } | ||
| 264 | |||
| 265 | static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec, | ||
| 266 | struct of_dma *ofdma) | ||
| 267 | { | ||
| 268 | dma_cap_mask_t mask; | ||
| 269 | struct dma_chan *chan; | ||
| 270 | u32 chcr = dma_spec->args[0]; | ||
| 271 | |||
| 272 | if (dma_spec->args_count != 1) | ||
| 273 | return NULL; | ||
| 274 | |||
| 275 | dma_cap_zero(mask); | ||
| 276 | dma_cap_set(DMA_SLAVE, mask); | ||
| 277 | |||
| 278 | chan = dma_request_channel(mask, shdma_chan_filter, NULL); | ||
| 279 | if (chan) | ||
| 280 | to_shdma_chan(chan)->hw_req = chcr; | ||
| 281 | |||
| 282 | return chan; | ||
| 283 | } | ||
| 284 | |||
| 285 | static int audmapp_probe(struct platform_device *pdev) | ||
| 286 | { | ||
| 287 | struct audmapp_pdata *pdata = pdev->dev.platform_data; | ||
| 288 | struct device_node *np = pdev->dev.of_node; | ||
| 289 | struct audmapp_device *audev; | ||
| 290 | struct shdma_dev *sdev; | ||
| 291 | struct dma_device *dma_dev; | ||
| 292 | struct resource *res; | ||
| 293 | int err, i; | ||
| 294 | |||
| 295 | if (np) | ||
| 296 | of_dma_controller_register(np, audmapp_of_xlate, pdev); | ||
| 297 | else if (!pdata) | ||
| 298 | return -ENODEV; | ||
| 299 | |||
| 300 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 301 | |||
| 302 | audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL); | ||
| 303 | if (!audev) | ||
| 304 | return -ENOMEM; | ||
| 305 | |||
| 306 | audev->dev = &pdev->dev; | ||
| 307 | audev->pdata = pdata; | ||
| 308 | audev->chan_reg = devm_ioremap_resource(&pdev->dev, res); | ||
| 309 | if (IS_ERR(audev->chan_reg)) | ||
| 310 | return PTR_ERR(audev->chan_reg); | ||
| 311 | |||
| 312 | sdev = &audev->shdma_dev; | ||
| 313 | sdev->ops = &audmapp_shdma_ops; | ||
| 314 | sdev->desc_size = sizeof(struct audmapp_desc); | ||
| 315 | |||
| 316 | dma_dev = &sdev->dma_dev; | ||
| 317 | dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; | ||
| 318 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
| 319 | |||
| 320 | err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS); | ||
| 321 | if (err < 0) | ||
| 322 | return err; | ||
| 323 | |||
| 324 | platform_set_drvdata(pdev, audev); | ||
| 325 | |||
| 326 | /* Create DMA Channel */ | ||
| 327 | for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) { | ||
| 328 | err = audmapp_chan_probe(pdev, audev, i); | ||
| 329 | if (err) | ||
| 330 | goto chan_probe_err; | ||
| 331 | } | ||
| 332 | |||
| 333 | err = dma_async_device_register(dma_dev); | ||
| 334 | if (err < 0) | ||
| 335 | goto chan_probe_err; | ||
| 336 | |||
| 337 | return err; | ||
| 338 | |||
| 339 | chan_probe_err: | ||
| 340 | audmapp_chan_remove(audev); | ||
| 341 | shdma_cleanup(sdev); | ||
| 342 | |||
| 343 | return err; | ||
| 344 | } | ||
| 345 | |||
| 346 | static int audmapp_remove(struct platform_device *pdev) | ||
| 347 | { | ||
| 348 | struct audmapp_device *audev = platform_get_drvdata(pdev); | ||
| 349 | struct dma_device *dma_dev = &audev->shdma_dev.dma_dev; | ||
| 350 | |||
| 351 | dma_async_device_unregister(dma_dev); | ||
| 352 | |||
| 353 | audmapp_chan_remove(audev); | ||
| 354 | shdma_cleanup(&audev->shdma_dev); | ||
| 355 | |||
| 356 | return 0; | ||
| 357 | } | ||
| 358 | |||
| 359 | static const struct of_device_id audmapp_of_match[] = { | ||
| 360 | { .compatible = "renesas,rcar-audmapp", }, | ||
| 361 | {}, | ||
| 362 | }; | ||
| 363 | |||
| 364 | static struct platform_driver audmapp_driver = { | ||
| 365 | .probe = audmapp_probe, | ||
| 366 | .remove = audmapp_remove, | ||
| 367 | .driver = { | ||
| 368 | .name = "rcar-audmapp-engine", | ||
| 369 | .of_match_table = audmapp_of_match, | ||
| 370 | }, | ||
| 371 | }; | ||
| 372 | module_platform_driver(audmapp_driver); | ||
| 373 | |||
| 374 | MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); | ||
| 375 | MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver"); | ||
| 376 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 8ee383d339a5..10fcabad80f3 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c | |||
| @@ -171,8 +171,7 @@ static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) | |||
| 171 | return NULL; | 171 | return NULL; |
| 172 | } | 172 | } |
| 173 | 173 | ||
| 174 | static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, | 174 | static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr) |
| 175 | dma_addr_t slave_addr) | ||
| 176 | { | 175 | { |
| 177 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); | 176 | struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); |
| 178 | const struct shdma_ops *ops = sdev->ops; | 177 | const struct shdma_ops *ops = sdev->ops; |
| @@ -183,25 +182,23 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, | |||
| 183 | ret = ops->set_slave(schan, match, slave_addr, true); | 182 | ret = ops->set_slave(schan, match, slave_addr, true); |
| 184 | if (ret < 0) | 183 | if (ret < 0) |
| 185 | return ret; | 184 | return ret; |
| 186 | |||
| 187 | slave_id = schan->slave_id; | ||
| 188 | } else { | 185 | } else { |
| 189 | match = slave_id; | 186 | match = schan->real_slave_id; |
| 190 | } | 187 | } |
| 191 | 188 | ||
| 192 | if (slave_id < 0 || slave_id >= slave_num) | 189 | if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num) |
| 193 | return -EINVAL; | 190 | return -EINVAL; |
| 194 | 191 | ||
| 195 | if (test_and_set_bit(slave_id, shdma_slave_used)) | 192 | if (test_and_set_bit(schan->real_slave_id, shdma_slave_used)) |
| 196 | return -EBUSY; | 193 | return -EBUSY; |
| 197 | 194 | ||
| 198 | ret = ops->set_slave(schan, match, slave_addr, false); | 195 | ret = ops->set_slave(schan, match, slave_addr, false); |
| 199 | if (ret < 0) { | 196 | if (ret < 0) { |
| 200 | clear_bit(slave_id, shdma_slave_used); | 197 | clear_bit(schan->real_slave_id, shdma_slave_used); |
| 201 | return ret; | 198 | return ret; |
| 202 | } | 199 | } |
| 203 | 200 | ||
| 204 | schan->slave_id = slave_id; | 201 | schan->slave_id = schan->real_slave_id; |
| 205 | 202 | ||
| 206 | return 0; | 203 | return 0; |
| 207 | } | 204 | } |
| @@ -221,10 +218,12 @@ static int shdma_alloc_chan_resources(struct dma_chan *chan) | |||
| 221 | */ | 218 | */ |
| 222 | if (slave) { | 219 | if (slave) { |
| 223 | /* Legacy mode: .private is set in filter */ | 220 | /* Legacy mode: .private is set in filter */ |
| 224 | ret = shdma_setup_slave(schan, slave->slave_id, 0); | 221 | schan->real_slave_id = slave->slave_id; |
| 222 | ret = shdma_setup_slave(schan, 0); | ||
| 225 | if (ret < 0) | 223 | if (ret < 0) |
| 226 | goto esetslave; | 224 | goto esetslave; |
| 227 | } else { | 225 | } else { |
| 226 | /* Normal mode: real_slave_id was set by filter */ | ||
| 228 | schan->slave_id = -EINVAL; | 227 | schan->slave_id = -EINVAL; |
| 229 | } | 228 | } |
| 230 | 229 | ||
| @@ -258,11 +257,14 @@ esetslave: | |||
| 258 | 257 | ||
| 259 | /* | 258 | /* |
| 260 | * This is the standard shdma filter function to be used as a replacement to the | 259 | * This is the standard shdma filter function to be used as a replacement to the |
| 261 | * "old" method, using the .private pointer. If for some reason you allocate a | 260 | * "old" method, using the .private pointer. |
| 262 | * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter | 261 | * You always have to pass a valid slave ID as the argument; old drivers that |
| 262 | * pass ERR_PTR(-EINVAL) as a filter parameter and set the ID up in | ||
| 263 | * dma_slave_config need to be updated so we can remove the slave_id field. | ||
| 263 | * parameter. If this filter is used, the slave driver, after calling | 264 | * If this filter is used, the slave driver, after calling |
| 264 | * dma_request_channel(), will also have to call dmaengine_slave_config() with | 265 | * dma_request_channel(), will also have to call dmaengine_slave_config() with |
| 265 | * .slave_id, .direction, and either .src_addr or .dst_addr set. | 266 | * .direction, and either .src_addr or .dst_addr set. |
| 267 | * | ||
| 266 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE | 268 | * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE |
| 267 | * capability! If this becomes a requirement, hardware glue drivers, using this | 269 | * capability! If this becomes a requirement, hardware glue drivers, using this |
| 268 | * services would have to provide their own filters, which first would check | 270 | * services would have to provide their own filters, which first would check |
| @@ -276,7 +278,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg) | |||
| 276 | { | 278 | { |
| 277 | struct shdma_chan *schan; | 279 | struct shdma_chan *schan; |
| 278 | struct shdma_dev *sdev; | 280 | struct shdma_dev *sdev; |
| 279 | int match = (long)arg; | 281 | int slave_id = (long)arg; |
| 280 | int ret; | 282 | int ret; |
| 281 | 283 | ||
| 282 | /* Only support channels handled by this driver. */ | 284 | /* Only support channels handled by this driver. */ |
| @@ -284,19 +286,39 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg) | |||
| 284 | shdma_alloc_chan_resources) | 286 | shdma_alloc_chan_resources) |
| 285 | return false; | 287 | return false; |
| 286 | 288 | ||
| 287 | if (match < 0) | 289 | schan = to_shdma_chan(chan); |
| 290 | sdev = to_shdma_dev(chan->device); | ||
| 291 | |||
| 292 | /* | ||
| 293 | * For DT, the schan->slave_id field is generated by the | ||
| 294 | * set_slave function from the slave ID that is passed in | ||
| 295 | * from xlate. For the non-DT case, the slave ID is | ||
| 296 | * directly passed into the filter function by the driver | ||
| 297 | */ | ||
| 298 | if (schan->dev->of_node) { | ||
| 299 | ret = sdev->ops->set_slave(schan, slave_id, 0, true); | ||
| 300 | if (ret < 0) | ||
| 301 | return false; | ||
| 302 | |||
| 303 | schan->real_slave_id = schan->slave_id; | ||
| 304 | return true; | ||
| 305 | } | ||
| 306 | |||
| 307 | if (slave_id < 0) { | ||
| 288 | /* No slave requested - arbitrary channel */ | 308 | /* No slave requested - arbitrary channel */ |
| 309 | dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n"); | ||
| 289 | return true; | 310 | return true; |
| 311 | } | ||
| 290 | 312 | ||
| 291 | schan = to_shdma_chan(chan); | 313 | if (slave_id >= slave_num) |
| 292 | if (!schan->dev->of_node && match >= slave_num) | ||
| 293 | return false; | 314 | return false; |
| 294 | 315 | ||
| 295 | sdev = to_shdma_dev(schan->dma_chan.device); | 316 | ret = sdev->ops->set_slave(schan, slave_id, 0, true); |
| 296 | ret = sdev->ops->set_slave(schan, match, 0, true); | ||
| 297 | if (ret < 0) | 317 | if (ret < 0) |
| 298 | return false; | 318 | return false; |
| 299 | 319 | ||
| 320 | schan->real_slave_id = slave_id; | ||
| 321 | |||
| 300 | return true; | 322 | return true; |
| 301 | } | 323 | } |
| 302 | EXPORT_SYMBOL(shdma_chan_filter); | 324 | EXPORT_SYMBOL(shdma_chan_filter); |
| @@ -452,6 +474,8 @@ static void shdma_free_chan_resources(struct dma_chan *chan) | |||
| 452 | chan->private = NULL; | 474 | chan->private = NULL; |
| 453 | } | 475 | } |
| 454 | 476 | ||
| 477 | schan->real_slave_id = 0; | ||
| 478 | |||
| 455 | spin_lock_irq(&schan->chan_lock); | 479 | spin_lock_irq(&schan->chan_lock); |
| 456 | 480 | ||
| 457 | list_splice_init(&schan->ld_free, &list); | 481 | list_splice_init(&schan->ld_free, &list); |
| @@ -764,11 +788,20 @@ static int shdma_config(struct dma_chan *chan, | |||
| 764 | */ | 788 | */ |
| 765 | if (!config) | 789 | if (!config) |
| 766 | return -EINVAL; | 790 | return -EINVAL; |
| 791 | |||
| 792 | /* | ||
| 793 | * overriding the slave_id through dma_slave_config is deprecated, | ||
| 794 | * but possibly some out-of-tree drivers still do it. | ||
| 795 | */ | ||
| 796 | if (WARN_ON_ONCE(config->slave_id && | ||
| 797 | config->slave_id != schan->real_slave_id)) | ||
| 798 | schan->real_slave_id = config->slave_id; | ||
| 799 | |||
| 767 | /* | 800 | /* |
| 768 | * We could lock this, but you shouldn't be configuring the | 801 | * We could lock this, but you shouldn't be configuring the |
| 769 | * channel, while using it... | 802 | * channel, while using it... |
| 770 | */ | 803 | */ |
| 771 | return shdma_setup_slave(schan, config->slave_id, | 804 | return shdma_setup_slave(schan, |
| 772 | config->direction == DMA_DEV_TO_MEM ? | 805 | config->direction == DMA_DEV_TO_MEM ? |
| 773 | config->src_addr : config->dst_addr); | 806 | config->src_addr : config->dst_addr); |
| 774 | } | 807 | } |
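The comment rewrite above changes the contract of shdma_chan_filter(): the slave ID now travels through the filter argument (or DT xlate), and setting .slave_id in dma_slave_config is only a deprecated fallback that trips the WARN_ON_ONCE(). A hedged sketch of the non-DT request path; the slave ID value and FIFO address are placeholders:

    #include <linux/dmaengine.h>
    #include <linux/shdma-base.h>

    #define MY_SLAVE_ID 5                   /* placeholder, board-specific */

    static struct dma_chan *request_tx_chan(dma_addr_t dev_fifo)
    {
            struct dma_slave_config cfg = {
                    .direction = DMA_MEM_TO_DEV,
                    .dst_addr  = dev_fifo,
                    /* note: no .slave_id; the ID went in via the filter argument */
            };
            dma_cap_mask_t mask;
            struct dma_chan *chan;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            chan = dma_request_channel(mask, shdma_chan_filter,
                                       (void *)(uintptr_t)MY_SLAVE_ID);
            if (chan && dmaengine_slave_config(chan, &cfg) < 0) {
                    dma_release_channel(chan);
                    chan = NULL;
            }
            return chan;
    }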
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 9f1d4c7dbab8..11707df1a689 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c | |||
| @@ -443,7 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev) | |||
| 443 | return ret; | 443 | return ret; |
| 444 | } | 444 | } |
| 445 | 445 | ||
| 446 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM) | 446 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
| 447 | static irqreturn_t sh_dmae_err(int irq, void *data) | 447 | static irqreturn_t sh_dmae_err(int irq, void *data) |
| 448 | { | 448 | { |
| 449 | struct sh_dmae_device *shdev = data; | 449 | struct sh_dmae_device *shdev = data; |
| @@ -689,7 +689,7 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
| 689 | const struct sh_dmae_pdata *pdata; | 689 | const struct sh_dmae_pdata *pdata; |
| 690 | unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; | 690 | unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; |
| 691 | int chan_irq[SH_DMAE_MAX_CHANNELS]; | 691 | int chan_irq[SH_DMAE_MAX_CHANNELS]; |
| 692 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM) | 692 | #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) |
| 693 | unsigned long irqflags = 0; | 693 | unsigned long irqflags = 0; |
| 694 | int errirq; | 694 | int errirq; |
| 695 | #endif | 695 | #endif |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c new file mode 100644 index 000000000000..f705798ce3eb --- /dev/null +++ b/drivers/dma/sh/usb-dmac.c | |||
| @@ -0,0 +1,910 @@ | |||
| 1 | /* | ||
| 2 | * Renesas USB DMA Controller Driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2015 Renesas Electronics Corporation | ||
| 5 | * | ||
| 6 | * based on rcar-dmac.c | ||
| 7 | * Copyright (C) 2014 Renesas Electronics Inc. | ||
| 8 | * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> | ||
| 9 | * | ||
| 10 | * This is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of version 2 of the GNU General Public License as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/dma-mapping.h> | ||
| 17 | #include <linux/dmaengine.h> | ||
| 18 | #include <linux/interrupt.h> | ||
| 19 | #include <linux/list.h> | ||
| 20 | #include <linux/module.h> | ||
| 21 | #include <linux/of.h> | ||
| 22 | #include <linux/of_dma.h> | ||
| 23 | #include <linux/of_platform.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/pm_runtime.h> | ||
| 26 | #include <linux/slab.h> | ||
| 27 | #include <linux/spinlock.h> | ||
| 28 | |||
| 29 | #include "../dmaengine.h" | ||
| 30 | #include "../virt-dma.h" | ||
| 31 | |||
| 32 | /* | ||
| 33 | * struct usb_dmac_sg - Descriptor for a hardware transfer | ||
| 34 | * @mem_addr: memory address | ||
| 35 | * @size: transfer size in bytes | ||
| 36 | */ | ||
| 37 | struct usb_dmac_sg { | ||
| 38 | dma_addr_t mem_addr; | ||
| 39 | u32 size; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* | ||
| 43 | * struct usb_dmac_desc - USB DMA Transfer Descriptor | ||
| 44 | * @vd: base virtual channel DMA transaction descriptor | ||
| 45 | * @direction: direction of the DMA transfer | ||
| 46 | * @sg_allocated_len: length of allocated sg | ||
| 47 | * @sg_len: length of sg | ||
| 48 | * @sg_index: index of sg | ||
| 49 | * @residue: residue after the DMAC completed a transfer | ||
| 50 | * @node: node for desc_got and desc_freed | ||
| 51 | * @done_cookie: cookie after the DMAC completed a transfer | ||
| 52 | * @sg: information for the transfer | ||
| 53 | */ | ||
| 54 | struct usb_dmac_desc { | ||
| 55 | struct virt_dma_desc vd; | ||
| 56 | enum dma_transfer_direction direction; | ||
| 57 | unsigned int sg_allocated_len; | ||
| 58 | unsigned int sg_len; | ||
| 59 | unsigned int sg_index; | ||
| 60 | u32 residue; | ||
| 61 | struct list_head node; | ||
| 62 | dma_cookie_t done_cookie; | ||
| 63 | struct usb_dmac_sg sg[0]; | ||
| 64 | }; | ||
| 65 | |||
| 66 | #define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd) | ||
| 67 | |||
| 68 | /* | ||
| 69 | * struct usb_dmac_chan - USB DMA Controller Channel | ||
| 70 | * @vc: base virtual DMA channel object | ||
| 71 | * @iomem: channel I/O memory base | ||
| 72 | * @index: index of this channel in the controller | ||
| 73 | * @irq: irq number of this channel | ||
| 74 | * @desc: the current descriptor | ||
| 75 | * @descs_allocated: number of descriptors allocated | ||
| 76 | * @desc_got: got descriptors | ||
| 77 | * @desc_freed: freed descriptors after the DMAC completed a transfer | ||
| 78 | */ | ||
| 79 | struct usb_dmac_chan { | ||
| 80 | struct virt_dma_chan vc; | ||
| 81 | void __iomem *iomem; | ||
| 82 | unsigned int index; | ||
| 83 | int irq; | ||
| 84 | struct usb_dmac_desc *desc; | ||
| 85 | int descs_allocated; | ||
| 86 | struct list_head desc_got; | ||
| 87 | struct list_head desc_freed; | ||
| 88 | }; | ||
| 89 | |||
| 90 | #define to_usb_dmac_chan(c) container_of(c, struct usb_dmac_chan, vc.chan) | ||
| 91 | |||
| 92 | /* | ||
| 93 | * struct usb_dmac - USB DMA Controller | ||
| 94 | * @engine: base DMA engine object | ||
| 95 | * @dev: the hardware device | ||
| 96 | * @iomem: remapped I/O memory base | ||
| 97 | * @n_channels: number of available channels | ||
| 98 | * @channels: array of DMAC channels | ||
| 99 | */ | ||
| 100 | struct usb_dmac { | ||
| 101 | struct dma_device engine; | ||
| 102 | struct device *dev; | ||
| 103 | void __iomem *iomem; | ||
| 104 | |||
| 105 | unsigned int n_channels; | ||
| 106 | struct usb_dmac_chan *channels; | ||
| 107 | }; | ||
| 108 | |||
| 109 | #define to_usb_dmac(d) container_of(d, struct usb_dmac, engine) | ||
| 110 | |||
| 111 | /* ----------------------------------------------------------------------------- | ||
| 112 | * Registers | ||
| 113 | */ | ||
| 114 | |||
| 115 | #define USB_DMAC_CHAN_OFFSET(i) (0x20 + 0x20 * (i)) | ||
| 116 | |||
| 117 | #define USB_DMASWR 0x0008 | ||
| 118 | #define USB_DMASWR_SWR (1 << 0) | ||
| 119 | #define USB_DMAOR 0x0060 | ||
| 120 | #define USB_DMAOR_AE (1 << 2) | ||
| 121 | #define USB_DMAOR_DME (1 << 0) | ||
| 122 | |||
| 123 | #define USB_DMASAR 0x0000 | ||
| 124 | #define USB_DMADAR 0x0004 | ||
| 125 | #define USB_DMATCR 0x0008 | ||
| 126 | #define USB_DMATCR_MASK 0x00ffffff | ||
| 127 | #define USB_DMACHCR 0x0014 | ||
| 128 | #define USB_DMACHCR_FTE (1 << 24) | ||
| 129 | #define USB_DMACHCR_NULLE (1 << 16) | ||
| 130 | #define USB_DMACHCR_NULL (1 << 12) | ||
| 131 | #define USB_DMACHCR_TS_8B ((0 << 7) | (0 << 6)) | ||
| 132 | #define USB_DMACHCR_TS_16B ((0 << 7) | (1 << 6)) | ||
| 133 | #define USB_DMACHCR_TS_32B ((1 << 7) | (0 << 6)) | ||
| 134 | #define USB_DMACHCR_IE (1 << 5) | ||
| 135 | #define USB_DMACHCR_SP (1 << 2) | ||
| 136 | #define USB_DMACHCR_TE (1 << 1) | ||
| 137 | #define USB_DMACHCR_DE (1 << 0) | ||
| 138 | #define USB_DMATEND 0x0018 | ||
| 139 | |||
| 140 | /* Hardcode the xfer_shift to 5 (32 bytes) */ | ||
| 141 | #define USB_DMAC_XFER_SHIFT 5 | ||
| 142 | #define USB_DMAC_XFER_SIZE (1 << USB_DMAC_XFER_SHIFT) | ||
| 143 | #define USB_DMAC_CHCR_TS USB_DMACHCR_TS_32B | ||
| 144 | #define USB_DMAC_SLAVE_BUSWIDTH DMA_SLAVE_BUSWIDTH_32_BYTES | ||
| 145 | |||
| 146 | /* for descriptors */ | ||
| 147 | #define USB_DMAC_INITIAL_NR_DESC 16 | ||
| 148 | #define USB_DMAC_INITIAL_NR_SG 8 | ||
| 149 | |||
| 150 | /* ----------------------------------------------------------------------------- | ||
| 151 | * Device access | ||
| 152 | */ | ||
| 153 | |||
| 154 | static void usb_dmac_write(struct usb_dmac *dmac, u32 reg, u32 data) | ||
| 155 | { | ||
| 156 | writel(data, dmac->iomem + reg); | ||
| 157 | } | ||
| 158 | |||
| 159 | static u32 usb_dmac_read(struct usb_dmac *dmac, u32 reg) | ||
| 160 | { | ||
| 161 | return readl(dmac->iomem + reg); | ||
| 162 | } | ||
| 163 | |||
| 164 | static u32 usb_dmac_chan_read(struct usb_dmac_chan *chan, u32 reg) | ||
| 165 | { | ||
| 166 | return readl(chan->iomem + reg); | ||
| 167 | } | ||
| 168 | |||
| 169 | static void usb_dmac_chan_write(struct usb_dmac_chan *chan, u32 reg, u32 data) | ||
| 170 | { | ||
| 171 | writel(data, chan->iomem + reg); | ||
| 172 | } | ||
| 173 | |||
| 174 | /* ----------------------------------------------------------------------------- | ||
| 175 | * Initialization and configuration | ||
| 176 | */ | ||
| 177 | |||
| 178 | static bool usb_dmac_chan_is_busy(struct usb_dmac_chan *chan) | ||
| 179 | { | ||
| 180 | u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR); | ||
| 181 | |||
| 182 | return (chcr & (USB_DMACHCR_DE | USB_DMACHCR_TE)) == USB_DMACHCR_DE; | ||
| 183 | } | ||
| 184 | |||
| 185 | static u32 usb_dmac_calc_tend(u32 size) | ||
| 186 | { | ||
| 187 | /* | ||
| 188 | * Please refer to the Figure "Example of Final Transaction Valid | ||
| 189 | * Data Transfer Enable (EDTEN) Setting" in the data sheet. | ||
| 190 | */ | ||
| 191 | return 0xffffffff << (32 - (size % USB_DMAC_XFER_SIZE ? : | ||
| 192 | USB_DMAC_XFER_SIZE)); | ||
| 193 | } | ||
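/*
 * Illustrative note, not part of the original file. Two worked examples
 * of the mask computed above, assuming USB_DMAC_XFER_SIZE == 32:
 *   size = 36: 36 % 32 = 4, so mask = 0xffffffff << 28 = 0xf0000000
 *              (4 enable bits set for the final, partial 32-byte unit)
 *   size = 64: 64 % 32 = 0, the ?: picks 32, so mask = 0xffffffff
 *              (the final unit is entirely valid data)
 */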
| 194 | |||
| 195 | /* The caller must hold vc.lock when calling this function */ | ||
| 196 | static void usb_dmac_chan_start_sg(struct usb_dmac_chan *chan, | ||
| 197 | unsigned int index) | ||
| 198 | { | ||
| 199 | struct usb_dmac_desc *desc = chan->desc; | ||
| 200 | struct usb_dmac_sg *sg = desc->sg + index; | ||
| 201 | dma_addr_t src_addr = 0, dst_addr = 0; | ||
| 202 | |||
| 203 | WARN_ON_ONCE(usb_dmac_chan_is_busy(chan)); | ||
| 204 | |||
| 205 | if (desc->direction == DMA_DEV_TO_MEM) | ||
| 206 | dst_addr = sg->mem_addr; | ||
| 207 | else | ||
| 208 | src_addr = sg->mem_addr; | ||
| 209 | |||
| 210 | dev_dbg(chan->vc.chan.device->dev, | ||
| 211 | "chan%u: queue sg %p: %u@%pad -> %pad\n", | ||
| 212 | chan->index, sg, sg->size, &src_addr, &dst_addr); | ||
| 213 | |||
| 214 | usb_dmac_chan_write(chan, USB_DMASAR, src_addr & 0xffffffff); | ||
| 215 | usb_dmac_chan_write(chan, USB_DMADAR, dst_addr & 0xffffffff); | ||
| 216 | usb_dmac_chan_write(chan, USB_DMATCR, | ||
| 217 | DIV_ROUND_UP(sg->size, USB_DMAC_XFER_SIZE)); | ||
| 218 | usb_dmac_chan_write(chan, USB_DMATEND, usb_dmac_calc_tend(sg->size)); | ||
| 219 | |||
| 220 | usb_dmac_chan_write(chan, USB_DMACHCR, USB_DMAC_CHCR_TS | | ||
| 221 | USB_DMACHCR_NULLE | USB_DMACHCR_IE | USB_DMACHCR_DE); | ||
| 222 | } | ||
| 223 | |||
| 224 | /* The caller must hold vc.lock when calling this function */ | ||
| 225 | static void usb_dmac_chan_start_desc(struct usb_dmac_chan *chan) | ||
| 226 | { | ||
| 227 | struct virt_dma_desc *vd; | ||
| 228 | |||
| 229 | vd = vchan_next_desc(&chan->vc); | ||
| 230 | if (!vd) { | ||
| 231 | chan->desc = NULL; | ||
| 232 | return; | ||
| 233 | } | ||
| 234 | |||
| 235 | /* | ||
| 236 | * Remove this request from vc->desc_issued. Otherwise, this driver | ||
| 237 | * would get the same, stale descriptor back from vchan_next_desc() | ||
| 238 | * after a transfer completes. | ||
| 239 | */ | ||
| 240 | list_del(&vd->node); | ||
| 241 | |||
| 242 | chan->desc = to_usb_dmac_desc(vd); | ||
| 243 | chan->desc->sg_index = 0; | ||
| 244 | usb_dmac_chan_start_sg(chan, 0); | ||
| 245 | } | ||
| 246 | |||
| 247 | static int usb_dmac_init(struct usb_dmac *dmac) | ||
| 248 | { | ||
| 249 | u16 dmaor; | ||
| 250 | |||
| 251 | /* Clear all channels and enable the DMAC globally. */ | ||
| 252 | usb_dmac_write(dmac, USB_DMAOR, USB_DMAOR_DME); | ||
| 253 | |||
| 254 | dmaor = usb_dmac_read(dmac, USB_DMAOR); | ||
| 255 | if ((dmaor & (USB_DMAOR_AE | USB_DMAOR_DME)) != USB_DMAOR_DME) { | ||
| 256 | dev_warn(dmac->dev, "DMAOR initialization failed.\n"); | ||
| 257 | return -EIO; | ||
| 258 | } | ||
| 259 | |||
| 260 | return 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | /* ----------------------------------------------------------------------------- | ||
| 264 | * Descriptors allocation and free | ||
| 265 | */ | ||
| 266 | static int usb_dmac_desc_alloc(struct usb_dmac_chan *chan, unsigned int sg_len, | ||
| 267 | gfp_t gfp) | ||
| 268 | { | ||
| 269 | struct usb_dmac_desc *desc; | ||
| 270 | unsigned long flags; | ||
| 271 | |||
| 272 | desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp); | ||
| 273 | if (!desc) | ||
| 274 | return -ENOMEM; | ||
| 275 | |||
| 276 | desc->sg_allocated_len = sg_len; | ||
| 277 | INIT_LIST_HEAD(&desc->node); | ||
| 278 | |||
| 279 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 280 | list_add_tail(&desc->node, &chan->desc_freed); | ||
| 281 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 282 | |||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | static void usb_dmac_desc_free(struct usb_dmac_chan *chan) | ||
| 287 | { | ||
| 288 | struct usb_dmac_desc *desc, *_desc; | ||
| 289 | LIST_HEAD(list); | ||
| 290 | |||
| 291 | list_splice_init(&chan->desc_freed, &list); | ||
| 292 | list_splice_init(&chan->desc_got, &list); | ||
| 293 | |||
| 294 | list_for_each_entry_safe(desc, _desc, &list, node) { | ||
| 295 | list_del(&desc->node); | ||
| 296 | kfree(desc); | ||
| 297 | } | ||
| 298 | chan->descs_allocated = 0; | ||
| 299 | } | ||
| 300 | |||
| 301 | static struct usb_dmac_desc *usb_dmac_desc_get(struct usb_dmac_chan *chan, | ||
| 302 | unsigned int sg_len, gfp_t gfp) | ||
| 303 | { | ||
| 304 | struct usb_dmac_desc *desc = NULL; | ||
| 305 | unsigned long flags; | ||
| 306 | |||
| 307 | /* Get a freed descriptor */ | ||
| 308 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 309 | list_for_each_entry(desc, &chan->desc_freed, node) { | ||
| 310 | if (sg_len <= desc->sg_allocated_len) { | ||
| 311 | list_move_tail(&desc->node, &chan->desc_got); | ||
| 312 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 313 | return desc; | ||
| 314 | } | ||
| 315 | } | ||
| 316 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 317 | |||
| 318 | /* Allocate a new descriptor */ | ||
| 319 | if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) { | ||
| 320 | /* If allocated the desc, it was added to tail of the list */ | ||
| 321 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 322 | desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc, | ||
| 323 | node); | ||
| 324 | list_move_tail(&desc->node, &chan->desc_got); | ||
| 325 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 326 | return desc; | ||
| 327 | } | ||
| 328 | |||
| 329 | return NULL; | ||
| 330 | } | ||
| 331 | |||
| 332 | static void usb_dmac_desc_put(struct usb_dmac_chan *chan, | ||
| 333 | struct usb_dmac_desc *desc) | ||
| 334 | { | ||
| 335 | unsigned long flags; | ||
| 336 | |||
| 337 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
| 338 | list_move_tail(&desc->node, &chan->desc_freed); | ||
| 339 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
| 340 | } | ||
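/*
 * Illustrative note, not part of the original file. Descriptors cycle
 * between desc_freed and desc_got instead of being freed outright:
 * usb_dmac_desc_get() first scans desc_freed for one whose
 * sg_allocated_len already fits the request and only kzalloc()s a new
 * one on a miss (with GFP_NOWAIT on the prep path), while
 * usb_dmac_desc_put() simply moves it back to desc_freed. This keeps
 * the hot prep path allocation-free in the common case.
 */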
| 341 | |||
| 342 | /* ----------------------------------------------------------------------------- | ||
| 343 | * Stop and reset | ||
| 344 | */ | ||
| 345 | |||
| 346 | static void usb_dmac_soft_reset(struct usb_dmac_chan *uchan) | ||
| 347 | { | ||
| 348 | struct dma_chan *chan = &uchan->vc.chan; | ||
| 349 | struct usb_dmac *dmac = to_usb_dmac(chan->device); | ||
| 350 | int i; | ||
| 351 | |||
| 352 | /* Don't issue a soft reset if any one of the channels is busy */ | ||
| 353 | for (i = 0; i < dmac->n_channels; ++i) { | ||
| 354 | if (usb_dmac_chan_is_busy(&dmac->channels[i])) | ||
| 355 | return; | ||
| 356 | } | ||
| 357 | |||
| 358 | usb_dmac_write(dmac, USB_DMAOR, 0); | ||
| 359 | usb_dmac_write(dmac, USB_DMASWR, USB_DMASWR_SWR); | ||
| 360 | udelay(100); | ||
| 361 | usb_dmac_write(dmac, USB_DMASWR, 0); | ||
| 362 | usb_dmac_write(dmac, USB_DMAOR, 1); | ||
| 363 | } | ||
| 364 | |||
| 365 | static void usb_dmac_chan_halt(struct usb_dmac_chan *chan) | ||
| 366 | { | ||
| 367 | u32 chcr = usb_dmac_chan_read(chan, USB_DMACHCR); | ||
| 368 | |||
| 369 | chcr &= ~(USB_DMACHCR_IE | USB_DMACHCR_TE | USB_DMACHCR_DE); | ||
| 370 | usb_dmac_chan_write(chan, USB_DMACHCR, chcr); | ||
| 371 | |||
| 372 | usb_dmac_soft_reset(chan); | ||
| 373 | } | ||
| 374 | |||
| 375 | static void usb_dmac_stop(struct usb_dmac *dmac) | ||
| 376 | { | ||
| 377 | usb_dmac_write(dmac, USB_DMAOR, 0); | ||
| 378 | } | ||
| 379 | |||
| 380 | /* ----------------------------------------------------------------------------- | ||
| 381 | * DMA engine operations | ||
| 382 | */ | ||
| 383 | |||
| 384 | static int usb_dmac_alloc_chan_resources(struct dma_chan *chan) | ||
| 385 | { | ||
| 386 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 387 | int ret; | ||
| 388 | |||
| 389 | while (uchan->descs_allocated < USB_DMAC_INITIAL_NR_DESC) { | ||
| 390 | ret = usb_dmac_desc_alloc(uchan, USB_DMAC_INITIAL_NR_SG, | ||
| 391 | GFP_KERNEL); | ||
| 392 | if (ret < 0) { | ||
| 393 | usb_dmac_desc_free(uchan); | ||
| 394 | return ret; | ||
| 395 | } | ||
| 396 | uchan->descs_allocated++; | ||
| 397 | } | ||
| 398 | |||
| 399 | return pm_runtime_get_sync(chan->device->dev); | ||
| 400 | } | ||
| 401 | |||
| 402 | static void usb_dmac_free_chan_resources(struct dma_chan *chan) | ||
| 403 | { | ||
| 404 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 405 | unsigned long flags; | ||
| 406 | |||
| 407 | /* Protect against ISR */ | ||
| 408 | spin_lock_irqsave(&uchan->vc.lock, flags); | ||
| 409 | usb_dmac_chan_halt(uchan); | ||
| 410 | spin_unlock_irqrestore(&uchan->vc.lock, flags); | ||
| 411 | |||
| 412 | usb_dmac_desc_free(uchan); | ||
| 413 | vchan_free_chan_resources(&uchan->vc); | ||
| 414 | |||
| 415 | pm_runtime_put(chan->device->dev); | ||
| 416 | } | ||
| 417 | |||
| 418 | static struct dma_async_tx_descriptor * | ||
| 419 | usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | ||
| 420 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
| 421 | unsigned long dma_flags, void *context) | ||
| 422 | { | ||
| 423 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 424 | struct usb_dmac_desc *desc; | ||
| 425 | struct scatterlist *sg; | ||
| 426 | int i; | ||
| 427 | |||
| 428 | if (!sg_len) { | ||
| 429 | dev_warn(chan->device->dev, | ||
| 430 | "%s: bad parameter: len=%d\n", __func__, sg_len); | ||
| 431 | return NULL; | ||
| 432 | } | ||
| 433 | |||
| 434 | desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT); | ||
| 435 | if (!desc) | ||
| 436 | return NULL; | ||
| 437 | |||
| 438 | desc->direction = dir; | ||
| 439 | desc->sg_len = sg_len; | ||
| 440 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 441 | desc->sg[i].mem_addr = sg_dma_address(sg); | ||
| 442 | desc->sg[i].size = sg_dma_len(sg); | ||
| 443 | } | ||
| 444 | |||
| 445 | return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags); | ||
| 446 | } | ||
| 447 | |||
| 448 | static int usb_dmac_chan_terminate_all(struct dma_chan *chan) | ||
| 449 | { | ||
| 450 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 451 | struct usb_dmac_desc *desc; | ||
| 452 | unsigned long flags; | ||
| 453 | LIST_HEAD(head); | ||
| 454 | LIST_HEAD(list); | ||
| 455 | |||
| 456 | spin_lock_irqsave(&uchan->vc.lock, flags); | ||
| 457 | usb_dmac_chan_halt(uchan); | ||
| 458 | vchan_get_all_descriptors(&uchan->vc, &head); | ||
| 459 | if (uchan->desc) | ||
| 460 | uchan->desc = NULL; | ||
| 461 | list_splice_init(&uchan->desc_got, &list); | ||
| 462 | list_for_each_entry(desc, &list, node) | ||
| 463 | list_move_tail(&desc->node, &uchan->desc_freed); | ||
| 464 | spin_unlock_irqrestore(&uchan->vc.lock, flags); | ||
| 465 | vchan_dma_desc_free_list(&uchan->vc, &head); | ||
| 466 | |||
| 467 | return 0; | ||
| 468 | } | ||
| 469 | |||
| 470 | static unsigned int usb_dmac_get_current_residue(struct usb_dmac_chan *chan, | ||
| 471 | struct usb_dmac_desc *desc, | ||
| 472 | int sg_index) | ||
| 473 | { | ||
| 474 | struct usb_dmac_sg *sg = desc->sg + sg_index; | ||
| 475 | u32 mem_addr = sg->mem_addr & 0xffffffff; | ||
| 476 | unsigned int residue = sg->size; | ||
| 477 | |||
| 478 | /* | ||
| 479 | * We cannot use USB_DMATCR to calculate the residue because the | ||
| 480 | * value USB_DMATCR holds is not suitable for that calculation. | ||
| 481 | */ | ||
| 482 | if (desc->direction == DMA_DEV_TO_MEM) | ||
| 483 | residue -= usb_dmac_chan_read(chan, USB_DMADAR) - mem_addr; | ||
| 484 | else | ||
| 485 | residue -= usb_dmac_chan_read(chan, USB_DMASAR) - mem_addr; | ||
| 486 | |||
| 487 | return residue; | ||
| 488 | } | ||
| 489 | |||
| 490 | static u32 usb_dmac_chan_get_residue_if_complete(struct usb_dmac_chan *chan, | ||
| 491 | dma_cookie_t cookie) | ||
| 492 | { | ||
| 493 | struct usb_dmac_desc *desc; | ||
| 494 | u32 residue = 0; | ||
| 495 | |||
| 496 | list_for_each_entry_reverse(desc, &chan->desc_freed, node) { | ||
| 497 | if (desc->done_cookie == cookie) { | ||
| 498 | residue = desc->residue; | ||
| 499 | break; | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | return residue; | ||
| 504 | } | ||
| 505 | |||
| 506 | static u32 usb_dmac_chan_get_residue(struct usb_dmac_chan *chan, | ||
| 507 | dma_cookie_t cookie) | ||
| 508 | { | ||
| 509 | u32 residue = 0; | ||
| 510 | struct virt_dma_desc *vd; | ||
| 511 | struct usb_dmac_desc *desc = chan->desc; | ||
| 512 | int i; | ||
| 513 | |||
| 514 | if (!desc) { | ||
| 515 | vd = vchan_find_desc(&chan->vc, cookie); | ||
| 516 | if (!vd) | ||
| 517 | return 0; | ||
| 518 | desc = to_usb_dmac_desc(vd); | ||
| 519 | } | ||
| 520 | |||
| 521 | /* Compute the size of all usb_dmac_sg still to be transferred */ | ||
| 522 | for (i = desc->sg_index + 1; i < desc->sg_len; i++) | ||
| 523 | residue += desc->sg[i].size; | ||
| 524 | |||
| 525 | /* Add the residue for the current sg */ | ||
| 526 | residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index); | ||
| 527 | |||
| 528 | return residue; | ||
| 529 | } | ||
| 530 | |||
| 531 | static enum dma_status usb_dmac_tx_status(struct dma_chan *chan, | ||
| 532 | dma_cookie_t cookie, | ||
| 533 | struct dma_tx_state *txstate) | ||
| 534 | { | ||
| 535 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 536 | enum dma_status status; | ||
| 537 | unsigned int residue = 0; | ||
| 538 | unsigned long flags; | ||
| 539 | |||
| 540 | status = dma_cookie_status(chan, cookie, txstate); | ||
| 541 | /* a client driver will get residue after DMA_COMPLETE */ | ||
| 542 | if (!txstate) | ||
| 543 | return status; | ||
| 544 | |||
| 545 | spin_lock_irqsave(&uchan->vc.lock, flags); | ||
| 546 | if (status == DMA_COMPLETE) | ||
| 547 | residue = usb_dmac_chan_get_residue_if_complete(uchan, cookie); | ||
| 548 | else | ||
| 549 | residue = usb_dmac_chan_get_residue(uchan, cookie); | ||
| 550 | spin_unlock_irqrestore(&uchan->vc.lock, flags); | ||
| 551 | |||
| 552 | dma_set_residue(txstate, residue); | ||
| 553 | |||
| 554 | return status; | ||
| 555 | } | ||
| 556 | |||
| 557 | static void usb_dmac_issue_pending(struct dma_chan *chan) | ||
| 558 | { | ||
| 559 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 560 | unsigned long flags; | ||
| 561 | |||
| 562 | spin_lock_irqsave(&uchan->vc.lock, flags); | ||
| 563 | if (vchan_issue_pending(&uchan->vc) && !uchan->desc) | ||
| 564 | usb_dmac_chan_start_desc(uchan); | ||
| 565 | spin_unlock_irqrestore(&uchan->vc.lock, flags); | ||
| 566 | } | ||
| 567 | |||
| 568 | static void usb_dmac_virt_desc_free(struct virt_dma_desc *vd) | ||
| 569 | { | ||
| 570 | struct usb_dmac_desc *desc = to_usb_dmac_desc(vd); | ||
| 571 | struct usb_dmac_chan *chan = to_usb_dmac_chan(vd->tx.chan); | ||
| 572 | |||
| 573 | usb_dmac_desc_put(chan, desc); | ||
| 574 | } | ||
| 575 | |||
| 576 | /* ----------------------------------------------------------------------------- | ||
| 577 | * IRQ handling | ||
| 578 | */ | ||
| 579 | |||
| 580 | static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan) | ||
| 581 | { | ||
| 582 | struct usb_dmac_desc *desc = chan->desc; | ||
| 583 | |||
| 584 | BUG_ON(!desc); | ||
| 585 | |||
| 586 | if (++desc->sg_index < desc->sg_len) { | ||
| 587 | usb_dmac_chan_start_sg(chan, desc->sg_index); | ||
| 588 | } else { | ||
| 589 | desc->residue = usb_dmac_get_current_residue(chan, desc, | ||
| 590 | desc->sg_index - 1); | ||
| 591 | desc->done_cookie = desc->vd.tx.cookie; | ||
| 592 | vchan_cookie_complete(&desc->vd); | ||
| 593 | |||
| 594 | /* Start the next transfer if another descriptor is queued */ | ||
| 595 | usb_dmac_chan_start_desc(chan); | ||
| 596 | } | ||
| 597 | } | ||
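/*
 * Illustrative note, not part of the original file. The residue and
 * cookie are stashed in the descriptor before vchan_cookie_complete()
 * precisely so that usb_dmac_chan_get_residue_if_complete() can still
 * report the residue once dma_cookie_status() returns DMA_COMPLETE;
 * see usb_dmac_tx_status() above.
 */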
| 598 | |||
| 599 | static irqreturn_t usb_dmac_isr_channel(int irq, void *dev) | ||
| 600 | { | ||
| 601 | struct usb_dmac_chan *chan = dev; | ||
| 602 | irqreturn_t ret = IRQ_NONE; | ||
| 603 | u32 mask = USB_DMACHCR_TE; | ||
| 604 | u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP; | ||
| 605 | u32 chcr; | ||
| 606 | |||
| 607 | spin_lock(&chan->vc.lock); | ||
| 608 | |||
| 609 | chcr = usb_dmac_chan_read(chan, USB_DMACHCR); | ||
| 610 | if (chcr & check_bits) | ||
| 611 | mask |= USB_DMACHCR_DE | check_bits; | ||
| 612 | if (chcr & USB_DMACHCR_NULL) { | ||
| 613 | /* A TE interrupt will be raised after we set FTE */ | ||
| 614 | mask |= USB_DMACHCR_NULL; | ||
| 615 | chcr |= USB_DMACHCR_FTE; | ||
| 616 | ret |= IRQ_HANDLED; | ||
| 617 | } | ||
| 618 | usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask); | ||
| 619 | |||
| 620 | if (chcr & check_bits) { | ||
| 621 | usb_dmac_isr_transfer_end(chan); | ||
| 622 | ret |= IRQ_HANDLED; | ||
| 623 | } | ||
| 624 | |||
| 625 | spin_unlock(&chan->vc.lock); | ||
| 626 | |||
| 627 | return ret; | ||
| 628 | } | ||
| 629 | |||
| 630 | /* ----------------------------------------------------------------------------- | ||
| 631 | * OF xlate and channel filter | ||
| 632 | */ | ||
| 633 | |||
| 634 | static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg) | ||
| 635 | { | ||
| 636 | struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan); | ||
| 637 | struct of_phandle_args *dma_spec = arg; | ||
| 638 | |||
| 639 | if (dma_spec->np != chan->device->dev->of_node) | ||
| 640 | return false; | ||
| 641 | |||
| 642 | /* Each USB-DMAC channel is tied to a fixed USB controller FIFO */ | ||
| 643 | if (uchan->index != dma_spec->args[0]) | ||
| 644 | return false; | ||
| 645 | |||
| 646 | return true; | ||
| 647 | } | ||
| 648 | |||
| 649 | static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, | ||
| 650 | struct of_dma *ofdma) | ||
| 651 | { | ||
| 652 | struct usb_dmac_chan *uchan; | ||
| 653 | struct dma_chan *chan; | ||
| 654 | dma_cap_mask_t mask; | ||
| 655 | |||
| 656 | if (dma_spec->args_count != 1) | ||
| 657 | return NULL; | ||
| 658 | |||
| 659 | /* Only slave DMA channels can be allocated via DT */ | ||
| 660 | dma_cap_zero(mask); | ||
| 661 | dma_cap_set(DMA_SLAVE, mask); | ||
| 662 | |||
| 663 | chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec); | ||
| 664 | if (!chan) | ||
| 665 | return NULL; | ||
| 666 | |||
| 667 | uchan = to_usb_dmac_chan(chan); | ||
| 668 | |||
| 669 | return chan; | ||
| 670 | } | ||
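Since the dma-spec carries a single cell (the channel index), a consumer ties itself to a specific channel in its DT node and requests it by name. A hedged sketch of the consumer side (the node contents and device pointer are assumed):

	/* Assuming a consumer node along the lines of:
	 *	dmas = <&usb_dmac0 0>, <&usb_dmac0 1>;
	 *	dma-names = "ch0", "ch1";
	 */
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "ch0"); /* resolves via usb_dmac_of_xlate() */
	if (!chan)
		dev_warn(dev, "DMA channel unavailable, falling back to PIO\n");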
| 671 | |||
| 672 | /* ----------------------------------------------------------------------------- | ||
| 673 | * Power management | ||
| 674 | */ | ||
| 675 | |||
| 676 | static int usb_dmac_runtime_suspend(struct device *dev) | ||
| 677 | { | ||
| 678 | struct usb_dmac *dmac = dev_get_drvdata(dev); | ||
| 679 | int i; | ||
| 680 | |||
| 681 | for (i = 0; i < dmac->n_channels; ++i) | ||
| 682 | usb_dmac_chan_halt(&dmac->channels[i]); | ||
| 683 | |||
| 684 | return 0; | ||
| 685 | } | ||
| 686 | |||
| 687 | static int usb_dmac_runtime_resume(struct device *dev) | ||
| 688 | { | ||
| 689 | struct usb_dmac *dmac = dev_get_drvdata(dev); | ||
| 690 | |||
| 691 | return usb_dmac_init(dmac); | ||
| 692 | } | ||
| 693 | |||
| 694 | static const struct dev_pm_ops usb_dmac_pm = { | ||
| 695 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, | ||
| 696 | NULL) | ||
| 697 | }; | ||
| 698 | |||
| 699 | /* ----------------------------------------------------------------------------- | ||
| 700 | * Probe and remove | ||
| 701 | */ | ||
| 702 | |||
| 703 | static int usb_dmac_chan_probe(struct usb_dmac *dmac, | ||
| 704 | struct usb_dmac_chan *uchan, | ||
| 705 | unsigned int index) | ||
| 706 | { | ||
| 707 | struct platform_device *pdev = to_platform_device(dmac->dev); | ||
| 708 | char pdev_irqname[5]; | ||
| 709 | char *irqname; | ||
| 710 | int ret; | ||
| 711 | |||
| 712 | uchan->index = index; | ||
| 713 | uchan->iomem = dmac->iomem + USB_DMAC_CHAN_OFFSET(index); | ||
| 714 | |||
| 715 | /* Request the channel interrupt. */ | ||
| 716 | sprintf(pdev_irqname, "ch%u", index); | ||
| 717 | uchan->irq = platform_get_irq_byname(pdev, pdev_irqname); | ||
| 718 | if (uchan->irq < 0) { | ||
| 719 | dev_err(dmac->dev, "no IRQ specified for channel %u\n", index); | ||
| 720 | return -ENODEV; | ||
| 721 | } | ||
| 722 | |||
| 723 | irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", | ||
| 724 | dev_name(dmac->dev), index); | ||
| 725 | if (!irqname) | ||
| 726 | return -ENOMEM; | ||
| 727 | |||
| 728 | ret = devm_request_irq(dmac->dev, uchan->irq, usb_dmac_isr_channel, | ||
| 729 | IRQF_SHARED, irqname, uchan); | ||
| 730 | if (ret) { | ||
| 731 | dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", | ||
| 732 | uchan->irq, ret); | ||
| 733 | return ret; | ||
| 734 | } | ||
| 735 | |||
| 736 | uchan->vc.desc_free = usb_dmac_virt_desc_free; | ||
| 737 | vchan_init(&uchan->vc, &dmac->engine); | ||
| 738 | INIT_LIST_HEAD(&uchan->desc_freed); | ||
| 739 | INIT_LIST_HEAD(&uchan->desc_got); | ||
| 740 | |||
| 741 | return 0; | ||
| 742 | } | ||
| 743 | |||
| 744 | static int usb_dmac_parse_of(struct device *dev, struct usb_dmac *dmac) | ||
| 745 | { | ||
| 746 | struct device_node *np = dev->of_node; | ||
| 747 | int ret; | ||
| 748 | |||
| 749 | ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); | ||
| 750 | if (ret < 0) { | ||
| 751 | dev_err(dev, "unable to read dma-channels property\n"); | ||
| 752 | return ret; | ||
| 753 | } | ||
| 754 | |||
| 755 | if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { | ||
| 756 | dev_err(dev, "invalid number of channels %u\n", | ||
| 757 | dmac->n_channels); | ||
| 758 | return -EINVAL; | ||
| 759 | } | ||
| 760 | |||
| 761 | return 0; | ||
| 762 | } | ||
| 763 | |||
| 764 | static int usb_dmac_probe(struct platform_device *pdev) | ||
| 765 | { | ||
| 766 | const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH; | ||
| 767 | struct dma_device *engine; | ||
| 768 | struct usb_dmac *dmac; | ||
| 769 | struct resource *mem; | ||
| 770 | unsigned int i; | ||
| 771 | int ret; | ||
| 772 | |||
| 773 | dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); | ||
| 774 | if (!dmac) | ||
| 775 | return -ENOMEM; | ||
| 776 | |||
| 777 | dmac->dev = &pdev->dev; | ||
| 778 | platform_set_drvdata(pdev, dmac); | ||
| 779 | |||
| 780 | ret = usb_dmac_parse_of(&pdev->dev, dmac); | ||
| 781 | if (ret < 0) | ||
| 782 | return ret; | ||
| 783 | |||
| 784 | dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, | ||
| 785 | sizeof(*dmac->channels), GFP_KERNEL); | ||
| 786 | if (!dmac->channels) | ||
| 787 | return -ENOMEM; | ||
| 788 | |||
| 789 | /* Request resources. */ | ||
| 790 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 791 | dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); | ||
| 792 | if (IS_ERR(dmac->iomem)) | ||
| 793 | return PTR_ERR(dmac->iomem); | ||
| 794 | |||
| 795 | /* Enable runtime PM and initialize the device. */ | ||
| 796 | pm_runtime_enable(&pdev->dev); | ||
| 797 | ret = pm_runtime_get_sync(&pdev->dev); | ||
| 798 | if (ret < 0) { | ||
| 799 | dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); | ||
| 800 | return ret; | ||
| 801 | } | ||
| 802 | |||
| 803 | ret = usb_dmac_init(dmac); | ||
| 804 | pm_runtime_put(&pdev->dev); | ||
| 805 | |||
| 806 | if (ret) { | ||
| 807 | dev_err(&pdev->dev, "failed to reset device\n"); | ||
| 808 | goto error; | ||
| 809 | } | ||
| 810 | |||
| 811 | /* Initialize the channels. */ | ||
| 812 | INIT_LIST_HEAD(&dmac->engine.channels); | ||
| 813 | |||
| 814 | for (i = 0; i < dmac->n_channels; ++i) { | ||
| 815 | ret = usb_dmac_chan_probe(dmac, &dmac->channels[i], i); | ||
| 816 | if (ret < 0) | ||
| 817 | goto error; | ||
| 818 | } | ||
| 819 | |||
| 820 | /* Register the DMAC as a DMA provider for DT. */ | ||
| 821 | ret = of_dma_controller_register(pdev->dev.of_node, usb_dmac_of_xlate, | ||
| 822 | NULL); | ||
| 823 | if (ret < 0) | ||
| 824 | goto error; | ||
| 825 | |||
| 826 | /* | ||
| 827 | * Register the DMA engine device. | ||
| 828 | * | ||
| 829 | * Default transfer size of 32 bytes requires 32-byte alignment. | ||
| 830 | */ | ||
| 831 | engine = &dmac->engine; | ||
| 832 | dma_cap_set(DMA_SLAVE, engine->cap_mask); | ||
| 833 | |||
| 834 | engine->dev = &pdev->dev; | ||
| 835 | |||
| 836 | engine->src_addr_widths = widths; | ||
| 837 | engine->dst_addr_widths = widths; | ||
| 838 | engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); | ||
| 839 | engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | ||
| 840 | |||
| 841 | engine->device_alloc_chan_resources = usb_dmac_alloc_chan_resources; | ||
| 842 | engine->device_free_chan_resources = usb_dmac_free_chan_resources; | ||
| 843 | engine->device_prep_slave_sg = usb_dmac_prep_slave_sg; | ||
| 844 | engine->device_terminate_all = usb_dmac_chan_terminate_all; | ||
| 845 | engine->device_tx_status = usb_dmac_tx_status; | ||
| 846 | engine->device_issue_pending = usb_dmac_issue_pending; | ||
| 847 | |||
| 848 | ret = dma_async_device_register(engine); | ||
| 849 | if (ret < 0) | ||
| 850 | goto error; | ||
| 851 | |||
| 852 | return 0; | ||
| 853 | |||
| 854 | error: | ||
| 855 | of_dma_controller_free(pdev->dev.of_node); | ||
| 856 | pm_runtime_disable(&pdev->dev); | ||
| 857 | return ret; | ||
| 858 | } | ||
| 859 | |||
| 860 | static void usb_dmac_chan_remove(struct usb_dmac *dmac, | ||
| 861 | struct usb_dmac_chan *uchan) | ||
| 862 | { | ||
| 863 | usb_dmac_chan_halt(uchan); | ||
| 864 | devm_free_irq(dmac->dev, uchan->irq, uchan); | ||
| 865 | } | ||
| 866 | |||
| 867 | static int usb_dmac_remove(struct platform_device *pdev) | ||
| 868 | { | ||
| 869 | struct usb_dmac *dmac = platform_get_drvdata(pdev); | ||
| 870 | int i; | ||
| 871 | |||
| 872 | for (i = 0; i < dmac->n_channels; ++i) | ||
| 873 | usb_dmac_chan_remove(dmac, &dmac->channels[i]); | ||
| 874 | of_dma_controller_free(pdev->dev.of_node); | ||
| 875 | dma_async_device_unregister(&dmac->engine); | ||
| 876 | |||
| 877 | pm_runtime_disable(&pdev->dev); | ||
| 878 | |||
| 879 | return 0; | ||
| 880 | } | ||
| 881 | |||
| 882 | static void usb_dmac_shutdown(struct platform_device *pdev) | ||
| 883 | { | ||
| 884 | struct usb_dmac *dmac = platform_get_drvdata(pdev); | ||
| 885 | |||
| 886 | usb_dmac_stop(dmac); | ||
| 887 | } | ||
| 888 | |||
| 889 | static const struct of_device_id usb_dmac_of_ids[] = { | ||
| 890 | { .compatible = "renesas,usb-dmac", }, | ||
| 891 | { /* Sentinel */ } | ||
| 892 | }; | ||
| 893 | MODULE_DEVICE_TABLE(of, usb_dmac_of_ids); | ||
| 894 | |||
| 895 | static struct platform_driver usb_dmac_driver = { | ||
| 896 | .driver = { | ||
| 897 | .pm = &usb_dmac_pm, | ||
| 898 | .name = "usb-dmac", | ||
| 899 | .of_match_table = usb_dmac_of_ids, | ||
| 900 | }, | ||
| 901 | .probe = usb_dmac_probe, | ||
| 902 | .remove = usb_dmac_remove, | ||
| 903 | .shutdown = usb_dmac_shutdown, | ||
| 904 | }; | ||
| 905 | |||
| 906 | module_platform_driver(usb_dmac_driver); | ||
| 907 | |||
| 908 | MODULE_DESCRIPTION("Renesas USB DMA Controller Driver"); | ||
| 909 | MODULE_AUTHOR("Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>"); | ||
| 910 | MODULE_LICENSE("GPL v2"); | ||
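Putting the registered callbacks together, the expected consumer flow for this engine is the standard dmaengine slave sequence. A hedged end-to-end sketch (every name outside the dmaengine API is a placeholder):

	/* Assumes "chan" was requested via DT and "sgl"/"nents" describe an
	 * already DMA-mapped buffer heading to the device FIFO. */
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EIO;

	tx->callback = example_xfer_done;	/* hypothetical completion handler */
	tx->callback_param = ctx;

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);		/* enters usb_dmac_issue_pending() */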
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index d0086e9f2082..a1afda43b8ef 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
| @@ -896,7 +896,7 @@ static const struct dev_pm_ops sirfsoc_dma_pm_ops = { | |||
| 896 | SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) | 896 | SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) |
| 897 | }; | 897 | }; |
| 898 | 898 | ||
| 899 | static struct of_device_id sirfsoc_dma_match[] = { | 899 | static const struct of_device_id sirfsoc_dma_match[] = { |
| 900 | { .compatible = "sirf,prima2-dmac", }, | 900 | { .compatible = "sirf,prima2-dmac", }, |
| 901 | { .compatible = "sirf,marco-dmac", }, | 901 | { .compatible = "sirf,marco-dmac", }, |
| 902 | {}, | 902 | {}, |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 1332b1d4d541..3c10f034d4b9 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
| @@ -2514,7 +2514,8 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
| 2514 | sg_dma_len(&dst_sg) = size; | 2514 | sg_dma_len(&dst_sg) = size; |
| 2515 | sg_dma_len(&src_sg) = size; | 2515 | sg_dma_len(&src_sg) = size; |
| 2516 | 2516 | ||
| 2517 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); | 2517 | return d40_prep_sg(chan, &src_sg, &dst_sg, 1, |
| 2518 | DMA_MEM_TO_MEM, dma_flags); | ||
| 2518 | } | 2519 | } |
| 2519 | 2520 | ||
| 2520 | static struct dma_async_tx_descriptor * | 2521 | static struct dma_async_tx_descriptor * |
| @@ -2526,7 +2527,8 @@ d40_prep_memcpy_sg(struct dma_chan *chan, | |||
| 2526 | if (dst_nents != src_nents) | 2527 | if (dst_nents != src_nents) |
| 2527 | return NULL; | 2528 | return NULL; |
| 2528 | 2529 | ||
| 2529 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); | 2530 | return d40_prep_sg(chan, src_sg, dst_sg, src_nents, |
| 2531 | DMA_MEM_TO_MEM, dma_flags); | ||
| 2530 | } | 2532 | } |
| 2531 | 2533 | ||
| 2532 | static struct dma_async_tx_descriptor * | 2534 | static struct dma_async_tx_descriptor * |
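The two hunks above fix memcpy preparation to pass DMA_MEM_TO_MEM instead of DMA_NONE: a memory-to-memory copy has a real direction, it just is not a slave direction. For reference, the core's direction helper is roughly the following (paraphrased from linux/dmaengine.h, so treat it as a sketch):

	static inline bool is_slave_direction(enum dma_transfer_direction direction)
	{
		return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
	}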
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 7ebcf9bec698..11e536586812 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
| @@ -796,11 +796,6 @@ static void sun6i_dma_issue_pending(struct dma_chan *chan) | |||
| 796 | spin_unlock_irqrestore(&vchan->vc.lock, flags); | 796 | spin_unlock_irqrestore(&vchan->vc.lock, flags); |
| 797 | } | 797 | } |
| 798 | 798 | ||
| 799 | static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan) | ||
| 800 | { | ||
| 801 | return 0; | ||
| 802 | } | ||
| 803 | |||
| 804 | static void sun6i_dma_free_chan_resources(struct dma_chan *chan) | 799 | static void sun6i_dma_free_chan_resources(struct dma_chan *chan) |
| 805 | { | 800 | { |
| 806 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); | 801 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); |
| @@ -896,7 +891,7 @@ static struct sun6i_dma_config sun8i_a23_dma_cfg = { | |||
| 896 | .nr_max_vchans = 37, | 891 | .nr_max_vchans = 37, |
| 897 | }; | 892 | }; |
| 898 | 893 | ||
| 899 | static struct of_device_id sun6i_dma_match[] = { | 894 | static const struct of_device_id sun6i_dma_match[] = { |
| 900 | { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, | 895 | { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg }, |
| 901 | { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, | 896 | { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg }, |
| 902 | { /* sentinel */ } | 897 | { /* sentinel */ } |
| @@ -957,7 +952,6 @@ static int sun6i_dma_probe(struct platform_device *pdev) | |||
| 957 | dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); | 952 | dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); |
| 958 | 953 | ||
| 959 | INIT_LIST_HEAD(&sdc->slave.channels); | 954 | INIT_LIST_HEAD(&sdc->slave.channels); |
| 960 | sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources; | ||
| 961 | sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; | 955 | sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; |
| 962 | sdc->slave.device_tx_status = sun6i_dma_tx_status; | 956 | sdc->slave.device_tx_status = sun6i_dma_tx_status; |
| 963 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; | 957 | sdc->slave.device_issue_pending = sun6i_dma_issue_pending; |
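Removing the empty callback is safe because the dmaengine core treats device_alloc_chan_resources as optional and only invokes it when a driver provides one; the core-side guard looks approximately like this (paraphrased, not part of this patch):

	/* In dma_chan_get(), approximately: */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}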
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c new file mode 100755 index 000000000000..f52e37502254 --- /dev/null +++ b/drivers/dma/xgene-dma.c | |||
| @@ -0,0 +1,2089 @@ | |||
| 1 | /* | ||
| 2 | * Applied Micro X-Gene SoC DMA engine Driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2015, Applied Micro Circuits Corporation | ||
| 5 | * Authors: Rameshwar Prasad Sahu <rsahu@apm.com> | ||
| 6 | * Loc Ho <lho@apm.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the | ||
| 10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
| 11 | * option) any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License | ||
| 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 20 | * | ||
| 21 | * NOTE: PM support is currently not available. | ||
| 22 | */ | ||
| 23 | |||
| 24 | #include <linux/clk.h> | ||
| 25 | #include <linux/delay.h> | ||
| 26 | #include <linux/dma-mapping.h> | ||
| 27 | #include <linux/dmaengine.h> | ||
| 28 | #include <linux/dmapool.h> | ||
| 29 | #include <linux/interrupt.h> | ||
| 30 | #include <linux/io.h> | ||
| 31 | #include <linux/module.h> | ||
| 32 | #include <linux/of_device.h> | ||
| 33 | |||
| 34 | #include "dmaengine.h" | ||
| 35 | |||
| 36 | /* X-Gene DMA ring csr registers and bit definitions */ | ||
| 37 | #define XGENE_DMA_RING_CONFIG 0x04 | ||
| 38 | #define XGENE_DMA_RING_ENABLE BIT(31) | ||
| 39 | #define XGENE_DMA_RING_ID 0x08 | ||
| 40 | #define XGENE_DMA_RING_ID_SETUP(v) ((v) | BIT(31)) | ||
| 41 | #define XGENE_DMA_RING_ID_BUF 0x0C | ||
| 42 | #define XGENE_DMA_RING_ID_BUF_SETUP(v) (((v) << 9) | BIT(21)) | ||
| 43 | #define XGENE_DMA_RING_THRESLD0_SET1 0x30 | ||
| 44 | #define XGENE_DMA_RING_THRESLD0_SET1_VAL 0X64 | ||
| 45 | #define XGENE_DMA_RING_THRESLD1_SET1 0x34 | ||
| 46 | #define XGENE_DMA_RING_THRESLD1_SET1_VAL 0xC8 | ||
| 47 | #define XGENE_DMA_RING_HYSTERESIS 0x68 | ||
| 48 | #define XGENE_DMA_RING_HYSTERESIS_VAL 0xFFFFFFFF | ||
| 49 | #define XGENE_DMA_RING_STATE 0x6C | ||
| 50 | #define XGENE_DMA_RING_STATE_WR_BASE 0x70 | ||
| 51 | #define XGENE_DMA_RING_NE_INT_MODE 0x017C | ||
| 52 | #define XGENE_DMA_RING_NE_INT_MODE_SET(m, v) \ | ||
| 53 | ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v))) | ||
| 54 | #define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v) \ | ||
| 55 | ((m) &= (~BIT(31 - (v)))) | ||
| 56 | #define XGENE_DMA_RING_CLKEN 0xC208 | ||
| 57 | #define XGENE_DMA_RING_SRST 0xC200 | ||
| 58 | #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070 | ||
| 59 | #define XGENE_DMA_RING_BLK_MEM_RDY 0xD074 | ||
| 60 | #define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF | ||
| 61 | #define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1) | ||
| 62 | #define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num)) | ||
| 63 | #define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v)) | ||
| 64 | #define XGENE_DMA_RING_CMD_OFFSET 0x2C | ||
| 65 | #define XGENE_DMA_RING_CMD_BASE_OFFSET(v) ((v) << 6) | ||
| 66 | #define XGENE_DMA_RING_COHERENT_SET(m) \ | ||
| 67 | (((u32 *)(m))[2] |= BIT(4)) | ||
| 68 | #define XGENE_DMA_RING_ADDRL_SET(m, v) \ | ||
| 69 | (((u32 *)(m))[2] |= (((v) >> 8) << 5)) | ||
| 70 | #define XGENE_DMA_RING_ADDRH_SET(m, v) \ | ||
| 71 | (((u32 *)(m))[3] |= ((v) >> 35)) | ||
| 72 | #define XGENE_DMA_RING_ACCEPTLERR_SET(m) \ | ||
| 73 | (((u32 *)(m))[3] |= BIT(19)) | ||
| 74 | #define XGENE_DMA_RING_SIZE_SET(m, v) \ | ||
| 75 | (((u32 *)(m))[3] |= ((v) << 23)) | ||
| 76 | #define XGENE_DMA_RING_RECOMBBUF_SET(m) \ | ||
| 77 | (((u32 *)(m))[3] |= BIT(27)) | ||
| 78 | #define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m) \ | ||
| 79 | (((u32 *)(m))[3] |= (0x7 << 28)) | ||
| 80 | #define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m) \ | ||
| 81 | (((u32 *)(m))[4] |= 0x3) | ||
| 82 | #define XGENE_DMA_RING_SELTHRSH_SET(m) \ | ||
| 83 | (((u32 *)(m))[4] |= BIT(3)) | ||
| 84 | #define XGENE_DMA_RING_TYPE_SET(m, v) \ | ||
| 85 | (((u32 *)(m))[4] |= ((v) << 19)) | ||
| 86 | |||
| 87 | /* X-Gene DMA device csr registers and bit definitions */ | ||
| 88 | #define XGENE_DMA_IPBRR 0x0 | ||
| 89 | #define XGENE_DMA_DEV_ID_RD(v) ((v) & 0x00000FFF) | ||
| 90 | #define XGENE_DMA_BUS_ID_RD(v) (((v) >> 12) & 3) | ||
| 91 | #define XGENE_DMA_REV_NO_RD(v) (((v) >> 14) & 3) | ||
| 92 | #define XGENE_DMA_GCR 0x10 | ||
| 93 | #define XGENE_DMA_CH_SETUP(v) \ | ||
| 94 | ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF) | ||
| 95 | #define XGENE_DMA_ENABLE(v) ((v) |= BIT(31)) | ||
| 96 | #define XGENE_DMA_DISABLE(v) ((v) &= ~BIT(31)) | ||
| 97 | #define XGENE_DMA_RAID6_CONT 0x14 | ||
| 98 | #define XGENE_DMA_RAID6_MULTI_CTRL(v) ((v) << 24) | ||
| 99 | #define XGENE_DMA_INT 0x70 | ||
| 100 | #define XGENE_DMA_INT_MASK 0x74 | ||
| 101 | #define XGENE_DMA_INT_ALL_MASK 0xFFFFFFFF | ||
| 102 | #define XGENE_DMA_INT_ALL_UNMASK 0x0 | ||
| 103 | #define XGENE_DMA_INT_MASK_SHIFT 0x14 | ||
| 104 | #define XGENE_DMA_RING_INT0_MASK 0x90A0 | ||
| 105 | #define XGENE_DMA_RING_INT1_MASK 0x90A8 | ||
| 106 | #define XGENE_DMA_RING_INT2_MASK 0x90B0 | ||
| 107 | #define XGENE_DMA_RING_INT3_MASK 0x90B8 | ||
| 108 | #define XGENE_DMA_RING_INT4_MASK 0x90C0 | ||
| 109 | #define XGENE_DMA_CFG_RING_WQ_ASSOC 0x90E0 | ||
| 110 | #define XGENE_DMA_ASSOC_RING_MNGR1 0xFFFFFFFF | ||
| 111 | #define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 | ||
| 112 | #define XGENE_DMA_BLK_MEM_RDY 0xD074 | ||
| 113 | #define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF | ||
| 114 | |||
| 115 | /* X-Gene SoC EFUSE csr register and bit definition */ | ||
| 116 | #define XGENE_SOC_JTAG1_SHADOW 0x18 | ||
| 117 | #define XGENE_DMA_PQ_DISABLE_MASK BIT(13) | ||
| 118 | |||
| 119 | /* X-Gene DMA Descriptor format */ | ||
| 120 | #define XGENE_DMA_DESC_NV_BIT BIT_ULL(50) | ||
| 121 | #define XGENE_DMA_DESC_IN_BIT BIT_ULL(55) | ||
| 122 | #define XGENE_DMA_DESC_C_BIT BIT_ULL(63) | ||
| 123 | #define XGENE_DMA_DESC_DR_BIT BIT_ULL(61) | ||
| 124 | #define XGENE_DMA_DESC_ELERR_POS 46 | ||
| 125 | #define XGENE_DMA_DESC_RTYPE_POS 56 | ||
| 126 | #define XGENE_DMA_DESC_LERR_POS 60 | ||
| 127 | #define XGENE_DMA_DESC_FLYBY_POS 4 | ||
| 128 | #define XGENE_DMA_DESC_BUFLEN_POS 48 | ||
| 129 | #define XGENE_DMA_DESC_HOENQ_NUM_POS 48 | ||
| 130 | |||
| 131 | #define XGENE_DMA_DESC_NV_SET(m) \ | ||
| 132 | (((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT) | ||
| 133 | #define XGENE_DMA_DESC_IN_SET(m) \ | ||
| 134 | (((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT) | ||
| 135 | #define XGENE_DMA_DESC_RTYPE_SET(m, v) \ | ||
| 136 | (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS)) | ||
| 137 | #define XGENE_DMA_DESC_BUFADDR_SET(m, v) \ | ||
| 138 | (((u64 *)(m))[0] |= (v)) | ||
| 139 | #define XGENE_DMA_DESC_BUFLEN_SET(m, v) \ | ||
| 140 | (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS)) | ||
| 141 | #define XGENE_DMA_DESC_C_SET(m) \ | ||
| 142 | (((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT) | ||
| 143 | #define XGENE_DMA_DESC_FLYBY_SET(m, v) \ | ||
| 144 | (((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS)) | ||
| 145 | #define XGENE_DMA_DESC_MULTI_SET(m, v, i) \ | ||
| 146 | (((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8))) | ||
| 147 | #define XGENE_DMA_DESC_DR_SET(m) \ | ||
| 148 | (((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT) | ||
| 149 | #define XGENE_DMA_DESC_DST_ADDR_SET(m, v) \ | ||
| 150 | (((u64 *)(m))[3] |= (v)) | ||
| 151 | #define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v) \ | ||
| 152 | (((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS)) | ||
| 153 | #define XGENE_DMA_DESC_ELERR_RD(m) \ | ||
| 154 | (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3) | ||
| 155 | #define XGENE_DMA_DESC_LERR_RD(m) \ | ||
| 156 | (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7) | ||
| 157 | #define XGENE_DMA_DESC_STATUS(elerr, lerr) \ | ||
| 158 | (((elerr) << 4) | (lerr)) | ||
| 159 | |||
| 160 | /* X-Gene DMA descriptor empty s/w signature */ | ||
| 161 | #define XGENE_DMA_DESC_EMPTY_INDEX 0 | ||
| 162 | #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL | ||
| 163 | #define XGENE_DMA_DESC_SET_EMPTY(m) \ | ||
| 164 | (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] = \ | ||
| 165 | XGENE_DMA_DESC_EMPTY_SIGNATURE) | ||
| 166 | #define XGENE_DMA_DESC_IS_EMPTY(m) \ | ||
| 167 | (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] == \ | ||
| 168 | XGENE_DMA_DESC_EMPTY_SIGNATURE) | ||
| 169 | |||
| 170 | /* X-Gene DMA configurable parameters defines */ | ||
| 171 | #define XGENE_DMA_RING_NUM 512 | ||
| 172 | #define XGENE_DMA_BUFNUM 0x0 | ||
| 173 | #define XGENE_DMA_CPU_BUFNUM 0x18 | ||
| 174 | #define XGENE_DMA_RING_OWNER_DMA 0x03 | ||
| 175 | #define XGENE_DMA_RING_OWNER_CPU 0x0F | ||
| 176 | #define XGENE_DMA_RING_TYPE_REGULAR 0x01 | ||
| 177 | #define XGENE_DMA_RING_WQ_DESC_SIZE 32 /* 32 Bytes */ | ||
| 178 | #define XGENE_DMA_RING_NUM_CONFIG 5 | ||
| 179 | #define XGENE_DMA_MAX_CHANNEL 4 | ||
| 180 | #define XGENE_DMA_XOR_CHANNEL 0 | ||
| 181 | #define XGENE_DMA_PQ_CHANNEL 1 | ||
| 182 | #define XGENE_DMA_MAX_BYTE_CNT 0x4000 /* 16 KB */ | ||
| 183 | #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */ | ||
| 184 | #define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */ | ||
| 185 | #define XGENE_DMA_MAX_XOR_SRC 5 | ||
| 186 | #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0 | ||
| 187 | #define XGENE_DMA_INVALID_LEN_CODE 0x7800 | ||
| 188 | |||
| 189 | /* X-Gene DMA descriptor error codes */ | ||
| 190 | #define ERR_DESC_AXI 0x01 | ||
| 191 | #define ERR_BAD_DESC 0x02 | ||
| 192 | #define ERR_READ_DATA_AXI 0x03 | ||
| 193 | #define ERR_WRITE_DATA_AXI 0x04 | ||
| 194 | #define ERR_FBP_TIMEOUT 0x05 | ||
| 195 | #define ERR_ECC 0x06 | ||
| 196 | #define ERR_DIFF_SIZE 0x08 | ||
| 197 | #define ERR_SCT_GAT_LEN 0x09 | ||
| 198 | #define ERR_CRC_ERR 0x11 | ||
| 199 | #define ERR_CHKSUM 0x12 | ||
| 200 | #define ERR_DIF 0x13 | ||
| 201 | |||
| 202 | /* X-Gene DMA error interrupt codes */ | ||
| 203 | #define ERR_DIF_SIZE_INT 0x0 | ||
| 204 | #define ERR_GS_ERR_INT 0x1 | ||
| 205 | #define ERR_FPB_TIMEO_INT 0x2 | ||
| 206 | #define ERR_WFIFO_OVF_INT 0x3 | ||
| 207 | #define ERR_RFIFO_OVF_INT 0x4 | ||
| 208 | #define ERR_WR_TIMEO_INT 0x5 | ||
| 209 | #define ERR_RD_TIMEO_INT 0x6 | ||
| 210 | #define ERR_WR_ERR_INT 0x7 | ||
| 211 | #define ERR_RD_ERR_INT 0x8 | ||
| 212 | #define ERR_BAD_DESC_INT 0x9 | ||
| 213 | #define ERR_DESC_DST_INT 0xA | ||
| 214 | #define ERR_DESC_SRC_INT 0xB | ||
| 215 | |||
| 216 | /* X-Gene DMA flyby operation code */ | ||
| 217 | #define FLYBY_2SRC_XOR 0x8 | ||
| 218 | #define FLYBY_3SRC_XOR 0x9 | ||
| 219 | #define FLYBY_4SRC_XOR 0xA | ||
| 220 | #define FLYBY_5SRC_XOR 0xB | ||
| 221 | |||
| 222 | /* X-Gene DMA SW descriptor flags */ | ||
| 223 | #define XGENE_DMA_FLAG_64B_DESC BIT(0) | ||
| 224 | |||
| 225 | /* Define to dump X-Gene DMA descriptor */ | ||
| 226 | #define XGENE_DMA_DESC_DUMP(desc, m) \ | ||
| 227 | print_hex_dump(KERN_ERR, (m), \ | ||
| 228 | DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0) | ||
| 229 | |||
| 230 | #define to_dma_desc_sw(tx) \ | ||
| 231 | container_of(tx, struct xgene_dma_desc_sw, tx) | ||
| 232 | #define to_dma_chan(dchan) \ | ||
| 233 | container_of(dchan, struct xgene_dma_chan, dma_chan) | ||
| 234 | |||
| 235 | #define chan_dbg(chan, fmt, arg...) \ | ||
| 236 | dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) | ||
| 237 | #define chan_err(chan, fmt, arg...) \ | ||
| 238 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) | ||
| 239 | |||
| 240 | struct xgene_dma_desc_hw { | ||
| 241 | u64 m0; | ||
| 242 | u64 m1; | ||
| 243 | u64 m2; | ||
| 244 | u64 m3; | ||
| 245 | }; | ||
| 246 | |||
| 247 | enum xgene_dma_ring_cfgsize { | ||
| 248 | XGENE_DMA_RING_CFG_SIZE_512B, | ||
| 249 | XGENE_DMA_RING_CFG_SIZE_2KB, | ||
| 250 | XGENE_DMA_RING_CFG_SIZE_16KB, | ||
| 251 | XGENE_DMA_RING_CFG_SIZE_64KB, | ||
| 252 | XGENE_DMA_RING_CFG_SIZE_512KB, | ||
| 253 | XGENE_DMA_RING_CFG_SIZE_INVALID | ||
| 254 | }; | ||
| 255 | |||
| 256 | struct xgene_dma_ring { | ||
| 257 | struct xgene_dma *pdma; | ||
| 258 | u8 buf_num; | ||
| 259 | u16 id; | ||
| 260 | u16 num; | ||
| 261 | u16 head; | ||
| 262 | u16 owner; | ||
| 263 | u16 slots; | ||
| 264 | u16 dst_ring_num; | ||
| 265 | u32 size; | ||
| 266 | void __iomem *cmd; | ||
| 267 | void __iomem *cmd_base; | ||
| 268 | dma_addr_t desc_paddr; | ||
| 269 | u32 state[XGENE_DMA_RING_NUM_CONFIG]; | ||
| 270 | enum xgene_dma_ring_cfgsize cfgsize; | ||
| 271 | union { | ||
| 272 | void *desc_vaddr; | ||
| 273 | struct xgene_dma_desc_hw *desc_hw; | ||
| 274 | }; | ||
| 275 | }; | ||
| 276 | |||
| 277 | struct xgene_dma_desc_sw { | ||
| 278 | struct xgene_dma_desc_hw desc1; | ||
| 279 | struct xgene_dma_desc_hw desc2; | ||
| 280 | u32 flags; | ||
| 281 | struct list_head node; | ||
| 282 | struct list_head tx_list; | ||
| 283 | struct dma_async_tx_descriptor tx; | ||
| 284 | }; | ||
| 285 | |||
| 286 | /** | ||
| 287 | * struct xgene_dma_chan - internal representation of an X-Gene DMA channel | ||
| 288 | * @dma_chan: dmaengine channel object member | ||
| 289 | * @pdma: X-Gene DMA device structure reference | ||
| 290 | * @dev: struct device reference for dma mapping api | ||
| 291 | * @id: raw id of this channel | ||
| 292 | * @rx_irq: channel IRQ | ||
| 293 | * @name: name of X-Gene DMA channel | ||
| 294 | * @lock: serializes enqueue/dequeue operations to the descriptor pool | ||
| 295 | * @pending: number of transaction requests pushed to the DMA controller for | ||
| 296 | * execution but still awaiting completion | ||
| 297 | * @max_outstanding: max number of outstanding requests we can push to the channel | ||
| 298 | * @ld_pending: descriptors which are queued to run, but have not yet been | ||
| 299 | * submitted to the hardware for execution | ||
| 300 | * @ld_running: descriptors which are currently being executed by the hardware | ||
| 301 | * @ld_completed: descriptors which have finished execution by the hardware. | ||
| 302 | * These descriptors have already had their cleanup actions run. They | ||
| 303 | * are waiting for the ACK bit to be set by the async tx API. | ||
| 304 | * @desc_pool: descriptor pool for DMA operations | ||
| 305 | * @tasklet: bottom half where all completed descriptors are cleaned up | ||
| 306 | * @tx_ring: transmit ring descriptor that we use to prepare actual | ||
| 307 | * descriptors for execution | ||
| 308 | * @rx_ring: receive ring descriptor that we use to get completed DMA | ||
| 309 | * descriptors during cleanup time | ||
| 310 | */ | ||
| 311 | struct xgene_dma_chan { | ||
| 312 | struct dma_chan dma_chan; | ||
| 313 | struct xgene_dma *pdma; | ||
| 314 | struct device *dev; | ||
| 315 | int id; | ||
| 316 | int rx_irq; | ||
| 317 | char name[10]; | ||
| 318 | spinlock_t lock; | ||
| 319 | int pending; | ||
| 320 | int max_outstanding; | ||
| 321 | struct list_head ld_pending; | ||
| 322 | struct list_head ld_running; | ||
| 323 | struct list_head ld_completed; | ||
| 324 | struct dma_pool *desc_pool; | ||
| 325 | struct tasklet_struct tasklet; | ||
| 326 | struct xgene_dma_ring tx_ring; | ||
| 327 | struct xgene_dma_ring rx_ring; | ||
| 328 | }; | ||
| 329 | |||
| 330 | /** | ||
| 331 | * struct xgene_dma - internal representation of an X-Gene DMA device | ||
| 332 | * @err_irq: DMA error irq number | ||
| 333 | * @ring_num: start id number for DMA ring | ||
| 334 | * @csr_dma: base for DMA register access | ||
| 335 | * @csr_ring: base for DMA ring register access | ||
| 336 | * @csr_ring_cmd: base for DMA ring command register access | ||
| 337 | * @csr_efuse: base for efuse register access | ||
| 338 | * @dma_dev: embedded struct dma_device | ||
| 339 | * @chan: reference to X-Gene DMA channels | ||
| 340 | */ | ||
| 341 | struct xgene_dma { | ||
| 342 | struct device *dev; | ||
| 343 | struct clk *clk; | ||
| 344 | int err_irq; | ||
| 345 | int ring_num; | ||
| 346 | void __iomem *csr_dma; | ||
| 347 | void __iomem *csr_ring; | ||
| 348 | void __iomem *csr_ring_cmd; | ||
| 349 | void __iomem *csr_efuse; | ||
| 350 | struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL]; | ||
| 351 | struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL]; | ||
| 352 | }; | ||
| 353 | |||
| 354 | static const char * const xgene_dma_desc_err[] = { | ||
| 355 | [ERR_DESC_AXI] = "AXI error when reading src/dst link list", | ||
| 356 | [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc", | ||
| 357 | [ERR_READ_DATA_AXI] = "AXI error when reading data", | ||
| 358 | [ERR_WRITE_DATA_AXI] = "AXI error when writing data", | ||
| 359 | [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch", | ||
| 360 | [ERR_ECC] = "ECC double bit error", | ||
| 361 | [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result", | ||
| 362 | [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same", | ||
| 363 | [ERR_CRC_ERR] = "CRC error", | ||
| 364 | [ERR_CHKSUM] = "Checksum error", | ||
| 365 | [ERR_DIF] = "DIF error", | ||
| 366 | }; | ||
| 367 | |||
| 368 | static const char * const xgene_dma_err[] = { | ||
| 369 | [ERR_DIF_SIZE_INT] = "DIF size error", | ||
| 370 | [ERR_GS_ERR_INT] = "Gather scatter not same size error", | ||
| 371 | [ERR_FPB_TIMEO_INT] = "Free pool time out error", | ||
| 372 | [ERR_WFIFO_OVF_INT] = "Write FIFO overflow error", | ||
| 373 | [ERR_RFIFO_OVF_INT] = "Read FIFO overflow error", | ||
| 374 | [ERR_WR_TIMEO_INT] = "Write time out error", | ||
| 375 | [ERR_RD_TIMEO_INT] = "Read time out error", | ||
| 376 | [ERR_WR_ERR_INT] = "HBF bus write error", | ||
| 377 | [ERR_RD_ERR_INT] = "HBF bus read error", | ||
| 378 | [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error", | ||
| 379 | [ERR_DESC_DST_INT] = "HFB reading dst link address error", | ||
| 380 | [ERR_DESC_SRC_INT] = "HFB reading src link address error", | ||
| 381 | }; | ||
| 382 | |||
| 383 | static bool is_pq_enabled(struct xgene_dma *pdma) | ||
| 384 | { | ||
| 385 | u32 val; | ||
| 386 | |||
| 387 | val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW); | ||
| 388 | return !(val & XGENE_DMA_PQ_DISABLE_MASK); | ||
| 389 | } | ||
| 390 | |||
| 391 | static void xgene_dma_cpu_to_le64(u64 *desc, int count) | ||
| 392 | { | ||
| 393 | int i; | ||
| 394 | |||
| 395 | for (i = 0; i < count; i++) | ||
| 396 | desc[i] = cpu_to_le64(desc[i]); | ||
| 397 | } | ||
| 398 | |||
| 399 | static u16 xgene_dma_encode_len(u32 len) | ||
| 400 | { | ||
| 401 | return (len < XGENE_DMA_MAX_BYTE_CNT) ? | ||
| 402 | len : XGENE_DMA_16K_BUFFER_LEN_CODE; | ||
| 403 | } | ||
| 404 | |||
| 405 | static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) | ||
| 406 | { | ||
| 407 | static u8 flyby_type[] = { | ||
| 408 | FLYBY_2SRC_XOR, /* Dummy */ | ||
| 409 | FLYBY_2SRC_XOR, /* Dummy */ | ||
| 410 | FLYBY_2SRC_XOR, | ||
| 411 | FLYBY_3SRC_XOR, | ||
| 412 | FLYBY_4SRC_XOR, | ||
| 413 | FLYBY_5SRC_XOR | ||
| 414 | }; | ||
| 415 | |||
| 416 | return flyby_type[src_cnt]; | ||
| 417 | } | ||
| 418 | |||
| 419 | static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) | ||
| 420 | { | ||
| 421 | u32 __iomem *cmd_base = ring->cmd_base; | ||
| 422 | u32 ring_state = ioread32(&cmd_base[1]); | ||
| 423 | |||
| 424 | return XGENE_DMA_RING_DESC_CNT(ring_state); | ||
| 425 | } | ||
| 426 | |||
| 427 | static void xgene_dma_set_src_buffer(void *ext8, size_t *len, | ||
| 428 | dma_addr_t *paddr) | ||
| 429 | { | ||
| 430 | size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ? | ||
| 431 | *len : XGENE_DMA_MAX_BYTE_CNT; | ||
| 432 | |||
| 433 | XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr); | ||
| 434 | XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes)); | ||
| 435 | *len -= nbytes; | ||
| 436 | *paddr += nbytes; | ||
| 437 | } | ||
| 438 | |||
| 439 | static void xgene_dma_invalidate_buffer(void *ext8) | ||
| 440 | { | ||
| 441 | XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE); | ||
| 442 | } | ||
| 443 | |||
| 444 | static void *xgene_dma_lookup_ext8(u64 *desc, int idx) | ||
| 445 | { | ||
| 446 | return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1); | ||
| 447 | } | ||
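The lookup above swaps each adjacent pair of 64-bit words, which presumably matches how the hardware lays out the source-buffer fields of the extended descriptor. A standalone userspace mirror makes the mapping concrete (illustration only):

	#include <assert.h>
	#include <stdint.h>

	/* Userspace mirror of xgene_dma_lookup_ext8() */
	static uint64_t *lookup_ext8(uint64_t *desc, int idx)
	{
		return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1);
	}

	int main(void)
	{
		uint64_t d[4];

		/* Logical source slots 0..3 land in hardware words 1, 0, 3, 2 */
		assert(lookup_ext8(d, 0) == &d[1]);
		assert(lookup_ext8(d, 1) == &d[0]);
		assert(lookup_ext8(d, 2) == &d[3]);
		assert(lookup_ext8(d, 3) == &d[2]);
		return 0;
	}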
| 448 | |||
| 449 | static void xgene_dma_init_desc(void *desc, u16 dst_ring_num) | ||
| 450 | { | ||
| 451 | XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */ | ||
| 452 | XGENE_DMA_DESC_IN_SET(desc); | ||
| 453 | XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num); | ||
| 454 | XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA); | ||
| 455 | } | ||
| 456 | |||
| 457 | static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, | ||
| 458 | struct xgene_dma_desc_sw *desc_sw, | ||
| 459 | dma_addr_t dst, dma_addr_t src, | ||
| 460 | size_t len) | ||
| 461 | { | ||
| 462 | void *desc1, *desc2; | ||
| 463 | int i; | ||
| 464 | |||
| 465 | /* Get 1st descriptor */ | ||
| 466 | desc1 = &desc_sw->desc1; | ||
| 467 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); | ||
| 468 | |||
| 469 | /* Set destination address */ | ||
| 470 | XGENE_DMA_DESC_DR_SET(desc1); | ||
| 471 | XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst); | ||
| 472 | |||
| 473 | /* Set 1st source address */ | ||
| 474 | xgene_dma_set_src_buffer(desc1 + 8, &len, &src); | ||
| 475 | |||
| 476 | if (len <= 0) { | ||
| 477 | desc2 = NULL; | ||
| 478 | goto skip_additional_src; | ||
| 479 | } | ||
| 480 | |||
| 481 | /* | ||
| 482 | * The source buffer doesn't fit in a single descriptor, | ||
| 483 | * so we need to use the 2nd descriptor | ||
| 484 | */ | ||
| 485 | desc2 = &desc_sw->desc2; | ||
| 486 | XGENE_DMA_DESC_NV_SET(desc1); | ||
| 487 | |||
| 488 | /* Set 2nd to 5th source address */ | ||
| 489 | for (i = 0; i < 4 && len; i++) | ||
| 490 | xgene_dma_set_src_buffer(xgene_dma_lookup_ext8(desc2, i), | ||
| 491 | &len, &src); | ||
| 492 | |||
| 493 | /* Invalidate unused source address fields */ | ||
| 494 | for (; i < 4; i++) | ||
| 495 | xgene_dma_invalidate_buffer(xgene_dma_lookup_ext8(desc2, i)); | ||
| 496 | |||
| 497 | /* Update the flags to note that we have prepared a 64B descriptor */ | ||
| 498 | desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; | ||
| 499 | |||
| 500 | skip_additional_src: | ||
| 501 | /* Hardware stores descriptor in little endian format */ | ||
| 502 | xgene_dma_cpu_to_le64(desc1, 4); | ||
| 503 | if (desc2) | ||
| 504 | xgene_dma_cpu_to_le64(desc2, 4); | ||
| 505 | } | ||
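The split logic above also explains the 80 KB per-descriptor limit: one source-buffer field in desc1 plus four in desc2, each capped at 16 KB. A quick standalone check of that arithmetic (constants copied from the defines above):

	#include <assert.h>

	#define MAX_BYTE_CNT		0x4000	/* XGENE_DMA_MAX_BYTE_CNT, 16 KB */
	#define MAX_64B_DESC_BYTE_CNT	0x14000	/* XGENE_DMA_MAX_64B_DESC_BYTE_CNT, 80 KB */

	int main(void)
	{
		/* 1 buffer field in desc1 + 4 buffer fields in desc2 */
		assert(5 * MAX_BYTE_CNT == MAX_64B_DESC_BYTE_CNT);
		return 0;
	}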
| 506 | |||
| 507 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, | ||
| 508 | struct xgene_dma_desc_sw *desc_sw, | ||
| 509 | dma_addr_t *dst, dma_addr_t *src, | ||
| 510 | u32 src_cnt, size_t *nbytes, | ||
| 511 | const u8 *scf) | ||
| 512 | { | ||
| 513 | void *desc1, *desc2; | ||
| 514 | size_t len = *nbytes; | ||
| 515 | int i; | ||
| 516 | |||
| 517 | desc1 = &desc_sw->desc1; | ||
| 518 | desc2 = &desc_sw->desc2; | ||
| 519 | |||
| 520 | /* Initialize DMA descriptor */ | ||
| 521 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); | ||
| 522 | |||
| 523 | /* Set destination address */ | ||
| 524 | XGENE_DMA_DESC_DR_SET(desc1); | ||
| 525 | XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst); | ||
| 526 | |||
| 527 | /* We have multiple source addresses, so we need to set the NV bit */ | ||
| 528 | XGENE_DMA_DESC_NV_SET(desc1); | ||
| 529 | |||
| 530 | /* Set flyby opcode */ | ||
| 531 | XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt)); | ||
| 532 | |||
| 533 | /* Set 1st to 5th source addresses */ | ||
| 534 | for (i = 0; i < src_cnt; i++) { | ||
| 535 | len = *nbytes; | ||
| 536 | xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) : | ||
| 537 | xgene_dma_lookup_ext8(desc2, i - 1), | ||
| 538 | &len, &src[i]); | ||
| 539 | XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i); | ||
| 540 | } | ||
| 541 | |||
| 542 | /* Hardware stores descriptor in little endian format */ | ||
| 543 | xgene_dma_cpu_to_le64(desc1, 4); | ||
| 544 | xgene_dma_cpu_to_le64(desc2, 4); | ||
| 545 | |||
| 546 | /* Update metadata */ | ||
| 547 | *nbytes = len; | ||
| 548 | *dst += XGENE_DMA_MAX_BYTE_CNT; | ||
| 549 | |||
| 550 | /* We always need a 64B descriptor to perform xor or pq operations */ | ||
| 551 | desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; | ||
| 552 | } | ||
| 553 | |||
| 554 | static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 555 | { | ||
| 556 | struct xgene_dma_desc_sw *desc; | ||
| 557 | struct xgene_dma_chan *chan; | ||
| 558 | dma_cookie_t cookie; | ||
| 559 | |||
| 560 | if (unlikely(!tx)) | ||
| 561 | return -EINVAL; | ||
| 562 | |||
| 563 | chan = to_dma_chan(tx->chan); | ||
| 564 | desc = to_dma_desc_sw(tx); | ||
| 565 | |||
| 566 | spin_lock_bh(&chan->lock); | ||
| 567 | |||
| 568 | cookie = dma_cookie_assign(tx); | ||
| 569 | |||
| 570 | /* Add this transaction list onto the tail of the pending queue */ | ||
| 571 | list_splice_tail_init(&desc->tx_list, &chan->ld_pending); | ||
| 572 | |||
| 573 | spin_unlock_bh(&chan->lock); | ||
| 574 | |||
| 575 | return cookie; | ||
| 576 | } | ||
| 577 | |||
| 578 | static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan, | ||
| 579 | struct xgene_dma_desc_sw *desc) | ||
| 580 | { | ||
| 581 | list_del(&desc->node); | ||
| 582 | chan_dbg(chan, "LD %p free\n", desc); | ||
| 583 | dma_pool_free(chan->desc_pool, desc, desc->tx.phys); | ||
| 584 | } | ||
| 585 | |||
| 586 | static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor( | ||
| 587 | struct xgene_dma_chan *chan) | ||
| 588 | { | ||
| 589 | struct xgene_dma_desc_sw *desc; | ||
| 590 | dma_addr_t phys; | ||
| 591 | |||
| 592 | desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys); | ||
| 593 | if (!desc) { | ||
| 594 | chan_err(chan, "Failed to allocate LDs\n"); | ||
| 595 | return NULL; | ||
| 596 | } | ||
| 597 | |||
| 598 | memset(desc, 0, sizeof(*desc)); | ||
| 599 | |||
| 600 | INIT_LIST_HEAD(&desc->tx_list); | ||
| 601 | desc->tx.phys = phys; | ||
| 602 | desc->tx.tx_submit = xgene_dma_tx_submit; | ||
| 603 | dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan); | ||
| 604 | |||
| 605 | chan_dbg(chan, "LD %p allocated\n", desc); | ||
| 606 | |||
| 607 | return desc; | ||
| 608 | } | ||
| 609 | |||
| 610 | /** | ||
| 611 | * xgene_dma_clean_completed_descriptor - free all descriptors which | ||
| 612 | * have been completed and acked | ||
| 613 | * @chan: X-Gene DMA channel | ||
| 614 | * | ||
| 615 | * This function is used on all completed and acked descriptors. | ||
| 616 | */ | ||
| 617 | static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan) | ||
| 618 | { | ||
| 619 | struct xgene_dma_desc_sw *desc, *_desc; | ||
| 620 | |||
| 621 | /* Run the callback for each descriptor, in order */ | ||
| 622 | list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) { | ||
| 623 | if (async_tx_test_ack(&desc->tx)) | ||
| 624 | xgene_dma_clean_descriptor(chan, desc); | ||
| 625 | } | ||
| 626 | } | ||
| 627 | |||
| 628 | /** | ||
| 629 | * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor | ||
| 630 | * @chan: X-Gene DMA channel | ||
| 631 | * @desc: descriptor to cleanup and free | ||
| 632 | * | ||
| 633 | * This function is used on a descriptor which has been executed by the DMA | ||
| 634 | * controller. It will run any callbacks, submit any dependencies. | ||
| 635 | */ | ||
| 636 | static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan, | ||
| 637 | struct xgene_dma_desc_sw *desc) | ||
| 638 | { | ||
| 639 | struct dma_async_tx_descriptor *tx = &desc->tx; | ||
| 640 | |||
| 641 | /* | ||
| 642 | * If this is not the last transaction in the group, | ||
| 643 | * there is no need to complete the cookie or run any callback, | ||
| 644 | * since this is not the tx descriptor that was returned to the | ||
| 645 | * caller of this DMA request | ||
| 646 | */ | ||
| 647 | |||
| 648 | if (tx->cookie == 0) | ||
| 649 | return; | ||
| 650 | |||
| 651 | dma_cookie_complete(tx); | ||
| 652 | |||
| 653 | /* Run the link descriptor callback function */ | ||
| 654 | if (tx->callback) | ||
| 655 | tx->callback(tx->callback_param); | ||
| 656 | |||
| 657 | dma_descriptor_unmap(tx); | ||
| 658 | |||
| 659 | /* Run any dependencies */ | ||
| 660 | dma_run_dependencies(tx); | ||
| 661 | } | ||
| 662 | |||
| 663 | /** | ||
| 664 | * xgene_dma_clean_running_descriptor - move the completed descriptor from | ||
| 665 | * ld_running to ld_completed | ||
| 666 | * @chan: X-Gene DMA channel | ||
| 667 | * @desc: the descriptor which is completed | ||
| 668 | * | ||
| 669 | * Free the descriptor directly if acked by async_tx api, | ||
| 670 | * else move it to queue ld_completed. | ||
| 671 | */ | ||
| 672 | static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan, | ||
| 673 | struct xgene_dma_desc_sw *desc) | ||
| 674 | { | ||
| 675 | /* Remove from the list of running transactions */ | ||
| 676 | list_del(&desc->node); | ||
| 677 | |||
| 678 | /* | ||
| 679 | * the client is allowed to attach dependent operations | ||
| 680 | * until 'ack' is set | ||
| 681 | */ | ||
| 682 | if (!async_tx_test_ack(&desc->tx)) { | ||
| 683 | /* | ||
| 684 | * Move this descriptor to the list of descriptors which is | ||
| 685 | * completed, but still awaiting the 'ack' bit to be set. | ||
| 686 | */ | ||
| 687 | list_add_tail(&desc->node, &chan->ld_completed); | ||
| 688 | return; | ||
| 689 | } | ||
| 690 | |||
| 691 | chan_dbg(chan, "LD %p free\n", desc); | ||
| 692 | dma_pool_free(chan->desc_pool, desc, desc->tx.phys); | ||
| 693 | } | ||
| 694 | |||
| 695 | static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | ||
| 696 | struct xgene_dma_desc_sw *desc_sw) | ||
| 697 | { | ||
| 698 | struct xgene_dma_desc_hw *desc_hw; | ||
| 699 | |||
| 700 | /* Check if we can push more descriptors to hw for execution */ | ||
| 701 | if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2)) | ||
| 702 | return -EBUSY; | ||
| 703 | |||
| 704 | /* Get hw descriptor from DMA tx ring */ | ||
| 705 | desc_hw = &ring->desc_hw[ring->head]; | ||
| 706 | |||
| 707 | /* | ||
| 708 | * Advance the head so it points at the next | ||
| 709 | * descriptor for the next submission | ||
| 710 | */ | ||
| 711 | if (++ring->head == ring->slots) | ||
| 712 | ring->head = 0; | ||
| 713 | |||
| 714 | /* Copy prepared sw descriptor data to hw descriptor */ | ||
| 715 | memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw)); | ||
| 716 | |||
| 717 | /* | ||
| 718 | * If we have prepared a 64B descriptor, | ||
| 719 | * we need one more hw descriptor | ||
| 720 | */ | ||
| 721 | if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) { | ||
| 722 | desc_hw = &ring->desc_hw[ring->head]; | ||
| 723 | |||
| 724 | if (++ring->head == ring->slots) | ||
| 725 | ring->head = 0; | ||
| 726 | |||
| 727 | memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw)); | ||
| 728 | } | ||
| 729 | |||
| 730 | /* Notify the hw that we have descriptor ready for execution */ | ||
| 731 | iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ? | ||
| 732 | 2 : 1, ring->cmd); | ||
| 733 | |||
| 734 | return 0; | ||
| 735 | } | ||
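The head bookkeeping here is a plain circular-ring advance; factored out it would read as below (illustrative only, the driver keeps it inline):

	#include <linux/types.h>

	/* Equivalent of the two "if (++ring->head == ring->slots)" sites above */
	static inline u16 ring_advance(u16 head, u16 slots)
	{
		return (++head == slots) ? 0 : head;
	}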
| 736 | |||
| 737 | /** | ||
| 738 | * xgene_chan_xfer_ld_pending - push any pending transactions to hw | ||
| 739 | * @chan : X-Gene DMA channel | ||
| 740 | * | ||
| 741 | * LOCKING: must hold chan->lock | ||
| 742 | */ | ||
| 743 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) | ||
| 744 | { | ||
| 745 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; | ||
| 746 | int ret; | ||
| 747 | |||
| 748 | /* | ||
| 749 | * If the list of pending descriptors is empty, then we | ||
| 750 | * don't need to do any work at all | ||
| 751 | */ | ||
| 752 | if (list_empty(&chan->ld_pending)) { | ||
| 753 | chan_dbg(chan, "No pending LDs\n"); | ||
| 754 | return; | ||
| 755 | } | ||
| 756 | |||
| 757 | /* | ||
| 758 | * Move elements from the queue of pending transactions onto the list | ||
| 759 | * of running transactions and push them to hw for execution | ||
| 760 | */ | ||
| 761 | list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) { | ||
| 762 | /* | ||
| 763 | * If we have already pushed as many transactions to hw | ||
| 764 | * as it can take, stop here; the remaining elements of the | ||
| 765 | * pending ld queue will be pushed once some of the | ||
| 766 | * descriptors already submitted have completed | ||
| 767 | */ | ||
| 768 | if (chan->pending >= chan->max_outstanding) | ||
| 769 | return; | ||
| 770 | |||
| 771 | ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw); | ||
| 772 | if (ret) | ||
| 773 | return; | ||
| 774 | |||
| 775 | /* | ||
| 776 | * Delete this element from ld pending queue and append it to | ||
| 777 | * ld running queue | ||
| 778 | */ | ||
| 779 | list_move_tail(&desc_sw->node, &chan->ld_running); | ||
| 780 | |||
| 781 | /* Increment the pending transaction count */ | ||
| 782 | chan->pending++; | ||
| 783 | } | ||
| 784 | } | ||
| 785 | |||
| 786 | /** | ||
| 787 | * xgene_dma_cleanup_descriptors - clean up completed link descriptors | ||
| 788 | * and move them to ld_completed, where they are freed once the 'ack' flag is set | ||
| 789 | * @chan: X-Gene DMA channel | ||
| 790 | * | ||
| 791 | * This function is used on descriptors which have been executed by the DMA | ||
| 792 | * controller. It will run any callbacks, submit any dependencies, then | ||
| 793 | * free these descriptors if flag 'ack' is set. | ||
| 794 | */ | ||
| 795 | static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) | ||
| 796 | { | ||
| 797 | struct xgene_dma_ring *ring = &chan->rx_ring; | ||
| 798 | struct xgene_dma_desc_sw *desc_sw, *_desc_sw; | ||
| 799 | struct xgene_dma_desc_hw *desc_hw; | ||
| 800 | u8 status; | ||
| 801 | |||
| 802 | /* Clean already completed and acked descriptors */ | ||
| 803 | xgene_dma_clean_completed_descriptor(chan); | ||
| 804 | |||
| 805 | /* Run the callback for each descriptor, in order */ | ||
| 806 | list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) { | ||
| 807 | /* Get subsequent hw descriptor from DMA rx ring */ | ||
| 808 | desc_hw = &ring->desc_hw[ring->head]; | ||
| 809 | |||
| 810 | /* Check if this descriptor has been completed */ | ||
| 811 | if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw))) | ||
| 812 | break; | ||
| 813 | |||
| 814 | if (++ring->head == ring->slots) | ||
| 815 | ring->head = 0; | ||
| 816 | |||
| 817 | /* Check if we have any error with DMA transactions */ | ||
| 818 | status = XGENE_DMA_DESC_STATUS( | ||
| 819 | XGENE_DMA_DESC_ELERR_RD(le64_to_cpu( | ||
| 820 | desc_hw->m0)), | ||
| 821 | XGENE_DMA_DESC_LERR_RD(le64_to_cpu( | ||
| 822 | desc_hw->m0))); | ||
| 823 | if (status) { | ||
| 824 | /* Print the DMA error type */ | ||
| 825 | chan_err(chan, "%s\n", xgene_dma_desc_err[status]); | ||
| 826 | |||
| 827 | /* | ||
| 828 | * We have a DMA transaction error here. Dump the DMA Tx | ||
| 829 | * and Rx descriptors for this request */ | ||
| 830 | XGENE_DMA_DESC_DUMP(&desc_sw->desc1, | ||
| 831 | "X-Gene DMA TX DESC1: "); | ||
| 832 | |||
| 833 | if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) | ||
| 834 | XGENE_DMA_DESC_DUMP(&desc_sw->desc2, | ||
| 835 | "X-Gene DMA TX DESC2: "); | ||
| 836 | |||
| 837 | XGENE_DMA_DESC_DUMP(desc_hw, | ||
| 838 | "X-Gene DMA RX ERR DESC: "); | ||
| 839 | } | ||
| 840 | |||
| 841 | /* Notify the hw about this completed descriptor */ | ||
| 842 | iowrite32(-1, ring->cmd); | ||
| 843 | |||
| 844 | /* Mark this hw descriptor as processed */ | ||
| 845 | XGENE_DMA_DESC_SET_EMPTY(desc_hw); | ||
| 846 | |||
| 847 | xgene_dma_run_tx_complete_actions(chan, desc_sw); | ||
| 848 | |||
| 849 | xgene_dma_clean_running_descriptor(chan, desc_sw); | ||
| 850 | |||
| 851 | /* | ||
| 852 | * Decrement the pending transaction count | ||
| 853 | * as we have processed one | ||
| 854 | */ | ||
| 855 | chan->pending--; | ||
| 856 | } | ||
| 857 | |||
| 858 | /* | ||
| 859 | * Start any pending transactions automatically | ||
| 860 | * In the ideal case, we keep the DMA controller busy while we go | ||
| 861 | * ahead and free the descriptors below. | ||
| 862 | */ | ||
| 863 | xgene_chan_xfer_ld_pending(chan); | ||
| 864 | } | ||
| 865 | |||
| 866 | static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan) | ||
| 867 | { | ||
| 868 | struct xgene_dma_chan *chan = to_dma_chan(dchan); | ||
| 869 | |||
| 870 | /* Has this channel already been allocated? */ | ||
| 871 | if (chan->desc_pool) | ||
| 872 | return 1; | ||
| 873 | |||
| 874 | chan->desc_pool = dma_pool_create(chan->name, chan->dev, | ||
| 875 | sizeof(struct xgene_dma_desc_sw), | ||
| 876 | 0, 0); | ||
| 877 | if (!chan->desc_pool) { | ||
| 878 | chan_err(chan, "Failed to allocate descriptor pool\n"); | ||
| 879 | return -ENOMEM; | ||
| 880 | } | ||
| 881 | |||
| 882 | chan_dbg(chan, "Allocated descriptor pool\n"); | ||
| 883 | |||
| 884 | return 1; | ||
| 885 | } | ||
| 886 | |||
| 887 | /** | ||
| 888 | * xgene_dma_free_desc_list - Free all descriptors in a queue | ||
| 889 | * @chan: X-Gene DMA channel | ||
| 890 | * @list: the list to free | ||
| 891 | * | ||
| 892 | * LOCKING: must hold chan->lock | ||
| 893 | */ | ||
| 894 | static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, | ||
| 895 | struct list_head *list) | ||
| 896 | { | ||
| 897 | struct xgene_dma_desc_sw *desc, *_desc; | ||
| 898 | |||
| 899 | list_for_each_entry_safe(desc, _desc, list, node) | ||
| 900 | xgene_dma_clean_descriptor(chan, desc); | ||
| 901 | } | ||
| 902 | |||
| 903 | static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan, | ||
| 904 | struct list_head *list) | ||
| 905 | { | ||
| 906 | struct xgene_dma_desc_sw *desc, *_desc; | ||
| 907 | |||
| 908 | list_for_each_entry_safe(desc, _desc, list, node) | ||
| 909 | xgene_dma_clean_descriptor(chan, desc); | ||
| 910 | } | ||
| 911 | |||
| 912 | static void xgene_dma_free_chan_resources(struct dma_chan *dchan) | ||
| 913 | { | ||
| 914 | struct xgene_dma_chan *chan = to_dma_chan(dchan); | ||
| 915 | |||
| 916 | chan_dbg(chan, "Free all resources\n"); | ||
| 917 | |||
| 918 | if (!chan->desc_pool) | ||
| 919 | return; | ||
| 920 | |||
| 921 | spin_lock_bh(&chan->lock); | ||
| 922 | |||
| 923 | /* Process all running descriptors */ | ||
| 924 | xgene_dma_cleanup_descriptors(chan); | ||
| 925 | |||
| 926 | /* Clean all link descriptor queues */ | ||
| 927 | xgene_dma_free_desc_list(chan, &chan->ld_pending); | ||
| 928 | xgene_dma_free_desc_list(chan, &chan->ld_running); | ||
| 929 | xgene_dma_free_desc_list(chan, &chan->ld_completed); | ||
| 930 | |||
| 931 | spin_unlock_bh(&chan->lock); | ||
| 932 | |||
| 933 | /* Delete this channel DMA pool */ | ||
| 934 | dma_pool_destroy(chan->desc_pool); | ||
| 935 | chan->desc_pool = NULL; | ||
| 936 | } | ||
| 937 | |||
| 938 | static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy( | ||
| 939 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, | ||
| 940 | size_t len, unsigned long flags) | ||
| 941 | { | ||
| 942 | struct xgene_dma_desc_sw *first = NULL, *new; | ||
| 943 | struct xgene_dma_chan *chan; | ||
| 944 | size_t copy; | ||
| 945 | |||
| 946 | if (unlikely(!dchan || !len)) | ||
| 947 | return NULL; | ||
| 948 | |||
| 949 | chan = to_dma_chan(dchan); | ||
| 950 | |||
| 951 | do { | ||
| 952 | /* Allocate the link descriptor from DMA pool */ | ||
| 953 | new = xgene_dma_alloc_descriptor(chan); | ||
| 954 | if (!new) | ||
| 955 | goto fail; | ||
| 956 | |||
| 957 | /* Create the largest transaction possible */ | ||
| 958 | copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT); | ||
| 959 | |||
| 960 | /* Prepare DMA descriptor */ | ||
| 961 | xgene_dma_prep_cpy_desc(chan, new, dst, src, copy); | ||
| 962 | |||
| 963 | if (!first) | ||
| 964 | first = new; | ||
| 965 | |||
| 966 | new->tx.cookie = 0; | ||
| 967 | async_tx_ack(&new->tx); | ||
| 968 | |||
| 969 | /* Update metadata */ | ||
| 970 | len -= copy; | ||
| 971 | dst += copy; | ||
| 972 | src += copy; | ||
| 973 | |||
| 974 | /* Insert the link descriptor to the LD ring */ | ||
| 975 | list_add_tail(&new->node, &first->tx_list); | ||
| 976 | } while (len); | ||
| 977 | |||
| 978 | new->tx.flags = flags; /* client is in control of this ack */ | ||
| 979 | new->tx.cookie = -EBUSY; | ||
| 980 | list_splice(&first->tx_list, &new->tx_list); | ||
| 981 | |||
| 982 | return &new->tx; | ||
| 983 | |||
| 984 | fail: | ||
| 985 | if (!first) | ||
| 986 | return NULL; | ||
| 987 | |||
| 988 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | ||
| 989 | return NULL; | ||
| 990 | } | ||
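From the consumer side, the memcpy path above is driven through the usual three steps: prepare, submit (which enters xgene_dma_tx_submit()), then kick the channel. A hedged sketch, with everything outside the dmaengine API assumed from context:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* "chan", "dst", "src" and "len" assumed from surrounding context */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* lands in xgene_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* pushes the pending LDs to the ring */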
| 991 | |||
| 992 | static struct dma_async_tx_descriptor *xgene_dma_prep_sg( | ||
| 993 | struct dma_chan *dchan, struct scatterlist *dst_sg, | ||
| 994 | u32 dst_nents, struct scatterlist *src_sg, | ||
| 995 | u32 src_nents, unsigned long flags) | ||
| 996 | { | ||
| 997 | struct xgene_dma_desc_sw *first = NULL, *new = NULL; | ||
| 998 | struct xgene_dma_chan *chan; | ||
| 999 | size_t dst_avail, src_avail; | ||
| 1000 | dma_addr_t dst, src; | ||
| 1001 | size_t len; | ||
| 1002 | |||
| 1003 | if (unlikely(!dchan)) | ||
| 1004 | return NULL; | ||
| 1005 | |||
| 1006 | if (unlikely(!dst_nents || !src_nents)) | ||
| 1007 | return NULL; | ||
| 1008 | |||
| 1009 | if (unlikely(!dst_sg || !src_sg)) | ||
| 1010 | return NULL; | ||
| 1011 | |||
| 1012 | chan = to_dma_chan(dchan); | ||
| 1013 | |||
| 1014 | /* Get prepared for the loop */ | ||
| 1015 | dst_avail = sg_dma_len(dst_sg); | ||
| 1016 | src_avail = sg_dma_len(src_sg); | ||
| 1017 | dst_nents--; | ||
| 1018 | src_nents--; | ||
| 1019 | |||
| 1020 | /* Run until we are out of scatterlist entries */ | ||
| 1021 | while (true) { | ||
| 1022 | /* Create the largest transaction possible */ | ||
| 1023 | len = min_t(size_t, src_avail, dst_avail); | ||
| 1024 | len = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT); | ||
| 1025 | if (len == 0) | ||
| 1026 | goto fetch; | ||
| 1027 | |||
| 1028 | dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; | ||
| 1029 | src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; | ||
| 1030 | |||
| 1031 | /* Allocate the link descriptor from DMA pool */ | ||
| 1032 | new = xgene_dma_alloc_descriptor(chan); | ||
| 1033 | if (!new) | ||
| 1034 | goto fail; | ||
| 1035 | |||
| 1036 | /* Prepare DMA descriptor */ | ||
| 1037 | xgene_dma_prep_cpy_desc(chan, new, dst, src, len); | ||
| 1038 | |||
| 1039 | if (!first) | ||
| 1040 | first = new; | ||
| 1041 | |||
| 1042 | new->tx.cookie = 0; | ||
| 1043 | async_tx_ack(&new->tx); | ||
| 1044 | |||
| 1045 | /* Update metadata */ | ||
| 1046 | dst_avail -= len; | ||
| 1047 | src_avail -= len; | ||
| 1048 | |||
| 1049 | /* Insert the link descriptor to the LD ring */ | ||
| 1050 | list_add_tail(&new->node, &first->tx_list); | ||
| 1051 | |||
| 1052 | fetch: | ||
| 1053 | /* fetch the next dst scatterlist entry */ | ||
| 1054 | if (dst_avail == 0) { | ||
| 1055 | /* no more entries: we're done */ | ||
| 1056 | if (dst_nents == 0) | ||
| 1057 | break; | ||
| 1058 | |||
| 1059 | /* fetch the next entry; if there are no more, we're done */ | ||
| 1060 | dst_sg = sg_next(dst_sg); | ||
| 1061 | if (!dst_sg) | ||
| 1062 | break; | ||
| 1063 | |||
| 1064 | dst_nents--; | ||
| 1065 | dst_avail = sg_dma_len(dst_sg); | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | /* fetch the next src scatterlist entry */ | ||
| 1069 | if (src_avail == 0) { | ||
| 1070 | /* no more entries: we're done */ | ||
| 1071 | if (src_nents == 0) | ||
| 1072 | break; | ||
| 1073 | |||
| 1074 | /* fetch the next entry; if there are no more, we're done */ | ||
| 1075 | src_sg = sg_next(src_sg); | ||
| 1076 | if (!src_sg) | ||
| 1077 | break; | ||
| 1078 | |||
| 1079 | src_nents--; | ||
| 1080 | src_avail = sg_dma_len(src_sg); | ||
| 1081 | } | ||
| 1082 | } | ||
| 1083 | |||
| 1084 | if (!new) | ||
| 1085 | return NULL; | ||
| 1086 | |||
| 1087 | new->tx.flags = flags; /* client is in control of this ack */ | ||
| 1088 | new->tx.cookie = -EBUSY; | ||
| 1089 | list_splice(&first->tx_list, &new->tx_list); | ||
| 1090 | |||
| 1091 | return &new->tx; | ||
| 1092 | fail: | ||
| 1093 | if (!first) | ||
| 1094 | return NULL; | ||
| 1095 | |||
| 1096 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | ||
| 1097 | return NULL; | ||
| 1098 | } | ||
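
The loop above is a classic dual-scatterlist walk: each iteration emits one descriptor covering min(src_avail, dst_avail, hardware cap) bytes, then refills whichever side ran dry. A self-contained userspace rendition of the same walk, for illustration (plain arrays stand in for scatterlists; MAX_DESC_BYTES is a hypothetical stand-in for XGENE_DMA_MAX_64B_DESC_BYTE_CNT):

	#include <stdio.h>
	#include <stddef.h>

	#define MAX_DESC_BYTES 16384	/* stand-in for the HW per-desc cap */

	struct seg { size_t len; };

	int main(void)
	{
		struct seg dst[] = { {40000}, {8000} };
		struct seg src[] = { {16000}, {16000}, {16000} };
		size_t di = 0, si = 0;
		size_t dst_avail = dst[0].len, src_avail = src[0].len;

		while (1) {
			/* largest chunk both sides and the HW allow */
			size_t len = src_avail < dst_avail ? src_avail : dst_avail;

			if (len > MAX_DESC_BYTES)
				len = MAX_DESC_BYTES;
			if (len) {
				printf("desc: dst[%zu]+%zu src[%zu]+%zu len %zu\n",
				       di, dst[di].len - dst_avail,
				       si, src[si].len - src_avail, len);
				dst_avail -= len;
				src_avail -= len;
			}
			/* refill whichever side is exhausted */
			if (dst_avail == 0) {
				if (++di == sizeof(dst) / sizeof(dst[0]))
					break;
				dst_avail = dst[di].len;
			}
			if (src_avail == 0) {
				if (++si == sizeof(src) / sizeof(src[0]))
					break;
				src_avail = src[si].len;
			}
		}
		return 0;
	}
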
| 1099 | |||
| 1100 | static struct dma_async_tx_descriptor *xgene_dma_prep_xor( | ||
| 1101 | struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, | ||
| 1102 | u32 src_cnt, size_t len, unsigned long flags) | ||
| 1103 | { | ||
| 1104 | struct xgene_dma_desc_sw *first = NULL, *new; | ||
| 1105 | struct xgene_dma_chan *chan; | ||
| 1106 | static u8 multi[XGENE_DMA_MAX_XOR_SRC] = { | ||
| 1107 | 0x01, 0x01, 0x01, 0x01, 0x01}; | ||
| 1108 | |||
| 1109 | if (unlikely(!dchan || !len)) | ||
| 1110 | return NULL; | ||
| 1111 | |||
| 1112 | chan = to_dma_chan(dchan); | ||
| 1113 | |||
| 1114 | do { | ||
| 1115 | /* Allocate the link descriptor from DMA pool */ | ||
| 1116 | new = xgene_dma_alloc_descriptor(chan); | ||
| 1117 | if (!new) | ||
| 1118 | goto fail; | ||
| 1119 | |||
| 1120 | /* Prepare xor DMA descriptor */ | ||
| 1121 | xgene_dma_prep_xor_desc(chan, new, &dst, src, | ||
| 1122 | src_cnt, &len, multi); | ||
| 1123 | |||
| 1124 | if (!first) | ||
| 1125 | first = new; | ||
| 1126 | |||
| 1127 | new->tx.cookie = 0; | ||
| 1128 | async_tx_ack(&new->tx); | ||
| 1129 | |||
| 1130 | /* Insert the link descriptor to the LD ring */ | ||
| 1131 | list_add_tail(&new->node, &first->tx_list); | ||
| 1132 | } while (len); | ||
| 1133 | |||
| 1134 | new->tx.flags = flags; /* client is in control of this ack */ | ||
| 1135 | new->tx.cookie = -EBUSY; | ||
| 1136 | list_splice(&first->tx_list, &new->tx_list); | ||
| 1137 | |||
| 1138 | return &new->tx; | ||
| 1139 | |||
| 1140 | fail: | ||
| 1141 | if (!first) | ||
| 1142 | return NULL; | ||
| 1143 | |||
| 1144 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | ||
| 1145 | return NULL; | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | static struct dma_async_tx_descriptor *xgene_dma_prep_pq( | ||
| 1149 | struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src, | ||
| 1150 | u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) | ||
| 1151 | { | ||
| 1152 | struct xgene_dma_desc_sw *first = NULL, *new; | ||
| 1153 | struct xgene_dma_chan *chan; | ||
| 1154 | size_t _len = len; | ||
| 1155 | dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC]; | ||
| 1156 | static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01}; | ||
| 1157 | |||
| 1158 | if (unlikely(!dchan || !len)) | ||
| 1159 | return NULL; | ||
| 1160 | |||
| 1161 | chan = to_dma_chan(dchan); | ||
| 1162 | |||
| 1163 | /* | ||
| 1164 | * Save the source addresses in a local variable; we may have | ||
| 1165 | * to prepare two descriptors to generate P and Q if both are | ||
| 1166 | * enabled in the flags by the client. | ||
| 1167 | */ | ||
| 1168 | memcpy(_src, src, sizeof(*src) * src_cnt); | ||
| 1169 | |||
| 1170 | if (flags & DMA_PREP_PQ_DISABLE_P) | ||
| 1171 | len = 0; | ||
| 1172 | |||
| 1173 | if (flags & DMA_PREP_PQ_DISABLE_Q) | ||
| 1174 | _len = 0; | ||
| 1175 | |||
| 1176 | do { | ||
| 1177 | /* Allocate the link descriptor from DMA pool */ | ||
| 1178 | new = xgene_dma_alloc_descriptor(chan); | ||
| 1179 | if (!new) | ||
| 1180 | goto fail; | ||
| 1181 | |||
| 1182 | if (!first) | ||
| 1183 | first = new; | ||
| 1184 | |||
| 1185 | new->tx.cookie = 0; | ||
| 1186 | async_tx_ack(&new->tx); | ||
| 1187 | |||
| 1188 | /* Insert the link descriptor to the LD ring */ | ||
| 1189 | list_add_tail(&new->node, &first->tx_list); | ||
| 1190 | |||
| 1191 | /* | ||
| 1192 | * Prepare DMA descriptor to generate P, | ||
| 1193 | * if DMA_PREP_PQ_DISABLE_P flag is not set | ||
| 1194 | */ | ||
| 1195 | if (len) { | ||
| 1196 | xgene_dma_prep_xor_desc(chan, new, &dst[0], src, | ||
| 1197 | src_cnt, &len, multi); | ||
| 1198 | continue; | ||
| 1199 | } | ||
| 1200 | |||
| 1201 | /* | ||
| 1202 | * Prepare DMA descriptor to generate Q, | ||
| 1203 | * if DMA_PREP_PQ_DISABLE_Q flag is not set | ||
| 1204 | */ | ||
| 1205 | if (_len) { | ||
| 1206 | xgene_dma_prep_xor_desc(chan, new, &dst[1], _src, | ||
| 1207 | src_cnt, &_len, scf); | ||
| 1208 | } | ||
| 1209 | } while (len || _len); | ||
| 1210 | |||
| 1211 | new->tx.flags = flags; /* client is in control of this ack */ | ||
| 1212 | new->tx.cookie = -EBUSY; | ||
| 1213 | list_splice(&first->tx_list, &new->tx_list); | ||
| 1214 | |||
| 1215 | return &new->tx; | ||
| 1216 | |||
| 1217 | fail: | ||
| 1218 | if (!first) | ||
| 1219 | return NULL; | ||
| 1220 | |||
| 1221 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | ||
| 1222 | return NULL; | ||
| 1223 | } | ||
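
For reference, the two descriptors implement the usual RAID-6 arithmetic: P is a plain XOR of the sources (hence the fixed 0x01 multipliers), and Q is a GF(2^8) weighted sum using the client-supplied coefficients scf — presumably over the 0x11d polynomial, matching the 0x1D constant written to XGENE_DMA_RAID6_CONT in xgene_dma_init_hw() below. A self-contained software model, for illustration only:

	#include <stdio.h>
	#include <stdint.h>

	/* GF(2^8) multiply, polynomial x^8 + x^4 + x^3 + x^2 + 1 (0x11d). */
	static uint8_t gf_mul(uint8_t a, uint8_t b)
	{
		uint8_t p = 0;

		while (b) {
			if (b & 1)
				p ^= a;
			/* reduce the overflow term x^8 back to 0x1d */
			a = (a << 1) ^ ((a & 0x80) ? 0x1d : 0);
			b >>= 1;
		}
		return p;
	}

	int main(void)
	{
		uint8_t d[3] = { 0xde, 0xad, 0xbe };	/* one byte per source */
		uint8_t scf[3] = { 0x01, 0x02, 0x04 };	/* Q coefficients g^0..g^2 */
		uint8_t p = 0, q = 0;
		int i;

		for (i = 0; i < 3; i++) {
			p ^= d[i];			/* P: multiplier 0x01 */
			q ^= gf_mul(scf[i], d[i]);	/* Q: weighted by scf[i] */
		}
		printf("P=0x%02x Q=0x%02x\n", p, q);
		return 0;
	}
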
| 1224 | |||
| 1225 | static void xgene_dma_issue_pending(struct dma_chan *dchan) | ||
| 1226 | { | ||
| 1227 | struct xgene_dma_chan *chan = to_dma_chan(dchan); | ||
| 1228 | |||
| 1229 | spin_lock_bh(&chan->lock); | ||
| 1230 | xgene_chan_xfer_ld_pending(chan); | ||
| 1231 | spin_unlock_bh(&chan->lock); | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan, | ||
| 1235 | dma_cookie_t cookie, | ||
| 1236 | struct dma_tx_state *txstate) | ||
| 1237 | { | ||
| 1238 | return dma_cookie_status(dchan, cookie, txstate); | ||
| 1239 | } | ||
| 1240 | |||
| 1241 | static void xgene_dma_tasklet_cb(unsigned long data) | ||
| 1242 | { | ||
| 1243 | struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data; | ||
| 1244 | |||
| 1245 | spin_lock_bh(&chan->lock); | ||
| 1246 | |||
| 1247 | /* Run all cleanup for descriptors which have been completed */ | ||
| 1248 | xgene_dma_cleanup_descriptors(chan); | ||
| 1249 | |||
| 1250 | /* Re-enable DMA channel IRQ */ | ||
| 1251 | enable_irq(chan->rx_irq); | ||
| 1252 | |||
| 1253 | spin_unlock_bh(&chan->lock); | ||
| 1254 | } | ||
| 1255 | |||
| 1256 | static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id) | ||
| 1257 | { | ||
| 1258 | struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id; | ||
| 1259 | |||
| 1260 | BUG_ON(!chan); | ||
| 1261 | |||
| 1262 | /* | ||
| 1263 | * Disable DMA channel IRQ until we process completed | ||
| 1264 | * descriptors | ||
| 1265 | */ | ||
| 1266 | disable_irq_nosync(chan->rx_irq); | ||
| 1267 | |||
| 1268 | /* | ||
| 1269 | * Schedule the tasklet to handle all cleanup of the current | ||
| 1270 | * transaction. It will start a new transaction if there is | ||
| 1271 | * one pending. | ||
| 1272 | */ | ||
| 1273 | tasklet_schedule(&chan->tasklet); | ||
| 1274 | |||
| 1275 | return IRQ_HANDLED; | ||
| 1276 | } | ||
| 1277 | |||
| 1278 | static irqreturn_t xgene_dma_err_isr(int irq, void *id) | ||
| 1279 | { | ||
| 1280 | struct xgene_dma *pdma = (struct xgene_dma *)id; | ||
| 1281 | unsigned long int_mask; | ||
| 1282 | u32 val, i; | ||
| 1283 | |||
| 1284 | val = ioread32(pdma->csr_dma + XGENE_DMA_INT); | ||
| 1285 | |||
| 1286 | /* Clear DMA interrupts */ | ||
| 1287 | iowrite32(val, pdma->csr_dma + XGENE_DMA_INT); | ||
| 1288 | |||
| 1289 | /* Print DMA error info */ | ||
| 1290 | int_mask = val >> XGENE_DMA_INT_MASK_SHIFT; | ||
| 1291 | for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err)) | ||
| 1292 | dev_err(pdma->dev, | ||
| 1293 | "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]); | ||
| 1294 | |||
| 1295 | return IRQ_HANDLED; | ||
| 1296 | } | ||
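
The handler above acknowledges the latched status by writing it back, then walks the set bits of the shifted value to name each error source from the xgene_dma_err[] string table. A tiny userspace analogue of that decode loop (the error strings here are invented for the demo):

	#include <stdio.h>

	static const char * const err_msg[] = {
		"ring overflow", "ring underflow",
		"AXI write error", "AXI read error",
	};

	int main(void)
	{
		unsigned long status = 0x5;	/* pretend bits 0 and 2 latched */
		unsigned int i;

		/* open-coded for_each_set_bit() */
		for (i = 0; i < sizeof(err_msg) / sizeof(err_msg[0]); i++)
			if (status & (1UL << i))
				printf("error: %s\n", err_msg[i]);
		return 0;
	}
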
| 1297 | |||
| 1298 | static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring) | ||
| 1299 | { | ||
| 1300 | int i; | ||
| 1301 | |||
| 1302 | iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE); | ||
| 1303 | |||
| 1304 | for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++) | ||
| 1305 | iowrite32(ring->state[i], ring->pdma->csr_ring + | ||
| 1306 | XGENE_DMA_RING_STATE_WR_BASE + (i * 4)); | ||
| 1307 | } | ||
| 1308 | |||
| 1309 | static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring) | ||
| 1310 | { | ||
| 1311 | memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG); | ||
| 1312 | xgene_dma_wr_ring_state(ring); | ||
| 1313 | } | ||
| 1314 | |||
| 1315 | static void xgene_dma_setup_ring(struct xgene_dma_ring *ring) | ||
| 1316 | { | ||
| 1317 | void *ring_cfg = ring->state; | ||
| 1318 | u64 addr = ring->desc_paddr; | ||
| 1319 | void *desc; | ||
| 1320 | u32 i, val; | ||
| 1321 | |||
| 1322 | ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; | ||
| 1323 | |||
| 1324 | /* Clear DMA ring state */ | ||
| 1325 | xgene_dma_clr_ring_state(ring); | ||
| 1326 | |||
| 1327 | /* Set DMA ring type */ | ||
| 1328 | XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR); | ||
| 1329 | |||
| 1330 | if (ring->owner == XGENE_DMA_RING_OWNER_DMA) { | ||
| 1331 | /* Set recombination buffer and timeout */ | ||
| 1332 | XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg); | ||
| 1333 | XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg); | ||
| 1334 | XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg); | ||
| 1335 | } | ||
| 1336 | |||
| 1337 | /* Initialize DMA ring state */ | ||
| 1338 | XGENE_DMA_RING_SELTHRSH_SET(ring_cfg); | ||
| 1339 | XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg); | ||
| 1340 | XGENE_DMA_RING_COHERENT_SET(ring_cfg); | ||
| 1341 | XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr); | ||
| 1342 | XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr); | ||
| 1343 | XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize); | ||
| 1344 | |||
| 1345 | /* Write DMA ring configurations */ | ||
| 1346 | xgene_dma_wr_ring_state(ring); | ||
| 1347 | |||
| 1348 | /* Set DMA ring id */ | ||
| 1349 | iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id), | ||
| 1350 | ring->pdma->csr_ring + XGENE_DMA_RING_ID); | ||
| 1351 | |||
| 1352 | /* Set DMA ring buffer */ | ||
| 1353 | iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num), | ||
| 1354 | ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); | ||
| 1355 | |||
| 1356 | if (ring->owner != XGENE_DMA_RING_OWNER_CPU) | ||
| 1357 | return; | ||
| 1358 | |||
| 1359 | /* Set empty signature to DMA Rx ring descriptors */ | ||
| 1360 | for (i = 0; i < ring->slots; i++) { | ||
| 1361 | desc = &ring->desc_hw[i]; | ||
| 1362 | XGENE_DMA_DESC_SET_EMPTY(desc); | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | /* Enable DMA Rx ring interrupt */ | ||
| 1366 | val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); | ||
| 1367 | XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num); | ||
| 1368 | iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE); | ||
| 1369 | } | ||
| 1370 | |||
| 1371 | static void xgene_dma_clear_ring(struct xgene_dma_ring *ring) | ||
| 1372 | { | ||
| 1373 | u32 ring_id, val; | ||
| 1374 | |||
| 1375 | if (ring->owner == XGENE_DMA_RING_OWNER_CPU) { | ||
| 1376 | /* Disable DMA Rx ring interrupt */ | ||
| 1377 | val = ioread32(ring->pdma->csr_ring + | ||
| 1378 | XGENE_DMA_RING_NE_INT_MODE); | ||
| 1379 | XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num); | ||
| 1380 | iowrite32(val, ring->pdma->csr_ring + | ||
| 1381 | XGENE_DMA_RING_NE_INT_MODE); | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | /* Clear DMA ring state */ | ||
| 1385 | ring_id = XGENE_DMA_RING_ID_SETUP(ring->id); | ||
| 1386 | iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID); | ||
| 1387 | |||
| 1388 | iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF); | ||
| 1389 | xgene_dma_clr_ring_state(ring); | ||
| 1390 | } | ||
| 1391 | |||
| 1392 | static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring) | ||
| 1393 | { | ||
| 1394 | ring->cmd_base = ring->pdma->csr_ring_cmd + | ||
| 1395 | XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num - | ||
| 1396 | XGENE_DMA_RING_NUM)); | ||
| 1397 | |||
| 1398 | ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET; | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan, | ||
| 1402 | enum xgene_dma_ring_cfgsize cfgsize) | ||
| 1403 | { | ||
| 1404 | int size; | ||
| 1405 | |||
| 1406 | switch (cfgsize) { | ||
| 1407 | case XGENE_DMA_RING_CFG_SIZE_512B: | ||
| 1408 | size = 0x200; | ||
| 1409 | break; | ||
| 1410 | case XGENE_DMA_RING_CFG_SIZE_2KB: | ||
| 1411 | size = 0x800; | ||
| 1412 | break; | ||
| 1413 | case XGENE_DMA_RING_CFG_SIZE_16KB: | ||
| 1414 | size = 0x4000; | ||
| 1415 | break; | ||
| 1416 | case XGENE_DMA_RING_CFG_SIZE_64KB: | ||
| 1417 | size = 0x10000; | ||
| 1418 | break; | ||
| 1419 | case XGENE_DMA_RING_CFG_SIZE_512KB: | ||
| 1420 | size = 0x80000; | ||
| 1421 | break; | ||
| 1422 | default: | ||
| 1423 | chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize); | ||
| 1424 | return -EINVAL; | ||
| 1425 | } | ||
| 1426 | |||
| 1427 | return size; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring) | ||
| 1431 | { | ||
| 1432 | /* Clear DMA ring configurations */ | ||
| 1433 | xgene_dma_clear_ring(ring); | ||
| 1434 | |||
| 1435 | /* De-allocate DMA ring descriptor */ | ||
| 1436 | if (ring->desc_vaddr) { | ||
| 1437 | dma_free_coherent(ring->pdma->dev, ring->size, | ||
| 1438 | ring->desc_vaddr, ring->desc_paddr); | ||
| 1439 | ring->desc_vaddr = NULL; | ||
| 1440 | } | ||
| 1441 | } | ||
| 1442 | |||
| 1443 | static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan) | ||
| 1444 | { | ||
| 1445 | xgene_dma_delete_ring_one(&chan->rx_ring); | ||
| 1446 | xgene_dma_delete_ring_one(&chan->tx_ring); | ||
| 1447 | } | ||
| 1448 | |||
| 1449 | static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan, | ||
| 1450 | struct xgene_dma_ring *ring, | ||
| 1451 | enum xgene_dma_ring_cfgsize cfgsize) | ||
| 1452 | { | ||
| 1453 | /* Setup DMA ring descriptor variables */ | ||
| 1454 | ring->pdma = chan->pdma; | ||
| 1455 | ring->cfgsize = cfgsize; | ||
| 1456 | ring->num = chan->pdma->ring_num++; | ||
| 1457 | ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num); | ||
| 1458 | |||
| 1459 | ring->size = xgene_dma_get_ring_size(chan, cfgsize); | ||
| 1460 | if (ring->size <= 0) | ||
| 1461 | return ring->size; | ||
| 1462 | |||
| 1463 | /* Allocate memory for DMA ring descriptor */ | ||
| 1464 | ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size, | ||
| 1465 | &ring->desc_paddr, GFP_KERNEL); | ||
| 1466 | if (!ring->desc_vaddr) { | ||
| 1467 | chan_err(chan, "Failed to allocate ring desc\n"); | ||
| 1468 | return -ENOMEM; | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | /* Configure and enable DMA ring */ | ||
| 1472 | xgene_dma_set_ring_cmd(ring); | ||
| 1473 | xgene_dma_setup_ring(ring); | ||
| 1474 | |||
| 1475 | return 0; | ||
| 1476 | } | ||
| 1477 | |||
| 1478 | static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan) | ||
| 1479 | { | ||
| 1480 | struct xgene_dma_ring *rx_ring = &chan->rx_ring; | ||
| 1481 | struct xgene_dma_ring *tx_ring = &chan->tx_ring; | ||
| 1482 | int ret; | ||
| 1483 | |||
| 1484 | /* Create DMA Rx ring descriptor */ | ||
| 1485 | rx_ring->owner = XGENE_DMA_RING_OWNER_CPU; | ||
| 1486 | rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id; | ||
| 1487 | |||
| 1488 | ret = xgene_dma_create_ring_one(chan, rx_ring, | ||
| 1489 | XGENE_DMA_RING_CFG_SIZE_64KB); | ||
| 1490 | if (ret) | ||
| 1491 | return ret; | ||
| 1492 | |||
| 1493 | chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n", | ||
| 1494 | rx_ring->id, rx_ring->num, rx_ring->desc_vaddr); | ||
| 1495 | |||
| 1496 | /* Create DMA Tx ring descriptor */ | ||
| 1497 | tx_ring->owner = XGENE_DMA_RING_OWNER_DMA; | ||
| 1498 | tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id; | ||
| 1499 | |||
| 1500 | ret = xgene_dma_create_ring_one(chan, tx_ring, | ||
| 1501 | XGENE_DMA_RING_CFG_SIZE_64KB); | ||
| 1502 | if (ret) { | ||
| 1503 | xgene_dma_delete_ring_one(rx_ring); | ||
| 1504 | return ret; | ||
| 1505 | } | ||
| 1506 | |||
| 1507 | tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num); | ||
| 1508 | |||
| 1509 | chan_dbg(chan, | ||
| 1510 | "Tx ring id 0x%X num %d desc 0x%p\n", | ||
| 1511 | tx_ring->id, tx_ring->num, tx_ring->desc_vaddr); | ||
| 1512 | |||
| 1513 | /* Set the maximum number of outstanding requests for this channel */ | ||
| 1514 | chan->max_outstanding = rx_ring->slots; | ||
| 1515 | |||
| 1516 | return ret; | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | static int xgene_dma_init_rings(struct xgene_dma *pdma) | ||
| 1520 | { | ||
| 1521 | int ret, i, j; | ||
| 1522 | |||
| 1523 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | ||
| 1524 | ret = xgene_dma_create_chan_rings(&pdma->chan[i]); | ||
| 1525 | if (ret) { | ||
| 1526 | for (j = 0; j < i; j++) | ||
| 1527 | xgene_dma_delete_chan_rings(&pdma->chan[j]); | ||
| 1528 | return ret; | ||
| 1529 | } | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | return ret; | ||
| 1533 | } | ||
| 1534 | |||
| 1535 | static void xgene_dma_enable(struct xgene_dma *pdma) | ||
| 1536 | { | ||
| 1537 | u32 val; | ||
| 1538 | |||
| 1539 | /* Configure and enable DMA engine */ | ||
| 1540 | val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); | ||
| 1541 | XGENE_DMA_CH_SETUP(val); | ||
| 1542 | XGENE_DMA_ENABLE(val); | ||
| 1543 | iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); | ||
| 1544 | } | ||
| 1545 | |||
| 1546 | static void xgene_dma_disable(struct xgene_dma *pdma) | ||
| 1547 | { | ||
| 1548 | u32 val; | ||
| 1549 | |||
| 1550 | val = ioread32(pdma->csr_dma + XGENE_DMA_GCR); | ||
| 1551 | XGENE_DMA_DISABLE(val); | ||
| 1552 | iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR); | ||
| 1553 | } | ||
| 1554 | |||
| 1555 | static void xgene_dma_mask_interrupts(struct xgene_dma *pdma) | ||
| 1556 | { | ||
| 1557 | /* | ||
| 1558 | * Mask DMA ring overflow, underflow and | ||
| 1559 | * AXI write/read error interrupts | ||
| 1560 | */ | ||
| 1561 | iowrite32(XGENE_DMA_INT_ALL_MASK, | ||
| 1562 | pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); | ||
| 1563 | iowrite32(XGENE_DMA_INT_ALL_MASK, | ||
| 1564 | pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); | ||
| 1565 | iowrite32(XGENE_DMA_INT_ALL_MASK, | ||
| 1566 | pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); | ||
| 1567 | iowrite32(XGENE_DMA_INT_ALL_MASK, | ||
| 1568 | pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); | ||
| 1569 | iowrite32(XGENE_DMA_INT_ALL_MASK, | ||
| 1570 | pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); | ||
| 1571 | |||
| 1572 | /* Mask DMA error interrupts */ | ||
| 1573 | iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK); | ||
| 1574 | } | ||
| 1575 | |||
| 1576 | static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma) | ||
| 1577 | { | ||
| 1578 | /* | ||
| 1579 | * Unmask DMA ring overflow, underflow and | ||
| 1580 | * AXI write/read error interrupts | ||
| 1581 | */ | ||
| 1582 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | ||
| 1583 | pdma->csr_dma + XGENE_DMA_RING_INT0_MASK); | ||
| 1584 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | ||
| 1585 | pdma->csr_dma + XGENE_DMA_RING_INT1_MASK); | ||
| 1586 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | ||
| 1587 | pdma->csr_dma + XGENE_DMA_RING_INT2_MASK); | ||
| 1588 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | ||
| 1589 | pdma->csr_dma + XGENE_DMA_RING_INT3_MASK); | ||
| 1590 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | ||
| 1591 | pdma->csr_dma + XGENE_DMA_RING_INT4_MASK); | ||
| 1592 | |||
| 1593 | /* Unmask DMA error interrupts */ | ||
| 1594 | iowrite32(XGENE_DMA_INT_ALL_UNMASK, | ||
| 1595 | pdma->csr_dma + XGENE_DMA_INT_MASK); | ||
| 1596 | } | ||
| 1597 | |||
| 1598 | static void xgene_dma_init_hw(struct xgene_dma *pdma) | ||
| 1599 | { | ||
| 1600 | u32 val; | ||
| 1601 | |||
| 1602 | /* Associate DMA ring to corresponding ring HW */ | ||
| 1603 | iowrite32(XGENE_DMA_ASSOC_RING_MNGR1, | ||
| 1604 | pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC); | ||
| 1605 | |||
| 1606 | /* Configure RAID6 polynomial control setting */ | ||
| 1607 | if (is_pq_enabled(pdma)) | ||
| 1608 | iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D), | ||
| 1609 | pdma->csr_dma + XGENE_DMA_RAID6_CONT); | ||
| 1610 | else | ||
| 1611 | dev_info(pdma->dev, "PQ is disabled in HW\n"); | ||
| 1612 | |||
| 1613 | xgene_dma_enable(pdma); | ||
| 1614 | xgene_dma_unmask_interrupts(pdma); | ||
| 1615 | |||
| 1616 | /* Get DMA id and version info */ | ||
| 1617 | val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR); | ||
| 1618 | |||
| 1619 | /* DMA device info */ | ||
| 1620 | dev_info(pdma->dev, | ||
| 1621 | "X-Gene DMA v%d.%02d.%02d driver registered %d channels", | ||
| 1622 | XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val), | ||
| 1623 | XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL); | ||
| 1624 | } | ||
| 1625 | |||
| 1626 | static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma) | ||
| 1627 | { | ||
| 1628 | if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) && | ||
| 1629 | (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST))) | ||
| 1630 | return 0; | ||
| 1631 | |||
| 1632 | iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN); | ||
| 1633 | iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST); | ||
| 1634 | |||
| 1635 | /* Bring up memory */ | ||
| 1636 | iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); | ||
| 1637 | |||
| 1638 | /* Force a barrier */ | ||
| 1639 | ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN); | ||
| 1640 | |||
| 1641 | /* reset may take up to 1ms */ | ||
| 1642 | usleep_range(1000, 1100); | ||
| 1643 | |||
| 1644 | if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY) | ||
| 1645 | != XGENE_DMA_RING_BLK_MEM_RDY_VAL) { | ||
| 1646 | dev_err(pdma->dev, | ||
| 1647 | "Failed to release ring mngr memory from shutdown\n"); | ||
| 1648 | return -ENODEV; | ||
| 1649 | } | ||
| 1650 | |||
| 1651 | /* program threshold set 1 and all hysteresis */ | ||
| 1652 | iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL, | ||
| 1653 | pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1); | ||
| 1654 | iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL, | ||
| 1655 | pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1); | ||
| 1656 | iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL, | ||
| 1657 | pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS); | ||
| 1658 | |||
| 1659 | /* Enable QPcore and assign error queue */ | ||
| 1660 | iowrite32(XGENE_DMA_RING_ENABLE, | ||
| 1661 | pdma->csr_ring + XGENE_DMA_RING_CONFIG); | ||
| 1662 | |||
| 1663 | return 0; | ||
| 1664 | } | ||
| 1665 | |||
| 1666 | static int xgene_dma_init_mem(struct xgene_dma *pdma) | ||
| 1667 | { | ||
| 1668 | int ret; | ||
| 1669 | |||
| 1670 | ret = xgene_dma_init_ring_mngr(pdma); | ||
| 1671 | if (ret) | ||
| 1672 | return ret; | ||
| 1673 | |||
| 1674 | /* Bring up memory */ | ||
| 1675 | iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); | ||
| 1676 | |||
| 1677 | /* Force a barrier */ | ||
| 1678 | ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN); | ||
| 1679 | |||
| 1680 | /* reset may take up to 1ms */ | ||
| 1681 | usleep_range(1000, 1100); | ||
| 1682 | |||
| 1683 | if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY) | ||
| 1684 | != XGENE_DMA_BLK_MEM_RDY_VAL) { | ||
| 1685 | dev_err(pdma->dev, | ||
| 1686 | "Failed to release DMA memory from shutdown\n"); | ||
| 1687 | return -ENODEV; | ||
| 1688 | } | ||
| 1689 | |||
| 1690 | return 0; | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | static int xgene_dma_request_irqs(struct xgene_dma *pdma) | ||
| 1694 | { | ||
| 1695 | struct xgene_dma_chan *chan; | ||
| 1696 | int ret, i, j; | ||
| 1697 | |||
| 1698 | /* Register DMA error irq */ | ||
| 1699 | ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr, | ||
| 1700 | 0, "dma_error", pdma); | ||
| 1701 | if (ret) { | ||
| 1702 | dev_err(pdma->dev, | ||
| 1703 | "Failed to register error IRQ %d\n", pdma->err_irq); | ||
| 1704 | return ret; | ||
| 1705 | } | ||
| 1706 | |||
| 1707 | /* Register DMA channel rx irq */ | ||
| 1708 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | ||
| 1709 | chan = &pdma->chan[i]; | ||
| 1710 | ret = devm_request_irq(chan->dev, chan->rx_irq, | ||
| 1711 | xgene_dma_chan_ring_isr, | ||
| 1712 | 0, chan->name, chan); | ||
| 1713 | if (ret) { | ||
| 1714 | chan_err(chan, "Failed to register Rx IRQ %d\n", | ||
| 1715 | chan->rx_irq); | ||
| 1716 | devm_free_irq(pdma->dev, pdma->err_irq, pdma); | ||
| 1717 | |||
| 1718 | for (j = 0; j < i; j++) { | ||
| 1719 | chan = &pdma->chan[j]; | ||
| 1720 | devm_free_irq(chan->dev, chan->rx_irq, chan); | ||
| 1721 | } | ||
| 1722 | |||
| 1723 | return ret; | ||
| 1724 | } | ||
| 1725 | } | ||
| 1726 | |||
| 1727 | return 0; | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | static void xgene_dma_free_irqs(struct xgene_dma *pdma) | ||
| 1731 | { | ||
| 1732 | struct xgene_dma_chan *chan; | ||
| 1733 | int i; | ||
| 1734 | |||
| 1735 | /* Free DMA device error irq */ | ||
| 1736 | devm_free_irq(pdma->dev, pdma->err_irq, pdma); | ||
| 1737 | |||
| 1738 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | ||
| 1739 | chan = &pdma->chan[i]; | ||
| 1740 | devm_free_irq(chan->dev, chan->rx_irq, chan); | ||
| 1741 | } | ||
| 1742 | } | ||
| 1743 | |||
| 1744 | static void xgene_dma_set_caps(struct xgene_dma_chan *chan, | ||
| 1745 | struct dma_device *dma_dev) | ||
| 1746 | { | ||
| 1747 | /* Initialize DMA device capability mask */ | ||
| 1748 | dma_cap_zero(dma_dev->cap_mask); | ||
| 1749 | |||
| 1750 | /* Set DMA device capability */ | ||
| 1751 | dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); | ||
| 1752 | dma_cap_set(DMA_SG, dma_dev->cap_mask); | ||
| 1753 | |||
| 1754 | /* The X-Gene SoC DMA engine channel 0 supports XOR, and channel 1 | ||
| 1755 | * supports both XOR and PQ. Two things matter here. First, the | ||
| 1756 | * hardware can enable/disable XOR/PQ support on channel 1, which | ||
| 1757 | * we determine by reading the SoC efuse register. Second, there is | ||
| 1758 | * a hardware erratum: the DMA engine hangs if channel 0 and | ||
| 1759 | * channel 1 execute XOR and PQ requests simultaneously. So we | ||
| 1760 | * enable XOR on channel 0 only if XOR and PQ support on channel 1 | ||
| 1761 | * is disabled. | ||
| 1762 | */ | ||
| 1763 | if ((chan->id == XGENE_DMA_PQ_CHANNEL) && | ||
| 1764 | is_pq_enabled(chan->pdma)) { | ||
| 1765 | dma_cap_set(DMA_PQ, dma_dev->cap_mask); | ||
| 1766 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | ||
| 1767 | } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) && | ||
| 1768 | !is_pq_enabled(chan->pdma)) { | ||
| 1769 | dma_cap_set(DMA_XOR, dma_dev->cap_mask); | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | /* Set base and prep routines */ | ||
| 1773 | dma_dev->dev = chan->dev; | ||
| 1774 | dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources; | ||
| 1775 | dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; | ||
| 1776 | dma_dev->device_issue_pending = xgene_dma_issue_pending; | ||
| 1777 | dma_dev->device_tx_status = xgene_dma_tx_status; | ||
| 1778 | dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy; | ||
| 1779 | dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; | ||
| 1780 | |||
| 1781 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | ||
| 1782 | dma_dev->device_prep_dma_xor = xgene_dma_prep_xor; | ||
| 1783 | dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC; | ||
| 1784 | dma_dev->xor_align = XGENE_DMA_XOR_ALIGNMENT; | ||
| 1785 | } | ||
| 1786 | |||
| 1787 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | ||
| 1788 | dma_dev->device_prep_dma_pq = xgene_dma_prep_pq; | ||
| 1789 | dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC; | ||
| 1790 | dma_dev->pq_align = XGENE_DMA_XOR_ALIGNMENT; | ||
| 1791 | } | ||
| 1792 | } | ||
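
On the consumer side these capability bits are exactly what dma_request_channel() filters on, so only the PQ-capable channel (channel 1, when the efuse enables it) can satisfy a DMA_PQ request. A hedged client-side sketch (kernel context, error handling elided):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_PQ, mask);

	/* Only a PQ-capable channel can satisfy this request. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (chan && dma_has_cap(DMA_XOR, chan->device->cap_mask))
		pr_info("%s also does XOR\n", dma_chan_name(chan));
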
| 1793 | |||
| 1794 | static int xgene_dma_async_register(struct xgene_dma *pdma, int id) | ||
| 1795 | { | ||
| 1796 | struct xgene_dma_chan *chan = &pdma->chan[id]; | ||
| 1797 | struct dma_device *dma_dev = &pdma->dma_dev[id]; | ||
| 1798 | int ret; | ||
| 1799 | |||
| 1800 | chan->dma_chan.device = dma_dev; | ||
| 1801 | |||
| 1802 | spin_lock_init(&chan->lock); | ||
| 1803 | INIT_LIST_HEAD(&chan->ld_pending); | ||
| 1804 | INIT_LIST_HEAD(&chan->ld_running); | ||
| 1805 | INIT_LIST_HEAD(&chan->ld_completed); | ||
| 1806 | tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb, | ||
| 1807 | (unsigned long)chan); | ||
| 1808 | |||
| 1809 | chan->pending = 0; | ||
| 1810 | chan->desc_pool = NULL; | ||
| 1811 | dma_cookie_init(&chan->dma_chan); | ||
| 1812 | |||
| 1813 | /* Setup dma device capabilities and prep routines */ | ||
| 1814 | xgene_dma_set_caps(chan, dma_dev); | ||
| 1815 | |||
| 1816 | /* Initialize DMA device list head */ | ||
| 1817 | INIT_LIST_HEAD(&dma_dev->channels); | ||
| 1818 | list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels); | ||
| 1819 | |||
| 1820 | /* Register with Linux async DMA framework */ | ||
| 1821 | ret = dma_async_device_register(dma_dev); | ||
| 1822 | if (ret) { | ||
| 1823 | chan_err(chan, "Failed to register async device %d", ret); | ||
| 1824 | tasklet_kill(&chan->tasklet); | ||
| 1825 | |||
| 1826 | return ret; | ||
| 1827 | } | ||
| 1828 | |||
| 1829 | /* DMA capability info */ | ||
| 1830 | dev_info(pdma->dev, | ||
| 1831 | "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan), | ||
| 1832 | dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "", | ||
| 1833 | dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", | ||
| 1834 | dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", | ||
| 1835 | dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); | ||
| 1836 | |||
| 1837 | return 0; | ||
| 1838 | } | ||
| 1839 | |||
| 1840 | static int xgene_dma_init_async(struct xgene_dma *pdma) | ||
| 1841 | { | ||
| 1842 | int ret, i, j; | ||
| 1843 | |||
| 1844 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL ; i++) { | ||
| 1845 | ret = xgene_dma_async_register(pdma, i); | ||
| 1846 | if (ret) { | ||
| 1847 | for (j = 0; j < i; j++) { | ||
| 1848 | dma_async_device_unregister(&pdma->dma_dev[j]); | ||
| 1849 | tasklet_kill(&pdma->chan[j].tasklet); | ||
| 1850 | } | ||
| 1851 | |||
| 1852 | return ret; | ||
| 1853 | } | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | return ret; | ||
| 1857 | } | ||
| 1858 | |||
| 1859 | static void xgene_dma_async_unregister(struct xgene_dma *pdma) | ||
| 1860 | { | ||
| 1861 | int i; | ||
| 1862 | |||
| 1863 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) | ||
| 1864 | dma_async_device_unregister(&pdma->dma_dev[i]); | ||
| 1865 | } | ||
| 1866 | |||
| 1867 | static void xgene_dma_init_channels(struct xgene_dma *pdma) | ||
| 1868 | { | ||
| 1869 | struct xgene_dma_chan *chan; | ||
| 1870 | int i; | ||
| 1871 | |||
| 1872 | pdma->ring_num = XGENE_DMA_RING_NUM; | ||
| 1873 | |||
| 1874 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | ||
| 1875 | chan = &pdma->chan[i]; | ||
| 1876 | chan->dev = pdma->dev; | ||
| 1877 | chan->pdma = pdma; | ||
| 1878 | chan->id = i; | ||
| 1879 | snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id); | ||
| 1880 | } | ||
| 1881 | } | ||
| 1882 | |||
| 1883 | static int xgene_dma_get_resources(struct platform_device *pdev, | ||
| 1884 | struct xgene_dma *pdma) | ||
| 1885 | { | ||
| 1886 | struct resource *res; | ||
| 1887 | int irq, i; | ||
| 1888 | |||
| 1889 | /* Get DMA csr region */ | ||
| 1890 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1891 | if (!res) { | ||
| 1892 | dev_err(&pdev->dev, "Failed to get csr region\n"); | ||
| 1893 | return -ENXIO; | ||
| 1894 | } | ||
| 1895 | |||
| 1896 | pdma->csr_dma = devm_ioremap(&pdev->dev, res->start, | ||
| 1897 | resource_size(res)); | ||
| 1898 | if (!pdma->csr_dma) { | ||
| 1899 | dev_err(&pdev->dev, "Failed to ioremap csr region"); | ||
| 1900 | return -ENOMEM; | ||
| 1901 | } | ||
| 1902 | |||
| 1903 | /* Get DMA ring csr region */ | ||
| 1904 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 1905 | if (!res) { | ||
| 1906 | dev_err(&pdev->dev, "Failed to get ring csr region\n"); | ||
| 1907 | return -ENXIO; | ||
| 1908 | } | ||
| 1909 | |||
| 1910 | pdma->csr_ring = devm_ioremap(&pdev->dev, res->start, | ||
| 1911 | resource_size(res)); | ||
| 1912 | if (!pdma->csr_ring) { | ||
| 1913 | dev_err(&pdev->dev, "Failed to ioremap ring csr region"); | ||
| 1914 | return -ENOMEM; | ||
| 1915 | } | ||
| 1916 | |||
| 1917 | /* Get DMA ring cmd csr region */ | ||
| 1918 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
| 1919 | if (!res) { | ||
| 1920 | dev_err(&pdev->dev, "Failed to get ring cmd csr region\n"); | ||
| 1921 | return -ENXIO; | ||
| 1922 | } | ||
| 1923 | |||
| 1924 | pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start, | ||
| 1925 | resource_size(res)); | ||
| 1926 | if (!pdma->csr_ring_cmd) { | ||
| 1927 | dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region"); | ||
| 1928 | return -ENOMEM; | ||
| 1929 | } | ||
| 1930 | |||
| 1931 | /* Get efuse csr region */ | ||
| 1932 | res = platform_get_resource(pdev, IORESOURCE_MEM, 3); | ||
| 1933 | if (!res) { | ||
| 1934 | dev_err(&pdev->dev, "Failed to get efuse csr region\n"); | ||
| 1935 | return -ENXIO; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start, | ||
| 1939 | resource_size(res)); | ||
| 1940 | if (!pdma->csr_efuse) { | ||
| 1941 | dev_err(&pdev->dev, "Failed to ioremap efuse csr region"); | ||
| 1942 | return -ENOMEM; | ||
| 1943 | } | ||
| 1944 | |||
| 1945 | /* Get DMA error interrupt */ | ||
| 1946 | irq = platform_get_irq(pdev, 0); | ||
| 1947 | if (irq <= 0) { | ||
| 1948 | dev_err(&pdev->dev, "Failed to get Error IRQ\n"); | ||
| 1949 | return -ENXIO; | ||
| 1950 | } | ||
| 1951 | |||
| 1952 | pdma->err_irq = irq; | ||
| 1953 | |||
| 1954 | /* Get DMA Rx ring descriptor interrupts for all DMA channels */ | ||
| 1955 | for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) { | ||
| 1956 | irq = platform_get_irq(pdev, i); | ||
| 1957 | if (irq <= 0) { | ||
| 1958 | dev_err(&pdev->dev, "Failed to get Rx IRQ\n"); | ||
| 1959 | return -ENXIO; | ||
| 1960 | } | ||
| 1961 | |||
| 1962 | pdma->chan[i - 1].rx_irq = irq; | ||
| 1963 | } | ||
| 1964 | |||
| 1965 | return 0; | ||
| 1966 | } | ||
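
All four CSR regions are fetched and mapped with the same three-step pattern. As a hedged alternative, devm_ioremap_resource() folds the NULL check and the mapping into one call and could compact this into a loop — with the caveat that it also request_mem_region()s the range, which may be unwanted if a region is shared with another driver. A sketch (the regs[] array and loop are hypothetical):

	void __iomem *regs[4];
	int i;

	for (i = 0; i < 4; i++) {
		/* devm_ioremap_resource() tolerates a NULL resource and
		 * returns an ERR_PTR, so no separate !res check is needed.
		 */
		regs[i] = devm_ioremap_resource(&pdev->dev,
				platform_get_resource(pdev, IORESOURCE_MEM, i));
		if (IS_ERR(regs[i]))
			return PTR_ERR(regs[i]);
	}
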
| 1967 | |||
| 1968 | static int xgene_dma_probe(struct platform_device *pdev) | ||
| 1969 | { | ||
| 1970 | struct xgene_dma *pdma; | ||
| 1971 | int ret, i; | ||
| 1972 | |||
| 1973 | pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL); | ||
| 1974 | if (!pdma) | ||
| 1975 | return -ENOMEM; | ||
| 1976 | |||
| 1977 | pdma->dev = &pdev->dev; | ||
| 1978 | platform_set_drvdata(pdev, pdma); | ||
| 1979 | |||
| 1980 | ret = xgene_dma_get_resources(pdev, pdma); | ||
| 1981 | if (ret) | ||
| 1982 | return ret; | ||
| 1983 | |||
| 1984 | pdma->clk = devm_clk_get(&pdev->dev, NULL); | ||
| 1985 | if (IS_ERR(pdma->clk)) { | ||
| 1986 | dev_err(&pdev->dev, "Failed to get clk\n"); | ||
| 1987 | return PTR_ERR(pdma->clk); | ||
| 1988 | } | ||
| 1989 | |||
| 1990 | /* Enable clk before accessing registers */ | ||
| 1991 | ret = clk_prepare_enable(pdma->clk); | ||
| 1992 | if (ret) { | ||
| 1993 | dev_err(&pdev->dev, "Failed to enable clk %d\n", ret); | ||
| 1994 | return ret; | ||
| 1995 | } | ||
| 1996 | |||
| 1997 | /* Take DMA RAM out of shutdown */ | ||
| 1998 | ret = xgene_dma_init_mem(pdma); | ||
| 1999 | if (ret) | ||
| 2000 | goto err_clk_enable; | ||
| 2001 | |||
| 2002 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42)); | ||
| 2003 | if (ret) { | ||
| 2004 | dev_err(&pdev->dev, "No usable DMA configuration\n"); | ||
| 2005 | goto err_dma_mask; | ||
| 2006 | } | ||
| 2007 | |||
| 2008 | /* Initialize DMA channels software state */ | ||
| 2009 | xgene_dma_init_channels(pdma); | ||
| 2010 | |||
| 2011 | /* Configure DMA rings */ | ||
| 2012 | ret = xgene_dma_init_rings(pdma); | ||
| 2013 | if (ret) | ||
| 2014 | goto err_clk_enable; | ||
| 2015 | |||
| 2016 | ret = xgene_dma_request_irqs(pdma); | ||
| 2017 | if (ret) | ||
| 2018 | goto err_request_irq; | ||
| 2019 | |||
| 2020 | /* Configure and enable DMA engine */ | ||
| 2021 | xgene_dma_init_hw(pdma); | ||
| 2022 | |||
| 2023 | /* Register DMA device with linux async framework */ | ||
| 2024 | ret = xgene_dma_init_async(pdma); | ||
| 2025 | if (ret) | ||
| 2026 | goto err_async_init; | ||
| 2027 | |||
| 2028 | return 0; | ||
| 2029 | |||
| 2030 | err_async_init: | ||
| 2031 | xgene_dma_free_irqs(pdma); | ||
| 2032 | |||
| 2033 | err_request_irq: | ||
| 2034 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) | ||
| 2035 | xgene_dma_delete_chan_rings(&pdma->chan[i]); | ||
| 2036 | |||
| 2037 | err_dma_mask: | ||
| 2038 | err_clk_enable: | ||
| 2039 | clk_disable_unprepare(pdma->clk); | ||
| 2040 | |||
| 2041 | return ret; | ||
| 2042 | } | ||
| 2043 | |||
| 2044 | static int xgene_dma_remove(struct platform_device *pdev) | ||
| 2045 | { | ||
| 2046 | struct xgene_dma *pdma = platform_get_drvdata(pdev); | ||
| 2047 | struct xgene_dma_chan *chan; | ||
| 2048 | int i; | ||
| 2049 | |||
| 2050 | xgene_dma_async_unregister(pdma); | ||
| 2051 | |||
| 2052 | /* Mask interrupts and disable DMA engine */ | ||
| 2053 | xgene_dma_mask_interrupts(pdma); | ||
| 2054 | xgene_dma_disable(pdma); | ||
| 2055 | xgene_dma_free_irqs(pdma); | ||
| 2056 | |||
| 2057 | for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { | ||
| 2058 | chan = &pdma->chan[i]; | ||
| 2059 | tasklet_kill(&chan->tasklet); | ||
| 2060 | xgene_dma_delete_chan_rings(chan); | ||
| 2061 | } | ||
| 2062 | |||
| 2063 | clk_disable_unprepare(pdma->clk); | ||
| 2064 | |||
| 2065 | return 0; | ||
| 2066 | } | ||
| 2067 | |||
| 2068 | static const struct of_device_id xgene_dma_of_match_ptr[] = { | ||
| 2069 | {.compatible = "apm,xgene-storm-dma",}, | ||
| 2070 | {}, | ||
| 2071 | }; | ||
| 2072 | MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr); | ||
| 2073 | |||
| 2074 | static struct platform_driver xgene_dma_driver = { | ||
| 2075 | .probe = xgene_dma_probe, | ||
| 2076 | .remove = xgene_dma_remove, | ||
| 2077 | .driver = { | ||
| 2078 | .name = "X-Gene-DMA", | ||
| 2079 | .of_match_table = xgene_dma_of_match_ptr, | ||
| 2080 | }, | ||
| 2081 | }; | ||
| 2082 | |||
| 2083 | module_platform_driver(xgene_dma_driver); | ||
| 2084 | |||
| 2085 | MODULE_DESCRIPTION("APM X-Gene SoC DMA driver"); | ||
| 2086 | MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>"); | ||
| 2087 | MODULE_AUTHOR("Loc Ho <lho@apm.com>"); | ||
| 2088 | MODULE_LICENSE("GPL"); | ||
| 2089 | MODULE_VERSION("1.0"); | ||
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index bdd2a5dd7220..d8434d465885 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c | |||
| @@ -22,9 +22,9 @@ | |||
| 22 | * (at your option) any later version. | 22 | * (at your option) any later version. |
| 23 | */ | 23 | */ |
| 24 | 24 | ||
| 25 | #include <linux/amba/xilinx_dma.h> | ||
| 26 | #include <linux/bitops.h> | 25 | #include <linux/bitops.h> |
| 27 | #include <linux/dmapool.h> | 26 | #include <linux/dmapool.h> |
| 27 | #include <linux/dma/xilinx_dma.h> | ||
| 28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
| 29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
| 30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 072f67066df3..2b6ef6bd5d5f 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
| @@ -388,7 +388,7 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, | |||
| 388 | { | 388 | { |
| 389 | struct dma_slave_config cfg = { 0, }; | 389 | struct dma_slave_config cfg = { 0, }; |
| 390 | struct dma_chan *chan; | 390 | struct dma_chan *chan; |
| 391 | unsigned int slave_id; | 391 | void *slave_data = NULL; |
| 392 | struct resource *res; | 392 | struct resource *res; |
| 393 | dma_cap_mask_t mask; | 393 | dma_cap_mask_t mask; |
| 394 | int ret; | 394 | int ret; |
| @@ -397,13 +397,12 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, | |||
| 397 | dma_cap_set(DMA_SLAVE, mask); | 397 | dma_cap_set(DMA_SLAVE, mask); |
| 398 | 398 | ||
| 399 | if (pdata) | 399 | if (pdata) |
| 400 | slave_id = direction == DMA_MEM_TO_DEV | 400 | slave_data = direction == DMA_MEM_TO_DEV ? |
| 401 | ? pdata->slave_id_tx : pdata->slave_id_rx; | 401 | (void *)pdata->slave_id_tx : |
| 402 | else | 402 | (void *)pdata->slave_id_rx; |
| 403 | slave_id = 0; | ||
| 404 | 403 | ||
| 405 | chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, | 404 | chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, |
| 406 | (void *)(unsigned long)slave_id, &host->pd->dev, | 405 | slave_data, &host->pd->dev, |
| 407 | direction == DMA_MEM_TO_DEV ? "tx" : "rx"); | 406 | direction == DMA_MEM_TO_DEV ? "tx" : "rx"); |
| 408 | 407 | ||
| 409 | dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, | 408 | dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, |
| @@ -414,8 +413,6 @@ sh_mmcif_request_dma_one(struct sh_mmcif_host *host, | |||
| 414 | 413 | ||
| 415 | res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); | 414 | res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); |
| 416 | 415 | ||
| 417 | /* In the OF case the driver will get the slave ID from the DT */ | ||
| 418 | cfg.slave_id = slave_id; | ||
| 419 | cfg.direction = direction; | 416 | cfg.direction = direction; |
| 420 | 417 | ||
| 421 | if (direction == DMA_DEV_TO_MEM) { | 418 | if (direction == DMA_DEV_TO_MEM) { |
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 6906a905cd54..354f4f335ed5 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c | |||
| @@ -201,7 +201,7 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
| 201 | of_match_device(sh_mobile_sdhi_of_match, &pdev->dev); | 201 | of_match_device(sh_mobile_sdhi_of_match, &pdev->dev); |
| 202 | struct sh_mobile_sdhi *priv; | 202 | struct sh_mobile_sdhi *priv; |
| 203 | struct tmio_mmc_data *mmc_data; | 203 | struct tmio_mmc_data *mmc_data; |
| 204 | struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; | 204 | struct tmio_mmc_data *mmd = pdev->dev.platform_data; |
| 205 | struct tmio_mmc_host *host; | 205 | struct tmio_mmc_host *host; |
| 206 | struct resource *res; | 206 | struct resource *res; |
| 207 | int irq, ret, i = 0; | 207 | int irq, ret, i = 0; |
| @@ -245,30 +245,14 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev) | |||
| 245 | else | 245 | else |
| 246 | host->bus_shift = 0; | 246 | host->bus_shift = 0; |
| 247 | 247 | ||
| 248 | mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; | 248 | if (mmd) |
| 249 | if (p) { | 249 | *mmc_data = *mmd; |
| 250 | mmc_data->flags = p->tmio_flags; | 250 | |
| 251 | mmc_data->ocr_mask = p->tmio_ocr_mask; | ||
| 252 | mmc_data->capabilities |= p->tmio_caps; | ||
| 253 | mmc_data->capabilities2 |= p->tmio_caps2; | ||
| 254 | mmc_data->cd_gpio = p->cd_gpio; | ||
| 255 | |||
| 256 | if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { | ||
| 257 | /* | ||
| 258 | * Yes, we have to provide slave IDs twice to TMIO: | ||
| 259 | * once as a filter parameter and once for channel | ||
| 260 | * configuration as an explicit slave ID | ||
| 261 | */ | ||
| 262 | dma_priv->chan_priv_tx = (void *)p->dma_slave_tx; | ||
| 263 | dma_priv->chan_priv_rx = (void *)p->dma_slave_rx; | ||
| 264 | dma_priv->slave_id_tx = p->dma_slave_tx; | ||
| 265 | dma_priv->slave_id_rx = p->dma_slave_rx; | ||
| 266 | } | ||
| 267 | } | ||
| 268 | dma_priv->filter = shdma_chan_filter; | 251 | dma_priv->filter = shdma_chan_filter; |
| 269 | dma_priv->enable = sh_mobile_sdhi_enable_dma; | 252 | dma_priv->enable = sh_mobile_sdhi_enable_dma; |
| 270 | 253 | ||
| 271 | mmc_data->alignment_shift = 1; /* 2-byte alignment */ | 254 | mmc_data->alignment_shift = 1; /* 2-byte alignment */ |
| 255 | mmc_data->capabilities |= MMC_CAP_MMC_HIGHSPEED; | ||
| 272 | 256 | ||
| 273 | /* | 257 | /* |
| 274 | * All SDHI blocks support 2-byte and larger block sizes in 4-bit | 258 | * All SDHI blocks support 2-byte and larger block sizes in 4-bit |
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index fc3805ed69d1..4a597f5a53e2 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h | |||
| @@ -43,10 +43,6 @@ struct tmio_mmc_data; | |||
| 43 | struct tmio_mmc_host; | 43 | struct tmio_mmc_host; |
| 44 | 44 | ||
| 45 | struct tmio_mmc_dma { | 45 | struct tmio_mmc_dma { |
| 46 | void *chan_priv_tx; | ||
| 47 | void *chan_priv_rx; | ||
| 48 | int slave_id_tx; | ||
| 49 | int slave_id_rx; | ||
| 50 | enum dma_slave_buswidth dma_buswidth; | 46 | enum dma_slave_buswidth dma_buswidth; |
| 51 | bool (*filter)(struct dma_chan *chan, void *arg); | 47 | bool (*filter)(struct dma_chan *chan, void *arg); |
| 52 | void (*enable)(struct tmio_mmc_host *host, bool enable); | 48 | void (*enable)(struct tmio_mmc_host *host, bool enable); |
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 331bb618e398..e4b05dbb9ca8 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
| @@ -261,7 +261,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat | |||
| 261 | { | 261 | { |
| 262 | /* We can only either use DMA for both Tx and Rx or not use it at all */ | 262 | /* We can only either use DMA for both Tx and Rx or not use it at all */ |
| 263 | if (!host->dma || (!host->pdev->dev.of_node && | 263 | if (!host->dma || (!host->pdev->dev.of_node && |
| 264 | (!host->dma->chan_priv_tx || !host->dma->chan_priv_rx))) | 264 | (!pdata->chan_priv_tx || !pdata->chan_priv_rx))) |
| 265 | return; | 265 | return; |
| 266 | 266 | ||
| 267 | if (!host->chan_tx && !host->chan_rx) { | 267 | if (!host->chan_tx && !host->chan_rx) { |
| @@ -278,7 +278,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat | |||
| 278 | dma_cap_set(DMA_SLAVE, mask); | 278 | dma_cap_set(DMA_SLAVE, mask); |
| 279 | 279 | ||
| 280 | host->chan_tx = dma_request_slave_channel_compat(mask, | 280 | host->chan_tx = dma_request_slave_channel_compat(mask, |
| 281 | host->dma->filter, host->dma->chan_priv_tx, | 281 | host->dma->filter, pdata->chan_priv_tx, |
| 282 | &host->pdev->dev, "tx"); | 282 | &host->pdev->dev, "tx"); |
| 283 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, | 283 | dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, |
| 284 | host->chan_tx); | 284 | host->chan_tx); |
| @@ -286,8 +286,6 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat | |||
| 286 | if (!host->chan_tx) | 286 | if (!host->chan_tx) |
| 287 | return; | 287 | return; |
| 288 | 288 | ||
| 289 | if (host->dma->chan_priv_tx) | ||
| 290 | cfg.slave_id = host->dma->slave_id_tx; | ||
| 291 | cfg.direction = DMA_MEM_TO_DEV; | 289 | cfg.direction = DMA_MEM_TO_DEV; |
| 292 | cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift); | 290 | cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift); |
| 293 | cfg.dst_addr_width = host->dma->dma_buswidth; | 291 | cfg.dst_addr_width = host->dma->dma_buswidth; |
| @@ -299,7 +297,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat | |||
| 299 | goto ecfgtx; | 297 | goto ecfgtx; |
| 300 | 298 | ||
| 301 | host->chan_rx = dma_request_slave_channel_compat(mask, | 299 | host->chan_rx = dma_request_slave_channel_compat(mask, |
| 302 | host->dma->filter, host->dma->chan_priv_rx, | 300 | host->dma->filter, pdata->chan_priv_rx, |
| 303 | &host->pdev->dev, "rx"); | 301 | &host->pdev->dev, "rx"); |
| 304 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, | 302 | dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, |
| 305 | host->chan_rx); | 303 | host->chan_rx); |
| @@ -307,8 +305,6 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat | |||
| 307 | if (!host->chan_rx) | 305 | if (!host->chan_rx) |
| 308 | goto ereqrx; | 306 | goto ereqrx; |
| 309 | 307 | ||
| 310 | if (host->dma->chan_priv_rx) | ||
| 311 | cfg.slave_id = host->dma->slave_id_rx; | ||
| 312 | cfg.direction = DMA_DEV_TO_MEM; | 308 | cfg.direction = DMA_DEV_TO_MEM; |
| 313 | cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; | 309 | cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; |
| 314 | cfg.src_addr_width = host->dma->dma_buswidth; | 310 | cfg.src_addr_width = host->dma->dma_buswidth; |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index a21c378f096a..c3ce81c1a716 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
| @@ -159,7 +159,6 @@ static void flctl_setup_dma(struct sh_flctl *flctl) | |||
| 159 | return; | 159 | return; |
| 160 | 160 | ||
| 161 | memset(&cfg, 0, sizeof(cfg)); | 161 | memset(&cfg, 0, sizeof(cfg)); |
| 162 | cfg.slave_id = pdata->slave_id_fifo0_tx; | ||
| 163 | cfg.direction = DMA_MEM_TO_DEV; | 162 | cfg.direction = DMA_MEM_TO_DEV; |
| 164 | cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl); | 163 | cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl); |
| 165 | cfg.src_addr = 0; | 164 | cfg.src_addr = 0; |
| @@ -175,7 +174,6 @@ static void flctl_setup_dma(struct sh_flctl *flctl) | |||
| 175 | if (!flctl->chan_fifo0_rx) | 174 | if (!flctl->chan_fifo0_rx) |
| 176 | goto err; | 175 | goto err; |
| 177 | 176 | ||
| 178 | cfg.slave_id = pdata->slave_id_fifo0_rx; | ||
| 179 | cfg.direction = DMA_DEV_TO_MEM; | 177 | cfg.direction = DMA_DEV_TO_MEM; |
| 180 | cfg.dst_addr = 0; | 178 | cfg.dst_addr = 0; |
| 181 | cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl); | 179 | cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl); |
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 186924aa4740..f6bac9e77d06 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c | |||
| @@ -1023,7 +1023,6 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, | |||
| 1023 | } | 1023 | } |
| 1024 | 1024 | ||
| 1025 | memset(&cfg, 0, sizeof(cfg)); | 1025 | memset(&cfg, 0, sizeof(cfg)); |
| 1026 | cfg.slave_id = id; | ||
| 1027 | cfg.direction = dir; | 1026 | cfg.direction = dir; |
| 1028 | if (dir == DMA_MEM_TO_DEV) { | 1027 | if (dir == DMA_MEM_TO_DEV) { |
| 1029 | cfg.dst_addr = port_addr; | 1028 | cfg.dst_addr = port_addr; |
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index e57eec0b2f46..bcc7c635d8e7 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
| @@ -1030,7 +1030,6 @@ static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev, | |||
| 1030 | } | 1030 | } |
| 1031 | 1031 | ||
| 1032 | memset(&cfg, 0, sizeof(cfg)); | 1032 | memset(&cfg, 0, sizeof(cfg)); |
| 1033 | cfg.slave_id = id; | ||
| 1034 | cfg.direction = dir; | 1033 | cfg.direction = dir; |
| 1035 | if (dir == DMA_MEM_TO_DEV) { | 1034 | if (dir == DMA_MEM_TO_DEV) { |
| 1036 | cfg.dst_addr = port_addr; | 1035 | cfg.dst_addr = port_addr; |
diff --git a/include/dt-bindings/dma/jz4780-dma.h b/include/dt-bindings/dma/jz4780-dma.h new file mode 100644 index 000000000000..df017fdfb44e --- /dev/null +++ b/include/dt-bindings/dma/jz4780-dma.h | |||
| @@ -0,0 +1,49 @@ | |||
| 1 | #ifndef __DT_BINDINGS_DMA_JZ4780_DMA_H__ | ||
| 2 | #define __DT_BINDINGS_DMA_JZ4780_DMA_H__ | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Request type numbers for the JZ4780 DMA controller (written to the DRTn | ||
| 6 | * register for the channel). | ||
| 7 | */ | ||
| 8 | #define JZ4780_DMA_I2S1_TX 0x4 | ||
| 9 | #define JZ4780_DMA_I2S1_RX 0x5 | ||
| 10 | #define JZ4780_DMA_I2S0_TX 0x6 | ||
| 11 | #define JZ4780_DMA_I2S0_RX 0x7 | ||
| 12 | #define JZ4780_DMA_AUTO 0x8 | ||
| 13 | #define JZ4780_DMA_SADC_RX 0x9 | ||
| 14 | #define JZ4780_DMA_UART4_TX 0xc | ||
| 15 | #define JZ4780_DMA_UART4_RX 0xd | ||
| 16 | #define JZ4780_DMA_UART3_TX 0xe | ||
| 17 | #define JZ4780_DMA_UART3_RX 0xf | ||
| 18 | #define JZ4780_DMA_UART2_TX 0x10 | ||
| 19 | #define JZ4780_DMA_UART2_RX 0x11 | ||
| 20 | #define JZ4780_DMA_UART1_TX 0x12 | ||
| 21 | #define JZ4780_DMA_UART1_RX 0x13 | ||
| 22 | #define JZ4780_DMA_UART0_TX 0x14 | ||
| 23 | #define JZ4780_DMA_UART0_RX 0x15 | ||
| 24 | #define JZ4780_DMA_SSI0_TX 0x16 | ||
| 25 | #define JZ4780_DMA_SSI0_RX 0x17 | ||
| 26 | #define JZ4780_DMA_SSI1_TX 0x18 | ||
| 27 | #define JZ4780_DMA_SSI1_RX 0x19 | ||
| 28 | #define JZ4780_DMA_MSC0_TX 0x1a | ||
| 29 | #define JZ4780_DMA_MSC0_RX 0x1b | ||
| 30 | #define JZ4780_DMA_MSC1_TX 0x1c | ||
| 31 | #define JZ4780_DMA_MSC1_RX 0x1d | ||
| 32 | #define JZ4780_DMA_MSC2_TX 0x1e | ||
| 33 | #define JZ4780_DMA_MSC2_RX 0x1f | ||
| 34 | #define JZ4780_DMA_PCM0_TX 0x20 | ||
| 35 | #define JZ4780_DMA_PCM0_RX 0x21 | ||
| 36 | #define JZ4780_DMA_SMB0_TX 0x24 | ||
| 37 | #define JZ4780_DMA_SMB0_RX 0x25 | ||
| 38 | #define JZ4780_DMA_SMB1_TX 0x26 | ||
| 39 | #define JZ4780_DMA_SMB1_RX 0x27 | ||
| 40 | #define JZ4780_DMA_SMB2_TX 0x28 | ||
| 41 | #define JZ4780_DMA_SMB2_RX 0x29 | ||
| 42 | #define JZ4780_DMA_SMB3_TX 0x2a | ||
| 43 | #define JZ4780_DMA_SMB3_RX 0x2b | ||
| 44 | #define JZ4780_DMA_SMB4_TX 0x2c | ||
| 45 | #define JZ4780_DMA_SMB4_RX 0x2d | ||
| 46 | #define JZ4780_DMA_DES_TX 0x2e | ||
| 47 | #define JZ4780_DMA_DES_RX 0x2f | ||
| 48 | |||
| 49 | #endif /* __DT_BINDINGS_DMA_JZ4780_DMA_H__ */ | ||
diff --git a/include/linux/amba/xilinx_dma.h b/include/linux/dma/xilinx_dma.h index 34b98f276ed0..34b98f276ed0 100644 --- a/include/linux/amba/xilinx_dma.h +++ b/include/linux/dma/xilinx_dma.h | |||
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b6997a0cb528..ad419757241f 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -11,10 +11,6 @@ | |||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
| 12 | * more details. | 12 | * more details. |
| 13 | * | 13 | * |
| 14 | * You should have received a copy of the GNU General Public License along with | ||
| 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 17 | * | ||
| 18 | * The full GNU General Public License is included in this distribution in the | 14 | * The full GNU General Public License is included in this distribution in the |
| 19 | * file called COPYING. | 15 | * file called COPYING. |
| 20 | */ | 16 | */ |
| @@ -574,7 +570,6 @@ struct dma_tx_state { | |||
| 574 | * @copy_align: alignment shift for memcpy operations | 570 | * @copy_align: alignment shift for memcpy operations |
| 575 | * @xor_align: alignment shift for xor operations | 571 | * @xor_align: alignment shift for xor operations |
| 576 | * @pq_align: alignment shift for pq operations | 572 | * @pq_align: alignment shift for pq operations |
| 577 | * @fill_align: alignment shift for memset operations | ||
| 578 | * @dev_id: unique device ID | 573 | * @dev_id: unique device ID |
| 579 | * @dev: struct device reference for dma mapping api | 574 | * @dev: struct device reference for dma mapping api |
| 580 | * @src_addr_widths: bit mask of src addr widths the device supports | 575 | * @src_addr_widths: bit mask of src addr widths the device supports |
| @@ -625,7 +620,6 @@ struct dma_device { | |||
| 625 | u8 copy_align; | 620 | u8 copy_align; |
| 626 | u8 xor_align; | 621 | u8 xor_align; |
| 627 | u8 pq_align; | 622 | u8 pq_align; |
| 628 | u8 fill_align; | ||
| 629 | #define DMA_HAS_PQ_CONTINUE (1 << 15) | 623 | #define DMA_HAS_PQ_CONTINUE (1 << 15) |
| 630 | 624 | ||
| 631 | int dev_id; | 625 | int dev_id; |
| @@ -826,12 +820,6 @@ static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, | |||
| 826 | return dmaengine_check_align(dev->pq_align, off1, off2, len); | 820 | return dmaengine_check_align(dev->pq_align, off1, off2, len); |
| 827 | } | 821 | } |
| 828 | 822 | ||
| 829 | static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, | ||
| 830 | size_t off2, size_t len) | ||
| 831 | { | ||
| 832 | return dmaengine_check_align(dev->fill_align, off1, off2, len); | ||
| 833 | } | ||
| 834 | |||
| 835 | static inline void | 823 | static inline void |
| 836 | dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) | 824 | dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) |
| 837 | { | 825 | { |
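With fill_align and is_dma_fill_aligned() gone (leftovers, it appears, from the earlier removal of the DMA_MEMSET offload), the surviving alignment helpers keep the same shape. A minimal sketch of the check a memcpy client would still perform, assuming a valid struct dma_device pointer; the wrapper name here is illustrative:

    #include <linux/dmaengine.h>

    /* is_dma_copy_aligned() remains and wraps dmaengine_check_align()
     * with dev->copy_align, exactly as the removed fill variant did
     * with dev->fill_align */
    static bool can_offload_copy(struct dma_device *dev, size_t src_off,
                                 size_t dst_off, size_t len)
    {
            return is_dma_copy_aligned(dev, src_off, dst_off, len);
    }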
| @@ -1098,7 +1086,6 @@ void dma_async_device_unregister(struct dma_device *device); | |||
| 1098 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | 1086 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
| 1099 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); | 1087 | struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); |
| 1100 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); | 1088 | struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); |
| 1101 | struct dma_chan *net_dma_find_channel(void); | ||
| 1102 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | 1089 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) |
| 1103 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ | 1090 | #define dma_request_slave_channel_compat(mask, x, y, dev, name) \ |
| 1104 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) | 1091 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) |
| @@ -1116,27 +1103,4 @@ static inline struct dma_chan | |||
| 1116 | 1103 | ||
| 1117 | return __dma_request_channel(mask, fn, fn_param); | 1104 | return __dma_request_channel(mask, fn, fn_param); |
| 1118 | } | 1105 | } |
| 1119 | |||
| 1120 | /* --- Helper iov-locking functions --- */ | ||
| 1121 | |||
| 1122 | struct dma_page_list { | ||
| 1123 | char __user *base_address; | ||
| 1124 | int nr_pages; | ||
| 1125 | struct page **pages; | ||
| 1126 | }; | ||
| 1127 | |||
| 1128 | struct dma_pinned_list { | ||
| 1129 | int nr_iovecs; | ||
| 1130 | struct dma_page_list page_list[0]; | ||
| 1131 | }; | ||
| 1132 | |||
| 1133 | struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); | ||
| 1134 | void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); | ||
| 1135 | |||
| 1136 | dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, | ||
| 1137 | struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); | ||
| 1138 | dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, | ||
| 1139 | struct dma_pinned_list *pinned_list, struct page *page, | ||
| 1140 | unsigned int offset, size_t len); | ||
| 1141 | |||
| 1142 | #endif /* DMAENGINE_H */ | 1106 | #endif /* DMAENGINE_H */ |
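The last two dmaengine.h hunks clear out the remains of NET_DMA: the net_dma_find_channel() declaration and the iovec pin/copy helpers that only the network receive-offload path used. Code that still wants an offloaded copy goes through the ordinary descriptor flow. A minimal sketch, assuming chan was obtained via dma_request_channel() on a DMA_MEMCPY-capable device and dst/src are already DMA-mapped:

    #include <linux/dmaengine.h>

    static int offload_copy(struct dma_chan *chan, dma_addr_t dst,
                            dma_addr_t src, size_t len)
    {
            struct dma_async_tx_descriptor *tx;

            /* the raw device operation; returns NULL when no
             * descriptor can be allocated */
            tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                            DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;

            dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            return 0;
    }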
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 605812820e48..24b86d538e88 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
| @@ -111,6 +111,8 @@ struct dma_chan; | |||
| 111 | * data for the MMC controller | 111 | * data for the MMC controller |
| 112 | */ | 112 | */ |
| 113 | struct tmio_mmc_data { | 113 | struct tmio_mmc_data { |
| 114 | void *chan_priv_tx; | ||
| 115 | void *chan_priv_rx; | ||
| 114 | unsigned int hclk; | 116 | unsigned int hclk; |
| 115 | unsigned long capabilities; | 117 | unsigned long capabilities; |
| 116 | unsigned long capabilities2; | 118 | unsigned long capabilities2; |
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h index da77e5e2041d..95d6f0314a7d 100644 --- a/include/linux/mmc/sh_mobile_sdhi.h +++ b/include/linux/mmc/sh_mobile_sdhi.h | |||
| @@ -7,14 +7,4 @@ | |||
| 7 | #define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" | 7 | #define SH_MOBILE_SDHI_IRQ_SDCARD "sdcard" |
| 8 | #define SH_MOBILE_SDHI_IRQ_SDIO "sdio" | 8 | #define SH_MOBILE_SDHI_IRQ_SDIO "sdio" |
| 9 | 9 | ||
| 10 | struct sh_mobile_sdhi_info { | ||
| 11 | int dma_slave_tx; | ||
| 12 | int dma_slave_rx; | ||
| 13 | unsigned long tmio_flags; | ||
| 14 | unsigned long tmio_caps; | ||
| 15 | unsigned long tmio_caps2; | ||
| 16 | u32 tmio_ocr_mask; /* available MMC voltages */ | ||
| 17 | unsigned int cd_gpio; | ||
| 18 | }; | ||
| 19 | |||
| 20 | #endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ | 10 | #endif /* LINUX_MMC_SH_MOBILE_SDHI_H */ |
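Taken together, the two MMC hunks migrate the DMA slave handles from the removed sh_mobile_sdhi_info into tmio_mmc_data, whose new chan_priv_tx/chan_priv_rx fields are opaque void pointers interpreted by the SDHI glue. A hypothetical sketch of platform data after the change; the slave objects, their type, and their names are assumptions for illustration, not taken from a real board file:

    #include <linux/mfd/tmio.h>
    #include <linux/mmc/host.h>
    #include <linux/sh_dma.h>

    /* stand-ins for whatever slave data the SDHI glue dereferences */
    static struct sh_dmae_slave sdhi0_dma_tx_slave;
    static struct sh_dmae_slave sdhi0_dma_rx_slave;

    static struct tmio_mmc_data sdhi0_pdata = {
            .chan_priv_tx = &sdhi0_dma_tx_slave,
            .chan_priv_rx = &sdhi0_dma_rx_slave,
            .capabilities = MMC_CAP_SD_HIGHSPEED,
    };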
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h index eabac4e2fc99..2d08816720f6 100644 --- a/include/linux/platform_data/dma-imx-sdma.h +++ b/include/linux/platform_data/dma-imx-sdma.h | |||
| @@ -48,6 +48,9 @@ struct sdma_script_start_addrs { | |||
| 48 | s32 ssish_2_mcu_addr; | 48 | s32 ssish_2_mcu_addr; |
| 49 | s32 hdmi_dma_addr; | 49 | s32 hdmi_dma_addr; |
| 50 | /* End of v2 array */ | 50 | /* End of v2 array */ |
| 51 | s32 zcanfd_2_mcu_addr; | ||
| 52 | s32 zqspi_2_mcu_addr; | ||
| 53 | /* End of v3 array */ | ||
| 51 | }; | 54 | }; |
| 52 | 55 | ||
| 53 | /** | 56 | /** |
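The two appended script addresses extend sdma_script_start_addrs to a v3 layout. The entries are s32 and firmware predating v3 never fills them, so a consumer can treat a non-positive value as "not provided". A small sketch under that assumption (the helper is made up; the greater-than-zero validity convention mirrors how the imx-sdma driver merges firmware script addresses):

    #include <linux/platform_data/dma-imx-sdma.h>

    /* hypothetical: true when the loaded firmware carries the v3
     * QSPI script; v1/v2-only firmware leaves this slot at zero */
    static bool sdma_has_zqspi_script(const struct sdma_script_start_addrs *a)
    {
            return a->zqspi_2_mcu_addr > 0;
    }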
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index abdf1f229dc3..dd0ba502ccb3 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h | |||
| @@ -69,6 +69,7 @@ struct shdma_chan { | |||
| 69 | int id; /* Raw id of this channel */ | 69 | int id; /* Raw id of this channel */ |
| 70 | int irq; /* Channel IRQ */ | 70 | int irq; /* Channel IRQ */ |
| 71 | int slave_id; /* Client ID for slave DMA */ | 71 | int slave_id; /* Client ID for slave DMA */ |
| 72 | int real_slave_id; /* argument passed to filter function */ | ||
| 72 | int hw_req; /* DMA request line for slave DMA - same | 73 | int hw_req; /* DMA request line for slave DMA - same |
| 73 | * as MID/RID, used with DT */ | 74 | * as MID/RID, used with DT */ |
| 74 | enum shdma_pm_state pm_state; | 75 | enum shdma_pm_state pm_state; |
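As the comment says, real_slave_id preserves the argument the client handed to the filter function, keeping it distinct from slave_id, which the driver may later rewrite while configuring the channel. The request side is unchanged; the fsi.c hunk below uses exactly this pattern via dma_request_slave_channel_compat(), and in its plainest form it reads:

    #include <linux/dmaengine.h>
    #include <linux/shdma-base.h>

    static struct dma_chan *request_shdma_chan(int slave_id)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            /* the slave ID travels as the opaque filter argument */
            return dma_request_channel(mask, shdma_chan_filter,
                                       (void *)(unsigned long)slave_id);
    }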
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 0c2af21b0b82..142c066eaee2 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c | |||
| @@ -250,6 +250,7 @@ struct fsi_clk { | |||
| 250 | 250 | ||
| 251 | struct fsi_priv { | 251 | struct fsi_priv { |
| 252 | void __iomem *base; | 252 | void __iomem *base; |
| 253 | phys_addr_t phys; | ||
| 253 | struct fsi_master *master; | 254 | struct fsi_master *master; |
| 254 | 255 | ||
| 255 | struct fsi_stream playback; | 256 | struct fsi_stream playback; |
| @@ -1371,13 +1372,18 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct dev | |||
| 1371 | shdma_chan_filter, (void *)io->dma_id, | 1372 | shdma_chan_filter, (void *)io->dma_id, |
| 1372 | dev, is_play ? "tx" : "rx"); | 1373 | dev, is_play ? "tx" : "rx"); |
| 1373 | if (io->chan) { | 1374 | if (io->chan) { |
| 1374 | struct dma_slave_config cfg; | 1375 | struct dma_slave_config cfg = {}; |
| 1375 | int ret; | 1376 | int ret; |
| 1376 | 1377 | ||
| 1377 | cfg.slave_id = io->dma_id; | 1378 | if (is_play) { |
| 1378 | cfg.dst_addr = 0; /* use default addr */ | 1379 | cfg.dst_addr = fsi->phys + REG_DODT; |
| 1379 | cfg.src_addr = 0; /* use default addr */ | 1380 | cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
| 1380 | cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; | 1381 | cfg.direction = DMA_MEM_TO_DEV; |
| 1382 | } else { | ||
| 1383 | cfg.src_addr = fsi->phys + REG_DIDT; | ||
| 1384 | cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
| 1385 | cfg.direction = DMA_DEV_TO_MEM; | ||
| 1386 | } | ||
| 1381 | 1387 | ||
| 1382 | ret = dmaengine_slave_config(io->chan, &cfg); | 1388 | ret = dmaengine_slave_config(io->chan, &cfg); |
| 1383 | if (ret < 0) { | 1389 | if (ret < 0) { |
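Two details carry the weight in this hunk: cfg is now zero-initialized with "= {}", so fields the code no longer sets (notably the deprecated slave_id) end up 0 rather than stack garbage, and the FIFO address and bus width are stated explicitly instead of the old "use default addr" convention. Stripped to its essentials, with fifo_phys standing in for fsi->phys + REG_DODT, the playback half becomes:

    #include <linux/dmaengine.h>

    static int config_playback(struct dma_chan *chan, dma_addr_t fifo_phys)
    {
            struct dma_slave_config cfg = {};  /* zeroes what we don't set */

            cfg.direction      = DMA_MEM_TO_DEV;
            cfg.dst_addr       = fifo_phys;                  /* device FIFO */
            cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; /* 32-bit port */
            return dmaengine_slave_config(chan, &cfg);
    }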
| @@ -1974,6 +1980,7 @@ static int fsi_probe(struct platform_device *pdev) | |||
| 1974 | /* FSI A setting */ | 1980 | /* FSI A setting */ |
| 1975 | fsi = &master->fsia; | 1981 | fsi = &master->fsia; |
| 1976 | fsi->base = master->base; | 1982 | fsi->base = master->base; |
| 1983 | fsi->phys = res->start; | ||
| 1977 | fsi->master = master; | 1984 | fsi->master = master; |
| 1978 | fsi_port_info_init(fsi, &info.port_a); | 1985 | fsi_port_info_init(fsi, &info.port_a); |
| 1979 | fsi_handler_init(fsi, &info.port_a); | 1986 | fsi_handler_init(fsi, &info.port_a); |
| @@ -1986,6 +1993,7 @@ static int fsi_probe(struct platform_device *pdev) | |||
| 1986 | /* FSI B setting */ | 1993 | /* FSI B setting */ |
| 1987 | fsi = &master->fsib; | 1994 | fsi = &master->fsib; |
| 1988 | fsi->base = master->base + 0x40; | 1995 | fsi->base = master->base + 0x40; |
| 1996 | fsi->phys = res->start + 0x40; | ||
| 1989 | fsi->master = master; | 1997 | fsi->master = master; |
| 1990 | fsi_port_info_init(fsi, &info.port_b); | 1998 | fsi_port_info_init(fsi, &info.port_b); |
| 1991 | fsi_handler_init(fsi, &info.port_b); | 1999 | fsi_handler_init(fsi, &info.port_b); |
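The probe-side hunks supply the missing piece for that configuration: fsi->phys records the controller's physical base from the platform resource (FSI B at the same +0x40 offset already used for its ioremapped base), which is what lets fsi_dma_probe() hand the engine real REG_DODT/REG_DIDT FIFO addresses instead of relying on defaults.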
