author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 15:34:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 15:34:54 -0400
commit		b5b131c7473e17275debcdf1c226f452dc3876ed (patch)
tree		a272e947c38213d4ee989bb3f863a8091d50426b
parent		c7eec380e85a427983782df744f0fb745d867170 (diff)
parent		896e041e8e8efb34520d033a693ef25391f9c9f0 (diff)
Merge tag 'dmaengine-4.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This is a smallish update with minor changes to the core, one new
  driver, and the usual driver updates. Nothing super exciting here...
  - We have made the slave address physical, to enable the driver to do
    the mapping.
  - We now expose maxburst for slave DMA as a new capability, so clients
    can query it and program accordingly.
  - Addition of device synchronize callbacks on omap and edma.
  - pl330 updates to support DMAFLUSHP for Rockchip platforms.
  - Updates and improved sg handling in the Xilinx VDMA driver.
  - New hidma Qualcomm DMA driver, though some bits are still in
    progress"
* tag 'dmaengine-4.6-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (40 commits)
dmaengine: IOATDMA: revise channel reset workaround on CB3.3 platforms
dmaengine: add Qualcomm Technologies HIDMA channel driver
dmaengine: add Qualcomm Technologies HIDMA management driver
dmaengine: hidma: Add Device Tree binding
dmaengine: qcom_bam_dma: move to qcom directory
dmaengine: tegra: Move of_device_id table near to its user
dmaengine: xilinx_vdma: Remove unnecessary variable initializations
dmaengine: sirf: use __maybe_unused to hide pm functions
dmaengine: rcar-dmac: clear pertinence number of channels
dmaengine: sh: shdmac: don't open code of_device_get_match_data()
dmaengine: tegra: don't open code of_device_get_match_data()
dmaengine: qcom_bam_dma: Make driver work for BE
dmaengine: sun4i: support module autoloading
dma/mic_x100_dma: IS_ERR() vs PTR_ERR() typo
dmaengine: xilinx_vdma: Use readl_poll_timeout instead of do while loop's
dmaengine: xilinx_vdma: Simplify spin lock handling
dmaengine: xilinx_vdma: Fix issues with non-parking mode
dmaengine: xilinx_vdma: Improve SG engine handling
dmaengine: pl330: fix to support the burst mode
dmaengine: make slave address physical
...
37 files changed, 2146 insertions, 464 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
new file mode 100644
index 000000000000..c2fb5d033f0e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
@@ -0,0 +1,97 @@
+What:		/sys/devices/platform/hidma-mgmt*/chanops/chan*/priority
+		/sys/devices/platform/QCOM8060:*/chanops/chan*/priority
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains either 0 or 1 and indicates if the DMA channel is a
+		low priority (0) or high priority (1) channel.
+
+What:		/sys/devices/platform/hidma-mgmt*/chanops/chan*/weight
+		/sys/devices/platform/QCOM8060:*/chanops/chan*/weight
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains 0..15 and indicates the weight of the channel among
+		equal priority channels during round robin scheduling.
+
+What:		/sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles
+		/sys/devices/platform/QCOM8060:*/chreset_timeout_cycles
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains the platform-specific cycle value to wait after a
+		reset command is issued. If the value is chosen too short,
+		then the HW will issue a reset failure interrupt. The value
+		is platform specific and should not be changed without
+		consultation.
+
+What:		/sys/devices/platform/hidma-mgmt*/dma_channels
+		/sys/devices/platform/QCOM8060:*/dma_channels
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains the number of DMA channels supported by one instance
+		of HIDMA hardware. The value may change from chip to chip.
+
+What:		/sys/devices/platform/hidma-mgmt*/hw_version_major
+		/sys/devices/platform/QCOM8060:*/hw_version_major
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Major version number of the hardware.
+
+What:		/sys/devices/platform/hidma-mgmt*/hw_version_minor
+		/sys/devices/platform/QCOM8060:*/hw_version_minor
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Minor version number of the hardware.
+
+What:		/sys/devices/platform/hidma-mgmt*/max_rd_xactions
+		/sys/devices/platform/QCOM8060:*/max_rd_xactions
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains a value between 0 and 31, the maximum number of
+		read transactions that can be issued back to back.
+		Choosing a higher number gives better performance but
+		can also reduce the performance of other peripherals
+		sharing the same bus.
+
+What:		/sys/devices/platform/hidma-mgmt*/max_read_request
+		/sys/devices/platform/QCOM8060:*/max_read_request
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Size of each read request. The value needs to be a power
+		of two and can be between 128 and 1024.
+
+What:		/sys/devices/platform/hidma-mgmt*/max_wr_xactions
+		/sys/devices/platform/QCOM8060:*/max_wr_xactions
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Contains a value between 0 and 31, the maximum number of
+		write transactions that can be issued back to back.
+		Choosing a higher number gives better performance but
+		can also reduce the performance of other peripherals
+		sharing the same bus.
+
+What:		/sys/devices/platform/hidma-mgmt*/max_write_request
+		/sys/devices/platform/QCOM8060:*/max_write_request
+Date:		Nov 2015
+KernelVersion:	4.4
+Contact:	"Sinan Kaya <okaya@cudeaurora.org>"
+Description:
+		Size of each write request. The value needs to be a power
+		of two and can be between 128 and 1024.
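
As a usage illustration only (the instance name "hidma-mgmt.0" and the resulting paths are assumptions, not taken from the patch), a small userspace C sketch that tunes one channel through the files documented above:

#include <stdio.h>

/* Hypothetical sketch: bump channel 0 to high priority and maximum
 * round-robin weight via the sysfs attributes above. */
static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	const char *base = "/sys/devices/platform/hidma-mgmt.0/chanops/chan0";
	char path[256];

	snprintf(path, sizeof(path), "%s/priority", base);
	write_attr(path, "1");	/* high priority */
	snprintf(path, sizeof(path), "%s/weight", base);
	write_attr(path, "15");	/* maximum weight among equal priority */
	return 0;
}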
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt
index 267565894db9..db7e2260f9c5 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl330.txt
+++ b/Documentation/devicetree/bindings/dma/arm-pl330.txt
@@ -15,6 +15,7 @@ Optional properties:
   cells in the dmas property of client device.
 - dma-channels: contains the total number of DMA channels supported by the DMAC
 - dma-requests: contains the total number of DMA requests supported by the DMAC
+- arm,pl330-broken-no-flushp: quirk to avoid executing DMAFLUSHP
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
new file mode 100644
index 000000000000..fd5618bd8fbc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -0,0 +1,89 @@
+Qualcomm Technologies HIDMA Management interface
+
+Qualcomm Technologies HIDMA is a high speed DMA device. It only supports
+memcpy and memset capabilities. It has been designed for virtualized
+environments.
+
+Each HIDMA HW instance consists of multiple DMA channels. These channels
+share the same bandwidth. The bandwidth utilization can be partitioned
+among channels based on the priority and weight assignments.
+
+There are only two priority levels and 15 weight assignments possible.
+
+Other parameters here determine how much of the system bus this HIDMA
+instance can use, such as the maximum read/write request size and the
+number of bytes to read/write in a single burst.
+
+Main node required properties:
+- compatible: "qcom,hidma-mgmt-1.0";
+- reg: Address range for DMA device
+- dma-channels: Number of channels supported by this DMA controller.
+- max-write-burst-bytes: Maximum write burst in bytes that HIDMA can
+  occupy the bus for in a single transaction. A memcpy request is
+  fragmented to multiples of this amount. This parameter is used while
+  writing into destination memory. Setting this value incorrectly can
+  starve other peripherals in the system.
+- max-read-burst-bytes: Maximum read burst in bytes that HIDMA can
+  occupy the bus for in a single transaction. A memcpy request is
+  fragmented to multiples of this amount. This parameter is used while
+  reading the source memory. Setting this value incorrectly can starve
+  other peripherals in the system.
+- max-write-transactions: This value is how many times a write burst is
+  applied back to back while writing to the destination before yielding
+  the bus.
+- max-read-transactions: This value is how many times a read burst is
+  applied back to back while reading the source before yielding the bus.
+- channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC.
+  Once a reset is applied to the HW, HW starts a timer for the reset
+  operation to confirm. If the reset is not completed within this time,
+  HW reports a reset failure.
+
+Sub-nodes:
+
+HIDMA has one or more DMA channels that are used to move data from one
+memory location to another.
+
+When the OS is not in control of the management interface (i.e. it's a
+guest), the channel nodes appear on their own, not under a management
+node.
+
+Required properties:
+- compatible: must contain "qcom,hidma-1.0"
+- reg: Addresses for the transfer and event channel
+- interrupts: Should contain the event interrupt
+- desc-count: Number of asynchronous requests this channel can handle
+- iommus: requires an iommu node
+
+Example:
+
+Hypervisor OS configuration:
+
+	hidma-mgmt@f9984000 {
+		compatible = "qcom,hidma-mgmt-1.0";
+		reg = <0xf9984000 0x15000>;
+		dma-channels = <6>;
+		max-write-burst-bytes = <1024>;
+		max-read-burst-bytes = <1024>;
+		max-write-transactions = <31>;
+		max-read-transactions = <31>;
+		channel-reset-timeout-cycles = <0x500>;
+
+		hidma_24: dma-controller@5c050000 {
+			compatible = "qcom,hidma-1.0";
+			reg = <0 0x5c050000 0x0 0x1000>,
+			      <0 0x5c0b0000 0x0 0x1000>;
+			interrupts = <0 389 0>;
+			desc-count = <10>;
+			iommus = <&system_mmu>;
+		};
+	};
+
+Guest OS configuration:
+
+	hidma_24: dma-controller@5c050000 {
+		compatible = "qcom,hidma-1.0";
+		reg = <0 0x5c050000 0x0 0x1000>,
+		      <0 0x5c0b0000 0x0 0x1000>;
+		interrupts = <0 389 0>;
+		desc-count = <10>;
+		iommus = <&system_mmu>;
+	};
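
Since HIDMA is a memcpy/memset engine, its consumers are ordinary dmaengine memcpy clients. A hedged sketch of that client pattern (generic dmaengine API, not HIDMA-specific; error handling trimmed):

#include <linux/dmaengine.h>

/* Sketch: request any DMA_MEMCPY-capable channel (a HIDMA channel would
 * qualify), submit one copy, and poll for completion with dma_sync_wait(). */
static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -EIO;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;
out:
	dma_release_channel(chan);
	return ret;
}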
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 79b1390f2016..d96d87c56f2e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -341,12 +341,13 @@ config MV_XOR
 
 config MXS_DMA
 	bool "MXS DMA support"
-	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
+	depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q || SOC_IMX6UL
 	select STMP_DEVICE
 	select DMA_ENGINE
 	help
 	  Support the MXS DMA engine. This engine including APBH-DMA
-	  and APBX-DMA is integrated into Freescale i.MX23/28/MX6Q/MX6DL chips.
+	  and APBX-DMA is integrated into Freescale
+	  i.MX23/28/MX6Q/MX6DL/MX6UL chips.
 
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
@@ -408,15 +409,6 @@ config PXA_DMA
 	  16 to 32 channels for peripheral to memory or memory to memory
 	  transfers.
 
-config QCOM_BAM_DMA
-	tristate "QCOM BAM DMA support"
-	depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
-	select DMA_ENGINE
-	select DMA_VIRTUAL_CHANNELS
-	---help---
-	  Enable support for the QCOM BAM DMA controller. This controller
-	  provides DMA capabilities for a variety of on-chip devices.
-
 config SIRF_DMA
 	tristate "CSR SiRFprimaII/SiRFmarco DMA support"
 	depends on ARCH_SIRF
@@ -539,6 +531,8 @@ config ZX_DMA
 # driver files
 source "drivers/dma/bestcomm/Kconfig"
 
+source "drivers/dma/qcom/Kconfig"
+
 source "drivers/dma/dw/Kconfig"
 
 source "drivers/dma/hsu/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2dd0a067a0ca..6084127c1486 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_PXA_DMA) += pxa_dma.o
-obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
@@ -67,4 +66,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
 
+obj-y += qcom/
 obj-y += xilinx/
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index eed6bda01790..4a748c3435d7 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -438,7 +438,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
 		return ERR_PTR(-ENODEV);
 	}
 
-	dev_dbg(dev, "found DMA channel \"%s\" at index %d\n", name, index);
+	dev_dbg(dev, "Looking for DMA channel \"%s\" at index %d...\n", name, index);
 	return acpi_dma_request_slave_chan_by_index(dev, index);
 }
 EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index c50a247be2e0..0cb259c59916 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -496,6 +496,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->src_addr_widths = device->src_addr_widths;
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
+	caps->max_burst = device->max_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
 
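
A sketch of the client side of this new capability (the clamp policy is illustrative, not from the patch):

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Sketch: clamp a client's requested burst length to what the engine
 * reports. dma_get_slave_caps() fills max_burst from the new
 * device->max_burst field; zero means the driver did not declare it. */
static int demo_pick_burst(struct dma_chan *chan, u32 want)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	if (caps.max_burst)
		want = min(want, caps.max_burst);

	return want;
}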
diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h
index 241ff2b1402b..0a50c18d85b8 100644
--- a/drivers/dma/dw/regs.h
+++ b/drivers/dma/dw/regs.h
@@ -150,7 +150,7 @@ enum dw_dma_msize {
 #define DWC_CTLL_DST_INC	(0<<7)		/* DAR update/not */
 #define DWC_CTLL_DST_DEC	(1<<7)
 #define DWC_CTLL_DST_FIX	(2<<7)
-#define DWC_CTLL_SRC_INC	(0<<7)		/* SAR update/not */
+#define DWC_CTLL_SRC_INC	(0<<9)		/* SAR update/not */
 #define DWC_CTLL_SRC_DEC	(1<<9)
 #define DWC_CTLL_SRC_FIX	(2<<9)
 #define DWC_CTLL_DST_MSIZE(n)	((n)<<11)	/* burst, #elements */
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index e3d7fcb69b4c..ee3463e774f8 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -869,6 +869,13 @@ static int edma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void edma_synchronize(struct dma_chan *chan)
+{
+	struct edma_chan *echan = to_edma_chan(chan);
+
+	vchan_synchronize(&echan->vchan);
+}
+
 static int edma_slave_config(struct dma_chan *chan,
 	struct dma_slave_config *cfg)
 {
@@ -1365,36 +1372,36 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 static void edma_completion_handler(struct edma_chan *echan)
 {
 	struct device *dev = echan->vchan.chan.device->dev;
-	struct edma_desc *edesc = echan->edesc;
-
-	if (!edesc)
-		return;
+	struct edma_desc *edesc;
 
 	spin_lock(&echan->vchan.lock);
-	if (edesc->cyclic) {
-		vchan_cyclic_callback(&edesc->vdesc);
-		spin_unlock(&echan->vchan.lock);
-		return;
-	} else if (edesc->processed == edesc->pset_nr) {
-		edesc->residue = 0;
-		edma_stop(echan);
-		vchan_cookie_complete(&edesc->vdesc);
-		echan->edesc = NULL;
-
-		dev_dbg(dev, "Transfer completed on channel %d\n",
-			echan->ch_num);
-	} else {
-		dev_dbg(dev, "Sub transfer completed on channel %d\n",
-			echan->ch_num);
-
-		edma_pause(echan);
-
-		/* Update statistics for tx_status */
-		edesc->residue -= edesc->sg_len;
-		edesc->residue_stat = edesc->residue;
-		edesc->processed_stat = edesc->processed;
+	edesc = echan->edesc;
+	if (edesc) {
+		if (edesc->cyclic) {
+			vchan_cyclic_callback(&edesc->vdesc);
+			spin_unlock(&echan->vchan.lock);
+			return;
+		} else if (edesc->processed == edesc->pset_nr) {
+			edesc->residue = 0;
+			edma_stop(echan);
+			vchan_cookie_complete(&edesc->vdesc);
+			echan->edesc = NULL;
+
+			dev_dbg(dev, "Transfer completed on channel %d\n",
+				echan->ch_num);
+		} else {
+			dev_dbg(dev, "Sub transfer completed on channel %d\n",
+				echan->ch_num);
+
+			edma_pause(echan);
+
+			/* Update statistics for tx_status */
+			edesc->residue -= edesc->sg_len;
+			edesc->residue_stat = edesc->residue;
+			edesc->processed_stat = edesc->processed;
+		}
+		edma_execute(echan);
 	}
-	edma_execute(echan);
 
 	spin_unlock(&echan->vchan.lock);
 }
@@ -1837,6 +1844,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 	s_ddev->device_pause = edma_dma_pause;
 	s_ddev->device_resume = edma_dma_resume;
 	s_ddev->device_terminate_all = edma_terminate_all;
+	s_ddev->device_synchronize = edma_synchronize;
 
 	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
 	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
@@ -1862,6 +1870,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 	m_ddev->device_pause = edma_dma_pause;
 	m_ddev->device_resume = edma_dma_resume;
 	m_ddev->device_terminate_all = edma_terminate_all;
+	m_ddev->device_synchronize = edma_synchronize;
 
 	m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
 	m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
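
The new callback backs the dmaengine_terminate_async()/dmaengine_synchronize() pair introduced in 4.5; a usage sketch (the same applies to the omap-dma change further down):

#include <linux/dmaengine.h>

/* Sketch: tear down a channel safely. terminate_async() stops new work
 * without sleeping; synchronize() then waits until in-flight descriptors
 * and their callbacks have finished (this is what device_synchronize ->
 * vchan_synchronize() provides), after which the client may free state
 * that those callbacks touch. */
static void demo_teardown(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);
	dmaengine_synchronize(chan);
}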
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 57ff46284f15..21f08cc3352b 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -421,23 +421,25 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
 			desc->size);
 	}
 
-	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
-	case M2P_INTERRUPT_STALL:
-		/* Disable interrupts */
-		control = readl(edmac->regs + M2P_CONTROL);
-		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
-		m2p_set_control(edmac, control);
-
-		return INTERRUPT_DONE;
-
-	case M2P_INTERRUPT_NFB:
-		if (ep93xx_dma_advance_active(edmac))
-			m2p_fill_desc(edmac);
+	/*
+	 * Even the latest E2 silicon revision sometimes asserts the STALL
+	 * interrupt instead of NFB. Therefore we treat them equally, basing
+	 * the decision on the amount of data we still have to transfer.
+	 */
+	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
+		return INTERRUPT_UNKNOWN;
 
+	if (ep93xx_dma_advance_active(edmac)) {
+		m2p_fill_desc(edmac);
 		return INTERRUPT_NEXT_BUFFER;
 	}
 
-	return INTERRUPT_UNKNOWN;
+	/* Disable interrupts */
+	control = readl(edmac->regs + M2P_CONTROL);
+	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+	m2p_set_control(edmac, control);
+
+	return INTERRUPT_DONE;
 }
 
 /*
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 3cb7b2c78197..1953e57505f4 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -289,6 +289,9 @@ static void idma64_desc_fill(struct idma64_chan *idma64c,
 
 	/* Trigger an interrupt after the last block is transfered */
 	lli->ctllo |= IDMA64C_CTLL_INT_EN;
+
+	/* Disable LLP transfer in the last block */
+	lli->ctllo &= ~(IDMA64C_CTLL_LLP_S_EN | IDMA64C_CTLL_LLP_D_EN);
 }
 
 static struct dma_async_tx_descriptor *idma64_prep_slave_sg(
diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h
index 8423f13ed0da..dc6874424188 100644
--- a/drivers/dma/idma64.h
+++ b/drivers/dma/idma64.h
@@ -71,7 +71,7 @@
 #define IDMA64C_CFGH_SRC_PER(x)		((x) << 0)	/* src peripheral */
 #define IDMA64C_CFGH_DST_PER(x)		((x) << 4)	/* dst peripheral */
 #define IDMA64C_CFGH_RD_ISSUE_THD(x)	((x) << 8)
-#define IDMA64C_CFGH_RW_ISSUE_THD(x)	((x) << 18)
+#define IDMA64C_CFGH_WR_ISSUE_THD(x)	((x) << 18)
 
 /* Interrupt registers */
 
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 21539d5c54c3..bd09961443b1 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -31,6 +31,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
+#include <linux/sizes.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
@@ -290,24 +291,30 @@ static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
 }
 
 static struct ioat_ring_ent *
-ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
+ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
 {
 	struct ioat_dma_descriptor *hw;
 	struct ioat_ring_ent *desc;
 	struct ioatdma_device *ioat_dma;
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
+	int chunk;
 	dma_addr_t phys;
+	u8 *pos;
+	off_t offs;
 
 	ioat_dma = to_ioatdma_device(chan->device);
-	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
-	if (!hw)
-		return NULL;
+
+	chunk = idx / IOAT_DESCS_PER_2M;
+	idx &= (IOAT_DESCS_PER_2M - 1);
+	offs = idx * IOAT_DESC_SZ;
+	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
+	phys = ioat_chan->descs[chunk].hw + offs;
+	hw = (struct ioat_dma_descriptor *)pos;
 	memset(hw, 0, sizeof(*hw));
 
 	desc = kmem_cache_zalloc(ioat_cache, flags);
-	if (!desc) {
-		pci_pool_free(ioat_dma->dma_pool, hw, phys);
+	if (!desc)
 		return NULL;
-	}
 
 	dma_async_tx_descriptor_init(&desc->txd, chan);
 	desc->txd.tx_submit = ioat_tx_submit_unlock;
@@ -318,32 +325,63 @@ ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
 
 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
 {
-	struct ioatdma_device *ioat_dma;
-
-	ioat_dma = to_ioatdma_device(chan->device);
-	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
 	kmem_cache_free(ioat_cache, desc);
 }
 
 struct ioat_ring_ent **
 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 {
+	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 	struct ioat_ring_ent **ring;
-	int descs = 1 << order;
-	int i;
-
-	if (order > ioat_get_max_alloc_order())
-		return NULL;
+	int total_descs = 1 << order;
+	int i, chunks;
 
 	/* allocate the array to hold the software ring */
-	ring = kcalloc(descs, sizeof(*ring), flags);
+	ring = kcalloc(total_descs, sizeof(*ring), flags);
 	if (!ring)
 		return NULL;
-	for (i = 0; i < descs; i++) {
-		ring[i] = ioat_alloc_ring_ent(c, flags);
+
+	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;
+
+	for (i = 0; i < chunks; i++) {
+		struct ioat_descs *descs = &ioat_chan->descs[i];
+
+		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+						 SZ_2M, &descs->hw, flags);
+		if (!descs->virt && (i > 0)) {
+			int idx;
+
+			for (idx = 0; idx < i; idx++) {
+				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+						  descs->virt, descs->hw);
+				descs->virt = NULL;
+				descs->hw = 0;
+			}
+
+			ioat_chan->desc_chunks = 0;
+			kfree(ring);
+			return NULL;
+		}
+	}
+
+	for (i = 0; i < total_descs; i++) {
+		ring[i] = ioat_alloc_ring_ent(c, i, flags);
 		if (!ring[i]) {
+			int idx;
+
 			while (i--)
 				ioat_free_ring_ent(ring[i], c);
+
+			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
+				dma_free_coherent(to_dev(ioat_chan),
+						  SZ_2M,
+						  ioat_chan->descs[idx].virt,
+						  ioat_chan->descs[idx].hw);
+				ioat_chan->descs[idx].virt = NULL;
+				ioat_chan->descs[idx].hw = 0;
+			}
+
+			ioat_chan->desc_chunks = 0;
 			kfree(ring);
 			return NULL;
 		}
@@ -351,7 +389,7 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 	}
 
 	/* link descs */
-	for (i = 0; i < descs-1; i++) {
+	for (i = 0; i < total_descs-1; i++) {
 		struct ioat_ring_ent *next = ring[i+1];
 		struct ioat_dma_descriptor *hw = ring[i]->hw;
 
@@ -362,114 +400,6 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
 	return ring;
 }
 
-static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
-{
-	/* reshape differs from normal ring allocation in that we want
-	 * to allocate a new software ring while only
-	 * extending/truncating the hardware ring
-	 */
-	struct dma_chan *c = &ioat_chan->dma_chan;
-	const u32 curr_size = ioat_ring_size(ioat_chan);
-	const u16 active = ioat_ring_active(ioat_chan);
-	const u32 new_size = 1 << order;
-	struct ioat_ring_ent **ring;
-	u32 i;
-
-	if (order > ioat_get_max_alloc_order())
-		return false;
-
-	/* double check that we have at least 1 free descriptor */
-	if (active == curr_size)
-		return false;
-
-	/* when shrinking, verify that we can hold the current active
-	 * set in the new ring
-	 */
-	if (active >= new_size)
-		return false;
-
-	/* allocate the array to hold the software ring */
-	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
-	if (!ring)
-		return false;
-
-	/* allocate/trim descriptors as needed */
-	if (new_size > curr_size) {
-		/* copy current descriptors to the new ring */
-		for (i = 0; i < curr_size; i++) {
-			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat_chan->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* add new descriptors to the ring */
-		for (i = curr_size; i < new_size; i++) {
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
-			if (!ring[new_idx]) {
-				while (i--) {
-					u16 new_idx = (ioat_chan->tail+i) &
-						       (new_size-1);
-
-					ioat_free_ring_ent(ring[new_idx], c);
-				}
-				kfree(ring);
-				return false;
-			}
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* hw link new descriptors */
-		for (i = curr_size-1; i < new_size; i++) {
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-			struct ioat_ring_ent *next =
-				ring[(new_idx+1) & (new_size-1)];
-			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;
-
-			hw->next = next->txd.phys;
-		}
-	} else {
-		struct ioat_dma_descriptor *hw;
-		struct ioat_ring_ent *next;
-
-		/* copy current descriptors to the new ring, dropping the
-		 * removed descriptors
-		 */
-		for (i = 0; i < new_size; i++) {
-			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
-			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
-
-			ring[new_idx] = ioat_chan->ring[curr_idx];
-			set_desc_id(ring[new_idx], new_idx);
-		}
-
-		/* free deleted descriptors */
-		for (i = new_size; i < curr_size; i++) {
-			struct ioat_ring_ent *ent;
-
-			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
-			ioat_free_ring_ent(ent, c);
-		}
-
-		/* fix up hardware ring */
-		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
-		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
-		hw->next = next->txd.phys;
-	}
-
-	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
-		__func__, new_size);
-
-	kfree(ioat_chan->ring);
-	ioat_chan->ring = ring;
-	ioat_chan->alloc_order = order;
-
-	return true;
-}
-
 /**
  * ioat_check_space_lock - verify space and grab ring producer lock
  * @ioat: ioat,3 channel (ring) to operate on
@@ -478,9 +408,6 @@ static bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
 	__acquires(&ioat_chan->prep_lock)
 {
-	bool retry;
-
- retry:
 	spin_lock_bh(&ioat_chan->prep_lock);
 	/* never allow the last descriptor to be consumed, we need at
 	 * least one free at all times to allow for on-the-fly ring
@@ -493,24 +420,8 @@ int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
 		ioat_chan->produce = num_descs;
 		return 0;  /* with ioat->prep_lock held */
 	}
-	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 
-	/* is another cpu already trying to expand the ring? */
-	if (retry)
-		goto retry;
-
-	spin_lock_bh(&ioat_chan->cleanup_lock);
-	spin_lock_bh(&ioat_chan->prep_lock);
-	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
-	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
-	spin_unlock_bh(&ioat_chan->prep_lock);
-	spin_unlock_bh(&ioat_chan->cleanup_lock);
-
-	/* if we were able to expand the ring retry the allocation */
-	if (retry)
-		goto retry;
-
 	dev_dbg_ratelimited(to_dev(ioat_chan),
 			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
 			    __func__, num_descs, ioat_chan->head,
@@ -823,19 +734,6 @@ static void check_active(struct ioatdma_chan *ioat_chan)
 
 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
-	else if (ioat_chan->alloc_order > ioat_get_alloc_order()) {
-		/* if the ring is idle, empty, and oversized try to step
-		 * down the size
-		 */
-		reshape_ring(ioat_chan, ioat_chan->alloc_order - 1);
-
-		/* keep shrinking until we get back to our minimum
-		 * default size
-		 */
-		if (ioat_chan->alloc_order > ioat_get_alloc_order())
-			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
-	}
-
 }
 
 void ioat_timer_event(unsigned long data)
@@ -916,40 +814,6 @@ ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 	return dma_cookie_status(c, cookie, txstate);
 }
 
-static int ioat_irq_reinit(struct ioatdma_device *ioat_dma)
-{
-	struct pci_dev *pdev = ioat_dma->pdev;
-	int irq = pdev->irq, i;
-
-	if (!is_bwd_ioat(pdev))
-		return 0;
-
-	switch (ioat_dma->irq_mode) {
-	case IOAT_MSIX:
-		for (i = 0; i < ioat_dma->dma_dev.chancnt; i++) {
-			struct msix_entry *msix = &ioat_dma->msix_entries[i];
-			struct ioatdma_chan *ioat_chan;
-
-			ioat_chan = ioat_chan_by_index(ioat_dma, i);
-			devm_free_irq(&pdev->dev, msix->vector, ioat_chan);
-		}
-
-		pci_disable_msix(pdev);
-		break;
-	case IOAT_MSI:
-		pci_disable_msi(pdev);
-		/* fall through */
-	case IOAT_INTX:
-		devm_free_irq(&pdev->dev, irq, ioat_dma);
-		break;
-	default:
-		return 0;
-	}
-	ioat_dma->irq_mode = IOAT_NOIRQ;
-
-	return ioat_dma_setup_interrupts(ioat_dma);
-}
-
 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
 {
 	/* throw away whatever the channel was doing and get it
@@ -989,9 +853,21 @@ int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
 		}
 	}
 
+	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
+		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
+		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
+	}
+
 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
-	if (!err)
-		err = ioat_irq_reinit(ioat_dma);
+	if (!err) {
+		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
+			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
+			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
+			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
+		}
+	}
 
 	if (err)
 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index b8f48074789f..a9bc1a15b0d1 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -62,7 +62,6 @@ enum ioat_irq_mode {
  * struct ioatdma_device - internal representation of a IOAT device
  * @pdev: PCI-Express device
  * @reg_base: MMIO register space base address
- * @dma_pool: for allocating DMA descriptors
  * @completion_pool: DMA buffers for completion ops
  * @sed_hw_pool: DMA super descriptor pools
  * @dma_dev: embedded struct dma_device
@@ -76,8 +75,7 @@ enum ioat_irq_mode {
 struct ioatdma_device {
 	struct pci_dev *pdev;
 	void __iomem *reg_base;
-	struct pci_pool *dma_pool;
-	struct pci_pool *completion_pool;
+	struct dma_pool *completion_pool;
 #define MAX_SED_POOLS	5
 	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
 	struct dma_device dma_dev;
@@ -88,6 +86,16 @@ struct ioatdma_device {
 	struct dca_provider *dca;
 	enum ioat_irq_mode irq_mode;
 	u32 cap;
+
+	/* shadow version for CB3.3 chan reset errata workaround */
+	u64 msixtba0;
+	u64 msixdata0;
+	u32 msixpba;
+};
+
+struct ioat_descs {
+	void *virt;
+	dma_addr_t hw;
 };
 
 struct ioatdma_chan {
@@ -100,7 +108,6 @@ struct ioatdma_chan {
 	#define IOAT_COMPLETION_ACK 1
 	#define IOAT_RESET_PENDING 2
 	#define IOAT_KOBJ_INIT_FAIL 3
-	#define IOAT_RESHAPE_PENDING 4
 	#define IOAT_RUN 5
 	#define IOAT_CHAN_ACTIVE 6
 	struct timer_list timer;
@@ -133,6 +140,8 @@ struct ioatdma_chan {
 	u16 produce;
 	struct ioat_ring_ent **ring;
 	spinlock_t prep_lock;
+	struct ioat_descs descs[2];
+	int desc_chunks;
 };
 
 struct ioat_sysfs_entry {
@@ -302,10 +311,8 @@ static inline bool is_ioat_bug(unsigned long err)
 }
 
 #define IOAT_MAX_ORDER 16
-#define ioat_get_alloc_order() \
-	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
-#define ioat_get_max_alloc_order() \
-	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
+#define IOAT_MAX_DESCS 65536
+#define IOAT_DESCS_PER_2M 32768
 
 static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
 {
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 690e3b4f8202..8e67895bcca3 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -73,6 +73,8 @@
 
 int system_has_dca_enabled(struct pci_dev *pdev);
 
+#define IOAT_DESC_SZ	64
+
 struct ioat_dma_descriptor {
 	uint32_t size;
 	union {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e07912..efdee1a69fc4 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -28,6 +28,7 @@
 #include <linux/prefetch.h>
 #include <linux/dca.h>
 #include <linux/aer.h>
+#include <linux/sizes.h>
 #include "dma.h"
 #include "registers.h"
 #include "hw.h"
@@ -136,14 +137,6 @@ int ioat_pending_level = 4;
 module_param(ioat_pending_level, int, 0644);
 MODULE_PARM_DESC(ioat_pending_level,
 		 "high-water mark for pushing ioat descriptors (default: 4)");
-int ioat_ring_alloc_order = 8;
-module_param(ioat_ring_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_alloc_order,
-		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
-int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
-module_param(ioat_ring_max_alloc_order, int, 0644);
-MODULE_PARM_DESC(ioat_ring_max_alloc_order,
-		 "ioat+: upper limit for ring size (default: 16)");
 static char ioat_interrupt_style[32] = "msix";
 module_param_string(ioat_interrupt_style, ioat_interrupt_style,
 		    sizeof(ioat_interrupt_style), 0644);
@@ -504,23 +497,14 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 	struct pci_dev *pdev = ioat_dma->pdev;
 	struct device *dev = &pdev->dev;
 
-	/* DMA coherent memory pool for DMA descriptor allocations */
-	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
-					     sizeof(struct ioat_dma_descriptor),
-					     64, 0);
-	if (!ioat_dma->dma_pool) {
-		err = -ENOMEM;
-		goto err_dma_pool;
-	}
-
-	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
+	ioat_dma->completion_pool = dma_pool_create("completion_pool", dev,
 						    sizeof(u64),
 						    SMP_CACHE_BYTES,
 						    SMP_CACHE_BYTES);
 
 	if (!ioat_dma->completion_pool) {
 		err = -ENOMEM;
-		goto err_completion_pool;
+		goto err_out;
 	}
 
 	ioat_enumerate_channels(ioat_dma);
@@ -546,10 +530,8 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
 err_self_test:
 	ioat_disable_interrupts(ioat_dma);
 err_setup_interrupts:
-	pci_pool_destroy(ioat_dma->completion_pool);
-err_completion_pool:
-	pci_pool_destroy(ioat_dma->dma_pool);
-err_dma_pool:
+	dma_pool_destroy(ioat_dma->completion_pool);
+err_out:
 	return err;
 }
 
@@ -559,8 +541,7 @@ static int ioat_register(struct ioatdma_device *ioat_dma)
 
 	if (err) {
 		ioat_disable_interrupts(ioat_dma);
-		pci_pool_destroy(ioat_dma->completion_pool);
-		pci_pool_destroy(ioat_dma->dma_pool);
+		dma_pool_destroy(ioat_dma->completion_pool);
 	}
 
 	return err;
@@ -576,8 +557,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
 
 	dma_async_device_unregister(dma);
 
-	pci_pool_destroy(ioat_dma->dma_pool);
-	pci_pool_destroy(ioat_dma->completion_pool);
+	dma_pool_destroy(ioat_dma->completion_pool);
 
 	INIT_LIST_HEAD(&dma->channels);
 }
@@ -666,10 +646,19 @@ static void ioat_free_chan_resources(struct dma_chan *c)
 		ioat_free_ring_ent(desc, c);
 	}
 
+	for (i = 0; i < ioat_chan->desc_chunks; i++) {
+		dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+				  ioat_chan->descs[i].virt,
+				  ioat_chan->descs[i].hw);
+		ioat_chan->descs[i].virt = NULL;
+		ioat_chan->descs[i].hw = 0;
+	}
+	ioat_chan->desc_chunks = 0;
+
 	kfree(ioat_chan->ring);
 	ioat_chan->ring = NULL;
 	ioat_chan->alloc_order = 0;
-	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
+	dma_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
 		      ioat_chan->completion_dma);
 	spin_unlock_bh(&ioat_chan->prep_lock);
 	spin_unlock_bh(&ioat_chan->cleanup_lock);
@@ -701,7 +690,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	/* allocate a completion writeback area */
 	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
 	ioat_chan->completion =
-		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
+		dma_pool_alloc(ioat_chan->ioat_dma->completion_pool,
 			       GFP_KERNEL, &ioat_chan->completion_dma);
 	if (!ioat_chan->completion)
 		return -ENOMEM;
@@ -712,7 +701,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 	writel(((u64)ioat_chan->completion_dma) >> 32,
 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
-	order = ioat_get_alloc_order();
+	order = IOAT_MAX_ORDER;
 	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
 	if (!ring)
 		return -ENOMEM;
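
The pci_pool_* to dma_pool_* conversion is mechanical; to my recollection (an assumption worth verifying against include/linux/pci.h of this era), pci_pool_* was only ever a thin macro shim over dma_pool_*, roughly:

/* Approximate shim from include/linux/pci.h (paraphrased, not verbatim): */
#define pci_pool dma_pool
#define pci_pool_create(name, pdev, size, align, allocation) \
	dma_pool_create(name, &(pdev)->dev, size, align, allocation)
#define pci_pool_destroy(pool) dma_pool_destroy(pool)
#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)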
diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c
index 6bb4a13a8fbd..243421af888f 100644
--- a/drivers/dma/ioat/prep.c
+++ b/drivers/dma/ioat/prep.c
@@ -26,7 +26,7 @@
 #include "hw.h"
 #include "dma.h"
 
-#define MAX_SCF	1024
+#define MAX_SCF	256
 
 /* provide a lookup table for setting the source address in the base or
  * extended descriptor of an xor or pq descriptor
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 068e920ecb68..1502b24b7c7d 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -483,7 +483,7 @@ static int mic_dma_setup_irq(struct mic_dma_chan *ch)
 				   mic_dma_intr_handler, mic_dma_thread_fn,
 				   "mic dma_channel", ch, ch->ch_num);
 	if (IS_ERR(ch->cookie))
-		return IS_ERR(ch->cookie);
+		return PTR_ERR(ch->cookie);
 	return 0;
 }
 
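
The one-character bug class this fixes, as a sketch: IS_ERR() only answers yes/no (so returning it yields 1, not a negative errno), while PTR_ERR() recovers the error code encoded in the pointer.

#include <linux/err.h>

/* Sketch of the idiom: test with IS_ERR(), extract with PTR_ERR(). */
static long demo_check(void *cookie)
{
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);	/* e.g. -ENOMEM, not just "true" */
	return 0;
}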
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 9794b073d7d7..43bd5aee7ffe 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1009,6 +1009,13 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
 	return 0;
 }
 
+static void omap_dma_synchronize(struct dma_chan *chan)
+{
+	struct omap_chan *c = to_omap_dma_chan(chan);
+
+	vchan_synchronize(&c->vc);
+}
+
 static int omap_dma_pause(struct dma_chan *chan)
 {
 	struct omap_chan *c = to_omap_dma_chan(chan);
@@ -1112,6 +1119,7 @@ static int omap_dma_probe(struct platform_device *pdev)
 	od->ddev.device_pause = omap_dma_pause;
 	od->ddev.device_resume = omap_dma_resume;
 	od->ddev.device_terminate_all = omap_dma_terminate_all;
+	od->ddev.device_synchronize = omap_dma_synchronize;
 	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
 	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
 	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 17ee758b419f..372b4359da97 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -33,6 +33,9 @@
 #define PL330_MAX_CHAN		8
 #define PL330_MAX_IRQS		32
 #define PL330_MAX_PERI		32
+#define PL330_MAX_BURST		16
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
 
 enum pl330_cachectrl {
 	CCTRL0,		/* Noncacheable and nonbufferable */
@@ -488,6 +491,17 @@ struct pl330_dmac {
 	/* Peripheral channels connected to this DMAC */
 	unsigned int num_peripherals;
 	struct dma_pl330_chan *peripherals; /* keep at end */
+	int quirks;
+};
+
+static struct pl330_of_quirks {
+	char *quirk;
+	int id;
+} of_quirks[] = {
+	{
+		.quirk = "arm,pl330-broken-no-flushp",
+		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+	}
 };
 
 struct dma_pl330_desc {
@@ -1137,47 +1151,67 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | |||
1137 | return off; | 1151 | return off; |
1138 | } | 1152 | } |
1139 | 1153 | ||
1140 | static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | 1154 | static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run, |
1141 | const struct _xfer_spec *pxs, int cyc) | 1155 | u8 buf[], const struct _xfer_spec *pxs, |
1156 | int cyc) | ||
1142 | { | 1157 | { |
1143 | int off = 0; | 1158 | int off = 0; |
1159 | enum pl330_cond cond; | ||
1160 | |||
1161 | if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) | ||
1162 | cond = BURST; | ||
1163 | else | ||
1164 | cond = SINGLE; | ||
1144 | 1165 | ||
1145 | while (cyc--) { | 1166 | while (cyc--) { |
1146 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); | 1167 | off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri); |
1147 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri); | 1168 | off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri); |
1148 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | 1169 | off += _emit_ST(dry_run, &buf[off], ALWAYS); |
1149 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); | 1170 | |
1171 | if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)) | ||
1172 | off += _emit_FLUSHP(dry_run, &buf[off], | ||
1173 | pxs->desc->peri); | ||
1150 | } | 1174 | } |
1151 | 1175 | ||
1152 | return off; | 1176 | return off; |
1153 | } | 1177 | } |
1154 | 1178 | ||
1155 | static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | 1179 | static inline int _ldst_memtodev(struct pl330_dmac *pl330, |
1156 | const struct _xfer_spec *pxs, int cyc) | 1180 | unsigned dry_run, u8 buf[], |
1181 | const struct _xfer_spec *pxs, int cyc) | ||
1157 | { | 1182 | { |
1158 | int off = 0; | 1183 | int off = 0; |
1184 | enum pl330_cond cond; | ||
1185 | |||
1186 | if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) | ||
1187 | cond = BURST; | ||
1188 | else | ||
1189 | cond = SINGLE; | ||
1159 | 1190 | ||
1160 | while (cyc--) { | 1191 | while (cyc--) { |
1161 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); | 1192 | off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri); |
1162 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | 1193 | off += _emit_LD(dry_run, &buf[off], ALWAYS); |
1163 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); | 1194 | off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri); |
1164 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); | 1195 | |
1196 | if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)) | ||
1197 | off += _emit_FLUSHP(dry_run, &buf[off], | ||
1198 | pxs->desc->peri); | ||
1165 | } | 1199 | } |
1166 | 1200 | ||
1167 | return off; | 1201 | return off; |
1168 | } | 1202 | } |
1169 | 1203 | ||
1170 | static int _bursts(unsigned dry_run, u8 buf[], | 1204 | static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], |
1171 | const struct _xfer_spec *pxs, int cyc) | 1205 | const struct _xfer_spec *pxs, int cyc) |
1172 | { | 1206 | { |
1173 | int off = 0; | 1207 | int off = 0; |
1174 | 1208 | ||
1175 | switch (pxs->desc->rqtype) { | 1209 | switch (pxs->desc->rqtype) { |
1176 | case DMA_MEM_TO_DEV: | 1210 | case DMA_MEM_TO_DEV: |
1177 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | 1211 | off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc); |
1178 | break; | 1212 | break; |
1179 | case DMA_DEV_TO_MEM: | 1213 | case DMA_DEV_TO_MEM: |
1180 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | 1214 | off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc); |
1181 | break; | 1215 | break; |
1182 | case DMA_MEM_TO_MEM: | 1216 | case DMA_MEM_TO_MEM: |
1183 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | 1217 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); |
@@ -1191,7 +1225,7 @@ static int _bursts(unsigned dry_run, u8 buf[], | |||
1191 | } | 1225 | } |
1192 | 1226 | ||
1193 | /* Returns bytes consumed and updates bursts */ | 1227 | /* Returns bytes consumed and updates bursts */ |
1194 | static inline int _loop(unsigned dry_run, u8 buf[], | 1228 | static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], |
1195 | unsigned long *bursts, const struct _xfer_spec *pxs) | 1229 | unsigned long *bursts, const struct _xfer_spec *pxs) |
1196 | { | 1230 | { |
1197 | int cyc, cycmax, szlp, szlpend, szbrst, off; | 1231 | int cyc, cycmax, szlp, szlpend, szbrst, off; |
@@ -1199,7 +1233,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], | |||
1199 | struct _arg_LPEND lpend; | 1233 | struct _arg_LPEND lpend; |
1200 | 1234 | ||
1201 | if (*bursts == 1) | 1235 | if (*bursts == 1) |
1202 | return _bursts(dry_run, buf, pxs, 1); | 1236 | return _bursts(pl330, dry_run, buf, pxs, 1); |
1203 | 1237 | ||
1204 | /* Max iterations possible in DMALP is 256 */ | 1238 | /* Max iterations possible in DMALP is 256 */ |
1205 | if (*bursts >= 256*256) { | 1239 | if (*bursts >= 256*256) { |
@@ -1217,7 +1251,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], | |||
1217 | } | 1251 | } |
1218 | 1252 | ||
1219 | szlp = _emit_LP(1, buf, 0, 0); | 1253 | szlp = _emit_LP(1, buf, 0, 0); |
1220 | szbrst = _bursts(1, buf, pxs, 1); | 1254 | szbrst = _bursts(pl330, 1, buf, pxs, 1); |
1221 | 1255 | ||
1222 | lpend.cond = ALWAYS; | 1256 | lpend.cond = ALWAYS; |
1223 | lpend.forever = false; | 1257 | lpend.forever = false; |
@@ -1249,7 +1283,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], | |||
1249 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); | 1283 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); |
1250 | ljmp1 = off; | 1284 | ljmp1 = off; |
1251 | 1285 | ||
1252 | off += _bursts(dry_run, &buf[off], pxs, cyc); | 1286 | off += _bursts(pl330, dry_run, &buf[off], pxs, cyc); |
1253 | 1287 | ||
1254 | lpend.cond = ALWAYS; | 1288 | lpend.cond = ALWAYS; |
1255 | lpend.forever = false; | 1289 | lpend.forever = false; |
@@ -1272,8 +1306,9 @@ static inline int _loop(unsigned dry_run, u8 buf[], | |||
1272 | return off; | 1306 | return off; |
1273 | } | 1307 | } |
1274 | 1308 | ||
1275 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | 1309 | static inline int _setup_loops(struct pl330_dmac *pl330, |
1276 | const struct _xfer_spec *pxs) | 1310 | unsigned dry_run, u8 buf[], |
1311 | const struct _xfer_spec *pxs) | ||
1277 | { | 1312 | { |
1278 | struct pl330_xfer *x = &pxs->desc->px; | 1313 | struct pl330_xfer *x = &pxs->desc->px; |
1279 | u32 ccr = pxs->ccr; | 1314 | u32 ccr = pxs->ccr; |
@@ -1282,15 +1317,16 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[], | |||
1282 | 1317 | ||
1283 | while (bursts) { | 1318 | while (bursts) { |
1284 | c = bursts; | 1319 | c = bursts; |
1285 | off += _loop(dry_run, &buf[off], &c, pxs); | 1320 | off += _loop(pl330, dry_run, &buf[off], &c, pxs); |
1286 | bursts -= c; | 1321 | bursts -= c; |
1287 | } | 1322 | } |
1288 | 1323 | ||
1289 | return off; | 1324 | return off; |
1290 | } | 1325 | } |
1291 | 1326 | ||
1292 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | 1327 | static inline int _setup_xfer(struct pl330_dmac *pl330, |
1293 | const struct _xfer_spec *pxs) | 1328 | unsigned dry_run, u8 buf[], |
1329 | const struct _xfer_spec *pxs) | ||
1294 | { | 1330 | { |
1295 | struct pl330_xfer *x = &pxs->desc->px; | 1331 | struct pl330_xfer *x = &pxs->desc->px; |
1296 | int off = 0; | 1332 | int off = 0; |
@@ -1301,7 +1337,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[], | |||
1301 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | 1337 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); |
1302 | 1338 | ||
1303 | /* Setup Loop(s) */ | 1339 | /* Setup Loop(s) */ |
1304 | off += _setup_loops(dry_run, &buf[off], pxs); | 1340 | off += _setup_loops(pl330, dry_run, &buf[off], pxs); |
1305 | 1341 | ||
1306 | return off; | 1342 | return off; |
1307 | } | 1343 | } |
@@ -1310,8 +1346,9 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[], | |||
1310 | * A req is a sequence of one or more xfer units. | 1346 | * A req is a sequence of one or more xfer units. |
1311 | * Returns the number of bytes taken to setup the MC for the req. | 1347 | * Returns the number of bytes taken to setup the MC for the req. |
1312 | */ | 1348 | */ |
1313 | static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | 1349 | static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run, |
1314 | unsigned index, struct _xfer_spec *pxs) | 1350 | struct pl330_thread *thrd, unsigned index, |
1351 | struct _xfer_spec *pxs) | ||
1315 | { | 1352 | { |
1316 | struct _pl330_req *req = &thrd->req[index]; | 1353 | struct _pl330_req *req = &thrd->req[index]; |
1317 | struct pl330_xfer *x; | 1354 | struct pl330_xfer *x; |
@@ -1328,7 +1365,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, | |||
1328 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) | 1365 | if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) |
1329 | return -EINVAL; | 1366 | return -EINVAL; |
1330 | 1367 | ||
1331 | off += _setup_xfer(dry_run, &buf[off], pxs); | 1368 | off += _setup_xfer(pl330, dry_run, &buf[off], pxs); |
1332 | 1369 | ||
1333 | /* DMASEV peripheral/event */ | 1370 | /* DMASEV peripheral/event */ |
1334 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | 1371 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); |
@@ -1422,7 +1459,7 @@ static int pl330_submit_req(struct pl330_thread *thrd, | |||
1422 | xs.desc = desc; | 1459 | xs.desc = desc; |
1423 | 1460 | ||
1424 | /* First dry run to check if req is acceptable */ | 1461 | /* First dry run to check if req is acceptable */ |
1425 | ret = _setup_req(1, thrd, idx, &xs); | 1462 | ret = _setup_req(pl330, 1, thrd, idx, &xs); |
1426 | if (ret < 0) | 1463 | if (ret < 0) |
1427 | goto xfer_exit; | 1464 | goto xfer_exit; |
1428 | 1465 | ||
@@ -1436,7 +1473,7 @@ static int pl330_submit_req(struct pl330_thread *thrd, | |||
1436 | /* Hook the request */ | 1473 | /* Hook the request */ |
1437 | thrd->lstenq = idx; | 1474 | thrd->lstenq = idx; |
1438 | thrd->req[idx].desc = desc; | 1475 | thrd->req[idx].desc = desc; |
1439 | _setup_req(0, thrd, idx, &xs); | 1476 | _setup_req(pl330, 0, thrd, idx, &xs); |
1440 | 1477 | ||
1441 | ret = 0; | 1478 | ret = 0; |
1442 | 1479 | ||
@@ -2781,6 +2818,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2781 | struct resource *res; | 2818 | struct resource *res; |
2782 | int i, ret, irq; | 2819 | int i, ret, irq; |
2783 | int num_chan; | 2820 | int num_chan; |
2821 | struct device_node *np = adev->dev.of_node; | ||
2784 | 2822 | ||
2785 | pdat = dev_get_platdata(&adev->dev); | 2823 | pdat = dev_get_platdata(&adev->dev); |
2786 | 2824 | ||
@@ -2800,6 +2838,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2800 | 2838 | ||
2801 | pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; | 2839 | pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
2802 | 2840 | ||
2841 | /* get quirk */ | ||
2842 | for (i = 0; i < ARRAY_SIZE(of_quirks); i++) | ||
2843 | if (of_property_read_bool(np, of_quirks[i].quirk)) | ||
2844 | pl330->quirks |= of_quirks[i].id; | ||
2845 | |||
2803 | res = &adev->res; | 2846 | res = &adev->res; |
2804 | pl330->base = devm_ioremap_resource(&adev->dev, res); | 2847 | pl330->base = devm_ioremap_resource(&adev->dev, res); |
2805 | if (IS_ERR(pl330->base)) | 2848 | if (IS_ERR(pl330->base)) |
@@ -2895,6 +2938,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2895 | pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; | 2938 | pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; |
2896 | pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 2939 | pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
2897 | pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; | 2940 | pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; |
2941 | pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? | ||
2942 | 1 : PL330_MAX_BURST); | ||
2898 | 2943 | ||
2899 | ret = dma_async_device_register(pd); | 2944 | ret = dma_async_device_register(pd); |
2900 | if (ret) { | 2945 | if (ret) { |
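Two user-visible effects of the pl330 changes above: boards whose DMAC lacks a working DMAFLUSHP can set the boolean DT property "arm,pl330-broken-no-flushp" to make the microcode generator use BURST transfers without the flush, and pd->max_burst now advertises the resulting limit to clients (1 with the quirk, otherwise PL330_MAX_BURST, i.e. 16). A sketch of how a slave client could query that new capability, assuming chan is a requested channel and dev its device:

    struct dma_slave_caps caps;

    if (!dma_get_slave_caps(chan, &caps))            /* returns 0 on success */
            dev_info(dev, "burst limit: %u\n", caps.max_burst);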
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig new file mode 100644 index 000000000000..a7761c4025f4 --- /dev/null +++ b/drivers/dma/qcom/Kconfig | |||
@@ -0,0 +1,29 @@ | |||
1 | config QCOM_BAM_DMA | ||
2 | tristate "QCOM BAM DMA support" | ||
3 | depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM) | ||
4 | select DMA_ENGINE | ||
5 | select DMA_VIRTUAL_CHANNELS | ||
6 | ---help--- | ||
7 | Enable support for the QCOM BAM DMA controller. This controller | ||
8 | provides DMA capabilities for a variety of on-chip devices. | ||
9 | |||
10 | config QCOM_HIDMA_MGMT | ||
11 | tristate "Qualcomm Technologies HIDMA Management support" | ||
12 | select DMA_ENGINE | ||
13 | help | ||
14 | Enable support for the Qualcomm Technologies HIDMA Management. | ||
15 | Each DMA device requires one management interface driver | ||
16 | for basic initialization before QCOM_HIDMA channel driver can | ||
17 | start managing the channels. In a virtualized environment, | ||
18 | the guest OS would run QCOM_HIDMA channel driver and the | ||
19 | host would run the QCOM_HIDMA_MGMT management driver. | ||
20 | |||
21 | config QCOM_HIDMA | ||
22 | tristate "Qualcomm Technologies HIDMA Channel support" | ||
23 | select DMA_ENGINE | ||
24 | help | ||
25 | Enable support for the Qualcomm Technologies HIDMA controller. | ||
26 | The HIDMA controller supports optimized buffer copies | ||
27 | (user to kernel, kernel to kernel, etc.). It only supports the | ||
28 | memcpy interface. The core is not intended for general | ||
29 | purpose slave DMA. | ||
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile new file mode 100644 index 000000000000..bfea6990229f --- /dev/null +++ b/drivers/dma/qcom/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o | ||
2 | obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o | ||
3 | hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o | ||
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom/bam_dma.c index d34aef7a101b..d5e0a9c3ad5d 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c | |||
@@ -49,13 +49,13 @@ | |||
49 | #include <linux/clk.h> | 49 | #include <linux/clk.h> |
50 | #include <linux/dmaengine.h> | 50 | #include <linux/dmaengine.h> |
51 | 51 | ||
52 | #include "dmaengine.h" | 52 | #include "../dmaengine.h" |
53 | #include "virt-dma.h" | 53 | #include "../virt-dma.h" |
54 | 54 | ||
55 | struct bam_desc_hw { | 55 | struct bam_desc_hw { |
56 | u32 addr; /* Buffer physical address */ | 56 | __le32 addr; /* Buffer physical address */ |
57 | u16 size; /* Buffer size in bytes */ | 57 | __le16 size; /* Buffer size in bytes */ |
58 | u16 flags; | 58 | __le16 flags; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | #define DESC_FLAG_INT BIT(15) | 61 | #define DESC_FLAG_INT BIT(15) |
@@ -632,14 +632,15 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, | |||
632 | unsigned int curr_offset = 0; | 632 | unsigned int curr_offset = 0; |
633 | 633 | ||
634 | do { | 634 | do { |
635 | desc->addr = sg_dma_address(sg) + curr_offset; | 635 | desc->addr = cpu_to_le32(sg_dma_address(sg) + |
636 | curr_offset); | ||
636 | 637 | ||
637 | if (remainder > BAM_MAX_DATA_SIZE) { | 638 | if (remainder > BAM_MAX_DATA_SIZE) { |
638 | desc->size = BAM_MAX_DATA_SIZE; | 639 | desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE); |
639 | remainder -= BAM_MAX_DATA_SIZE; | 640 | remainder -= BAM_MAX_DATA_SIZE; |
640 | curr_offset += BAM_MAX_DATA_SIZE; | 641 | curr_offset += BAM_MAX_DATA_SIZE; |
641 | } else { | 642 | } else { |
642 | desc->size = remainder; | 643 | desc->size = cpu_to_le16(remainder); |
643 | remainder = 0; | 644 | remainder = 0; |
644 | } | 645 | } |
645 | 646 | ||
@@ -915,9 +916,11 @@ static void bam_start_dma(struct bam_chan *bchan) | |||
915 | 916 | ||
916 | /* set any special flags on the last descriptor */ | 917 | /* set any special flags on the last descriptor */ |
917 | if (async_desc->num_desc == async_desc->xfer_len) | 918 | if (async_desc->num_desc == async_desc->xfer_len) |
918 | desc[async_desc->xfer_len - 1].flags = async_desc->flags; | 919 | desc[async_desc->xfer_len - 1].flags = |
920 | cpu_to_le16(async_desc->flags); | ||
919 | else | 921 | else |
920 | desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; | 922 | desc[async_desc->xfer_len - 1].flags |= |
923 | cpu_to_le16(DESC_FLAG_INT); | ||
921 | 924 | ||
922 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { | 925 | if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { |
923 | u32 partial = MAX_DESCRIPTORS - bchan->tail; | 926 | u32 partial = MAX_DESCRIPTORS - bchan->tail; |
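The bam_dma hunks make the driver endian-clean: descriptor fields the BAM hardware reads are now typed __le32/__le16, and every CPU-side store goes through cpu_to_le*(), which is a no-op on little-endian hosts and a byte swap on big-endian ones. That is all "Make driver work for BE" amounts to. A minimal sketch of the pattern, with dma_addr and len as assumed locals:

    struct bam_desc_hw d;

    d.addr  = cpu_to_le32(dma_addr);
    d.size  = cpu_to_le16(len);
    d.flags = cpu_to_le16(DESC_FLAG_INT);            /* BIT(15), defined in this file */

    /* CPU-side reads must convert back the other way */
    pr_debug("queued %u bytes\n", le16_to_cpu(d.size));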
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c new file mode 100644 index 000000000000..cccc78efbca9 --- /dev/null +++ b/drivers/dma/qcom/hidma.c | |||
@@ -0,0 +1,706 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA DMA engine interface | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008. | ||
18 | * Copyright (C) Semihalf 2009 | ||
19 | * Copyright (C) Ilya Yanok, Emcraft Systems 2010 | ||
20 | * Copyright (C) Alexander Popov, Promcontroller 2014 | ||
21 | * | ||
22 | * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description | ||
23 | * (defines, structures and comments) was taken from MPC5121 DMA driver | ||
24 | * written by Hongjun Chen <hong-jun.chen@freescale.com>. | ||
25 | * | ||
26 | * Approved as OSADL project by a majority of OSADL members and funded | ||
27 | * by OSADL membership fees in 2009; for details see www.osadl.org. | ||
28 | * | ||
29 | * This program is free software; you can redistribute it and/or modify it | ||
30 | * under the terms of the GNU General Public License as published by the Free | ||
31 | * Software Foundation; either version 2 of the License, or (at your option) | ||
32 | * any later version. | ||
33 | * | ||
34 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
35 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
36 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
37 | * more details. | ||
38 | * | ||
39 | * The full GNU General Public License is included in this distribution in the | ||
40 | * file called COPYING. | ||
41 | */ | ||
42 | |||
43 | /* Linux Foundation elects GPLv2 license only. */ | ||
44 | |||
45 | #include <linux/dmaengine.h> | ||
46 | #include <linux/dma-mapping.h> | ||
47 | #include <linux/list.h> | ||
48 | #include <linux/module.h> | ||
49 | #include <linux/platform_device.h> | ||
50 | #include <linux/slab.h> | ||
51 | #include <linux/spinlock.h> | ||
52 | #include <linux/of_dma.h> | ||
53 | #include <linux/property.h> | ||
54 | #include <linux/delay.h> | ||
55 | #include <linux/acpi.h> | ||
56 | #include <linux/irq.h> | ||
57 | #include <linux/atomic.h> | ||
58 | #include <linux/pm_runtime.h> | ||
59 | |||
60 | #include "../dmaengine.h" | ||
61 | #include "hidma.h" | ||
62 | |||
63 | /* | ||
64 | * Default idle time is 2 seconds. This parameter can | ||
65 | * be overridden by changing the following | ||
66 | * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms | ||
67 | * during kernel boot. | ||
68 | */ | ||
69 | #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 | ||
70 | #define HIDMA_ERR_INFO_SW 0xFF | ||
71 | #define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0 | ||
72 | #define HIDMA_NR_DEFAULT_DESC 10 | ||
73 | |||
74 | static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev) | ||
75 | { | ||
76 | return container_of(dmadev, struct hidma_dev, ddev); | ||
77 | } | ||
78 | |||
79 | static inline | ||
80 | struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp) | ||
81 | { | ||
82 | return container_of(_lldevp, struct hidma_dev, lldev); | ||
83 | } | ||
84 | |||
85 | static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach) | ||
86 | { | ||
87 | return container_of(dmach, struct hidma_chan, chan); | ||
88 | } | ||
89 | |||
90 | static inline | ||
91 | struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t) | ||
92 | { | ||
93 | return container_of(t, struct hidma_desc, desc); | ||
94 | } | ||
95 | |||
96 | static void hidma_free(struct hidma_dev *dmadev) | ||
97 | { | ||
98 | INIT_LIST_HEAD(&dmadev->ddev.channels); | ||
99 | } | ||
100 | |||
101 | static unsigned int nr_desc_prm; | ||
102 | module_param(nr_desc_prm, uint, 0644); | ||
103 | MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)"); | ||
104 | |||
105 | |||
106 | /* process completed descriptors */ | ||
107 | static void hidma_process_completed(struct hidma_chan *mchan) | ||
108 | { | ||
109 | struct dma_device *ddev = mchan->chan.device; | ||
110 | struct hidma_dev *mdma = to_hidma_dev(ddev); | ||
111 | struct dma_async_tx_descriptor *desc; | ||
112 | dma_cookie_t last_cookie; | ||
113 | struct hidma_desc *mdesc; | ||
114 | unsigned long irqflags; | ||
115 | struct list_head list; | ||
116 | |||
117 | INIT_LIST_HEAD(&list); | ||
118 | |||
119 | /* Get all completed descriptors */ | ||
120 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
121 | list_splice_tail_init(&mchan->completed, &list); | ||
122 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
123 | |||
124 | /* Execute callbacks and run dependencies */ | ||
125 | list_for_each_entry(mdesc, &list, node) { | ||
126 | enum dma_status llstat; | ||
127 | |||
128 | desc = &mdesc->desc; | ||
129 | |||
130 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
131 | dma_cookie_complete(desc); | ||
132 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
133 | |||
134 | llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch); | ||
135 | if (desc->callback && (llstat == DMA_COMPLETE)) | ||
136 | desc->callback(desc->callback_param); | ||
137 | |||
138 | last_cookie = desc->cookie; | ||
139 | dma_run_dependencies(desc); | ||
140 | } | ||
141 | |||
142 | /* Free descriptors */ | ||
143 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
144 | list_splice_tail_init(&list, &mchan->free); | ||
145 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
146 | |||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Called once for each submitted descriptor. | ||
151 | * PM is locked once for each descriptor that is currently | ||
152 | * in execution. | ||
153 | */ | ||
154 | static void hidma_callback(void *data) | ||
155 | { | ||
156 | struct hidma_desc *mdesc = data; | ||
157 | struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan); | ||
158 | struct dma_device *ddev = mchan->chan.device; | ||
159 | struct hidma_dev *dmadev = to_hidma_dev(ddev); | ||
160 | unsigned long irqflags; | ||
161 | bool queued = false; | ||
162 | |||
163 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
164 | if (mdesc->node.next) { | ||
165 | /* Delete from the active list, add to completed list */ | ||
166 | list_move_tail(&mdesc->node, &mchan->completed); | ||
167 | queued = true; | ||
168 | |||
169 | /* calculate the next running descriptor */ | ||
170 | mchan->running = list_first_entry(&mchan->active, | ||
171 | struct hidma_desc, node); | ||
172 | } | ||
173 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
174 | |||
175 | hidma_process_completed(mchan); | ||
176 | |||
177 | if (queued) { | ||
178 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
179 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
180 | } | ||
181 | } | ||
182 | |||
183 | static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig) | ||
184 | { | ||
185 | struct hidma_chan *mchan; | ||
186 | struct dma_device *ddev; | ||
187 | |||
188 | mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL); | ||
189 | if (!mchan) | ||
190 | return -ENOMEM; | ||
191 | |||
192 | ddev = &dmadev->ddev; | ||
193 | mchan->dma_sig = dma_sig; | ||
194 | mchan->dmadev = dmadev; | ||
195 | mchan->chan.device = ddev; | ||
196 | dma_cookie_init(&mchan->chan); | ||
197 | |||
198 | INIT_LIST_HEAD(&mchan->free); | ||
199 | INIT_LIST_HEAD(&mchan->prepared); | ||
200 | INIT_LIST_HEAD(&mchan->active); | ||
201 | INIT_LIST_HEAD(&mchan->completed); | ||
202 | |||
203 | spin_lock_init(&mchan->lock); | ||
204 | list_add_tail(&mchan->chan.device_node, &ddev->channels); | ||
205 | dmadev->ddev.chancnt++; | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static void hidma_issue_task(unsigned long arg) | ||
210 | { | ||
211 | struct hidma_dev *dmadev = (struct hidma_dev *)arg; | ||
212 | |||
213 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
214 | hidma_ll_start(dmadev->lldev); | ||
215 | } | ||
216 | |||
217 | static void hidma_issue_pending(struct dma_chan *dmach) | ||
218 | { | ||
219 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
220 | struct hidma_dev *dmadev = mchan->dmadev; | ||
221 | unsigned long flags; | ||
222 | int status; | ||
223 | |||
224 | spin_lock_irqsave(&mchan->lock, flags); | ||
225 | if (!mchan->running) { | ||
226 | struct hidma_desc *desc = list_first_entry(&mchan->active, | ||
227 | struct hidma_desc, | ||
228 | node); | ||
229 | mchan->running = desc; | ||
230 | } | ||
231 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
232 | |||
233 | /* PM will be released in hidma_callback function. */ | ||
234 | status = pm_runtime_get(dmadev->ddev.dev); | ||
235 | if (status < 0) | ||
236 | tasklet_schedule(&dmadev->task); | ||
237 | else | ||
238 | hidma_ll_start(dmadev->lldev); | ||
239 | } | ||
240 | |||
241 | static enum dma_status hidma_tx_status(struct dma_chan *dmach, | ||
242 | dma_cookie_t cookie, | ||
243 | struct dma_tx_state *txstate) | ||
244 | { | ||
245 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
246 | enum dma_status ret; | ||
247 | |||
248 | ret = dma_cookie_status(dmach, cookie, txstate); | ||
249 | if (ret == DMA_COMPLETE) | ||
250 | return ret; | ||
251 | |||
252 | if (mchan->paused && (ret == DMA_IN_PROGRESS)) { | ||
253 | unsigned long flags; | ||
254 | dma_cookie_t runcookie; | ||
255 | |||
256 | spin_lock_irqsave(&mchan->lock, flags); | ||
257 | if (mchan->running) | ||
258 | runcookie = mchan->running->desc.cookie; | ||
259 | else | ||
260 | runcookie = -EINVAL; | ||
261 | |||
262 | if (runcookie == cookie) | ||
263 | ret = DMA_PAUSED; | ||
264 | |||
265 | spin_unlock_irqrestore(&mchan->lock, flags); | ||
266 | } | ||
267 | |||
268 | return ret; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Submit descriptor to hardware. | ||
273 | * Lock the PM for each descriptor we are sending. | ||
274 | */ | ||
275 | static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
276 | { | ||
277 | struct hidma_chan *mchan = to_hidma_chan(txd->chan); | ||
278 | struct hidma_dev *dmadev = mchan->dmadev; | ||
279 | struct hidma_desc *mdesc; | ||
280 | unsigned long irqflags; | ||
281 | dma_cookie_t cookie; | ||
282 | |||
283 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
284 | if (!hidma_ll_isenabled(dmadev->lldev)) { | ||
285 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
286 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
287 | return -ENODEV; | ||
288 | } | ||
289 | |||
290 | mdesc = container_of(txd, struct hidma_desc, desc); | ||
291 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
292 | |||
293 | /* Move descriptor to active */ | ||
294 | list_move_tail(&mdesc->node, &mchan->active); | ||
295 | |||
296 | /* Update cookie */ | ||
297 | cookie = dma_cookie_assign(txd); | ||
298 | |||
299 | hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch); | ||
300 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
301 | |||
302 | return cookie; | ||
303 | } | ||
304 | |||
305 | static int hidma_alloc_chan_resources(struct dma_chan *dmach) | ||
306 | { | ||
307 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
308 | struct hidma_dev *dmadev = mchan->dmadev; | ||
309 | struct hidma_desc *mdesc, *tmp; | ||
310 | unsigned long irqflags; | ||
311 | LIST_HEAD(descs); | ||
312 | unsigned int i; | ||
313 | int rc = 0; | ||
314 | |||
315 | if (mchan->allocated) | ||
316 | return 0; | ||
317 | |||
318 | /* Alloc descriptors for this channel */ | ||
319 | for (i = 0; i < dmadev->nr_descriptors; i++) { | ||
320 | mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT); | ||
321 | if (!mdesc) { | ||
322 | rc = -ENOMEM; | ||
323 | break; | ||
324 | } | ||
325 | dma_async_tx_descriptor_init(&mdesc->desc, dmach); | ||
326 | mdesc->desc.tx_submit = hidma_tx_submit; | ||
327 | |||
328 | rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig, | ||
329 | "DMA engine", hidma_callback, mdesc, | ||
330 | &mdesc->tre_ch); | ||
331 | if (rc) { | ||
332 | dev_err(dmach->device->dev, | ||
333 | "channel alloc failed at %u\n", i); | ||
334 | kfree(mdesc); | ||
335 | break; | ||
336 | } | ||
337 | list_add_tail(&mdesc->node, &descs); | ||
338 | } | ||
339 | |||
340 | if (rc) { | ||
341 | /* return the allocated descriptors */ | ||
342 | list_for_each_entry_safe(mdesc, tmp, &descs, node) { | ||
343 | hidma_ll_free(dmadev->lldev, mdesc->tre_ch); | ||
344 | kfree(mdesc); | ||
345 | } | ||
346 | return rc; | ||
347 | } | ||
348 | |||
349 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
350 | list_splice_tail_init(&descs, &mchan->free); | ||
351 | mchan->allocated = true; | ||
352 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
353 | return 1; | ||
354 | } | ||
355 | |||
356 | static struct dma_async_tx_descriptor * | ||
357 | hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src, | ||
358 | size_t len, unsigned long flags) | ||
359 | { | ||
360 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
361 | struct hidma_desc *mdesc = NULL; | ||
362 | struct hidma_dev *mdma = mchan->dmadev; | ||
363 | unsigned long irqflags; | ||
364 | |||
365 | /* Get free descriptor */ | ||
366 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
367 | if (!list_empty(&mchan->free)) { | ||
368 | mdesc = list_first_entry(&mchan->free, struct hidma_desc, node); | ||
369 | list_del(&mdesc->node); | ||
370 | } | ||
371 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
372 | |||
373 | if (!mdesc) | ||
374 | return NULL; | ||
375 | |||
376 | hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch, | ||
377 | src, dest, len, flags); | ||
378 | |||
379 | /* Place descriptor in prepared list */ | ||
380 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
381 | list_add_tail(&mdesc->node, &mchan->prepared); | ||
382 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
383 | |||
384 | return &mdesc->desc; | ||
385 | } | ||
386 | |||
387 | static int hidma_terminate_channel(struct dma_chan *chan) | ||
388 | { | ||
389 | struct hidma_chan *mchan = to_hidma_chan(chan); | ||
390 | struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); | ||
391 | struct hidma_desc *tmp, *mdesc; | ||
392 | unsigned long irqflags; | ||
393 | LIST_HEAD(list); | ||
394 | int rc; | ||
395 | |||
396 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
397 | /* give completed requests a chance to finish */ | ||
398 | hidma_process_completed(mchan); | ||
399 | |||
400 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
401 | list_splice_init(&mchan->active, &list); | ||
402 | list_splice_init(&mchan->prepared, &list); | ||
403 | list_splice_init(&mchan->completed, &list); | ||
404 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
405 | |||
406 | /* this suspends the existing transfer */ | ||
407 | rc = hidma_ll_pause(dmadev->lldev); | ||
408 | if (rc) { | ||
409 | dev_err(dmadev->ddev.dev, "channel did not pause\n"); | ||
410 | goto out; | ||
411 | } | ||
412 | |||
413 | /* return all user requests */ | ||
414 | list_for_each_entry_safe(mdesc, tmp, &list, node) { | ||
415 | struct dma_async_tx_descriptor *txd = &mdesc->desc; | ||
416 | dma_async_tx_callback callback = mdesc->desc.callback; | ||
417 | void *param = mdesc->desc.callback_param; | ||
418 | |||
419 | dma_descriptor_unmap(txd); | ||
420 | |||
421 | if (callback) | ||
422 | callback(param); | ||
423 | |||
424 | dma_run_dependencies(txd); | ||
425 | |||
426 | /* move myself to free_list */ | ||
427 | list_move(&mdesc->node, &mchan->free); | ||
428 | } | ||
429 | |||
430 | rc = hidma_ll_resume(dmadev->lldev); | ||
431 | out: | ||
432 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
433 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
434 | return rc; | ||
435 | } | ||
436 | |||
437 | static int hidma_terminate_all(struct dma_chan *chan) | ||
438 | { | ||
439 | struct hidma_chan *mchan = to_hidma_chan(chan); | ||
440 | struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device); | ||
441 | int rc; | ||
442 | |||
443 | rc = hidma_terminate_channel(chan); | ||
444 | if (rc) | ||
445 | return rc; | ||
446 | |||
447 | /* reinitialize the hardware */ | ||
448 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
449 | rc = hidma_ll_setup(dmadev->lldev); | ||
450 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
451 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
452 | return rc; | ||
453 | } | ||
454 | |||
455 | static void hidma_free_chan_resources(struct dma_chan *dmach) | ||
456 | { | ||
457 | struct hidma_chan *mchan = to_hidma_chan(dmach); | ||
458 | struct hidma_dev *mdma = mchan->dmadev; | ||
459 | struct hidma_desc *mdesc, *tmp; | ||
460 | unsigned long irqflags; | ||
461 | LIST_HEAD(descs); | ||
462 | |||
463 | /* terminate running transactions and free descriptors */ | ||
464 | hidma_terminate_channel(dmach); | ||
465 | |||
466 | spin_lock_irqsave(&mchan->lock, irqflags); | ||
467 | |||
468 | /* Move data */ | ||
469 | list_splice_tail_init(&mchan->free, &descs); | ||
470 | |||
471 | /* Free descriptors */ | ||
472 | list_for_each_entry_safe(mdesc, tmp, &descs, node) { | ||
473 | hidma_ll_free(mdma->lldev, mdesc->tre_ch); | ||
474 | list_del(&mdesc->node); | ||
475 | kfree(mdesc); | ||
476 | } | ||
477 | |||
478 | mchan->allocated = 0; | ||
479 | spin_unlock_irqrestore(&mchan->lock, irqflags); | ||
480 | } | ||
481 | |||
482 | static int hidma_pause(struct dma_chan *chan) | ||
483 | { | ||
484 | struct hidma_chan *mchan; | ||
485 | struct hidma_dev *dmadev; | ||
486 | |||
487 | mchan = to_hidma_chan(chan); | ||
488 | dmadev = to_hidma_dev(mchan->chan.device); | ||
489 | if (!mchan->paused) { | ||
490 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
491 | if (hidma_ll_pause(dmadev->lldev)) | ||
492 | dev_warn(dmadev->ddev.dev, "channel did not stop\n"); | ||
493 | mchan->paused = true; | ||
494 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
495 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
496 | } | ||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | static int hidma_resume(struct dma_chan *chan) | ||
501 | { | ||
502 | struct hidma_chan *mchan; | ||
503 | struct hidma_dev *dmadev; | ||
504 | int rc = 0; | ||
505 | |||
506 | mchan = to_hidma_chan(chan); | ||
507 | dmadev = to_hidma_dev(mchan->chan.device); | ||
508 | if (mchan->paused) { | ||
509 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
510 | rc = hidma_ll_resume(dmadev->lldev); | ||
511 | if (!rc) | ||
512 | mchan->paused = false; | ||
513 | else | ||
514 | dev_err(dmadev->ddev.dev, | ||
515 | "failed to resume the channel"); | ||
516 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
517 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
518 | } | ||
519 | return rc; | ||
520 | } | ||
521 | |||
522 | static irqreturn_t hidma_chirq_handler(int chirq, void *arg) | ||
523 | { | ||
524 | struct hidma_lldev *lldev = arg; | ||
525 | |||
526 | /* | ||
527 | * All interrupts are request driven. | ||
528 | * HW doesn't send an interrupt by itself. | ||
529 | */ | ||
530 | return hidma_ll_inthandler(chirq, lldev); | ||
531 | } | ||
532 | |||
533 | static int hidma_probe(struct platform_device *pdev) | ||
534 | { | ||
535 | struct hidma_dev *dmadev; | ||
536 | struct resource *trca_resource; | ||
537 | struct resource *evca_resource; | ||
538 | int chirq; | ||
539 | void __iomem *evca; | ||
540 | void __iomem *trca; | ||
541 | int rc; | ||
542 | |||
543 | pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); | ||
544 | pm_runtime_use_autosuspend(&pdev->dev); | ||
545 | pm_runtime_set_active(&pdev->dev); | ||
546 | pm_runtime_enable(&pdev->dev); | ||
547 | |||
548 | trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
549 | trca = devm_ioremap_resource(&pdev->dev, trca_resource); | ||
550 | if (IS_ERR(trca)) { | ||
551 | rc = -ENOMEM; | ||
552 | goto bailout; | ||
553 | } | ||
554 | |||
555 | evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
556 | evca = devm_ioremap_resource(&pdev->dev, evca_resource); | ||
557 | if (IS_ERR(evca)) { | ||
558 | rc = -ENOMEM; | ||
559 | goto bailout; | ||
560 | } | ||
561 | |||
562 | /* | ||
563 | * This driver only handles the channel IRQs. | ||
564 | * Common IRQ is handled by the management driver. | ||
565 | */ | ||
566 | chirq = platform_get_irq(pdev, 0); | ||
567 | if (chirq < 0) { | ||
568 | rc = -ENODEV; | ||
569 | goto bailout; | ||
570 | } | ||
571 | |||
572 | dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL); | ||
573 | if (!dmadev) { | ||
574 | rc = -ENOMEM; | ||
575 | goto bailout; | ||
576 | } | ||
577 | |||
578 | INIT_LIST_HEAD(&dmadev->ddev.channels); | ||
579 | spin_lock_init(&dmadev->lock); | ||
580 | dmadev->ddev.dev = &pdev->dev; | ||
581 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
582 | |||
583 | dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask); | ||
584 | if (WARN_ON(!pdev->dev.dma_mask)) { | ||
585 | rc = -ENXIO; | ||
586 | goto dmafree; | ||
587 | } | ||
588 | |||
589 | dmadev->dev_evca = evca; | ||
590 | dmadev->evca_resource = evca_resource; | ||
591 | dmadev->dev_trca = trca; | ||
592 | dmadev->trca_resource = trca_resource; | ||
593 | dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy; | ||
594 | dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources; | ||
595 | dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources; | ||
596 | dmadev->ddev.device_tx_status = hidma_tx_status; | ||
597 | dmadev->ddev.device_issue_pending = hidma_issue_pending; | ||
598 | dmadev->ddev.device_pause = hidma_pause; | ||
599 | dmadev->ddev.device_resume = hidma_resume; | ||
600 | dmadev->ddev.device_terminate_all = hidma_terminate_all; | ||
601 | dmadev->ddev.copy_align = 8; | ||
602 | |||
603 | device_property_read_u32(&pdev->dev, "desc-count", | ||
604 | &dmadev->nr_descriptors); | ||
605 | |||
606 | if (!dmadev->nr_descriptors && nr_desc_prm) | ||
607 | dmadev->nr_descriptors = nr_desc_prm; | ||
608 | |||
609 | if (!dmadev->nr_descriptors) | ||
610 | dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC; | ||
611 | |||
612 | dmadev->chidx = readl(dmadev->dev_trca + 0x28); | ||
613 | |||
614 | /* Set DMA mask to 64 bits. */ | ||
615 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); | ||
616 | if (rc) { | ||
617 | dev_warn(&pdev->dev, "unable to set coherent mask to 64\n"); | ||
618 | rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); | ||
619 | if (rc) | ||
620 | goto dmafree; | ||
621 | } | ||
622 | |||
623 | dmadev->lldev = hidma_ll_init(dmadev->ddev.dev, | ||
624 | dmadev->nr_descriptors, dmadev->dev_trca, | ||
625 | dmadev->dev_evca, dmadev->chidx); | ||
626 | if (!dmadev->lldev) { | ||
627 | rc = -EPROBE_DEFER; | ||
628 | goto dmafree; | ||
629 | } | ||
630 | |||
631 | rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0, | ||
632 | "qcom-hidma", dmadev->lldev); | ||
633 | if (rc) | ||
634 | goto uninit; | ||
635 | |||
636 | INIT_LIST_HEAD(&dmadev->ddev.channels); | ||
637 | rc = hidma_chan_init(dmadev, 0); | ||
638 | if (rc) | ||
639 | goto uninit; | ||
640 | |||
641 | rc = dma_async_device_register(&dmadev->ddev); | ||
642 | if (rc) | ||
643 | goto uninit; | ||
644 | |||
645 | dmadev->irq = chirq; | ||
646 | tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev); | ||
647 | dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n"); | ||
648 | platform_set_drvdata(pdev, dmadev); | ||
649 | pm_runtime_mark_last_busy(dmadev->ddev.dev); | ||
650 | pm_runtime_put_autosuspend(dmadev->ddev.dev); | ||
651 | return 0; | ||
652 | |||
653 | uninit: | ||
654 | hidma_ll_uninit(dmadev->lldev); | ||
655 | dmafree: | ||
656 | if (dmadev) | ||
657 | hidma_free(dmadev); | ||
658 | bailout: | ||
659 | pm_runtime_put_sync(&pdev->dev); | ||
660 | pm_runtime_disable(&pdev->dev); | ||
661 | return rc; | ||
662 | } | ||
663 | |||
664 | static int hidma_remove(struct platform_device *pdev) | ||
665 | { | ||
666 | struct hidma_dev *dmadev = platform_get_drvdata(pdev); | ||
667 | |||
668 | pm_runtime_get_sync(dmadev->ddev.dev); | ||
669 | dma_async_device_unregister(&dmadev->ddev); | ||
670 | devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev); | ||
671 | hidma_ll_uninit(dmadev->lldev); | ||
672 | hidma_free(dmadev); | ||
673 | |||
674 | dev_info(&pdev->dev, "HI-DMA engine removed\n"); | ||
675 | pm_runtime_put_sync_suspend(&pdev->dev); | ||
676 | pm_runtime_disable(&pdev->dev); | ||
677 | |||
678 | return 0; | ||
679 | } | ||
680 | |||
681 | #if IS_ENABLED(CONFIG_ACPI) | ||
682 | static const struct acpi_device_id hidma_acpi_ids[] = { | ||
683 | {"QCOM8061"}, | ||
684 | {}, | ||
685 | }; | ||
686 | #endif | ||
687 | |||
688 | static const struct of_device_id hidma_match[] = { | ||
689 | {.compatible = "qcom,hidma-1.0",}, | ||
690 | {}, | ||
691 | }; | ||
692 | |||
693 | MODULE_DEVICE_TABLE(of, hidma_match); | ||
694 | |||
695 | static struct platform_driver hidma_driver = { | ||
696 | .probe = hidma_probe, | ||
697 | .remove = hidma_remove, | ||
698 | .driver = { | ||
699 | .name = "hidma", | ||
700 | .of_match_table = hidma_match, | ||
701 | .acpi_match_table = ACPI_PTR(hidma_acpi_ids), | ||
702 | }, | ||
703 | }; | ||
704 | |||
705 | module_platform_driver(hidma_driver); | ||
706 | MODULE_LICENSE("GPL v2"); | ||
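hidma registers only DMA_MEMCPY, so the whole driver is exercised through the generic async-memcpy client API. A hedged usage sketch (names are local to the example, error handling elided), annotated with the driver entry point each call reaches; dmaengine_prep_dma_memcpy() is the modern wrapper, older trees call chan->device->device_prep_dma_memcpy() directly:

    dma_cap_mask_t mask;
    struct dma_chan *chan;
    struct dma_async_tx_descriptor *tx;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);
    chan = dma_request_channel(mask, NULL, NULL);    /* any memcpy-capable channel */

    /* dst and src are DMA addresses the caller mapped; len assumed */
    tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
    dmaengine_submit(tx);             /* -> hidma_tx_submit(), takes a PM reference */
    dma_async_issue_pending(chan);    /* -> hidma_issue_pending() */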
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h new file mode 100644 index 000000000000..231e306f6d87 --- /dev/null +++ b/drivers/dma/qcom/hidma.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA data structures | ||
3 | * | ||
4 | * Copyright (c) 2014, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef QCOM_HIDMA_H | ||
17 | #define QCOM_HIDMA_H | ||
18 | |||
19 | #include <linux/kfifo.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/dmaengine.h> | ||
22 | |||
23 | #define TRE_SIZE 32 /* each TRE is 32 bytes */ | ||
24 | #define TRE_CFG_IDX 0 | ||
25 | #define TRE_LEN_IDX 1 | ||
26 | #define TRE_SRC_LOW_IDX 2 | ||
27 | #define TRE_SRC_HI_IDX 3 | ||
28 | #define TRE_DEST_LOW_IDX 4 | ||
29 | #define TRE_DEST_HI_IDX 5 | ||
30 | |||
31 | struct hidma_tx_status { | ||
32 | u8 err_info; /* error record in this transfer */ | ||
33 | u8 err_code; /* completion code */ | ||
34 | }; | ||
35 | |||
36 | struct hidma_tre { | ||
37 | atomic_t allocated; /* if this channel is allocated */ | ||
38 | bool queued; /* flag whether this is pending */ | ||
39 | u16 status; /* status */ | ||
40 | u32 chidx; /* index of the tre */ | ||
41 | u32 dma_sig; /* signature of the tre */ | ||
42 | const char *dev_name; /* name of the device */ | ||
43 | void (*callback)(void *data); /* requester callback */ | ||
44 | void *data; /* Data associated with this channel*/ | ||
45 | struct hidma_lldev *lldev; /* lldma device pointer */ | ||
46 | u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy */ | ||
47 | u32 tre_index; /* the offset where this was written*/ | ||
48 | u32 int_flags; /* interrupt flags */ | ||
49 | }; | ||
50 | |||
51 | struct hidma_lldev { | ||
52 | bool initialized; /* initialized flag */ | ||
53 | u8 trch_state; /* trch_state of the device */ | ||
54 | u8 evch_state; /* evch_state of the device */ | ||
55 | u8 chidx; /* channel index in the core */ | ||
56 | u32 nr_tres; /* max number of configs */ | ||
57 | spinlock_t lock; /* reentrancy */ | ||
58 | struct hidma_tre *trepool; /* trepool of user configs */ | ||
59 | struct device *dev; /* device */ | ||
60 | void __iomem *trca; /* Transfer Channel address */ | ||
61 | void __iomem *evca; /* Event Channel address */ | ||
62 | struct hidma_tre | ||
63 | **pending_tre_list; /* Pointers to pending TREs */ | ||
64 | struct hidma_tx_status | ||
65 | *tx_status_list; /* Pointers to pending TREs status*/ | ||
66 | s32 pending_tre_count; /* Number of TREs pending */ | ||
67 | |||
68 | void *tre_ring; /* TRE ring */ | ||
69 | dma_addr_t tre_ring_handle; /* TRE ring to be shared with HW */ | ||
70 | u32 tre_ring_size; /* Byte size of the ring */ | ||
71 | u32 tre_processed_off; /* last processed TRE */ | ||
72 | |||
73 | void *evre_ring; /* EVRE ring */ | ||
74 | dma_addr_t evre_ring_handle; /* EVRE ring to be shared with HW */ | ||
75 | u32 evre_ring_size; /* Byte size of the ring */ | ||
76 | u32 evre_processed_off; /* last processed EVRE */ | ||
77 | |||
78 | u32 tre_write_offset; /* TRE write location */ | ||
79 | struct tasklet_struct task; /* task delivering notifications */ | ||
80 | DECLARE_KFIFO_PTR(handoff_fifo, | ||
81 | struct hidma_tre *); /* pending TREs FIFO */ | ||
82 | }; | ||
83 | |||
84 | struct hidma_desc { | ||
85 | struct dma_async_tx_descriptor desc; | ||
86 | /* link list node for this channel*/ | ||
87 | struct list_head node; | ||
88 | u32 tre_ch; | ||
89 | }; | ||
90 | |||
91 | struct hidma_chan { | ||
92 | bool paused; | ||
93 | bool allocated; | ||
94 | char dbg_name[16]; | ||
95 | u32 dma_sig; | ||
96 | |||
97 | /* | ||
98 | * active descriptor on this channel | ||
99 | * It is used by the DMA complete notification to | ||
100 | * locate the descriptor that initiated the transfer. | ||
101 | */ | ||
102 | struct dentry *debugfs; | ||
103 | struct dentry *stats; | ||
104 | struct hidma_dev *dmadev; | ||
105 | struct hidma_desc *running; | ||
106 | |||
107 | struct dma_chan chan; | ||
108 | struct list_head free; | ||
109 | struct list_head prepared; | ||
110 | struct list_head active; | ||
111 | struct list_head completed; | ||
112 | |||
113 | /* Lock for this structure */ | ||
114 | spinlock_t lock; | ||
115 | }; | ||
116 | |||
117 | struct hidma_dev { | ||
118 | int irq; | ||
119 | int chidx; | ||
120 | u32 nr_descriptors; | ||
121 | |||
122 | struct hidma_lldev *lldev; | ||
123 | void __iomem *dev_trca; | ||
124 | struct resource *trca_resource; | ||
125 | void __iomem *dev_evca; | ||
126 | struct resource *evca_resource; | ||
127 | |||
128 | /* used to protect the pending channel list*/ | ||
129 | spinlock_t lock; | ||
130 | struct dma_device ddev; | ||
131 | |||
132 | struct dentry *debugfs; | ||
133 | struct dentry *stats; | ||
134 | |||
135 | /* Task delivering issue_pending */ | ||
136 | struct tasklet_struct task; | ||
137 | }; | ||
138 | |||
139 | int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id, | ||
140 | const char *dev_name, | ||
141 | void (*callback)(void *data), void *data, u32 *tre_ch); | ||
142 | |||
143 | void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch); | ||
144 | enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch); | ||
145 | bool hidma_ll_isenabled(struct hidma_lldev *llhndl); | ||
146 | void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch); | ||
147 | void hidma_ll_start(struct hidma_lldev *llhndl); | ||
148 | int hidma_ll_pause(struct hidma_lldev *llhndl); | ||
149 | int hidma_ll_resume(struct hidma_lldev *llhndl); | ||
150 | void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch, | ||
151 | dma_addr_t src, dma_addr_t dest, u32 len, u32 flags); | ||
152 | int hidma_ll_setup(struct hidma_lldev *lldev); | ||
153 | struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels, | ||
154 | void __iomem *trca, void __iomem *evca, | ||
155 | u8 chidx); | ||
156 | int hidma_ll_uninit(struct hidma_lldev *llhndl); | ||
157 | irqreturn_t hidma_ll_inthandler(int irq, void *arg); | ||
158 | void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info, | ||
159 | u8 err_code); | ||
160 | #endif | ||
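The TRE_*_IDX constants above describe each 32-byte transfer record as eight u32 words: config, length, then 64-bit source and destination addresses split into low/high halves. The low-level code that fills them (hidma_ll) is not part of this diff, so the following is only an assumption-labelled illustration of how hidma_ll_set_transfer_params() could stage one into a tre_local[] scratch copy:

    u32 *tre = chan_tre->tre_local;   /* chan_tre: a struct hidma_tre, assumed */

    tre[TRE_LEN_IDX]      = len;
    tre[TRE_SRC_LOW_IDX]  = lower_32_bits(src);
    tre[TRE_SRC_HI_IDX]   = upper_32_bits(src);
    tre[TRE_DEST_LOW_IDX] = lower_32_bits(dest);
    tre[TRE_DEST_HI_IDX]  = upper_32_bits(dest);
    /* TRE_CFG_IDX would hold type/interrupt bits; that layout is not shown in this header */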
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c new file mode 100644 index 000000000000..ef491b893f40 --- /dev/null +++ b/drivers/dma/qcom/hidma_mgmt.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA DMA engine Management interface | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/acpi.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/property.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/pm_runtime.h> | ||
26 | #include <linux/bitops.h> | ||
27 | |||
28 | #include "hidma_mgmt.h" | ||
29 | |||
30 | #define HIDMA_QOS_N_OFFSET 0x300 | ||
31 | #define HIDMA_CFG_OFFSET 0x400 | ||
32 | #define HIDMA_MAX_BUS_REQ_LEN_OFFSET 0x41C | ||
33 | #define HIDMA_MAX_XACTIONS_OFFSET 0x420 | ||
34 | #define HIDMA_HW_VERSION_OFFSET 0x424 | ||
35 | #define HIDMA_CHRESET_TIMEOUT_OFFSET 0x418 | ||
36 | |||
37 | #define HIDMA_MAX_WR_XACTIONS_MASK GENMASK(4, 0) | ||
38 | #define HIDMA_MAX_RD_XACTIONS_MASK GENMASK(4, 0) | ||
39 | #define HIDMA_WEIGHT_MASK GENMASK(6, 0) | ||
40 | #define HIDMA_MAX_BUS_REQ_LEN_MASK GENMASK(15, 0) | ||
41 | #define HIDMA_CHRESET_TIMEOUT_MASK GENMASK(19, 0) | ||
42 | |||
43 | #define HIDMA_MAX_WR_XACTIONS_BIT_POS 16 | ||
44 | #define HIDMA_MAX_BUS_WR_REQ_BIT_POS 16 | ||
45 | #define HIDMA_WRR_BIT_POS 8 | ||
46 | #define HIDMA_PRIORITY_BIT_POS 15 | ||
47 | |||
48 | #define HIDMA_AUTOSUSPEND_TIMEOUT 2000 | ||
49 | #define HIDMA_MAX_CHANNEL_WEIGHT 15 | ||
50 | |||
51 | int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev) | ||
52 | { | ||
53 | unsigned int i; | ||
54 | u32 val; | ||
55 | |||
56 | if (!is_power_of_2(mgmtdev->max_write_request) || | ||
57 | (mgmtdev->max_write_request < 128) || | ||
58 | (mgmtdev->max_write_request > 1024)) { | ||
59 | dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n", | ||
60 | mgmtdev->max_write_request); | ||
61 | return -EINVAL; | ||
62 | } | ||
63 | |||
64 | if (!is_power_of_2(mgmtdev->max_read_request) || | ||
65 | (mgmtdev->max_read_request < 128) || | ||
66 | (mgmtdev->max_read_request > 1024)) { | ||
67 | dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n", | ||
68 | mgmtdev->max_read_request); | ||
69 | return -EINVAL; | ||
70 | } | ||
71 | |||
72 | if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) { | ||
73 | dev_err(&mgmtdev->pdev->dev, | ||
74 | "max_wr_xactions cannot be bigger than %ld\n", | ||
75 | HIDMA_MAX_WR_XACTIONS_MASK); | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | |||
79 | if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) { | ||
80 | dev_err(&mgmtdev->pdev->dev, | ||
81 | "max_rd_xactions cannot be bigger than %ld\n", | ||
82 | HIDMA_MAX_RD_XACTIONS_MASK); | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | |||
86 | for (i = 0; i < mgmtdev->dma_channels; i++) { | ||
87 | if (mgmtdev->priority[i] > 1) { | ||
88 | dev_err(&mgmtdev->pdev->dev, | ||
89 | "priority can be 0 or 1\n"); | ||
90 | return -EINVAL; | ||
91 | } | ||
92 | |||
93 | if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) { | ||
94 | dev_err(&mgmtdev->pdev->dev, | ||
95 | "max value of weight can be %d.\n", | ||
96 | HIDMA_MAX_CHANNEL_WEIGHT); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | /* weight needs to be at least one */ | ||
101 | if (mgmtdev->weight[i] == 0) | ||
102 | mgmtdev->weight[i] = 1; | ||
103 | } | ||
104 | |||
105 | pm_runtime_get_sync(&mgmtdev->pdev->dev); | ||
106 | val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); | ||
107 | val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS); | ||
108 | val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS; | ||
109 | val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK; | ||
110 | val |= mgmtdev->max_read_request; | ||
111 | writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET); | ||
112 | |||
113 | val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); | ||
114 | val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS); | ||
115 | val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS; | ||
116 | val &= ~HIDMA_MAX_RD_XACTIONS_MASK; | ||
117 | val |= mgmtdev->max_rd_xactions; | ||
118 | writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET); | ||
119 | |||
120 | mgmtdev->hw_version = | ||
121 | readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET); | ||
122 | mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF; | ||
123 | mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF; | ||
124 | |||
125 | for (i = 0; i < mgmtdev->dma_channels; i++) { | ||
126 | u32 weight = mgmtdev->weight[i]; | ||
127 | u32 priority = mgmtdev->priority[i]; | ||
128 | |||
129 | val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); | ||
130 | val &= ~(1 << HIDMA_PRIORITY_BIT_POS); | ||
131 | val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS; | ||
132 | val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS); | ||
133 | val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS; | ||
134 | writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i)); | ||
135 | } | ||
136 | |||
137 | val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); | ||
138 | val &= ~HIDMA_CHRESET_TIMEOUT_MASK; | ||
139 | val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK; | ||
140 | writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET); | ||
141 | |||
142 | pm_runtime_mark_last_busy(&mgmtdev->pdev->dev); | ||
143 | pm_runtime_put_autosuspend(&mgmtdev->pdev->dev); | ||
144 | return 0; | ||
145 | } | ||
146 | EXPORT_SYMBOL_GPL(hidma_mgmt_setup); | ||
147 | |||
148 | static int hidma_mgmt_probe(struct platform_device *pdev) | ||
149 | { | ||
150 | struct hidma_mgmt_dev *mgmtdev; | ||
151 | struct resource *res; | ||
152 | void __iomem *virtaddr; | ||
153 | int irq; | ||
154 | int rc; | ||
155 | u32 val; | ||
156 | |||
157 | pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT); | ||
158 | pm_runtime_use_autosuspend(&pdev->dev); | ||
159 | pm_runtime_set_active(&pdev->dev); | ||
160 | pm_runtime_enable(&pdev->dev); | ||
161 | pm_runtime_get_sync(&pdev->dev); | ||
162 | |||
163 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
164 | virtaddr = devm_ioremap_resource(&pdev->dev, res); | ||
165 | if (IS_ERR(virtaddr)) { | ||
166 | rc = PTR_ERR(virtaddr); | ||
167 | goto out; | ||
168 | } | ||
169 | |||
170 | irq = platform_get_irq(pdev, 0); | ||
171 | if (irq < 0) { | ||
172 | dev_err(&pdev->dev, "irq resources not found\n"); | ||
173 | rc = irq; | ||
174 | goto out; | ||
175 | } | ||
176 | |||
177 | mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL); | ||
178 | if (!mgmtdev) { | ||
179 | rc = -ENOMEM; | ||
180 | goto out; | ||
181 | } | ||
182 | |||
183 | mgmtdev->pdev = pdev; | ||
184 | mgmtdev->addrsize = resource_size(res); | ||
185 | mgmtdev->virtaddr = virtaddr; | ||
186 | |||
187 | rc = device_property_read_u32(&pdev->dev, "dma-channels", | ||
188 | &mgmtdev->dma_channels); | ||
189 | if (rc) { | ||
190 | dev_err(&pdev->dev, "number of channels missing\n"); | ||
191 | goto out; | ||
192 | } | ||
193 | |||
194 | rc = device_property_read_u32(&pdev->dev, | ||
195 | "channel-reset-timeout-cycles", | ||
196 | &mgmtdev->chreset_timeout_cycles); | ||
197 | if (rc) { | ||
198 | dev_err(&pdev->dev, "channel reset timeout missing\n"); | ||
199 | goto out; | ||
200 | } | ||
201 | |||
202 | rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes", | ||
203 | &mgmtdev->max_write_request); | ||
204 | if (rc) { | ||
205 | dev_err(&pdev->dev, "max-write-burst-bytes missing\n"); | ||
206 | goto out; | ||
207 | } | ||
208 | |||
209 | rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes", | ||
210 | &mgmtdev->max_read_request); | ||
211 | if (rc) { | ||
212 | dev_err(&pdev->dev, "max-read-burst-bytes missing\n"); | ||
213 | goto out; | ||
214 | } | ||
215 | |||
216 | rc = device_property_read_u32(&pdev->dev, "max-write-transactions", | ||
217 | &mgmtdev->max_wr_xactions); | ||
218 | if (rc) { | ||
219 | dev_err(&pdev->dev, "max-write-transactions missing\n"); | ||
220 | goto out; | ||
221 | } | ||
222 | |||
223 | rc = device_property_read_u32(&pdev->dev, "max-read-transactions", | ||
224 | &mgmtdev->max_rd_xactions); | ||
225 | if (rc) { | ||
226 | dev_err(&pdev->dev, "max-read-transactions missing\n"); | ||
227 | goto out; | ||
228 | } | ||
229 | |||
230 | mgmtdev->priority = devm_kcalloc(&pdev->dev, | ||
231 | mgmtdev->dma_channels, | ||
232 | sizeof(*mgmtdev->priority), | ||
233 | GFP_KERNEL); | ||
234 | if (!mgmtdev->priority) { | ||
235 | rc = -ENOMEM; | ||
236 | goto out; | ||
237 | } | ||
238 | |||
239 | mgmtdev->weight = devm_kcalloc(&pdev->dev, | ||
240 | mgmtdev->dma_channels, | ||
241 | sizeof(*mgmtdev->weight), GFP_KERNEL); | ||
242 | if (!mgmtdev->weight) { | ||
243 | rc = -ENOMEM; | ||
244 | goto out; | ||
245 | } | ||
246 | |||
247 | rc = hidma_mgmt_setup(mgmtdev); | ||
248 | if (rc) { | ||
249 | dev_err(&pdev->dev, "setup failed\n"); | ||
250 | goto out; | ||
251 | } | ||
252 | |||
253 | /* start the HW */ | ||
254 | val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET); | ||
255 | val |= 1; | ||
256 | writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET); | ||
257 | |||
258 | rc = hidma_mgmt_init_sys(mgmtdev); | ||
259 | if (rc) { | ||
260 | dev_err(&pdev->dev, "sysfs setup failed\n"); | ||
261 | goto out; | ||
262 | } | ||
263 | |||
264 | dev_info(&pdev->dev, | ||
265 | "HW rev: %d.%d @ %pa with %d physical channels\n", | ||
266 | mgmtdev->hw_version_major, mgmtdev->hw_version_minor, | ||
267 | &res->start, mgmtdev->dma_channels); | ||
268 | |||
269 | platform_set_drvdata(pdev, mgmtdev); | ||
270 | pm_runtime_mark_last_busy(&pdev->dev); | ||
271 | pm_runtime_put_autosuspend(&pdev->dev); | ||
272 | return 0; | ||
273 | out: | ||
274 | pm_runtime_put_sync_suspend(&pdev->dev); | ||
275 | pm_runtime_disable(&pdev->dev); | ||
276 | return rc; | ||
277 | } | ||
278 | |||
279 | #if IS_ENABLED(CONFIG_ACPI) | ||
280 | static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { | ||
281 | {"QCOM8060"}, | ||
282 | {}, | ||
283 | }; | ||
284 | #endif | ||
285 | |||
286 | static const struct of_device_id hidma_mgmt_match[] = { | ||
287 | {.compatible = "qcom,hidma-mgmt-1.0",}, | ||
288 | {}, | ||
289 | }; | ||
290 | MODULE_DEVICE_TABLE(of, hidma_mgmt_match); | ||
291 | |||
292 | static struct platform_driver hidma_mgmt_driver = { | ||
293 | .probe = hidma_mgmt_probe, | ||
294 | .driver = { | ||
295 | .name = "hidma-mgmt", | ||
296 | .of_match_table = hidma_mgmt_match, | ||
297 | .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), | ||
298 | }, | ||
299 | }; | ||
300 | |||
301 | module_platform_driver(hidma_mgmt_driver); | ||
302 | MODULE_LICENSE("GPL v2"); | ||
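Note the runtime-PM choreography in the probe path: every hardware access happens inside a get/put pair, and the put is the autosuspend variant so the block powers down only after an idle period. A stripped-down sketch of that lifecycle for a hypothetical driver (the timeout value and the demo_hw_init() helper are illustrative):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#define DEMO_AUTOSUSPEND_MS 2000        /* illustrative idle timeout */

static int demo_hw_init(struct platform_device *pdev)
{
        return 0;                       /* register programming goes here */
}

static int demo_probe(struct platform_device *pdev)
{
        int rc;

        pm_runtime_set_autosuspend_delay(&pdev->dev, DEMO_AUTOSUSPEND_MS);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);        /* hold HW on while probing */

        rc = demo_hw_init(pdev);
        if (rc) {
                pm_runtime_put_sync_suspend(&pdev->dev);
                pm_runtime_disable(&pdev->dev);
                return rc;
        }

        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_put_autosuspend(&pdev->dev); /* drop ref, arm idle timer */
        return 0;
}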
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 000000000000..f7daf33769f4
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA Management common header | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | struct hidma_mgmt_dev { | ||
17 | u8 hw_version_major; | ||
18 | u8 hw_version_minor; | ||
19 | |||
20 | u32 max_wr_xactions; | ||
21 | u32 max_rd_xactions; | ||
22 | u32 max_write_request; | ||
23 | u32 max_read_request; | ||
24 | u32 dma_channels; | ||
25 | u32 chreset_timeout_cycles; | ||
26 | u32 hw_version; | ||
27 | u32 *priority; | ||
28 | u32 *weight; | ||
29 | |||
30 | /* Hardware device constants */ | ||
31 | void __iomem *virtaddr; | ||
32 | resource_size_t addrsize; | ||
33 | |||
34 | struct kobject **chroots; | ||
35 | struct platform_device *pdev; | ||
36 | }; | ||
37 | |||
38 | int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev); | ||
39 | int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev); | ||
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 000000000000..d61f1068a34b
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,295 @@ | |||
1 | /* | ||
2 | * Qualcomm Technologies HIDMA Management SYS interface | ||
3 | * | ||
4 | * Copyright (c) 2015, The Linux Foundation. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 and | ||
8 | * only version 2 as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/sysfs.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | |||
19 | #include "hidma_mgmt.h" | ||
20 | |||
21 | struct hidma_chan_attr { | ||
22 | struct hidma_mgmt_dev *mdev; | ||
23 | int index; | ||
24 | struct kobj_attribute attr; | ||
25 | }; | ||
26 | |||
27 | struct hidma_mgmt_fileinfo { | ||
28 | char *name; | ||
29 | int mode; | ||
30 | int (*get)(struct hidma_mgmt_dev *mdev); | ||
31 | int (*set)(struct hidma_mgmt_dev *mdev, u64 val); | ||
32 | }; | ||
33 | |||
34 | #define IMPLEMENT_GETSET(name) \ | ||
35 | static int get_##name(struct hidma_mgmt_dev *mdev) \ | ||
36 | { \ | ||
37 | return mdev->name; \ | ||
38 | } \ | ||
39 | static int set_##name(struct hidma_mgmt_dev *mdev, u64 val) \ | ||
40 | { \ | ||
41 | u64 tmp; \ | ||
42 | int rc; \ | ||
43 | \ | ||
44 | tmp = mdev->name; \ | ||
45 | mdev->name = val; \ | ||
46 | rc = hidma_mgmt_setup(mdev); \ | ||
47 | if (rc) \ | ||
48 | mdev->name = tmp; \ | ||
49 | return rc; \ | ||
50 | } | ||
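IMPLEMENT_GETSET() stamps out one getter/setter pair per management parameter, and every setter follows the same try-then-roll-back shape. Written out by hand for one field, IMPLEMENT_GETSET(dma_channels) expands to:

static int get_dma_channels(struct hidma_mgmt_dev *mdev)
{
        return mdev->dma_channels;
}

static int set_dma_channels(struct hidma_mgmt_dev *mdev, u64 val)
{
        u64 tmp;
        int rc;

        tmp = mdev->dma_channels;          /* remember the old value */
        mdev->dma_channels = val;          /* speculatively apply the new one */
        rc = hidma_mgmt_setup(mdev);       /* reprogram the hardware */
        if (rc)
                mdev->dma_channels = tmp;  /* roll back on failure */
        return rc;
}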
51 | |||
52 | #define DECLARE_ATTRIBUTE(name, mode) \ | ||
53 | {#name, mode, get_##name, set_##name} | ||
54 | |||
55 | IMPLEMENT_GETSET(hw_version_major) | ||
56 | IMPLEMENT_GETSET(hw_version_minor) | ||
57 | IMPLEMENT_GETSET(max_wr_xactions) | ||
58 | IMPLEMENT_GETSET(max_rd_xactions) | ||
59 | IMPLEMENT_GETSET(max_write_request) | ||
60 | IMPLEMENT_GETSET(max_read_request) | ||
61 | IMPLEMENT_GETSET(dma_channels) | ||
62 | IMPLEMENT_GETSET(chreset_timeout_cycles) | ||
63 | |||
64 | static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val) | ||
65 | { | ||
66 | u64 tmp; | ||
67 | int rc; | ||
68 | |||
69 | if (i >= mdev->dma_channels) | ||
70 | return -EINVAL; | ||
71 | |||
72 | tmp = mdev->priority[i]; | ||
73 | mdev->priority[i] = val; | ||
74 | rc = hidma_mgmt_setup(mdev); | ||
75 | if (rc) | ||
76 | mdev->priority[i] = tmp; | ||
77 | return rc; | ||
78 | } | ||
79 | |||
80 | static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val) | ||
81 | { | ||
82 | u64 tmp; | ||
83 | int rc; | ||
84 | |||
85 | if (i >= mdev->dma_channels) | ||
86 | return -EINVAL; | ||
87 | |||
88 | tmp = mdev->weight[i]; | ||
89 | mdev->weight[i] = val; | ||
90 | rc = hidma_mgmt_setup(mdev); | ||
91 | if (rc) | ||
92 | mdev->weight[i] = tmp; | ||
93 | return rc; | ||
94 | } | ||
95 | |||
96 | static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = { | ||
97 | DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO), | ||
98 | DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO), | ||
99 | DECLARE_ATTRIBUTE(dma_channels, S_IRUGO), | ||
100 | DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO), | ||
101 | DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO), | ||
102 | DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO), | ||
103 | DECLARE_ATTRIBUTE(max_write_request, S_IRUGO), | ||
104 | DECLARE_ATTRIBUTE(max_read_request, S_IRUGO), | ||
105 | }; | ||
106 | |||
107 | static ssize_t show_values(struct device *dev, struct device_attribute *attr, | ||
108 | char *buf) | ||
109 | { | ||
110 | struct platform_device *pdev = to_platform_device(dev); | ||
111 | struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev); | ||
112 | unsigned int i; | ||
113 | |||
114 | buf[0] = 0; | ||
115 | |||
116 | for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { | ||
117 | if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) { | ||
118 | sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev)); | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | return strlen(buf); | ||
123 | } | ||
124 | |||
125 | static ssize_t set_values(struct device *dev, struct device_attribute *attr, | ||
126 | const char *buf, size_t count) | ||
127 | { | ||
128 | struct platform_device *pdev = to_platform_device(dev); | ||
129 | struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev); | ||
130 | unsigned long tmp; | ||
131 | unsigned int i; | ||
132 | int rc; | ||
133 | |||
134 | rc = kstrtoul(buf, 0, &tmp); | ||
135 | if (rc) | ||
136 | return rc; | ||
137 | |||
138 | for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { | ||
139 | if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) { | ||
140 | rc = hidma_mgmt_files[i].set(mdev, tmp); | ||
141 | if (rc) | ||
142 | return rc; | ||
143 | |||
144 | break; | ||
145 | } | ||
146 | } | ||
147 | return count; | ||
148 | } | ||
149 | |||
150 | static ssize_t show_values_channel(struct kobject *kobj, | ||
151 | struct kobj_attribute *attr, char *buf) | ||
152 | { | ||
153 | struct hidma_chan_attr *chattr; | ||
154 | struct hidma_mgmt_dev *mdev; | ||
155 | |||
156 | buf[0] = 0; | ||
157 | chattr = container_of(attr, struct hidma_chan_attr, attr); | ||
158 | mdev = chattr->mdev; | ||
159 | if (strcmp(attr->attr.name, "priority") == 0) | ||
160 | sprintf(buf, "%d\n", mdev->priority[chattr->index]); | ||
161 | else if (strcmp(attr->attr.name, "weight") == 0) | ||
162 | sprintf(buf, "%d\n", mdev->weight[chattr->index]); | ||
163 | |||
164 | return strlen(buf); | ||
165 | } | ||
166 | |||
167 | static ssize_t set_values_channel(struct kobject *kobj, | ||
168 | struct kobj_attribute *attr, const char *buf, | ||
169 | size_t count) | ||
170 | { | ||
171 | struct hidma_chan_attr *chattr; | ||
172 | struct hidma_mgmt_dev *mdev; | ||
173 | unsigned long tmp; | ||
174 | int rc; | ||
175 | |||
176 | chattr = container_of(attr, struct hidma_chan_attr, attr); | ||
177 | mdev = chattr->mdev; | ||
178 | |||
179 | rc = kstrtoul(buf, 0, &tmp); | ||
180 | if (rc) | ||
181 | return rc; | ||
182 | |||
183 | if (strcmp(attr->attr.name, "priority") == 0) { | ||
184 | rc = set_priority(mdev, chattr->index, tmp); | ||
185 | if (rc) | ||
186 | return rc; | ||
187 | } else if (strcmp(attr->attr.name, "weight") == 0) { | ||
188 | rc = set_weight(mdev, chattr->index, tmp); | ||
189 | if (rc) | ||
190 | return rc; | ||
191 | } | ||
192 | return count; | ||
193 | } | ||
194 | |||
195 | static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode) | ||
196 | { | ||
197 | struct device_attribute *attrs; | ||
198 | char *name_copy; | ||
199 | |||
200 | attrs = devm_kmalloc(&dev->pdev->dev, | ||
201 | sizeof(struct device_attribute), GFP_KERNEL); | ||
202 | if (!attrs) | ||
203 | return -ENOMEM; | ||
204 | |||
205 | name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL); | ||
206 | if (!name_copy) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | attrs->attr.name = name_copy; | ||
210 | attrs->attr.mode = mode; | ||
211 | attrs->show = show_values; | ||
212 | attrs->store = set_values; | ||
213 | sysfs_attr_init(&attrs->attr); | ||
214 | |||
215 | return device_create_file(&dev->pdev->dev, attrs); | ||
216 | } | ||
217 | |||
218 | static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name, | ||
219 | int mode, int index, | ||
220 | struct kobject *parent) | ||
221 | { | ||
222 | struct hidma_chan_attr *chattr; | ||
223 | char *name_copy; | ||
224 | |||
225 | chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL); | ||
226 | if (!chattr) | ||
227 | return -ENOMEM; | ||
228 | |||
229 | name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL); | ||
230 | if (!name_copy) | ||
231 | return -ENOMEM; | ||
232 | |||
233 | chattr->mdev = mdev; | ||
234 | chattr->index = index; | ||
235 | chattr->attr.attr.name = name_copy; | ||
236 | chattr->attr.attr.mode = mode; | ||
237 | chattr->attr.show = show_values_channel; | ||
238 | chattr->attr.store = set_values_channel; | ||
239 | sysfs_attr_init(&chattr->attr.attr); | ||
240 | |||
241 | return sysfs_create_file(parent, &chattr->attr.attr); | ||
242 | } | ||
243 | |||
244 | int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev) | ||
245 | { | ||
246 | unsigned int i; | ||
247 | int rc; | ||
248 | int required; | ||
249 | struct kobject *chanops; | ||
250 | |||
251 | required = sizeof(*mdev->chroots) * mdev->dma_channels; | ||
252 | mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL); | ||
253 | if (!mdev->chroots) | ||
254 | return -ENOMEM; | ||
255 | |||
256 | chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj); | ||
257 | if (!chanops) | ||
258 | return -ENOMEM; | ||
259 | |||
260 | /* create each channel directory here */ | ||
261 | for (i = 0; i < mdev->dma_channels; i++) { | ||
262 | char name[20]; | ||
263 | |||
264 | snprintf(name, sizeof(name), "chan%d", i); | ||
265 | mdev->chroots[i] = kobject_create_and_add(name, chanops); | ||
266 | if (!mdev->chroots[i]) | ||
267 | return -ENOMEM; | ||
268 | } | ||
269 | |||
270 | /* populate common parameters */ | ||
271 | for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) { | ||
272 | rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name, | ||
273 | hidma_mgmt_files[i].mode); | ||
274 | if (rc) | ||
275 | return rc; | ||
276 | } | ||
277 | |||
278 | /* populate parameters that are per channel */ | ||
279 | for (i = 0; i < mdev->dma_channels; i++) { | ||
280 | rc = create_sysfs_entry_channel(mdev, "priority", | ||
281 | (S_IRUGO | S_IWUGO), i, | ||
282 | mdev->chroots[i]); | ||
283 | if (rc) | ||
284 | return rc; | ||
285 | |||
286 | rc = create_sysfs_entry_channel(mdev, "weight", | ||
287 | (S_IRUGO | S_IWUGO), i, | ||
288 | mdev->chroots[i]); | ||
289 | if (rc) | ||
290 | return rc; | ||
291 | } | ||
292 | |||
293 | return 0; | ||
294 | } | ||
295 | EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys); | ||
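From userspace these attributes appear under the management platform device, with the per-channel priority and weight knobs in the chanops/chan* directories created above. A hedged sketch of raising one channel's weight; the exact device path is illustrative and depends on how the platform instantiates the device:

#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/devices/platform/hidma-mgmt.0/chanops/chan0/weight";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fprintf(f, "10\n");   /* parsed by kstrtoul() in set_values_channel() */
        fclose(f);
        return 0;
}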
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index f32c430eb16c..6e0685f1a838 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -12,7 +12,7 @@ config RENESAS_DMA | |||
12 | 12 | ||
13 | config SH_DMAE_BASE | 13 | config SH_DMAE_BASE |
14 | bool "Renesas SuperH DMA Engine support" | 14 | bool "Renesas SuperH DMA Engine support" |
15 | depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST | 15 | depends on SUPERH || ARCH_RENESAS || COMPILE_TEST |
16 | depends on !SUPERH || SH_DMA | 16 | depends on !SUPERH || SH_DMA |
17 | depends on !SH_DMA_API | 17 | depends on !SH_DMA_API |
18 | default y | 18 | default y |
@@ -41,7 +41,7 @@ endif | |||
41 | 41 | ||
42 | config RCAR_DMAC | 42 | config RCAR_DMAC |
43 | tristate "Renesas R-Car Gen2 DMA Controller" | 43 | tristate "Renesas R-Car Gen2 DMA Controller" |
44 | depends on ARCH_SHMOBILE || COMPILE_TEST | 44 | depends on ARCH_RENESAS || COMPILE_TEST |
45 | select RENESAS_DMA | 45 | select RENESAS_DMA |
46 | help | 46 | help |
47 | This driver supports the general purpose DMA controller found in the | 47 | This driver supports the general purpose DMA controller found in the |
@@ -49,7 +49,7 @@ config RCAR_DMAC | |||
49 | 49 | ||
50 | config RENESAS_USB_DMAC | 50 | config RENESAS_USB_DMAC |
51 | tristate "Renesas USB-DMA Controller" | 51 | tristate "Renesas USB-DMA Controller" |
52 | depends on ARCH_SHMOBILE || COMPILE_TEST | 52 | depends on ARCH_RENESAS || COMPILE_TEST |
53 | select RENESAS_DMA | 53 | select RENESAS_DMA |
54 | select DMA_VIRTUAL_CHANNELS | 54 | select DMA_VIRTUAL_CHANNELS |
55 | help | 55 | help |
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 7820d07e7bee..dfb17926297b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -413,7 +413,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac) | |||
413 | u16 dmaor; | 413 | u16 dmaor; |
414 | 414 | ||
415 | /* Clear all channels and enable the DMAC globally. */ | 415 | /* Clear all channels and enable the DMAC globally. */ |
416 | rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff); | 416 | rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0)); |
417 | rcar_dmac_write(dmac, RCAR_DMAOR, | 417 | rcar_dmac_write(dmac, RCAR_DMAOR, |
418 | RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); | 418 | RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); |
419 | 419 | ||
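The rcar-dmac one-liner replaces a hard-coded 0x7fff with a mask sized to the controller's actual channel count. GENMASK(h, l) builds a contiguous run of set bits from bit l through bit h; a small userspace illustration using a simplified 32-bit stand-in for the kernel macro:

#include <stdio.h>

/* Simplified 32-bit equivalent of the kernel's GENMASK(h, l). */
#define GENMASK32(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
        unsigned int n_channels = 8;    /* illustrative channel count */

        /* GENMASK32(7, 0) == 0xff: clears only the channels that exist. */
        printf("0x%x\n", GENMASK32(n_channels - 1, 0));
        return 0;
}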
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 11707df1a689..80d86402490e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -699,7 +699,7 @@ static int sh_dmae_probe(struct platform_device *pdev) | |||
699 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | 699 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; |
700 | 700 | ||
701 | if (pdev->dev.of_node) | 701 | if (pdev->dev.of_node) |
702 | pdata = of_match_device(sh_dmae_of_match, &pdev->dev)->data; | 702 | pdata = of_device_get_match_data(&pdev->dev); |
703 | else | 703 | else |
704 | pdata = dev_get_platdata(&pdev->dev); | 704 | pdata = dev_get_platdata(&pdev->dev); |
705 | 705 | ||
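of_device_get_match_data() folds shdmac's two-step match-then-dereference idiom into a single call (the tegra change below makes the same conversion). Roughly, the helper does the equivalent of this sketch (simplified; the real implementation lives in the OF core):

#include <linux/of_device.h>

static const void *match_data_open_coded(struct device *dev,
                                         const struct of_device_id *table)
{
        const struct of_device_id *match;

        match = of_match_device(table, dev);
        if (!match)
                return NULL;    /* no compatible entry for this device */
        return match->data;     /* per-SoC data attached to the entry */
}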
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 22ea2419ee56..e48350e65089 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -989,7 +989,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) | |||
989 | return 0; | 989 | return 0; |
990 | } | 990 | } |
991 | 991 | ||
992 | static int sirfsoc_dma_runtime_suspend(struct device *dev) | 992 | static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev) |
993 | { | 993 | { |
994 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 994 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
995 | 995 | ||
@@ -997,7 +997,7 @@ static int sirfsoc_dma_runtime_suspend(struct device *dev) | |||
997 | return 0; | 997 | return 0; |
998 | } | 998 | } |
999 | 999 | ||
1000 | static int sirfsoc_dma_runtime_resume(struct device *dev) | 1000 | static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev) |
1001 | { | 1001 | { |
1002 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 1002 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
1003 | int ret; | 1003 | int ret; |
@@ -1010,8 +1010,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev) | |||
1010 | return 0; | 1010 | return 0; |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | #ifdef CONFIG_PM_SLEEP | 1013 | static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev) |
1014 | static int sirfsoc_dma_pm_suspend(struct device *dev) | ||
1015 | { | 1014 | { |
1016 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 1015 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
1017 | struct sirfsoc_dma_regs *save = &sdma->regs_save; | 1016 | struct sirfsoc_dma_regs *save = &sdma->regs_save; |
@@ -1062,7 +1061,7 @@ static int sirfsoc_dma_pm_suspend(struct device *dev) | |||
1062 | return 0; | 1061 | return 0; |
1063 | } | 1062 | } |
1064 | 1063 | ||
1065 | static int sirfsoc_dma_pm_resume(struct device *dev) | 1064 | static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev) |
1066 | { | 1065 | { |
1067 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 1066 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
1068 | struct sirfsoc_dma_regs *save = &sdma->regs_save; | 1067 | struct sirfsoc_dma_regs *save = &sdma->regs_save; |
@@ -1121,7 +1120,6 @@ static int sirfsoc_dma_pm_resume(struct device *dev) | |||
1121 | 1120 | ||
1122 | return 0; | 1121 | return 0; |
1123 | } | 1122 | } |
1124 | #endif | ||
1125 | 1123 | ||
1126 | static const struct dev_pm_ops sirfsoc_dma_pm_ops = { | 1124 | static const struct dev_pm_ops sirfsoc_dma_pm_ops = { |
1127 | SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL) | 1125 | SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL) |
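The sirf conversion trades #ifdef guards for __maybe_unused: the PM callbacks are now always compiled, so they cannot bit-rot behind a disabled config option, while the attribute silences the defined-but-unused warning when the SET_*_PM_OPS macros compile the references away. A minimal sketch of the pattern:

#include <linux/device.h>
#include <linux/pm.h>

static int __maybe_unused demo_runtime_suspend(struct device *dev)
{
        /* ... gate clocks, save context ... */
        return 0;
}

static int __maybe_unused demo_runtime_resume(struct device *dev)
{
        /* ... ungate clocks, restore context ... */
        return 0;
}

/* SET_RUNTIME_PM_OPS() only references the callbacks when CONFIG_PM=y. */
static const struct dev_pm_ops demo_pm_ops = {
        SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};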
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index 1661d518224a..e0df233dde92 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1271,6 +1271,7 @@ static const struct of_device_id sun4i_dma_match[] = { | |||
1271 | { .compatible = "allwinner,sun4i-a10-dma" }, | 1271 | { .compatible = "allwinner,sun4i-a10-dma" }, |
1272 | { /* sentinel */ }, | 1272 | { /* sentinel */ }, |
1273 | }; | 1273 | }; |
1274 | MODULE_DEVICE_TABLE(of, sun4i_dma_match); | ||
1274 | 1275 | ||
1275 | static struct platform_driver sun4i_dma_driver = { | 1276 | static struct platform_driver sun4i_dma_driver = { |
1276 | .probe = sun4i_dma_probe, | 1277 | .probe = sun4i_dma_probe, |
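The one-line sun4i fix matters because MODULE_DEVICE_TABLE(of, ...) is what exports module aliases for the compatible strings; without it the match table still works once the module is loaded, but udev never loads it automatically when a matching DT node shows up. The general shape, with a hypothetical compatible string:

#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id demo_dma_match[] = {
        { .compatible = "vendor,demo-dma" },    /* hypothetical */
        { /* sentinel */ },
};
/* Emits "of:..." aliases into modinfo so udev can modprobe on hotplug. */
MODULE_DEVICE_TABLE(of, demo_dma_match);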
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 935da8192f59..3871f29e523d 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1292,40 +1292,19 @@ static const struct tegra_dma_chip_data tegra148_dma_chip_data = { | |||
1292 | .support_separate_wcount_reg = true, | 1292 | .support_separate_wcount_reg = true, |
1293 | }; | 1293 | }; |
1294 | 1294 | ||
1295 | |||
1296 | static const struct of_device_id tegra_dma_of_match[] = { | ||
1297 | { | ||
1298 | .compatible = "nvidia,tegra148-apbdma", | ||
1299 | .data = &tegra148_dma_chip_data, | ||
1300 | }, { | ||
1301 | .compatible = "nvidia,tegra114-apbdma", | ||
1302 | .data = &tegra114_dma_chip_data, | ||
1303 | }, { | ||
1304 | .compatible = "nvidia,tegra30-apbdma", | ||
1305 | .data = &tegra30_dma_chip_data, | ||
1306 | }, { | ||
1307 | .compatible = "nvidia,tegra20-apbdma", | ||
1308 | .data = &tegra20_dma_chip_data, | ||
1309 | }, { | ||
1310 | }, | ||
1311 | }; | ||
1312 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); | ||
1313 | |||
1314 | static int tegra_dma_probe(struct platform_device *pdev) | 1295 | static int tegra_dma_probe(struct platform_device *pdev) |
1315 | { | 1296 | { |
1316 | struct resource *res; | 1297 | struct resource *res; |
1317 | struct tegra_dma *tdma; | 1298 | struct tegra_dma *tdma; |
1318 | int ret; | 1299 | int ret; |
1319 | int i; | 1300 | int i; |
1320 | const struct tegra_dma_chip_data *cdata = NULL; | 1301 | const struct tegra_dma_chip_data *cdata; |
1321 | const struct of_device_id *match; | ||
1322 | 1302 | ||
1323 | match = of_match_device(tegra_dma_of_match, &pdev->dev); | 1303 | cdata = of_device_get_match_data(&pdev->dev); |
1324 | if (!match) { | 1304 | if (!cdata) { |
1325 | dev_err(&pdev->dev, "Error: No device match found\n"); | 1305 | dev_err(&pdev->dev, "Error: No device match data found\n"); |
1326 | return -ENODEV; | 1306 | return -ENODEV; |
1327 | } | 1307 | } |
1328 | cdata = match->data; | ||
1329 | 1308 | ||
1330 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | 1309 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * |
1331 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | 1310 | sizeof(struct tegra_dma_channel), GFP_KERNEL); |
@@ -1612,6 +1591,24 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = { | |||
1612 | SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) | 1591 | SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) |
1613 | }; | 1592 | }; |
1614 | 1593 | ||
1594 | static const struct of_device_id tegra_dma_of_match[] = { | ||
1595 | { | ||
1596 | .compatible = "nvidia,tegra148-apbdma", | ||
1597 | .data = &tegra148_dma_chip_data, | ||
1598 | }, { | ||
1599 | .compatible = "nvidia,tegra114-apbdma", | ||
1600 | .data = &tegra114_dma_chip_data, | ||
1601 | }, { | ||
1602 | .compatible = "nvidia,tegra30-apbdma", | ||
1603 | .data = &tegra30_dma_chip_data, | ||
1604 | }, { | ||
1605 | .compatible = "nvidia,tegra20-apbdma", | ||
1606 | .data = &tegra20_dma_chip_data, | ||
1607 | }, { | ||
1608 | }, | ||
1609 | }; | ||
1610 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); | ||
1611 | |||
1615 | static struct platform_driver tegra_dmac_driver = { | 1612 | static struct platform_driver tegra_dmac_driver = { |
1616 | .driver = { | 1613 | .driver = { |
1617 | .name = "tegra-apbdma", | 1614 | .name = "tegra-apbdma", |
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 6f4b5017ca3b..0ee0321868d3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
31 | #include <linux/iopoll.h> | ||
31 | #include <linux/module.h> | 32 | #include <linux/module.h> |
32 | #include <linux/of_address.h> | 33 | #include <linux/of_address.h> |
33 | #include <linux/of_dma.h> | 34 | #include <linux/of_dma.h> |
@@ -190,8 +191,7 @@ struct xilinx_vdma_tx_descriptor { | |||
190 | * @desc_offset: TX descriptor registers offset | 191 | * @desc_offset: TX descriptor registers offset |
191 | * @lock: Descriptor operation lock | 192 | * @lock: Descriptor operation lock |
192 | * @pending_list: Descriptors waiting | 193 | * @pending_list: Descriptors waiting |
193 | * @active_desc: Active descriptor | 194 | * @active_list: Descriptors ready to submit |
194 | * @allocated_desc: Allocated descriptor | ||
195 | * @done_list: Complete descriptors | 195 | * @done_list: Complete descriptors |
196 | * @common: DMA common channel | 196 | * @common: DMA common channel |
197 | * @desc_pool: Descriptors pool | 197 | * @desc_pool: Descriptors pool |
@@ -206,6 +206,7 @@ struct xilinx_vdma_tx_descriptor { | |||
206 | * @tasklet: Cleanup work after irq | 206 | * @tasklet: Cleanup work after irq |
207 | * @config: Device configuration info | 207 | * @config: Device configuration info |
208 | * @flush_on_fsync: Flush on Frame sync | 208 | * @flush_on_fsync: Flush on Frame sync |
209 | * @desc_pendingcount: Descriptor pending count | ||
209 | */ | 210 | */ |
210 | struct xilinx_vdma_chan { | 211 | struct xilinx_vdma_chan { |
211 | struct xilinx_vdma_device *xdev; | 212 | struct xilinx_vdma_device *xdev; |
@@ -213,8 +214,7 @@ struct xilinx_vdma_chan { | |||
213 | u32 desc_offset; | 214 | u32 desc_offset; |
214 | spinlock_t lock; | 215 | spinlock_t lock; |
215 | struct list_head pending_list; | 216 | struct list_head pending_list; |
216 | struct xilinx_vdma_tx_descriptor *active_desc; | 217 | struct list_head active_list; |
217 | struct xilinx_vdma_tx_descriptor *allocated_desc; | ||
218 | struct list_head done_list; | 218 | struct list_head done_list; |
219 | struct dma_chan common; | 219 | struct dma_chan common; |
220 | struct dma_pool *desc_pool; | 220 | struct dma_pool *desc_pool; |
@@ -229,6 +229,7 @@ struct xilinx_vdma_chan { | |||
229 | struct tasklet_struct tasklet; | 229 | struct tasklet_struct tasklet; |
230 | struct xilinx_vdma_config config; | 230 | struct xilinx_vdma_config config; |
231 | bool flush_on_fsync; | 231 | bool flush_on_fsync; |
232 | u32 desc_pendingcount; | ||
232 | }; | 233 | }; |
233 | 234 | ||
234 | /** | 235 | /** |
@@ -254,6 +255,9 @@ struct xilinx_vdma_device { | |||
254 | container_of(chan, struct xilinx_vdma_chan, common) | 255 | container_of(chan, struct xilinx_vdma_chan, common) |
255 | #define to_vdma_tx_descriptor(tx) \ | 256 | #define to_vdma_tx_descriptor(tx) \ |
256 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) | 257 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) |
258 | #define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ | ||
259 | readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ | ||
260 | cond, delay_us, timeout_us) | ||
257 | 261 | ||
258 | /* IO accessors */ | 262 | /* IO accessors */ |
259 | static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) | 263 | static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) |
@@ -342,19 +346,11 @@ static struct xilinx_vdma_tx_descriptor * | |||
342 | xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) | 346 | xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) |
343 | { | 347 | { |
344 | struct xilinx_vdma_tx_descriptor *desc; | 348 | struct xilinx_vdma_tx_descriptor *desc; |
345 | unsigned long flags; | ||
346 | |||
347 | if (chan->allocated_desc) | ||
348 | return chan->allocated_desc; | ||
349 | 349 | ||
350 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | 350 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); |
351 | if (!desc) | 351 | if (!desc) |
352 | return NULL; | 352 | return NULL; |
353 | 353 | ||
354 | spin_lock_irqsave(&chan->lock, flags); | ||
355 | chan->allocated_desc = desc; | ||
356 | spin_unlock_irqrestore(&chan->lock, flags); | ||
357 | |||
358 | INIT_LIST_HEAD(&desc->segments); | 354 | INIT_LIST_HEAD(&desc->segments); |
359 | 355 | ||
360 | return desc; | 356 | return desc; |
@@ -412,9 +408,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) | |||
412 | 408 | ||
413 | xilinx_vdma_free_desc_list(chan, &chan->pending_list); | 409 | xilinx_vdma_free_desc_list(chan, &chan->pending_list); |
414 | xilinx_vdma_free_desc_list(chan, &chan->done_list); | 410 | xilinx_vdma_free_desc_list(chan, &chan->done_list); |
415 | 411 | xilinx_vdma_free_desc_list(chan, &chan->active_list); | |
416 | xilinx_vdma_free_tx_descriptor(chan, chan->active_desc); | ||
417 | chan->active_desc = NULL; | ||
418 | 412 | ||
419 | spin_unlock_irqrestore(&chan->lock, flags); | 413 | spin_unlock_irqrestore(&chan->lock, flags); |
420 | } | 414 | } |
@@ -560,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) | |||
560 | */ | 554 | */ |
561 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | 555 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) |
562 | { | 556 | { |
563 | int loop = XILINX_VDMA_LOOP_COUNT; | 557 | int err; |
558 | u32 val; | ||
564 | 559 | ||
565 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | 560 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); |
566 | 561 | ||
567 | /* Wait for the hardware to halt */ | 562 | /* Wait for the hardware to halt */ |
568 | do { | 563 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, |
569 | if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | 564 | (val & XILINX_VDMA_DMASR_HALTED), 0, |
570 | XILINX_VDMA_DMASR_HALTED) | 565 | XILINX_VDMA_LOOP_COUNT); |
571 | break; | ||
572 | } while (loop--); | ||
573 | 566 | ||
574 | if (!loop) { | 567 | if (err) { |
575 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", | 568 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
576 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 569 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); |
577 | chan->err = true; | 570 | chan->err = true; |
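readl_poll_timeout() from <linux/iopoll.h> packages the bounded poll the driver used to open code, including one final re-read after the deadline so a state change in the last instant is not misreported as a timeout. A minimal sketch of such a halt wait, with illustrative register and bit names:

#include <linux/bitops.h>
#include <linux/iopoll.h>

#define DEMO_DMASR_HALTED BIT(0)        /* assumed status bit */

static int demo_wait_halted(void __iomem *dmasr)
{
        u32 val;

        /* No delay between reads; give up after 1000 us. */
        return readl_poll_timeout(dmasr, val,
                                  (val & DEMO_DMASR_HALTED), 0, 1000);
}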
@@ -586,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | |||
586 | */ | 579 | */ |
587 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | 580 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) |
588 | { | 581 | { |
589 | int loop = XILINX_VDMA_LOOP_COUNT; | 582 | int err; |
583 | u32 val; | ||
590 | 584 | ||
591 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | 585 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); |
592 | 586 | ||
593 | /* Wait for the hardware to start */ | 587 | /* Wait for the hardware to start */ |
594 | do { | 588 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, |
595 | if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | 589 | !(val & XILINX_VDMA_DMASR_HALTED), 0, |
596 | XILINX_VDMA_DMASR_HALTED)) | 590 | XILINX_VDMA_LOOP_COUNT); |
597 | break; | ||
598 | } while (loop--); | ||
599 | 591 | ||
600 | if (!loop) { | 592 | if (err) { |
601 | dev_err(chan->dev, "Cannot start channel %p: %x\n", | 593 | dev_err(chan->dev, "Cannot start channel %p: %x\n", |
602 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 594 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); |
603 | 595 | ||
@@ -614,45 +606,39 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | |||
614 | static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | 606 | static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) |
615 | { | 607 | { |
616 | struct xilinx_vdma_config *config = &chan->config; | 608 | struct xilinx_vdma_config *config = &chan->config; |
617 | struct xilinx_vdma_tx_descriptor *desc; | 609 | struct xilinx_vdma_tx_descriptor *desc, *tail_desc; |
618 | unsigned long flags; | ||
619 | u32 reg; | 610 | u32 reg; |
620 | struct xilinx_vdma_tx_segment *head, *tail = NULL; | 611 | struct xilinx_vdma_tx_segment *tail_segment; |
621 | 612 | ||
613 | /* This function is invoked with the channel lock held */ | ||
622 | if (chan->err) | 614 | if (chan->err) |
623 | return; | 615 | return; |
624 | 616 | ||
625 | spin_lock_irqsave(&chan->lock, flags); | ||
626 | |||
627 | /* There's already an active descriptor, bail out. */ | ||
628 | if (chan->active_desc) | ||
629 | goto out_unlock; | ||
630 | |||
631 | if (list_empty(&chan->pending_list)) | 617 | if (list_empty(&chan->pending_list)) |
632 | goto out_unlock; | 618 | return; |
633 | 619 | ||
634 | desc = list_first_entry(&chan->pending_list, | 620 | desc = list_first_entry(&chan->pending_list, |
635 | struct xilinx_vdma_tx_descriptor, node); | 621 | struct xilinx_vdma_tx_descriptor, node); |
622 | tail_desc = list_last_entry(&chan->pending_list, | ||
623 | struct xilinx_vdma_tx_descriptor, node); | ||
624 | |||
625 | tail_segment = list_last_entry(&tail_desc->segments, | ||
626 | struct xilinx_vdma_tx_segment, node); | ||
636 | 627 | ||
637 | /* If it is SG mode and hardware is busy, cannot submit */ | 628 | /* If it is SG mode and hardware is busy, cannot submit */ |
638 | if (chan->has_sg && xilinx_vdma_is_running(chan) && | 629 | if (chan->has_sg && xilinx_vdma_is_running(chan) && |
639 | !xilinx_vdma_is_idle(chan)) { | 630 | !xilinx_vdma_is_idle(chan)) { |
640 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 631 | dev_dbg(chan->dev, "DMA controller still busy\n"); |
641 | goto out_unlock; | 632 | return; |
642 | } | 633 | } |
643 | 634 | ||
644 | /* | 635 | /* |
645 | * If hardware is idle, then all descriptors on the running lists are | 636 | * If hardware is idle, then all descriptors on the running lists are |
646 | * done, start new transfers | 637 | * done, start new transfers |
647 | */ | 638 | */ |
648 | if (chan->has_sg) { | 639 | if (chan->has_sg) |
649 | head = list_first_entry(&desc->segments, | 640 | vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, |
650 | struct xilinx_vdma_tx_segment, node); | 641 | desc->async_tx.phys); |
651 | tail = list_entry(desc->segments.prev, | ||
652 | struct xilinx_vdma_tx_segment, node); | ||
653 | |||
654 | vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys); | ||
655 | } | ||
656 | 642 | ||
657 | /* Configure the hardware using info in the config structure */ | 643 | /* Configure the hardware using info in the config structure */ |
658 | reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | 644 | reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); |
@@ -662,6 +648,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
662 | else | 648 | else |
663 | reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; | 649 | reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; |
664 | 650 | ||
651 | /* Configure channel with the number of frame buffers */ | ||
652 | vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE, | ||
653 | chan->desc_pendingcount); | ||
654 | |||
665 | /* | 655 | /* |
666 | * With SG, start with circular mode, so that BDs can be fetched. | 656 | * With SG, start with circular mode, so that BDs can be fetched. |
667 | * In direct register mode, if not parking, enable circular mode | 657 | * In direct register mode, if not parking, enable circular mode |
@@ -690,16 +680,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
690 | xilinx_vdma_start(chan); | 680 | xilinx_vdma_start(chan); |
691 | 681 | ||
692 | if (chan->err) | 682 | if (chan->err) |
693 | goto out_unlock; | 683 | return; |
694 | 684 | ||
695 | /* Start the transfer */ | 685 | /* Start the transfer */ |
696 | if (chan->has_sg) { | 686 | if (chan->has_sg) { |
697 | vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys); | 687 | vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, |
688 | tail_segment->phys); | ||
698 | } else { | 689 | } else { |
699 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | 690 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
700 | int i = 0; | 691 | int i = 0; |
701 | 692 | ||
702 | list_for_each_entry(segment, &desc->segments, node) { | 693 | list_for_each_entry(desc, &chan->pending_list, node) { |
694 | segment = list_first_entry(&desc->segments, | ||
695 | struct xilinx_vdma_tx_segment, node); | ||
703 | vdma_desc_write(chan, | 696 | vdma_desc_write(chan, |
704 | XILINX_VDMA_REG_START_ADDRESS(i++), | 697 | XILINX_VDMA_REG_START_ADDRESS(i++), |
705 | segment->hw.buf_addr); | 698 | segment->hw.buf_addr); |
@@ -707,7 +700,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
707 | } | 700 | } |
708 | 701 | ||
709 | if (!last) | 702 | if (!last) |
710 | goto out_unlock; | 703 | return; |
711 | 704 | ||
712 | /* HW expects these parameters to be same for one transaction */ | 705 | /* HW expects these parameters to be same for one transaction */ |
713 | vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); | 706 | vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); |
@@ -716,11 +709,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | |||
716 | vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); | 709 | vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); |
717 | } | 710 | } |
718 | 711 | ||
719 | list_del(&desc->node); | 712 | list_splice_tail_init(&chan->pending_list, &chan->active_list); |
720 | chan->active_desc = desc; | 713 | chan->desc_pendingcount = 0; |
721 | |||
722 | out_unlock: | ||
723 | spin_unlock_irqrestore(&chan->lock, flags); | ||
724 | } | 714 | } |
725 | 715 | ||
726 | /** | 716 | /** |
@@ -730,8 +720,11 @@ out_unlock: | |||
730 | static void xilinx_vdma_issue_pending(struct dma_chan *dchan) | 720 | static void xilinx_vdma_issue_pending(struct dma_chan *dchan) |
731 | { | 721 | { |
732 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 722 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); |
723 | unsigned long flags; | ||
733 | 724 | ||
725 | spin_lock_irqsave(&chan->lock, flags); | ||
734 | xilinx_vdma_start_transfer(chan); | 726 | xilinx_vdma_start_transfer(chan); |
727 | spin_unlock_irqrestore(&chan->lock, flags); | ||
735 | } | 728 | } |
736 | 729 | ||
737 | /** | 730 | /** |
@@ -742,24 +735,17 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan) | |||
742 | */ | 735 | */ |
743 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | 736 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) |
744 | { | 737 | { |
745 | struct xilinx_vdma_tx_descriptor *desc; | 738 | struct xilinx_vdma_tx_descriptor *desc, *next; |
746 | unsigned long flags; | ||
747 | 739 | ||
748 | spin_lock_irqsave(&chan->lock, flags); | 740 | /* This function is invoked with the channel lock held */
741 | if (list_empty(&chan->active_list)) | ||
742 | return; | ||
749 | 743 | ||
750 | desc = chan->active_desc; | 744 | list_for_each_entry_safe(desc, next, &chan->active_list, node) { |
751 | if (!desc) { | 745 | list_del(&desc->node); |
752 | dev_dbg(chan->dev, "no running descriptors\n"); | 746 | dma_cookie_complete(&desc->async_tx); |
753 | goto out_unlock; | 747 | list_add_tail(&desc->node, &chan->done_list); |
754 | } | 748 | } |
755 | |||
756 | dma_cookie_complete(&desc->async_tx); | ||
757 | list_add_tail(&desc->node, &chan->done_list); | ||
758 | |||
759 | chan->active_desc = NULL; | ||
760 | |||
761 | out_unlock: | ||
762 | spin_unlock_irqrestore(&chan->lock, flags); | ||
763 | } | 749 | } |
764 | 750 | ||
765 | /** | 751 | /** |
@@ -770,21 +756,17 @@ out_unlock: | |||
770 | */ | 756 | */ |
771 | static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | 757 | static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) |
772 | { | 758 | { |
773 | int loop = XILINX_VDMA_LOOP_COUNT; | 759 | int err; |
774 | u32 tmp; | 760 | u32 tmp; |
775 | 761 | ||
776 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); | 762 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); |
777 | 763 | ||
778 | tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
779 | XILINX_VDMA_DMACR_RESET; | ||
780 | |||
781 | /* Wait for the hardware to finish reset */ | 764 | /* Wait for the hardware to finish reset */ |
782 | do { | 765 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp, |
783 | tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | 766 | !(tmp & XILINX_VDMA_DMACR_RESET), 0, |
784 | XILINX_VDMA_DMACR_RESET; | 767 | XILINX_VDMA_LOOP_COUNT); |
785 | } while (loop-- && tmp); | ||
786 | 768 | ||
787 | if (!loop) { | 769 | if (err) { |
788 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", | 770 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", |
789 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), | 771 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), |
790 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 772 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); |
@@ -793,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | |||
793 | 775 | ||
794 | chan->err = false; | 776 | chan->err = false; |
795 | 777 | ||
796 | return 0; | 778 | return err; |
797 | } | 779 | } |
798 | 780 | ||
799 | /** | 781 | /** |
@@ -870,8 +852,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | |||
870 | } | 852 | } |
871 | 853 | ||
872 | if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { | 854 | if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { |
855 | spin_lock(&chan->lock); | ||
873 | xilinx_vdma_complete_descriptor(chan); | 856 | xilinx_vdma_complete_descriptor(chan); |
874 | xilinx_vdma_start_transfer(chan); | 857 | xilinx_vdma_start_transfer(chan); |
858 | spin_unlock(&chan->lock); | ||
875 | } | 859 | } |
876 | 860 | ||
877 | tasklet_schedule(&chan->tasklet); | 861 | tasklet_schedule(&chan->tasklet); |
@@ -879,6 +863,44 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | |||
879 | } | 863 | } |
880 | 864 | ||
881 | /** | 865 | /** |
866 | * append_desc_queue - Queuing descriptor | ||
867 | * @chan: Driver specific dma channel | ||
868 | * @desc: dma transaction descriptor | ||
869 | */ | ||
870 | static void append_desc_queue(struct xilinx_vdma_chan *chan, | ||
871 | struct xilinx_vdma_tx_descriptor *desc) | ||
872 | { | ||
873 | struct xilinx_vdma_tx_segment *tail_segment; | ||
874 | struct xilinx_vdma_tx_descriptor *tail_desc; | ||
875 | |||
876 | if (list_empty(&chan->pending_list)) | ||
877 | goto append; | ||
878 | |||
879 | /* | ||
880 | * Add the hardware descriptor to the chain of hardware descriptors | ||
881 | * that already exists in memory. | ||
882 | */ | ||
883 | tail_desc = list_last_entry(&chan->pending_list, | ||
884 | struct xilinx_vdma_tx_descriptor, node); | ||
885 | tail_segment = list_last_entry(&tail_desc->segments, | ||
886 | struct xilinx_vdma_tx_segment, node); | ||
887 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | ||
888 | |||
889 | /* | ||
890 | * Add the software descriptor and all children to the list | ||
891 | * of pending transactions | ||
892 | */ | ||
893 | append: | ||
894 | list_add_tail(&desc->node, &chan->pending_list); | ||
895 | chan->desc_pendingcount++; | ||
896 | |||
897 | if (unlikely(chan->desc_pendingcount > chan->num_frms)) { | ||
898 | dev_dbg(chan->dev, "desc pendingcount is too high\n"); | ||
899 | chan->desc_pendingcount = chan->num_frms; | ||
900 | } | ||
901 | } | ||
902 | |||
903 | /** | ||
882 | * xilinx_vdma_tx_submit - Submit DMA transaction | 904 | * xilinx_vdma_tx_submit - Submit DMA transaction |
883 | * @tx: Async transaction descriptor | 905 | * @tx: Async transaction descriptor |
884 | * | 906 | * |
@@ -906,11 +928,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
906 | 928 | ||
907 | cookie = dma_cookie_assign(tx); | 929 | cookie = dma_cookie_assign(tx); |
908 | 930 | ||
909 | /* Append the transaction to the pending transactions queue. */ | 931 | /* Put this transaction onto the tail of the pending queue */ |
910 | list_add_tail(&desc->node, &chan->pending_list); | 932 | append_desc_queue(chan, desc); |
911 | |||
912 | /* Free the allocated desc */ | ||
913 | chan->allocated_desc = NULL; | ||
914 | 933 | ||
915 | spin_unlock_irqrestore(&chan->lock, flags); | 934 | spin_unlock_irqrestore(&chan->lock, flags); |
916 | 935 | ||
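On the client side this submit path sits between two other dmaengine calls: the prep routine builds the descriptor, dmaengine_submit() lands in xilinx_vdma_tx_submit() to assign a cookie and queue the work, and dma_async_issue_pending() finally starts the hardware under the channel lock. A hedged sketch of a caller driving that sequence:

#include <linux/dmaengine.h>

static int demo_queue_frame(struct dma_chan *chan,
                            struct dma_interleaved_template *xt)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);  /* queues via the tx_submit hook */
        if (dma_submit_error(cookie))
                return cookie;

        dma_async_issue_pending(chan);  /* kicks the start-transfer path */
        return 0;
}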
@@ -973,13 +992,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |||
973 | else | 992 | else |
974 | hw->buf_addr = xt->src_start; | 993 | hw->buf_addr = xt->src_start; |
975 | 994 | ||
976 | /* Link the previous next descriptor to current */ | ||
977 | if (!list_empty(&desc->segments)) { | ||
978 | prev = list_last_entry(&desc->segments, | ||
979 | struct xilinx_vdma_tx_segment, node); | ||
980 | prev->hw.next_desc = segment->phys; | ||
981 | } | ||
982 | |||
983 | /* Insert the segment into the descriptor segments list. */ | 995 | /* Insert the segment into the descriptor segments list. */ |
984 | list_add_tail(&segment->node, &desc->segments); | 996 | list_add_tail(&segment->node, &desc->segments); |
985 | 997 | ||
@@ -988,7 +1000,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | |||
988 | /* Link the last hardware descriptor with the first. */ | 1000 | /* Link the last hardware descriptor with the first. */ |
989 | segment = list_first_entry(&desc->segments, | 1001 | segment = list_first_entry(&desc->segments, |
990 | struct xilinx_vdma_tx_segment, node); | 1002 | struct xilinx_vdma_tx_segment, node); |
991 | prev->hw.next_desc = segment->phys; | 1003 | desc->async_tx.phys = segment->phys; |
992 | 1004 | ||
993 | return &desc->async_tx; | 1005 | return &desc->async_tx; |
994 | 1006 | ||
@@ -1127,10 +1139,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1127 | chan->dev = xdev->dev; | 1139 | chan->dev = xdev->dev; |
1128 | chan->xdev = xdev; | 1140 | chan->xdev = xdev; |
1129 | chan->has_sg = xdev->has_sg; | 1141 | chan->has_sg = xdev->has_sg; |
1142 | chan->desc_pendingcount = 0x0; | ||
1130 | 1143 | ||
1131 | spin_lock_init(&chan->lock); | 1144 | spin_lock_init(&chan->lock); |
1132 | INIT_LIST_HEAD(&chan->pending_list); | 1145 | INIT_LIST_HEAD(&chan->pending_list); |
1133 | INIT_LIST_HEAD(&chan->done_list); | 1146 | INIT_LIST_HEAD(&chan->done_list); |
1147 | INIT_LIST_HEAD(&chan->active_list); | ||
1134 | 1148 | ||
1135 | /* Retrieve the channel properties from the device tree */ | 1149 | /* Retrieve the channel properties from the device tree */ |
1136 | has_dre = of_property_read_bool(node, "xlnx,include-dre"); | 1150 | has_dre = of_property_read_bool(node, "xlnx,include-dre"); |
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 26e2688c104e..8f50a4020f6f 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -191,6 +191,7 @@ struct rockchip_spi { | |||
191 | struct sg_table rx_sg; | 191 | struct sg_table rx_sg; |
192 | struct rockchip_spi_dma_data dma_rx; | 192 | struct rockchip_spi_dma_data dma_rx; |
193 | struct rockchip_spi_dma_data dma_tx; | 193 | struct rockchip_spi_dma_data dma_tx; |
194 | struct dma_slave_caps dma_caps; | ||
194 | }; | 195 | }; |
195 | 196 | ||
196 | static inline void spi_enable_chip(struct rockchip_spi *rs, int enable) | 197 | static inline void spi_enable_chip(struct rockchip_spi *rs, int enable) |
@@ -446,7 +447,10 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs) | |||
446 | rxconf.direction = rs->dma_rx.direction; | 447 | rxconf.direction = rs->dma_rx.direction; |
447 | rxconf.src_addr = rs->dma_rx.addr; | 448 | rxconf.src_addr = rs->dma_rx.addr; |
448 | rxconf.src_addr_width = rs->n_bytes; | 449 | rxconf.src_addr_width = rs->n_bytes; |
449 | rxconf.src_maxburst = rs->n_bytes; | 450 | if (rs->dma_caps.max_burst > 4) |
451 | rxconf.src_maxburst = 4; | ||
452 | else | ||
453 | rxconf.src_maxburst = 1; | ||
450 | dmaengine_slave_config(rs->dma_rx.ch, &rxconf); | 454 | dmaengine_slave_config(rs->dma_rx.ch, &rxconf); |
451 | 455 | ||
452 | rxdesc = dmaengine_prep_slave_sg( | 456 | rxdesc = dmaengine_prep_slave_sg( |
@@ -465,7 +469,10 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs) | |||
465 | txconf.direction = rs->dma_tx.direction; | 469 | txconf.direction = rs->dma_tx.direction; |
466 | txconf.dst_addr = rs->dma_tx.addr; | 470 | txconf.dst_addr = rs->dma_tx.addr; |
467 | txconf.dst_addr_width = rs->n_bytes; | 471 | txconf.dst_addr_width = rs->n_bytes; |
468 | txconf.dst_maxburst = rs->n_bytes; | 472 | if (rs->dma_caps.max_burst > 4) |
473 | txconf.dst_maxburst = 4; | ||
474 | else | ||
475 | txconf.dst_maxburst = 1; | ||
469 | dmaengine_slave_config(rs->dma_tx.ch, &txconf); | 476 | dmaengine_slave_config(rs->dma_tx.ch, &txconf); |
470 | 477 | ||
471 | txdesc = dmaengine_prep_slave_sg( | 478 | txdesc = dmaengine_prep_slave_sg( |
@@ -743,6 +750,7 @@ static int rockchip_spi_probe(struct platform_device *pdev) | |||
743 | } | 750 | } |
744 | 751 | ||
745 | if (rs->dma_tx.ch && rs->dma_rx.ch) { | 752 | if (rs->dma_tx.ch && rs->dma_rx.ch) { |
753 | dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps)); | ||
746 | rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR); | 754 | rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR); |
747 | rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR); | 755 | rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR); |
748 | rs->dma_tx.direction = DMA_MEM_TO_DEV; | 756 | rs->dma_tx.direction = DMA_MEM_TO_DEV; |
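This is the consumer side of the new max_burst capability: rockchip-spi asks the dmaengine core what the channel supports before sizing its bursts. The same query in isolation, as a hedged sketch:

#include <linux/dmaengine.h>

static u32 demo_pick_maxburst(struct dma_chan *chan)
{
        struct dma_slave_caps caps;

        if (dma_get_slave_caps(chan, &caps))
                return 1;       /* caps unavailable: stick to single beats */

        /* Mirror the rockchip-spi policy: bursts of 4 when supported. */
        return caps.max_burst > 4 ? 4 : 1;
}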
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 16a1cad30c33..017433712833 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -357,8 +357,8 @@ enum dma_slave_buswidth { | |||
357 | */ | 357 | */ |
358 | struct dma_slave_config { | 358 | struct dma_slave_config { |
359 | enum dma_transfer_direction direction; | 359 | enum dma_transfer_direction direction; |
360 | dma_addr_t src_addr; | 360 | phys_addr_t src_addr; |
361 | dma_addr_t dst_addr; | 361 | phys_addr_t dst_addr; |
362 | enum dma_slave_buswidth src_addr_width; | 362 | enum dma_slave_buswidth src_addr_width; |
363 | enum dma_slave_buswidth dst_addr_width; | 363 | enum dma_slave_buswidth dst_addr_width; |
364 | u32 src_maxburst; | 364 | u32 src_maxburst; |
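The switch to phys_addr_t means a slave client now hands over the peripheral FIFO's physical address and leaves any IOMMU or bus mapping to the DMA controller driver, rather than pre-translating to a dma_addr_t itself. A hedged sketch of a client filling the config under the new contract:

#include <linux/dmaengine.h>

static int demo_slave_config(struct dma_chan *chan, phys_addr_t fifo_phys)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_phys,    /* physical, not pre-mapped */
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 4,
        };

        return dmaengine_slave_config(chan, &cfg);
}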
@@ -401,6 +401,7 @@ enum dma_residue_granularity { | |||
401 | * since the enum dma_transfer_direction is not defined as bits for each | 401 | * since the enum dma_transfer_direction is not defined as bits for each |
402 | * type of direction, the dma controller should fill (1 << <TYPE>) and same | 402 | * type of direction, the dma controller should fill (1 << <TYPE>) and same |
403 | * should be checked by controller as well | 403 | * should be checked by controller as well |
404 | * @max_burst: max burst capability per-transfer | ||
404 | * @cmd_pause: true, if pause and thereby resume is supported | 405 | * @cmd_pause: true, if pause and thereby resume is supported |
405 | * @cmd_terminate: true, if terminate cmd is supported | 406 | * @cmd_terminate: true, if terminate cmd is supported |
406 | * @residue_granularity: granularity of the reported transfer residue | 407 | * @residue_granularity: granularity of the reported transfer residue |
@@ -411,6 +412,7 @@ struct dma_slave_caps { | |||
411 | u32 src_addr_widths; | 412 | u32 src_addr_widths; |
412 | u32 dst_addr_widths; | 413 | u32 dst_addr_widths; |
413 | u32 directions; | 414 | u32 directions; |
415 | u32 max_burst; | ||
414 | bool cmd_pause; | 416 | bool cmd_pause; |
415 | bool cmd_terminate; | 417 | bool cmd_terminate; |
416 | enum dma_residue_granularity residue_granularity; | 418 | enum dma_residue_granularity residue_granularity; |
@@ -654,6 +656,7 @@ struct dma_filter { | |||
654 | * the enum dma_transfer_direction is not defined as bits for | 656 | * the enum dma_transfer_direction is not defined as bits for |
655 | * each type of direction, the dma controller should fill (1 << | 657 | * each type of direction, the dma controller should fill (1 << |
656 | * <TYPE>) and same should be checked by controller as well | 658 | * <TYPE>) and same should be checked by controller as well |
659 | * @max_burst: max burst capability per-transfer | ||
657 | * @residue_granularity: granularity of the transfer residue reported | 660 | * @residue_granularity: granularity of the transfer residue reported |
658 | * by tx_status | 661 | * by tx_status |
659 | * @device_alloc_chan_resources: allocate resources and return the | 662 | * @device_alloc_chan_resources: allocate resources and return the |
@@ -712,6 +715,7 @@ struct dma_device { | |||
712 | u32 src_addr_widths; | 715 | u32 src_addr_widths; |
713 | u32 dst_addr_widths; | 716 | u32 dst_addr_widths; |
714 | u32 directions; | 717 | u32 directions; |
718 | u32 max_burst; | ||
715 | bool descriptor_reuse; | 719 | bool descriptor_reuse; |
716 | enum dma_residue_granularity residue_granularity; | 720 | enum dma_residue_granularity residue_granularity; |
717 | 721 | ||