39 files changed, 3215 insertions, 517 deletions
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt index b0d541042ac6..d9be7a97dff3 100644 --- a/Documentation/acpi/enumeration.txt +++ b/Documentation/acpi/enumeration.txt | |||
@@ -66,6 +66,83 @@ the ACPI device explicitly to acpi_platform_device_ids list defined in | |||
66 | drivers/acpi/acpi_platform.c. This limitation is only for the platform | 66 | drivers/acpi/acpi_platform.c. This limitation is only for the platform |
67 | devices, SPI and I2C devices are created automatically as described below. | 67 | devices, SPI and I2C devices are created automatically as described below. |
68 | 68 | ||
69 | DMA support | ||
70 | ~~~~~~~~~~~ | ||
71 | DMA controllers enumerated via ACPI should be registered in the system to | ||
72 | provide generic access to their resources. For example, a driver that would | ||
73 | like to be accessible to slave devices via the generic API call | ||
74 | dma_request_slave_channel() must register itself at the end of its probe | ||
75 | function like this: | ||
76 | |||
77 | err = devm_acpi_dma_controller_register(dev, xlate_func, dw); | ||
78 | /* Handle the error if it's not just the !CONFIG_ACPI case */ | ||
79 | |||
80 | and implement a custom xlate function if needed (usually acpi_dma_simple_xlate() | ||
81 | is enough), which converts the FixedDMA resource described by struct | ||
82 | acpi_dma_spec into the corresponding DMA channel. A piece of code for that case | ||
83 | could look like: | ||
84 | |||
85 | #ifdef CONFIG_ACPI | ||
86 | struct filter_args { | ||
87 | /* Provide necessary information for the filter_func */ | ||
88 | ... | ||
89 | }; | ||
90 | |||
91 | static bool filter_func(struct dma_chan *chan, void *param) | ||
92 | { | ||
93 | /* Choose the proper channel */ | ||
94 | ... | ||
95 | } | ||
96 | |||
97 | static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, | ||
98 | struct acpi_dma *adma) | ||
99 | { | ||
100 | dma_cap_mask_t cap; | ||
101 | struct filter_args args; | ||
102 | |||
103 | /* Prepare arguments for filter_func */ | ||
104 | ... | ||
105 | return dma_request_channel(cap, filter_func, &args); | ||
106 | } | ||
107 | #else | ||
108 | static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, | ||
109 | struct acpi_dma *adma) | ||
110 | { | ||
111 | return NULL; | ||
112 | } | ||
113 | #endif | ||
114 | |||
115 | dma_request_slave_channel() will call xlate_func() for each registered DMA | ||
116 | controller. In the xlate function the proper channel must be chosen based on | ||
117 | information in struct acpi_dma_spec and the properties of the controller | ||
118 | provided by struct acpi_dma. | ||
119 | |||
120 | Clients must call dma_request_slave_channel() with a string parameter that | ||
121 | corresponds to a specific FixedDMA resource. By default "tx" means the first | ||
122 | entry of the FixedDMA resource array, "rx" means the second entry. The ASL | ||
123 | excerpt below shows a typical layout: | ||
124 | |||
125 | Device (I2C0) | ||
126 | { | ||
127 | ... | ||
128 | Method (_CRS, 0, NotSerialized) | ||
129 | { | ||
130 | Name (DBUF, ResourceTemplate () | ||
131 | { | ||
132 | FixedDMA (0x0018, 0x0004, Width32bit, _Y48) | ||
133 | FixedDMA (0x0019, 0x0005, Width32bit, ) | ||
134 | }) | ||
135 | ... | ||
136 | } | ||
137 | } | ||
138 | |||
139 | So, in this example the FixedDMA descriptor with request line 0x0018 is "tx" | ||
140 | and the next one is "rx". | ||
141 | |||
142 | In more complex cases the client unfortunately needs to call | ||
143 | acpi_dma_request_slave_chan_by_index() directly and therefore choose the | ||
144 | specific FixedDMA resource by its index. | ||
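
For completeness, a minimal client-side sketch (hypothetical probe code; the
two calls are exactly the APIs described above):

	struct dma_chan *tx_chan, *rx_chan;

	/* "tx"/"rx" map to the first/second FixedDMA entry by default */
	tx_chan = dma_request_slave_channel(dev, "tx");
	rx_chan = dma_request_slave_channel(dev, "rx");

	/* ...or pick a specific FixedDMA resource explicitly by index */
	rx_chan = acpi_dma_request_slave_chan_by_index(dev, 1);

	if (!tx_chan || !rx_chan)
		dev_warn(dev, "no DMA channels found, falling back to PIO\n");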
145 | |||
69 | SPI serial bus support | 146 | SPI serial bus support |
70 | ~~~~~~~~~~~~~~~~~~~~~~ | 147 | ~~~~~~~~~~~~~~~~~~~~~~ |
71 | Slave devices behind SPI bus have SpiSerialBus resource attached to them. | 148 | Slave devices behind SPI bus have SpiSerialBus resource attached to them. |
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt index 3c046ee6e8b5..c80e8a3402f0 100644 --- a/Documentation/devicetree/bindings/dma/atmel-dma.txt +++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt | |||
@@ -1,14 +1,39 @@ | |||
1 | * Atmel Direct Memory Access Controller (DMA) | 1 | * Atmel Direct Memory Access Controller (DMA) |
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible: Should be "atmel,<chip>-dma" | 4 | - compatible: Should be "atmel,<chip>-dma". |
5 | - reg: Should contain DMA registers location and length | 5 | - reg: Should contain DMA registers location and length. |
6 | - interrupts: Should contain DMA interrupt | 6 | - interrupts: Should contain DMA interrupt. |
7 | - #dma-cells: Must be <2>, used to represent the number of integer cells in | ||
8 | the dmas property of client devices. | ||
7 | 9 | ||
8 | Examples: | 10 | Example: |
9 | 11 | ||
10 | dma@ffffec00 { | 12 | dma0: dma@ffffec00 { |
11 | compatible = "atmel,at91sam9g45-dma"; | 13 | compatible = "atmel,at91sam9g45-dma"; |
12 | reg = <0xffffec00 0x200>; | 14 | reg = <0xffffec00 0x200>; |
13 | interrupts = <21>; | 15 | interrupts = <21>; |
16 | #dma-cells = <2>; | ||
17 | }; | ||
18 | |||
19 | DMA clients connected to the Atmel DMA controller must use the format | ||
20 | described in the dma.txt file, using a three-cell specifier for each channel: | ||
21 | a phandle plus two integer cells. | ||
22 | The three cells in order are: | ||
23 | |||
24 | 1. A phandle pointing to the DMA controller. | ||
25 | 2. The memory interface (16 most significant bits), the peripheral interface | ||
26 | (16 least significant bits); e.g. a value of 1 selects memory interface 0 and peripheral interface 1. | ||
27 | 3. The peripheral identifier for the hardware handshaking interface. The | ||
28 | identifier can be different for tx and rx. | ||
29 | |||
30 | Example: | ||
31 | |||
32 | i2c0: i2c@f8010000 { | ||
33 | compatible = "atmel,at91sam9x5-i2c"; | ||
34 | reg = <0xf8010000 0x100>; | ||
35 | interrupts = <9 4 6>; | ||
36 | dmas = <&dma0 1 7>, | ||
37 | <&dma0 1 8>; | ||
38 | dma-names = "tx", "rx"; | ||
14 | }; | 39 | }; |
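
As an illustrative sketch (hypothetical probe code, not part of the binding),
a client driver would then request these channels by the names given in
dma-names:

	struct dma_chan *tx = dma_request_slave_channel(&pdev->dev, "tx");
	struct dma_chan *rx = dma_request_slave_channel(&pdev->dev, "rx");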
diff --git a/Documentation/dmatest.txt b/Documentation/dmatest.txt new file mode 100644 index 000000000000..279ac0a8c5b1 --- /dev/null +++ b/Documentation/dmatest.txt | |||
@@ -0,0 +1,81 @@ | |||
1 | DMA Test Guide | ||
2 | ============== | ||
3 | |||
4 | Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
5 | |||
6 | This small document introduces how to test DMA drivers using the dmatest module. | ||
7 | |||
8 | Part 1 - How to build the test module | ||
9 | |||
10 | The menuconfig contains an option that can be found at the following path: | ||
11 | Device Drivers -> DMA Engine support -> DMA Test client | ||
12 | |||
13 | In the configuration file the option is called CONFIG_DMATEST. dmatest can | ||
14 | be built either as a module or into the kernel. Let's consider both cases. | ||
15 | |||
16 | Part 2 - When dmatest is built as a module... | ||
17 | |||
18 | After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest | ||
19 | folder is created and populated with nodes. They mirror the module parameters, | ||
20 | with the addition of a 'run' node that controls the run and stop phases of the test. | ||
21 | |||
22 | Note that in this case the test will not run automatically on load. | ||
23 | |||
24 | Example of usage: | ||
25 | % echo dma0chan0 > /sys/kernel/debug/dmatest/channel | ||
26 | % echo 2000 > /sys/kernel/debug/dmatest/timeout | ||
27 | % echo 1 > /sys/kernel/debug/dmatest/iterations | ||
28 | % echo 1 > /sys/kernel/debug/dmatest/run | ||
29 | |||
30 | Hint: the list of available channels can be extracted by running the following | ||
31 | command: | ||
32 | % ls -1 /sys/class/dma/ | ||
33 | |||
34 | After a while you will start to get messages about the current status or | ||
35 | errors, as in the original code. | ||
36 | |||
37 | Note that running a new test will stop any in-progress test. | ||
38 | |||
39 | The following command returns the actual state of the test: | ||
40 | % cat /sys/kernel/debug/dmatest/run | ||
41 | |||
42 | To wait for the test to finish, the user may poll its state in a busy loop: | ||
43 | |||
44 | % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] | ||
45 | > do | ||
46 | > echo -n "." | ||
47 | > sleep 1 | ||
48 | > done | ||
49 | > echo | ||
50 | |||
51 | Part 3 - When dmatest is built into the kernel... | ||
52 | |||
53 | The module parameters that are supplied on the kernel command line will be used | ||
54 | for the first test run. Once the user gets control, the test can be | ||
55 | interrupted or re-run with the same or different parameters. For details see | ||
56 | the section above, "Part 2 - When dmatest is built as a module..." | ||
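
For example, an illustrative first-run configuration could be appended to the
kernel command line like this (the dmatest. prefix is the standard syntax for
passing parameters to built-in modules):

	dmatest.channel=dma0chan0 dmatest.timeout=2000 dmatest.iterations=5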
57 | |||
58 | In both cases the module parameters are used as initial values for the test case. | ||
59 | You can always check them at run time by running | ||
60 | % grep -H . /sys/module/dmatest/parameters/* | ||
61 | |||
62 | Part 4 - Gathering the test results | ||
63 | |||
64 | The module stores the test results in memory. The gathered | ||
65 | data can be inspected after the test is done. | ||
66 | |||
67 | The special file 'results' in debugfs holds the data gathered for the | ||
68 | in-progress test. The collected messages are printed to the kernel log as well. | ||
69 | |||
70 | Example of output: | ||
71 | % cat /sys/kernel/debug/dmatest/results | ||
72 | dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) | ||
73 | |||
74 | The message format is unified across the different types of errors. A number in | ||
75 | the parentheses represents additional information, e.g. error code, error counter, | ||
76 | or status. | ||
77 | |||
78 | The result of the buffer comparison is stored in a dedicated structure. | ||
79 | |||
80 | Note that the verify result is now accessible only via the 'results' file in | ||
81 | debugfs. | ||
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c index dab9fc014b97..49fd0d501c9b 100644 --- a/arch/arm/mach-omap2/dma.c +++ b/arch/arm/mach-omap2/dma.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/dma-mapping.h> | 30 | #include <linux/dma-mapping.h> |
31 | #include <linux/of.h> | ||
31 | #include <linux/omap-dma.h> | 32 | #include <linux/omap-dma.h> |
32 | 33 | ||
33 | #include "soc.h" | 34 | #include "soc.h" |
@@ -304,6 +305,9 @@ static int __init omap2_system_dma_init(void) | |||
304 | if (res) | 305 | if (res) |
305 | return res; | 306 | return res; |
306 | 307 | ||
308 | if (of_have_populated_dt()) | ||
309 | return res; | ||
310 | |||
307 | pdev = platform_device_register_full(&omap_dma_dev_info); | 311 | pdev = platform_device_register_full(&omap_dma_dev_info); |
308 | if (IS_ERR(pdev)) | 312 | if (IS_ERR(pdev)) |
309 | return PTR_ERR(pdev); | 313 | return PTR_ERR(pdev); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index aeaea32bcfda..e9924898043a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -63,8 +63,6 @@ config INTEL_IOATDMA | |||
63 | depends on PCI && X86 | 63 | depends on PCI && X86 |
64 | select DMA_ENGINE | 64 | select DMA_ENGINE |
65 | select DCA | 65 | select DCA |
66 | select ASYNC_TX_DISABLE_PQ_VAL_DMA | ||
67 | select ASYNC_TX_DISABLE_XOR_VAL_DMA | ||
68 | help | 66 | help |
69 | Enable support for the Intel(R) I/OAT DMA engine present | 67 | Enable support for the Intel(R) I/OAT DMA engine present |
70 | in recent Intel Xeon chipsets. | 68 | in recent Intel Xeon chipsets. |
@@ -174,15 +172,7 @@ config TEGRA20_APB_DMA | |||
174 | This DMA controller transfers data from memory to peripheral fifo | 172 | This DMA controller transfers data from memory to peripheral fifo |
175 | or vice versa. It does not support memory to memory data transfer. | 173 | or vice versa. It does not support memory to memory data transfer. |
176 | 174 | ||
177 | 175 | source "drivers/dma/sh/Kconfig" | |
178 | |||
179 | config SH_DMAE | ||
180 | tristate "Renesas SuperH DMAC support" | ||
181 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) | ||
182 | depends on !SH_DMA_API | ||
183 | select DMA_ENGINE | ||
184 | help | ||
185 | Enable support for the Renesas SuperH DMA controllers. | ||
186 | 176 | ||
187 | config COH901318 | 177 | config COH901318 |
188 | bool "ST-Ericsson COH901318 DMA support" | 178 | bool "ST-Ericsson COH901318 DMA support" |
@@ -328,6 +318,10 @@ config DMA_ENGINE | |||
328 | config DMA_VIRTUAL_CHANNELS | 318 | config DMA_VIRTUAL_CHANNELS |
329 | tristate | 319 | tristate |
330 | 320 | ||
321 | config DMA_ACPI | ||
322 | def_bool y | ||
323 | depends on ACPI | ||
324 | |||
331 | config DMA_OF | 325 | config DMA_OF |
332 | def_bool y | 326 | def_bool y |
333 | depends on OF | 327 | depends on OF |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 488e3ff85b52..a2b0df591f95 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile | |||
@@ -3,6 +3,7 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG | |||
3 | 3 | ||
4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o | 5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o |
6 | obj-$(CONFIG_DMA_ACPI) += acpi-dma.o | ||
6 | obj-$(CONFIG_DMA_OF) += of-dma.o | 7 | obj-$(CONFIG_DMA_OF) += of-dma.o |
7 | 8 | ||
8 | obj-$(CONFIG_NET_DMA) += iovlock.o | 9 | obj-$(CONFIG_NET_DMA) += iovlock.o |
@@ -18,7 +19,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o | |||
18 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 19 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
19 | obj-$(CONFIG_MX3_IPU) += ipu/ | 20 | obj-$(CONFIG_MX3_IPU) += ipu/ |
20 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 21 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
21 | obj-$(CONFIG_SH_DMAE) += sh/ | 22 | obj-$(CONFIG_SH_DMAE_BASE) += sh/ |
22 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o | 23 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o |
23 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 24 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
24 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 25 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c new file mode 100644 index 000000000000..ba6fc62e9651 --- /dev/null +++ b/drivers/dma/acpi-dma.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * ACPI helpers for DMA request / controller | ||
3 | * | ||
4 | * Based on of-dma.c | ||
5 | * | ||
6 | * Copyright (C) 2013, Intel Corporation | ||
7 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/device.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/mutex.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/acpi.h> | ||
20 | #include <linux/acpi_dma.h> | ||
21 | |||
22 | static LIST_HEAD(acpi_dma_list); | ||
23 | static DEFINE_MUTEX(acpi_dma_lock); | ||
24 | |||
25 | /** | ||
26 | * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers | ||
27 | * @dev: struct device of DMA controller | ||
28 | * @acpi_dma_xlate: translation function which converts a dma specifier | ||
29 | * into a dma_chan structure | ||
30 | * @data: pointer to controller specific data to be used by | ||
31 | * translation function | ||
32 | * | ||
33 | * Returns 0 on success or appropriate errno value on error. | ||
34 | * | ||
35 | * Allocated memory should be freed with appropriate acpi_dma_controller_free() | ||
36 | * call. | ||
37 | */ | ||
38 | int acpi_dma_controller_register(struct device *dev, | ||
39 | struct dma_chan *(*acpi_dma_xlate) | ||
40 | (struct acpi_dma_spec *, struct acpi_dma *), | ||
41 | void *data) | ||
42 | { | ||
43 | struct acpi_device *adev; | ||
44 | struct acpi_dma *adma; | ||
45 | |||
46 | if (!dev || !acpi_dma_xlate) | ||
47 | return -EINVAL; | ||
48 | |||
49 | /* Check if the device was enumerated by ACPI */ | ||
50 | if (!ACPI_HANDLE(dev)) | ||
51 | return -EINVAL; | ||
52 | |||
53 | if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) | ||
54 | return -EINVAL; | ||
55 | |||
56 | adma = kzalloc(sizeof(*adma), GFP_KERNEL); | ||
57 | if (!adma) | ||
58 | return -ENOMEM; | ||
59 | |||
60 | adma->dev = dev; | ||
61 | adma->acpi_dma_xlate = acpi_dma_xlate; | ||
62 | adma->data = data; | ||
63 | |||
64 | /* Now queue acpi_dma controller structure in list */ | ||
65 | mutex_lock(&acpi_dma_lock); | ||
66 | list_add_tail(&adma->dma_controllers, &acpi_dma_list); | ||
67 | mutex_unlock(&acpi_dma_lock); | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | EXPORT_SYMBOL_GPL(acpi_dma_controller_register); | ||
72 | |||
73 | /** | ||
74 | * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list | ||
75 | * @dev: struct device of DMA controller | ||
76 | * | ||
77 | * Memory allocated by acpi_dma_controller_register() is freed here. | ||
78 | */ | ||
79 | int acpi_dma_controller_free(struct device *dev) | ||
80 | { | ||
81 | struct acpi_dma *adma; | ||
82 | |||
83 | if (!dev) | ||
84 | return -EINVAL; | ||
85 | |||
86 | mutex_lock(&acpi_dma_lock); | ||
87 | |||
88 | list_for_each_entry(adma, &acpi_dma_list, dma_controllers) | ||
89 | if (adma->dev == dev) { | ||
90 | list_del(&adma->dma_controllers); | ||
91 | mutex_unlock(&acpi_dma_lock); | ||
92 | kfree(adma); | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | mutex_unlock(&acpi_dma_lock); | ||
97 | return -ENODEV; | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(acpi_dma_controller_free); | ||
100 | |||
101 | static void devm_acpi_dma_release(struct device *dev, void *res) | ||
102 | { | ||
103 | acpi_dma_controller_free(dev); | ||
104 | } | ||
105 | |||
106 | /** | ||
107 | * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register() | ||
108 | * @dev: device that is registering this DMA controller | ||
109 | * @acpi_dma_xlate: translation function | ||
110 | * @data: pointer to controller specific data | ||
111 | * | ||
112 | * Managed acpi_dma_controller_register(). DMA controllers registered by this | ||
113 | * function are automatically freed on driver detach. See | ||
114 | * acpi_dma_controller_register() for more information. | ||
115 | */ | ||
116 | int devm_acpi_dma_controller_register(struct device *dev, | ||
117 | struct dma_chan *(*acpi_dma_xlate) | ||
118 | (struct acpi_dma_spec *, struct acpi_dma *), | ||
119 | void *data) | ||
120 | { | ||
121 | void *res; | ||
122 | int ret; | ||
123 | |||
124 | res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); | ||
125 | if (!res) | ||
126 | return -ENOMEM; | ||
127 | |||
128 | ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); | ||
129 | if (ret) { | ||
130 | devres_free(res); | ||
131 | return ret; | ||
132 | } | ||
133 | devres_add(dev, res); | ||
134 | return 0; | ||
135 | } | ||
136 | EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); | ||
137 | |||
138 | /** | ||
139 | * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() | ||
140 | * @dev: device that is unregistering as DMA controller | ||
141 | * Unregister a DMA controller registered with | ||
142 | * devm_acpi_dma_controller_register(). Normally this function will not need to | ||
143 | * be called and the resource management code will ensure that the resource is | ||
144 | * freed. | ||
145 | */ | ||
146 | void devm_acpi_dma_controller_free(struct device *dev) | ||
147 | { | ||
148 | WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL)); | ||
149 | } | ||
150 | EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); | ||
151 | |||
152 | struct acpi_dma_parser_data { | ||
153 | struct acpi_dma_spec dma_spec; | ||
154 | size_t index; | ||
155 | size_t n; | ||
156 | }; | ||
157 | |||
158 | /** | ||
159 | * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier | ||
160 | * @res: struct acpi_resource to get FixedDMA resources from | ||
161 | * @data: pointer to a helper struct acpi_dma_parser_data | ||
162 | */ | ||
163 | static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) | ||
164 | { | ||
165 | struct acpi_dma_parser_data *pdata = data; | ||
166 | |||
167 | if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { | ||
168 | struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma; | ||
169 | |||
170 | if (pdata->n++ == pdata->index) { | ||
171 | pdata->dma_spec.chan_id = dma->channels; | ||
172 | pdata->dma_spec.slave_id = dma->request_lines; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | /* Tell the ACPI core to skip this resource */ | ||
177 | return 1; | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel | ||
182 | * @dev: struct device to get DMA request from | ||
183 | * @index: index of FixedDMA descriptor for @dev | ||
184 | * | ||
185 | * Returns pointer to appropriate dma channel on success or NULL on error. | ||
186 | */ | ||
187 | struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, | ||
188 | size_t index) | ||
189 | { | ||
190 | struct acpi_dma_parser_data pdata; | ||
191 | struct acpi_dma_spec *dma_spec = &pdata.dma_spec; | ||
192 | struct list_head resource_list; | ||
193 | struct acpi_device *adev; | ||
194 | struct acpi_dma *adma; | ||
195 | struct dma_chan *chan = NULL; | ||
196 | |||
197 | /* Check if the device was enumerated by ACPI */ | ||
198 | if (!dev || !ACPI_HANDLE(dev)) | ||
199 | return NULL; | ||
200 | |||
201 | if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) | ||
202 | return NULL; | ||
203 | |||
204 | memset(&pdata, 0, sizeof(pdata)); | ||
205 | pdata.index = index; | ||
206 | |||
207 | /* Initial values for the request line and channel */ | ||
208 | dma_spec->chan_id = -1; | ||
209 | dma_spec->slave_id = -1; | ||
210 | |||
211 | INIT_LIST_HEAD(&resource_list); | ||
212 | acpi_dev_get_resources(adev, &resource_list, | ||
213 | acpi_dma_parse_fixed_dma, &pdata); | ||
214 | acpi_dev_free_resource_list(&resource_list); | ||
215 | |||
216 | if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0) | ||
217 | return NULL; | ||
218 | |||
219 | mutex_lock(&acpi_dma_lock); | ||
220 | |||
221 | list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { | ||
222 | dma_spec->dev = adma->dev; | ||
223 | chan = adma->acpi_dma_xlate(dma_spec, adma); | ||
224 | if (chan) | ||
225 | break; | ||
226 | } | ||
227 | |||
228 | mutex_unlock(&acpi_dma_lock); | ||
229 | return chan; | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); | ||
232 | |||
233 | /** | ||
234 | * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel | ||
235 | * @dev: struct device to get DMA request from | ||
236 | * @name: represents corresponding FixedDMA descriptor for @dev | ||
237 | * | ||
238 | * In order to support both Device Tree and ACPI in a single driver we | ||
239 | * translate the names "tx" and "rx" here based on the most common case where | ||
240 | * the first FixedDMA descriptor is TX and second is RX. | ||
241 | * | ||
242 | * Returns pointer to appropriate dma channel on success or NULL on error. | ||
243 | */ | ||
244 | struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, | ||
245 | const char *name) | ||
246 | { | ||
247 | size_t index; | ||
248 | |||
249 | if (!strcmp(name, "tx")) | ||
250 | index = 0; | ||
251 | else if (!strcmp(name, "rx")) | ||
252 | index = 1; | ||
253 | else | ||
254 | return NULL; | ||
255 | |||
256 | return acpi_dma_request_slave_chan_by_index(dev, index); | ||
257 | } | ||
258 | EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); | ||
259 | |||
260 | /** | ||
261 | * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper | ||
262 | * @dma_spec: pointer to ACPI DMA specifier | ||
263 | * @adma: pointer to ACPI DMA controller data | ||
264 | * | ||
265 | * A simple translation function for ACPI based devices. Passes &struct | ||
266 | * dma_spec to the DMA controller driver provided filter function. Returns | ||
267 | * pointer to the channel if found or %NULL otherwise. | ||
268 | */ | ||
269 | struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, | ||
270 | struct acpi_dma *adma) | ||
271 | { | ||
272 | struct acpi_dma_filter_info *info = adma->data; | ||
273 | |||
274 | if (!info || !info->filter_fn) | ||
275 | return NULL; | ||
276 | |||
277 | return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec); | ||
278 | } | ||
279 | EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate); | ||
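
For reference, a DMA controller driver that relies on acpi_dma_simple_xlate()
would typically prepare a struct acpi_dma_filter_info at probe time and pass
it as the controller data. A minimal sketch, assuming a driver-local filter
function my_dma_filter() (hypothetical name):

	struct acpi_dma_filter_info *filter_info;
	int err;

	filter_info = devm_kzalloc(dev, sizeof(*filter_info), GFP_KERNEL);
	if (!filter_info)
		return -ENOMEM;

	dma_cap_zero(filter_info->dma_cap);
	dma_cap_set(DMA_SLAVE, filter_info->dma_cap);
	filter_info->filter_fn = my_dma_filter;

	err = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
						filter_info);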
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 88cfc61329d2..e923cda930f9 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/of.h> | 25 | #include <linux/of.h> |
26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
27 | #include <linux/of_dma.h> | ||
27 | 28 | ||
28 | #include "at_hdmac_regs.h" | 29 | #include "at_hdmac_regs.h" |
29 | #include "dmaengine.h" | 30 | #include "dmaengine.h" |
@@ -677,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
677 | ctrlb |= ATC_DST_ADDR_MODE_FIXED | 678 | ctrlb |= ATC_DST_ADDR_MODE_FIXED |
678 | | ATC_SRC_ADDR_MODE_INCR | 679 | | ATC_SRC_ADDR_MODE_INCR |
679 | | ATC_FC_MEM2PER | 680 | | ATC_FC_MEM2PER |
680 | | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); | 681 | | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); |
681 | reg = sconfig->dst_addr; | 682 | reg = sconfig->dst_addr; |
682 | for_each_sg(sgl, sg, sg_len, i) { | 683 | for_each_sg(sgl, sg, sg_len, i) { |
683 | struct at_desc *desc; | 684 | struct at_desc *desc; |
@@ -716,7 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
716 | ctrlb |= ATC_DST_ADDR_MODE_INCR | 717 | ctrlb |= ATC_DST_ADDR_MODE_INCR |
717 | | ATC_SRC_ADDR_MODE_FIXED | 718 | | ATC_SRC_ADDR_MODE_FIXED |
718 | | ATC_FC_PER2MEM | 719 | | ATC_FC_PER2MEM |
719 | | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); | 720 | | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); |
720 | 721 | ||
721 | reg = sconfig->src_addr; | 722 | reg = sconfig->src_addr; |
722 | for_each_sg(sgl, sg, sg_len, i) { | 723 | for_each_sg(sgl, sg, sg_len, i) { |
@@ -822,8 +823,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
822 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | 823 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED |
823 | | ATC_SRC_ADDR_MODE_INCR | 824 | | ATC_SRC_ADDR_MODE_INCR |
824 | | ATC_FC_MEM2PER | 825 | | ATC_FC_MEM2PER |
825 | | ATC_SIF(AT_DMA_MEM_IF) | 826 | | ATC_SIF(atchan->mem_if) |
826 | | ATC_DIF(AT_DMA_PER_IF); | 827 | | ATC_DIF(atchan->per_if); |
827 | break; | 828 | break; |
828 | 829 | ||
829 | case DMA_DEV_TO_MEM: | 830 | case DMA_DEV_TO_MEM: |
@@ -833,8 +834,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, | |||
833 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | 834 | desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR |
834 | | ATC_SRC_ADDR_MODE_FIXED | 835 | | ATC_SRC_ADDR_MODE_FIXED |
835 | | ATC_FC_PER2MEM | 836 | | ATC_FC_PER2MEM |
836 | | ATC_SIF(AT_DMA_PER_IF) | 837 | | ATC_SIF(atchan->per_if) |
837 | | ATC_DIF(AT_DMA_MEM_IF); | 838 | | ATC_DIF(atchan->mem_if); |
838 | break; | 839 | break; |
839 | 840 | ||
840 | default: | 841 | default: |
@@ -1188,6 +1189,67 @@ static void atc_free_chan_resources(struct dma_chan *chan) | |||
1188 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 1189 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
1189 | } | 1190 | } |
1190 | 1191 | ||
1192 | #ifdef CONFIG_OF | ||
1193 | static bool at_dma_filter(struct dma_chan *chan, void *slave) | ||
1194 | { | ||
1195 | struct at_dma_slave *atslave = slave; | ||
1196 | |||
1197 | if (atslave->dma_dev == chan->device->dev) { | ||
1198 | chan->private = atslave; | ||
1199 | return true; | ||
1200 | } else { | ||
1201 | return false; | ||
1202 | } | ||
1203 | } | ||
1204 | |||
1205 | static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, | ||
1206 | struct of_dma *of_dma) | ||
1207 | { | ||
1208 | struct dma_chan *chan; | ||
1209 | struct at_dma_chan *atchan; | ||
1210 | struct at_dma_slave *atslave; | ||
1211 | dma_cap_mask_t mask; | ||
1212 | unsigned int per_id; | ||
1213 | struct platform_device *dmac_pdev; | ||
1214 | |||
1215 | if (dma_spec->args_count != 2) | ||
1216 | return NULL; | ||
1217 | |||
1218 | dmac_pdev = of_find_device_by_node(dma_spec->np); | ||
1219 | |||
1220 | dma_cap_zero(mask); | ||
1221 | dma_cap_set(DMA_SLAVE, mask); | ||
1222 | |||
1223 | atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); | ||
1224 | if (!atslave) | ||
1225 | return NULL; | ||
1226 | /* | ||
1227 | * We can fill both SRC_PER and DST_PER, one of these fields will be | ||
1228 | * ignored depending on DMA transfer direction. | ||
1229 | */ | ||
1230 | per_id = dma_spec->args[1]; | ||
1231 | atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW | ||
1232 | | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) | ||
1233 | | ATC_SRC_PER(per_id); | ||
1234 | atslave->dma_dev = &dmac_pdev->dev; | ||
1235 | |||
1236 | chan = dma_request_channel(mask, at_dma_filter, atslave); | ||
1237 | if (!chan) | ||
1238 | return NULL; | ||
1239 | |||
1240 | atchan = to_at_dma_chan(chan); | ||
1241 | atchan->per_if = dma_spec->args[0] & 0xff; | ||
1242 | atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; | ||
1243 | |||
1244 | return chan; | ||
1245 | } | ||
1246 | #else | ||
1247 | static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, | ||
1248 | struct of_dma *of_dma) | ||
1249 | { | ||
1250 | return NULL; | ||
1251 | } | ||
1252 | #endif | ||
1191 | 1253 | ||
1192 | /*-- Module Management -----------------------------------------------*/ | 1254 | /*-- Module Management -----------------------------------------------*/ |
1193 | 1255 | ||
@@ -1342,6 +1404,8 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1342 | for (i = 0; i < plat_dat->nr_channels; i++) { | 1404 | for (i = 0; i < plat_dat->nr_channels; i++) { |
1343 | struct at_dma_chan *atchan = &atdma->chan[i]; | 1405 | struct at_dma_chan *atchan = &atdma->chan[i]; |
1344 | 1406 | ||
1407 | atchan->mem_if = AT_DMA_MEM_IF; | ||
1408 | atchan->per_if = AT_DMA_PER_IF; | ||
1345 | atchan->chan_common.device = &atdma->dma_common; | 1409 | atchan->chan_common.device = &atdma->dma_common; |
1346 | dma_cookie_init(&atchan->chan_common); | 1410 | dma_cookie_init(&atchan->chan_common); |
1347 | list_add_tail(&atchan->chan_common.device_node, | 1411 | list_add_tail(&atchan->chan_common.device_node, |
@@ -1388,8 +1452,25 @@ static int __init at_dma_probe(struct platform_device *pdev) | |||
1388 | 1452 | ||
1389 | dma_async_device_register(&atdma->dma_common); | 1453 | dma_async_device_register(&atdma->dma_common); |
1390 | 1454 | ||
1455 | /* | ||
1456 | * Do not return an error if the dmac node is not present, in order | ||
1457 | * not to break the existing way of requesting a channel with | ||
1458 | * dma_request_channel(). | ||
1459 | */ | ||
1460 | if (pdev->dev.of_node) { | ||
1461 | err = of_dma_controller_register(pdev->dev.of_node, | ||
1462 | at_dma_xlate, atdma); | ||
1463 | if (err) { | ||
1464 | dev_err(&pdev->dev, "could not register of_dma_controller\n"); | ||
1465 | goto err_of_dma_controller_register; | ||
1466 | } | ||
1467 | } | ||
1468 | |||
1391 | return 0; | 1469 | return 0; |
1392 | 1470 | ||
1471 | err_of_dma_controller_register: | ||
1472 | dma_async_device_unregister(&atdma->dma_common); | ||
1473 | dma_pool_destroy(atdma->dma_desc_pool); | ||
1393 | err_pool_create: | 1474 | err_pool_create: |
1394 | platform_set_drvdata(pdev, NULL); | 1475 | platform_set_drvdata(pdev, NULL); |
1395 | free_irq(platform_get_irq(pdev, 0), atdma); | 1476 | free_irq(platform_get_irq(pdev, 0), atdma); |
@@ -1406,7 +1487,7 @@ err_kfree: | |||
1406 | return err; | 1487 | return err; |
1407 | } | 1488 | } |
1408 | 1489 | ||
1409 | static int __exit at_dma_remove(struct platform_device *pdev) | 1490 | static int at_dma_remove(struct platform_device *pdev) |
1410 | { | 1491 | { |
1411 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1492 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1412 | struct dma_chan *chan, *_chan; | 1493 | struct dma_chan *chan, *_chan; |
@@ -1564,7 +1645,7 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = { | |||
1564 | }; | 1645 | }; |
1565 | 1646 | ||
1566 | static struct platform_driver at_dma_driver = { | 1647 | static struct platform_driver at_dma_driver = { |
1567 | .remove = __exit_p(at_dma_remove), | 1648 | .remove = at_dma_remove, |
1568 | .shutdown = at_dma_shutdown, | 1649 | .shutdown = at_dma_shutdown, |
1569 | .id_table = atdma_devtypes, | 1650 | .id_table = atdma_devtypes, |
1570 | .driver = { | 1651 | .driver = { |
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 0eb3c1388667..c604d26fd4d3 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h | |||
@@ -220,6 +220,8 @@ enum atc_status { | |||
220 | * @device: parent device | 220 | * @device: parent device |
221 | * @ch_regs: memory mapped register base | 221 | * @ch_regs: memory mapped register base |
222 | * @mask: channel index in a mask | 222 | * @mask: channel index in a mask |
223 | * @per_if: peripheral interface | ||
224 | * @mem_if: memory interface | ||
223 | * @status: transmit status information from irq/prep* functions | 225 | * @status: transmit status information from irq/prep* functions |
224 | * to tasklet (use atomic operations) | 226 | * to tasklet (use atomic operations) |
225 | * @tasklet: bottom half to finish transaction work | 227 | * @tasklet: bottom half to finish transaction work |
@@ -238,6 +240,8 @@ struct at_dma_chan { | |||
238 | struct at_dma *device; | 240 | struct at_dma *device; |
239 | void __iomem *ch_regs; | 241 | void __iomem *ch_regs; |
240 | u8 mask; | 242 | u8 mask; |
243 | u8 per_if; | ||
244 | u8 mem_if; | ||
241 | unsigned long status; | 245 | unsigned long status; |
242 | struct tasklet_struct tasklet; | 246 | struct tasklet_struct tasklet; |
243 | u32 save_cfg; | 247 | u32 save_cfg; |
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 797940e532ff..3b23061cdb41 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
@@ -2748,7 +2748,7 @@ static int __init coh901318_probe(struct platform_device *pdev) | |||
2748 | return err; | 2748 | return err; |
2749 | } | 2749 | } |
2750 | 2750 | ||
2751 | static int __exit coh901318_remove(struct platform_device *pdev) | 2751 | static int coh901318_remove(struct platform_device *pdev) |
2752 | { | 2752 | { |
2753 | struct coh901318_base *base = platform_get_drvdata(pdev); | 2753 | struct coh901318_base *base = platform_get_drvdata(pdev); |
2754 | 2754 | ||
@@ -2760,7 +2760,7 @@ static int __exit coh901318_remove(struct platform_device *pdev) | |||
2760 | 2760 | ||
2761 | 2761 | ||
2762 | static struct platform_driver coh901318_driver = { | 2762 | static struct platform_driver coh901318_driver = { |
2763 | .remove = __exit_p(coh901318_remove), | 2763 | .remove = coh901318_remove, |
2764 | .driver = { | 2764 | .driver = { |
2765 | .name = "coh901318", | 2765 | .name = "coh901318", |
2766 | }, | 2766 | }, |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b2728d6ba2fd..93f7992bee5c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -62,6 +62,8 @@ | |||
62 | #include <linux/rculist.h> | 62 | #include <linux/rculist.h> |
63 | #include <linux/idr.h> | 63 | #include <linux/idr.h> |
64 | #include <linux/slab.h> | 64 | #include <linux/slab.h> |
65 | #include <linux/acpi.h> | ||
66 | #include <linux/acpi_dma.h> | ||
65 | #include <linux/of_dma.h> | 67 | #include <linux/of_dma.h> |
66 | 68 | ||
67 | static DEFINE_MUTEX(dma_list_mutex); | 69 | static DEFINE_MUTEX(dma_list_mutex); |
@@ -174,7 +176,8 @@ static struct class dma_devclass = { | |||
174 | #define dma_device_satisfies_mask(device, mask) \ | 176 | #define dma_device_satisfies_mask(device, mask) \ |
175 | __dma_device_satisfies_mask((device), &(mask)) | 177 | __dma_device_satisfies_mask((device), &(mask)) |
176 | static int | 178 | static int |
177 | __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) | 179 | __dma_device_satisfies_mask(struct dma_device *device, |
180 | const dma_cap_mask_t *want) | ||
178 | { | 181 | { |
179 | dma_cap_mask_t has; | 182 | dma_cap_mask_t has; |
180 | 183 | ||
@@ -463,7 +466,8 @@ static void dma_channel_rebalance(void) | |||
463 | } | 466 | } |
464 | } | 467 | } |
465 | 468 | ||
466 | static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, | 469 | static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, |
470 | struct dma_device *dev, | ||
467 | dma_filter_fn fn, void *fn_param) | 471 | dma_filter_fn fn, void *fn_param) |
468 | { | 472 | { |
469 | struct dma_chan *chan; | 473 | struct dma_chan *chan; |
@@ -505,7 +509,8 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic | |||
505 | * @fn: optional callback to disposition available channels | 509 | * @fn: optional callback to disposition available channels |
506 | * @fn_param: opaque parameter to pass to dma_filter_fn | 510 | * @fn_param: opaque parameter to pass to dma_filter_fn |
507 | */ | 511 | */ |
508 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) | 512 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
513 | dma_filter_fn fn, void *fn_param) | ||
509 | { | 514 | { |
510 | struct dma_device *device, *_d; | 515 | struct dma_device *device, *_d; |
511 | struct dma_chan *chan = NULL; | 516 | struct dma_chan *chan = NULL; |
@@ -555,12 +560,16 @@ EXPORT_SYMBOL_GPL(__dma_request_channel); | |||
555 | * @dev: pointer to client device structure | 560 | * @dev: pointer to client device structure |
556 | * @name: slave channel name | 561 | * @name: slave channel name |
557 | */ | 562 | */ |
558 | struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) | 563 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) |
559 | { | 564 | { |
560 | /* If device-tree is present get slave info from here */ | 565 | /* If device-tree is present get slave info from here */ |
561 | if (dev->of_node) | 566 | if (dev->of_node) |
562 | return of_dma_request_slave_channel(dev->of_node, name); | 567 | return of_dma_request_slave_channel(dev->of_node, name); |
563 | 568 | ||
569 | /* If device was enumerated by ACPI get slave info from here */ | ||
570 | if (ACPI_HANDLE(dev)) | ||
571 | return acpi_dma_request_slave_chan_by_name(dev, name); | ||
572 | |||
564 | return NULL; | 573 | return NULL; |
565 | } | 574 | } |
566 | EXPORT_SYMBOL_GPL(dma_request_slave_channel); | 575 | EXPORT_SYMBOL_GPL(dma_request_slave_channel); |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a2c8904b63ea..d8ce4ecfef18 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * DMA Engine test module | 2 | * DMA Engine test module |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Atmel Corporation | 4 | * Copyright (C) 2007 Atmel Corporation |
5 | * Copyright (C) 2013 Intel Corporation | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
@@ -18,6 +19,10 @@ | |||
18 | #include <linux/random.h> | 19 | #include <linux/random.h> |
19 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
20 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
22 | #include <linux/ctype.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/uaccess.h> | ||
25 | #include <linux/seq_file.h> | ||
21 | 26 | ||
22 | static unsigned int test_buf_size = 16384; | 27 | static unsigned int test_buf_size = 16384; |
23 | module_param(test_buf_size, uint, S_IRUGO); | 28 | module_param(test_buf_size, uint, S_IRUGO); |
@@ -61,6 +66,9 @@ module_param(timeout, uint, S_IRUGO); | |||
61 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | 66 | MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " |
62 | "Pass -1 for infinite timeout"); | 67 | "Pass -1 for infinite timeout"); |
63 | 68 | ||
69 | /* Maximum amount of mismatched bytes in buffer to print */ | ||
70 | #define MAX_ERROR_COUNT 32 | ||
71 | |||
64 | /* | 72 | /* |
65 | * Initialization patterns. All bytes in the source buffer has bit 7 | 73 | * Initialization patterns. All bytes in the source buffer has bit 7 |
66 | * set, all bytes in the destination buffer has bit 7 cleared. | 74 | * set, all bytes in the destination buffer has bit 7 cleared. |
@@ -78,13 +86,65 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " | |||
78 | #define PATTERN_OVERWRITE 0x20 | 86 | #define PATTERN_OVERWRITE 0x20 |
79 | #define PATTERN_COUNT_MASK 0x1f | 87 | #define PATTERN_COUNT_MASK 0x1f |
80 | 88 | ||
89 | enum dmatest_error_type { | ||
90 | DMATEST_ET_OK, | ||
91 | DMATEST_ET_MAP_SRC, | ||
92 | DMATEST_ET_MAP_DST, | ||
93 | DMATEST_ET_PREP, | ||
94 | DMATEST_ET_SUBMIT, | ||
95 | DMATEST_ET_TIMEOUT, | ||
96 | DMATEST_ET_DMA_ERROR, | ||
97 | DMATEST_ET_DMA_IN_PROGRESS, | ||
98 | DMATEST_ET_VERIFY, | ||
99 | DMATEST_ET_VERIFY_BUF, | ||
100 | }; | ||
101 | |||
102 | struct dmatest_verify_buffer { | ||
103 | unsigned int index; | ||
104 | u8 expected; | ||
105 | u8 actual; | ||
106 | }; | ||
107 | |||
108 | struct dmatest_verify_result { | ||
109 | unsigned int error_count; | ||
110 | struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; | ||
111 | u8 pattern; | ||
112 | bool is_srcbuf; | ||
113 | }; | ||
114 | |||
115 | struct dmatest_thread_result { | ||
116 | struct list_head node; | ||
117 | unsigned int n; | ||
118 | unsigned int src_off; | ||
119 | unsigned int dst_off; | ||
120 | unsigned int len; | ||
121 | enum dmatest_error_type type; | ||
122 | union { | ||
123 | unsigned long data; | ||
124 | dma_cookie_t cookie; | ||
125 | enum dma_status status; | ||
126 | int error; | ||
127 | struct dmatest_verify_result *vr; | ||
128 | }; | ||
129 | }; | ||
130 | |||
131 | struct dmatest_result { | ||
132 | struct list_head node; | ||
133 | char *name; | ||
134 | struct list_head results; | ||
135 | }; | ||
136 | |||
137 | struct dmatest_info; | ||
138 | |||
81 | struct dmatest_thread { | 139 | struct dmatest_thread { |
82 | struct list_head node; | 140 | struct list_head node; |
141 | struct dmatest_info *info; | ||
83 | struct task_struct *task; | 142 | struct task_struct *task; |
84 | struct dma_chan *chan; | 143 | struct dma_chan *chan; |
85 | u8 **srcs; | 144 | u8 **srcs; |
86 | u8 **dsts; | 145 | u8 **dsts; |
87 | enum dma_transaction_type type; | 146 | enum dma_transaction_type type; |
147 | bool done; | ||
88 | }; | 148 | }; |
89 | 149 | ||
90 | struct dmatest_chan { | 150 | struct dmatest_chan { |
@@ -93,25 +153,69 @@ struct dmatest_chan { | |||
93 | struct list_head threads; | 153 | struct list_head threads; |
94 | }; | 154 | }; |
95 | 155 | ||
96 | /* | 156 | /** |
97 | * These are protected by dma_list_mutex since they're only used by | 157 | * struct dmatest_params - test parameters. |
98 | * the DMA filter function callback | 158 | * @buf_size: size of the memcpy test buffer |
159 | * @channel: bus ID of the channel to test | ||
160 | * @device: bus ID of the DMA Engine to test | ||
161 | * @threads_per_chan: number of threads to start per channel | ||
162 | * @max_channels: maximum number of channels to use | ||
163 | * @iterations: iterations before stopping test | ||
164 | * @xor_sources: number of xor source buffers | ||
165 | * @pq_sources: number of p+q source buffers | ||
166 | * @timeout: transfer timeout in msec, -1 for infinite timeout | ||
99 | */ | 167 | */ |
100 | static LIST_HEAD(dmatest_channels); | 168 | struct dmatest_params { |
101 | static unsigned int nr_channels; | 169 | unsigned int buf_size; |
170 | char channel[20]; | ||
171 | char device[20]; | ||
172 | unsigned int threads_per_chan; | ||
173 | unsigned int max_channels; | ||
174 | unsigned int iterations; | ||
175 | unsigned int xor_sources; | ||
176 | unsigned int pq_sources; | ||
177 | int timeout; | ||
178 | }; | ||
102 | 179 | ||
103 | static bool dmatest_match_channel(struct dma_chan *chan) | 180 | /** |
181 | * struct dmatest_info - test information. | ||
182 | * @params: test parameters | ||
183 | * @lock: access protection to the fields of this structure | ||
184 | */ | ||
185 | struct dmatest_info { | ||
186 | /* Test parameters */ | ||
187 | struct dmatest_params params; | ||
188 | |||
189 | /* Internal state */ | ||
190 | struct list_head channels; | ||
191 | unsigned int nr_channels; | ||
192 | struct mutex lock; | ||
193 | |||
194 | /* debugfs related stuff */ | ||
195 | struct dentry *root; | ||
196 | struct dmatest_params dbgfs_params; | ||
197 | |||
198 | /* Test results */ | ||
199 | struct list_head results; | ||
200 | struct mutex results_lock; | ||
201 | }; | ||
202 | |||
203 | static struct dmatest_info test_info; | ||
204 | |||
205 | static bool dmatest_match_channel(struct dmatest_params *params, | ||
206 | struct dma_chan *chan) | ||
104 | { | 207 | { |
105 | if (test_channel[0] == '\0') | 208 | if (params->channel[0] == '\0') |
106 | return true; | 209 | return true; |
107 | return strcmp(dma_chan_name(chan), test_channel) == 0; | 210 | return strcmp(dma_chan_name(chan), params->channel) == 0; |
108 | } | 211 | } |
109 | 212 | ||
110 | static bool dmatest_match_device(struct dma_device *device) | 213 | static bool dmatest_match_device(struct dmatest_params *params, |
214 | struct dma_device *device) | ||
111 | { | 215 | { |
112 | if (test_device[0] == '\0') | 216 | if (params->device[0] == '\0') |
113 | return true; | 217 | return true; |
114 | return strcmp(dev_name(device->dev), test_device) == 0; | 218 | return strcmp(dev_name(device->dev), params->device) == 0; |
115 | } | 219 | } |
116 | 220 | ||
117 | static unsigned long dmatest_random(void) | 221 | static unsigned long dmatest_random(void) |
@@ -122,7 +226,8 @@ static unsigned long dmatest_random(void) | |||
122 | return buf; | 226 | return buf; |
123 | } | 227 | } |
124 | 228 | ||
125 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) | 229 | static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, |
230 | unsigned int buf_size) | ||
126 | { | 231 | { |
127 | unsigned int i; | 232 | unsigned int i; |
128 | u8 *buf; | 233 | u8 *buf; |
@@ -133,13 +238,14 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) | |||
133 | for ( ; i < start + len; i++) | 238 | for ( ; i < start + len; i++) |
134 | buf[i] = PATTERN_SRC | PATTERN_COPY | 239 | buf[i] = PATTERN_SRC | PATTERN_COPY |
135 | | (~i & PATTERN_COUNT_MASK); | 240 | | (~i & PATTERN_COUNT_MASK); |
136 | for ( ; i < test_buf_size; i++) | 241 | for ( ; i < buf_size; i++) |
137 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); | 242 | buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); |
138 | buf++; | 243 | buf++; |
139 | } | 244 | } |
140 | } | 245 | } |
141 | 246 | ||
142 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) | 247 | static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, |
248 | unsigned int buf_size) | ||
143 | { | 249 | { |
144 | unsigned int i; | 250 | unsigned int i; |
145 | u8 *buf; | 251 | u8 *buf; |
@@ -150,40 +256,14 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) | |||
150 | for ( ; i < start + len; i++) | 256 | for ( ; i < start + len; i++) |
151 | buf[i] = PATTERN_DST | PATTERN_OVERWRITE | 257 | buf[i] = PATTERN_DST | PATTERN_OVERWRITE |
152 | | (~i & PATTERN_COUNT_MASK); | 258 | | (~i & PATTERN_COUNT_MASK); |
153 | for ( ; i < test_buf_size; i++) | 259 | for ( ; i < buf_size; i++) |
154 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); | 260 | buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); |
155 | } | 261 | } |
156 | } | 262 | } |
157 | 263 | ||
158 | static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, | 264 | static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, |
159 | unsigned int counter, bool is_srcbuf) | 265 | unsigned int start, unsigned int end, unsigned int counter, |
160 | { | 266 | u8 pattern, bool is_srcbuf) |
161 | u8 diff = actual ^ pattern; | ||
162 | u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); | ||
163 | const char *thread_name = current->comm; | ||
164 | |||
165 | if (is_srcbuf) | ||
166 | pr_warning("%s: srcbuf[0x%x] overwritten!" | ||
167 | " Expected %02x, got %02x\n", | ||
168 | thread_name, index, expected, actual); | ||
169 | else if ((pattern & PATTERN_COPY) | ||
170 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
171 | pr_warning("%s: dstbuf[0x%x] not copied!" | ||
172 | " Expected %02x, got %02x\n", | ||
173 | thread_name, index, expected, actual); | ||
174 | else if (diff & PATTERN_SRC) | ||
175 | pr_warning("%s: dstbuf[0x%x] was copied!" | ||
176 | " Expected %02x, got %02x\n", | ||
177 | thread_name, index, expected, actual); | ||
178 | else | ||
179 | pr_warning("%s: dstbuf[0x%x] mismatch!" | ||
180 | " Expected %02x, got %02x\n", | ||
181 | thread_name, index, expected, actual); | ||
182 | } | ||
183 | |||
184 | static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | ||
185 | unsigned int end, unsigned int counter, u8 pattern, | ||
186 | bool is_srcbuf) | ||
187 | { | 267 | { |
188 | unsigned int i; | 268 | unsigned int i; |
189 | unsigned int error_count = 0; | 269 | unsigned int error_count = 0; |
@@ -191,6 +271,7 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | |||
191 | u8 expected; | 271 | u8 expected; |
192 | u8 *buf; | 272 | u8 *buf; |
193 | unsigned int counter_orig = counter; | 273 | unsigned int counter_orig = counter; |
274 | struct dmatest_verify_buffer *vb; | ||
194 | 275 | ||
195 | for (; (buf = *bufs); bufs++) { | 276 | for (; (buf = *bufs); bufs++) { |
196 | counter = counter_orig; | 277 | counter = counter_orig; |
@@ -198,18 +279,21 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | |||
198 | actual = buf[i]; | 279 | actual = buf[i]; |
199 | expected = pattern | (~counter & PATTERN_COUNT_MASK); | 280 | expected = pattern | (~counter & PATTERN_COUNT_MASK); |
200 | if (actual != expected) { | 281 | if (actual != expected) { |
201 | if (error_count < 32) | 282 | if (error_count < MAX_ERROR_COUNT && vr) { |
202 | dmatest_mismatch(actual, pattern, i, | 283 | vb = &vr->data[error_count]; |
203 | counter, is_srcbuf); | 284 | vb->index = i; |
285 | vb->expected = expected; | ||
286 | vb->actual = actual; | ||
287 | } | ||
204 | error_count++; | 288 | error_count++; |
205 | } | 289 | } |
206 | counter++; | 290 | counter++; |
207 | } | 291 | } |
208 | } | 292 | } |
209 | 293 | ||
210 | if (error_count > 32) | 294 | if (error_count > MAX_ERROR_COUNT) |
211 | pr_warning("%s: %u errors suppressed\n", | 295 | pr_warning("%s: %u errors suppressed\n", |
212 | current->comm, error_count - 32); | 296 | current->comm, error_count - MAX_ERROR_COUNT); |
213 | 297 | ||
214 | return error_count; | 298 | return error_count; |
215 | } | 299 | } |
@@ -249,6 +333,170 @@ static unsigned int min_odd(unsigned int x, unsigned int y) | |||
249 | return val % 2 ? val : val - 1; | 333 | return val % 2 ? val : val - 1; |
250 | } | 334 | } |
251 | 335 | ||
336 | static char *verify_result_get_one(struct dmatest_verify_result *vr, | ||
337 | unsigned int i) | ||
338 | { | ||
339 | struct dmatest_verify_buffer *vb = &vr->data[i]; | ||
340 | u8 diff = vb->actual ^ vr->pattern; | ||
341 | static char buf[512]; | ||
342 | char *msg; | ||
343 | |||
344 | if (vr->is_srcbuf) | ||
345 | msg = "srcbuf overwritten!"; | ||
346 | else if ((vr->pattern & PATTERN_COPY) | ||
347 | && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) | ||
348 | msg = "dstbuf not copied!"; | ||
349 | else if (diff & PATTERN_SRC) | ||
350 | msg = "dstbuf was copied!"; | ||
351 | else | ||
352 | msg = "dstbuf mismatch!"; | ||
353 | |||
354 | snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, | ||
355 | vb->index, vb->expected, vb->actual); | ||
356 | |||
357 | return buf; | ||
358 | } | ||
359 | |||
360 | static char *thread_result_get(const char *name, | ||
361 | struct dmatest_thread_result *tr) | ||
362 | { | ||
363 | static const char * const messages[] = { | ||
364 | [DMATEST_ET_OK] = "No errors", | ||
365 | [DMATEST_ET_MAP_SRC] = "src mapping error", | ||
366 | [DMATEST_ET_MAP_DST] = "dst mapping error", | ||
367 | [DMATEST_ET_PREP] = "prep error", | ||
368 | [DMATEST_ET_SUBMIT] = "submit error", | ||
369 | [DMATEST_ET_TIMEOUT] = "test timed out", | ||
370 | [DMATEST_ET_DMA_ERROR] = | ||
371 | "got completion callback (DMA_ERROR)", | ||
372 | [DMATEST_ET_DMA_IN_PROGRESS] = | ||
373 | "got completion callback (DMA_IN_PROGRESS)", | ||
374 | [DMATEST_ET_VERIFY] = "errors", | ||
375 | [DMATEST_ET_VERIFY_BUF] = "verify errors", | ||
376 | }; | ||
377 | static char buf[512]; | ||
378 | |||
379 | snprintf(buf, sizeof(buf) - 1, | ||
380 | "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", | ||
381 | name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, | ||
382 | tr->len, tr->data); | ||
383 | |||
384 | return buf; | ||
385 | } | ||
386 | |||
387 | static int thread_result_add(struct dmatest_info *info, | ||
388 | struct dmatest_result *r, enum dmatest_error_type type, | ||
389 | unsigned int n, unsigned int src_off, unsigned int dst_off, | ||
390 | unsigned int len, unsigned long data) | ||
391 | { | ||
392 | struct dmatest_thread_result *tr; | ||
393 | |||
394 | tr = kzalloc(sizeof(*tr), GFP_KERNEL); | ||
395 | if (!tr) | ||
396 | return -ENOMEM; | ||
397 | |||
398 | tr->type = type; | ||
399 | tr->n = n; | ||
400 | tr->src_off = src_off; | ||
401 | tr->dst_off = dst_off; | ||
402 | tr->len = len; | ||
403 | tr->data = data; | ||
404 | |||
405 | mutex_lock(&info->results_lock); | ||
406 | list_add_tail(&tr->node, &r->results); | ||
407 | mutex_unlock(&info->results_lock); | ||
408 | |||
409 | pr_warn("%s\n", thread_result_get(r->name, tr)); | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | static unsigned int verify_result_add(struct dmatest_info *info, | ||
414 | struct dmatest_result *r, unsigned int n, | ||
415 | unsigned int src_off, unsigned int dst_off, unsigned int len, | ||
416 | u8 **bufs, int whence, unsigned int counter, u8 pattern, | ||
417 | bool is_srcbuf) | ||
418 | { | ||
419 | struct dmatest_verify_result *vr; | ||
420 | unsigned int error_count; | ||
421 | unsigned int buf_off = is_srcbuf ? src_off : dst_off; | ||
422 | unsigned int start, end; | ||
423 | |||
424 | if (whence < 0) { | ||
425 | start = 0; | ||
426 | end = buf_off; | ||
427 | } else if (whence > 0) { | ||
428 | start = buf_off + len; | ||
429 | end = info->params.buf_size; | ||
430 | } else { | ||
431 | start = buf_off; | ||
432 | end = buf_off + len; | ||
433 | } | ||
434 | |||
435 | vr = kmalloc(sizeof(*vr), GFP_KERNEL); | ||
436 | if (!vr) { | ||
437 | pr_warn("dmatest: No memory to store verify result\n"); | ||
438 | return dmatest_verify(NULL, bufs, start, end, counter, pattern, | ||
439 | is_srcbuf); | ||
440 | } | ||
441 | |||
442 | vr->pattern = pattern; | ||
443 | vr->is_srcbuf = is_srcbuf; | ||
444 | |||
445 | error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, | ||
446 | is_srcbuf); | ||
447 | if (error_count) { | ||
448 | vr->error_count = error_count; | ||
449 | thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, | ||
450 | dst_off, len, (unsigned long)vr); | ||
451 | return error_count; | ||
452 | } | ||
453 | |||
454 | kfree(vr); | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static void result_free(struct dmatest_info *info, const char *name) | ||
459 | { | ||
460 | struct dmatest_result *r, *_r; | ||
461 | |||
462 | mutex_lock(&info->results_lock); | ||
463 | list_for_each_entry_safe(r, _r, &info->results, node) { | ||
464 | struct dmatest_thread_result *tr, *_tr; | ||
465 | |||
466 | if (name && strcmp(r->name, name)) | ||
467 | continue; | ||
468 | |||
469 | list_for_each_entry_safe(tr, _tr, &r->results, node) { | ||
470 | if (tr->type == DMATEST_ET_VERIFY_BUF) | ||
471 | kfree(tr->vr); | ||
472 | list_del(&tr->node); | ||
473 | kfree(tr); | ||
474 | } | ||
475 | |||
476 | kfree(r->name); | ||
477 | list_del(&r->node); | ||
478 | kfree(r); | ||
479 | } | ||
480 | |||
481 | mutex_unlock(&info->results_lock); | ||
482 | } | ||
483 | |||
484 | static struct dmatest_result *result_init(struct dmatest_info *info, | ||
485 | const char *name) | ||
486 | { | ||
487 | struct dmatest_result *r; | ||
488 | |||
489 | r = kzalloc(sizeof(*r), GFP_KERNEL); | ||
490 | if (r) { | ||
491 | r->name = kstrdup(name, GFP_KERNEL); | ||
492 | INIT_LIST_HEAD(&r->results); | ||
493 | mutex_lock(&info->results_lock); | ||
494 | list_add_tail(&r->node, &info->results); | ||
495 | mutex_unlock(&info->results_lock); | ||
496 | } | ||
497 | return r; | ||
498 | } | ||
499 | |||
252 | /* | 500 | /* |
253 | * This function repeatedly tests DMA transfers of various lengths and | 501 | * This function repeatedly tests DMA transfers of various lengths and |
254 | * offsets for a given operation type until it is told to exit by | 502 | * offsets for a given operation type until it is told to exit by |
@@ -268,6 +516,8 @@ static int dmatest_func(void *data) | |||
268 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); | 516 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); |
269 | struct dmatest_thread *thread = data; | 517 | struct dmatest_thread *thread = data; |
270 | struct dmatest_done done = { .wait = &done_wait }; | 518 | struct dmatest_done done = { .wait = &done_wait }; |
519 | struct dmatest_info *info; | ||
520 | struct dmatest_params *params; | ||
271 | struct dma_chan *chan; | 521 | struct dma_chan *chan; |
272 | struct dma_device *dev; | 522 | struct dma_device *dev; |
273 | const char *thread_name; | 523 | const char *thread_name; |
@@ -278,11 +528,12 @@ static int dmatest_func(void *data) | |||
278 | dma_cookie_t cookie; | 528 | dma_cookie_t cookie; |
279 | enum dma_status status; | 529 | enum dma_status status; |
280 | enum dma_ctrl_flags flags; | 530 | enum dma_ctrl_flags flags; |
281 | u8 pq_coefs[pq_sources + 1]; | 531 | u8 *pq_coefs = NULL; |
282 | int ret; | 532 | int ret; |
283 | int src_cnt; | 533 | int src_cnt; |
284 | int dst_cnt; | 534 | int dst_cnt; |
285 | int i; | 535 | int i; |
536 | struct dmatest_result *result; | ||
286 | 537 | ||
287 | thread_name = current->comm; | 538 | thread_name = current->comm; |
288 | set_freezable(); | 539 | set_freezable(); |
@@ -290,28 +541,39 @@ static int dmatest_func(void *data) | |||
290 | ret = -ENOMEM; | 541 | ret = -ENOMEM; |
291 | 542 | ||
292 | smp_rmb(); | 543 | smp_rmb(); |
544 | info = thread->info; | ||
545 | params = &info->params; | ||
293 | chan = thread->chan; | 546 | chan = thread->chan; |
294 | dev = chan->device; | 547 | dev = chan->device; |
295 | if (thread->type == DMA_MEMCPY) | 548 | if (thread->type == DMA_MEMCPY) |
296 | src_cnt = dst_cnt = 1; | 549 | src_cnt = dst_cnt = 1; |
297 | else if (thread->type == DMA_XOR) { | 550 | else if (thread->type == DMA_XOR) { |
298 | /* force odd to ensure dst = src */ | 551 | /* force odd to ensure dst = src */ |
299 | src_cnt = min_odd(xor_sources | 1, dev->max_xor); | 552 | src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); |
300 | dst_cnt = 1; | 553 | dst_cnt = 1; |
301 | } else if (thread->type == DMA_PQ) { | 554 | } else if (thread->type == DMA_PQ) { |
302 | /* force odd to ensure dst = src */ | 555 | /* force odd to ensure dst = src */ |
303 | src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); | 556 | src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); |
304 | dst_cnt = 2; | 557 | dst_cnt = 2; |
558 | |||
559 | pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); | ||
560 | if (!pq_coefs) | ||
561 | goto err_thread_type; | ||
562 | |||
305 | for (i = 0; i < src_cnt; i++) | 563 | for (i = 0; i < src_cnt; i++) |
306 | pq_coefs[i] = 1; | 564 | pq_coefs[i] = 1; |
307 | } else | 565 | } else |
566 | goto err_thread_type; | ||
567 | |||
568 | result = result_init(info, thread_name); | ||
569 | if (!result) | ||
308 | goto err_srcs; | 570 | goto err_srcs; |
309 | 571 | ||
310 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); | 572 | thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); |
311 | if (!thread->srcs) | 573 | if (!thread->srcs) |
312 | goto err_srcs; | 574 | goto err_srcs; |
313 | for (i = 0; i < src_cnt; i++) { | 575 | for (i = 0; i < src_cnt; i++) { |
314 | thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); | 576 | thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); |
315 | if (!thread->srcs[i]) | 577 | if (!thread->srcs[i]) |
316 | goto err_srcbuf; | 578 | goto err_srcbuf; |
317 | } | 579 | } |
@@ -321,7 +583,7 @@ static int dmatest_func(void *data) | |||
321 | if (!thread->dsts) | 583 | if (!thread->dsts) |
322 | goto err_dsts; | 584 | goto err_dsts; |
323 | for (i = 0; i < dst_cnt; i++) { | 585 | for (i = 0; i < dst_cnt; i++) { |
324 | thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); | 586 | thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); |
325 | if (!thread->dsts[i]) | 587 | if (!thread->dsts[i]) |
326 | goto err_dstbuf; | 588 | goto err_dstbuf; |
327 | } | 589 | } |
@@ -337,7 +599,7 @@ static int dmatest_func(void *data) | |||
337 | | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; | 599 | | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; |
338 | 600 | ||
339 | while (!kthread_should_stop() | 601 | while (!kthread_should_stop() |
340 | && !(iterations && total_tests >= iterations)) { | 602 | && !(params->iterations && total_tests >= params->iterations)) { |
341 | struct dma_async_tx_descriptor *tx = NULL; | 603 | struct dma_async_tx_descriptor *tx = NULL; |
342 | dma_addr_t dma_srcs[src_cnt]; | 604 | dma_addr_t dma_srcs[src_cnt]; |
343 | dma_addr_t dma_dsts[dst_cnt]; | 605 | dma_addr_t dma_dsts[dst_cnt]; |
@@ -353,24 +615,24 @@ static int dmatest_func(void *data) | |||
353 | else if (thread->type == DMA_PQ) | 615 | else if (thread->type == DMA_PQ) |
354 | align = dev->pq_align; | 616 | align = dev->pq_align; |
355 | 617 | ||
356 | if (1 << align > test_buf_size) { | 618 | if (1 << align > params->buf_size) { |
357 | pr_err("%u-byte buffer too small for %d-byte alignment\n", | 619 | pr_err("%u-byte buffer too small for %d-byte alignment\n", |
358 | test_buf_size, 1 << align); | 620 | params->buf_size, 1 << align); |
359 | break; | 621 | break; |
360 | } | 622 | } |
361 | 623 | ||
362 | len = dmatest_random() % test_buf_size + 1; | 624 | len = dmatest_random() % params->buf_size + 1; |
363 | len = (len >> align) << align; | 625 | len = (len >> align) << align; |
364 | if (!len) | 626 | if (!len) |
365 | len = 1 << align; | 627 | len = 1 << align; |
366 | src_off = dmatest_random() % (test_buf_size - len + 1); | 628 | src_off = dmatest_random() % (params->buf_size - len + 1); |
367 | dst_off = dmatest_random() % (test_buf_size - len + 1); | 629 | dst_off = dmatest_random() % (params->buf_size - len + 1); |
368 | 630 | ||
369 | src_off = (src_off >> align) << align; | 631 | src_off = (src_off >> align) << align; |
370 | dst_off = (dst_off >> align) << align; | 632 | dst_off = (dst_off >> align) << align; |
371 | 633 | ||
372 | dmatest_init_srcs(thread->srcs, src_off, len); | 634 | dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); |
373 | dmatest_init_dsts(thread->dsts, dst_off, len); | 635 | dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); |
374 | 636 | ||
375 | for (i = 0; i < src_cnt; i++) { | 637 | for (i = 0; i < src_cnt; i++) { |
376 | u8 *buf = thread->srcs[i] + src_off; | 638 | u8 *buf = thread->srcs[i] + src_off; |
@@ -380,10 +642,10 @@ static int dmatest_func(void *data) | |||
380 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); | 642 | ret = dma_mapping_error(dev->dev, dma_srcs[i]); |
381 | if (ret) { | 643 | if (ret) { |
382 | unmap_src(dev->dev, dma_srcs, len, i); | 644 | unmap_src(dev->dev, dma_srcs, len, i); |
383 | pr_warn("%s: #%u: mapping error %d with " | 645 | thread_result_add(info, result, |
384 | "src_off=0x%x len=0x%x\n", | 646 | DMATEST_ET_MAP_SRC, |
385 | thread_name, total_tests - 1, ret, | 647 | total_tests, src_off, dst_off, |
386 | src_off, len); | 648 | len, ret); |
387 | failed_tests++; | 649 | failed_tests++; |
388 | continue; | 650 | continue; |
389 | } | 651 | } |
@@ -391,16 +653,17 @@ static int dmatest_func(void *data) | |||
391 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ | 653 | /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ |
392 | for (i = 0; i < dst_cnt; i++) { | 654 | for (i = 0; i < dst_cnt; i++) { |
393 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], | 655 | dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], |
394 | test_buf_size, | 656 | params->buf_size, |
395 | DMA_BIDIRECTIONAL); | 657 | DMA_BIDIRECTIONAL); |
396 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); | 658 | ret = dma_mapping_error(dev->dev, dma_dsts[i]); |
397 | if (ret) { | 659 | if (ret) { |
398 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 660 | unmap_src(dev->dev, dma_srcs, len, src_cnt); |
399 | unmap_dst(dev->dev, dma_dsts, test_buf_size, i); | 661 | unmap_dst(dev->dev, dma_dsts, params->buf_size, |
400 | pr_warn("%s: #%u: mapping error %d with " | 662 | i); |
401 | "dst_off=0x%x len=0x%x\n", | 663 | thread_result_add(info, result, |
402 | thread_name, total_tests - 1, ret, | 664 | DMATEST_ET_MAP_DST, |
403 | dst_off, test_buf_size); | 665 | total_tests, src_off, dst_off, |
666 | len, ret); | ||
404 | failed_tests++; | 667 | failed_tests++; |
405 | continue; | 668 | continue; |
406 | } | 669 | } |
@@ -428,11 +691,11 @@ static int dmatest_func(void *data) | |||
428 | 691 | ||
429 | if (!tx) { | 692 | if (!tx) { |
430 | unmap_src(dev->dev, dma_srcs, len, src_cnt); | 693 | unmap_src(dev->dev, dma_srcs, len, src_cnt); |
431 | unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); | 694 | unmap_dst(dev->dev, dma_dsts, params->buf_size, |
432 | pr_warning("%s: #%u: prep error with src_off=0x%x " | 695 | dst_cnt); |
433 | "dst_off=0x%x len=0x%x\n", | 696 | thread_result_add(info, result, DMATEST_ET_PREP, |
434 | thread_name, total_tests - 1, | 697 | total_tests, src_off, dst_off, |
435 | src_off, dst_off, len); | 698 | len, 0); |
436 | msleep(100); | 699 | msleep(100); |
437 | failed_tests++; | 700 | failed_tests++; |
438 | continue; | 701 | continue; |
@@ -444,18 +707,18 @@ static int dmatest_func(void *data) | |||
444 | cookie = tx->tx_submit(tx); | 707 | cookie = tx->tx_submit(tx); |
445 | 708 | ||
446 | if (dma_submit_error(cookie)) { | 709 | if (dma_submit_error(cookie)) { |
447 | pr_warning("%s: #%u: submit error %d with src_off=0x%x " | 710 | thread_result_add(info, result, DMATEST_ET_SUBMIT, |
448 | "dst_off=0x%x len=0x%x\n", | 711 | total_tests, src_off, dst_off, |
449 | thread_name, total_tests - 1, cookie, | 712 | len, cookie); |
450 | src_off, dst_off, len); | ||
451 | msleep(100); | 713 | msleep(100); |
452 | failed_tests++; | 714 | failed_tests++; |
453 | continue; | 715 | continue; |
454 | } | 716 | } |
455 | dma_async_issue_pending(chan); | 717 | dma_async_issue_pending(chan); |
456 | 718 | ||
457 | wait_event_freezable_timeout(done_wait, done.done, | 719 | wait_event_freezable_timeout(done_wait, |
458 | msecs_to_jiffies(timeout)); | 720 | done.done || kthread_should_stop(), |
721 | msecs_to_jiffies(params->timeout)); | ||
459 | 722 | ||
460 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 723 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
461 | 724 | ||
@@ -468,56 +731,57 @@ static int dmatest_func(void *data) | |||
468 | * free it this time?" dancing. For now, just | 731 | * free it this time?" dancing. For now, just |
469 | * leave it dangling. | 732 | * leave it dangling. |
470 | */ | 733 | */ |
471 | pr_warning("%s: #%u: test timed out\n", | 734 | thread_result_add(info, result, DMATEST_ET_TIMEOUT, |
472 | thread_name, total_tests - 1); | 735 | total_tests, src_off, dst_off, |
736 | len, 0); | ||
473 | failed_tests++; | 737 | failed_tests++; |
474 | continue; | 738 | continue; |
475 | } else if (status != DMA_SUCCESS) { | 739 | } else if (status != DMA_SUCCESS) { |
476 | pr_warning("%s: #%u: got completion callback," | 740 | enum dmatest_error_type type = (status == DMA_ERROR) ? |
477 | " but status is \'%s\'\n", | 741 | DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; |
478 | thread_name, total_tests - 1, | 742 | thread_result_add(info, result, type, |
479 | status == DMA_ERROR ? "error" : "in progress"); | 743 | total_tests, src_off, dst_off, |
744 | len, status); | ||
480 | failed_tests++; | 745 | failed_tests++; |
481 | continue; | 746 | continue; |
482 | } | 747 | } |
483 | 748 | ||
484 | /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ | 749 | /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ |
485 | unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); | 750 | unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); |
486 | 751 | ||
487 | error_count = 0; | 752 | error_count = 0; |
488 | 753 | ||
489 | pr_debug("%s: verifying source buffer...\n", thread_name); | 754 | pr_debug("%s: verifying source buffer...\n", thread_name); |
490 | error_count += dmatest_verify(thread->srcs, 0, src_off, | 755 | error_count += verify_result_add(info, result, total_tests, |
756 | src_off, dst_off, len, thread->srcs, -1, | ||
491 | 0, PATTERN_SRC, true); | 757 | 0, PATTERN_SRC, true); |
492 | error_count += dmatest_verify(thread->srcs, src_off, | 758 | error_count += verify_result_add(info, result, total_tests, |
493 | src_off + len, src_off, | 759 | src_off, dst_off, len, thread->srcs, 0, |
494 | PATTERN_SRC | PATTERN_COPY, true); | 760 | src_off, PATTERN_SRC | PATTERN_COPY, true); |
495 | error_count += dmatest_verify(thread->srcs, src_off + len, | 761 | error_count += verify_result_add(info, result, total_tests, |
496 | test_buf_size, src_off + len, | 762 | src_off, dst_off, len, thread->srcs, 1, |
497 | PATTERN_SRC, true); | 763 | src_off + len, PATTERN_SRC, true); |
498 | 764 | ||
499 | pr_debug("%s: verifying dest buffer...\n", | 765 | pr_debug("%s: verifying dest buffer...\n", thread_name); |
500 | thread->task->comm); | 766 | error_count += verify_result_add(info, result, total_tests, |
501 | error_count += dmatest_verify(thread->dsts, 0, dst_off, | 767 | src_off, dst_off, len, thread->dsts, -1, |
502 | 0, PATTERN_DST, false); | 768 | 0, PATTERN_DST, false); |
503 | error_count += dmatest_verify(thread->dsts, dst_off, | 769 | error_count += verify_result_add(info, result, total_tests, |
504 | dst_off + len, src_off, | 770 | src_off, dst_off, len, thread->dsts, 0, |
505 | PATTERN_SRC | PATTERN_COPY, false); | 771 | src_off, PATTERN_SRC | PATTERN_COPY, false); |
506 | error_count += dmatest_verify(thread->dsts, dst_off + len, | 772 | error_count += verify_result_add(info, result, total_tests, |
507 | test_buf_size, dst_off + len, | 773 | src_off, dst_off, len, thread->dsts, 1, |
508 | PATTERN_DST, false); | 774 | dst_off + len, PATTERN_DST, false); |
509 | 775 | ||
510 | if (error_count) { | 776 | if (error_count) { |
511 | pr_warning("%s: #%u: %u errors with " | 777 | thread_result_add(info, result, DMATEST_ET_VERIFY, |
512 | "src_off=0x%x dst_off=0x%x len=0x%x\n", | 778 | total_tests, src_off, dst_off, |
513 | thread_name, total_tests - 1, error_count, | 779 | len, error_count); |
514 | src_off, dst_off, len); | ||
515 | failed_tests++; | 780 | failed_tests++; |
516 | } else { | 781 | } else { |
517 | pr_debug("%s: #%u: No errors with " | 782 | thread_result_add(info, result, DMATEST_ET_OK, |
518 | "src_off=0x%x dst_off=0x%x len=0x%x\n", | 783 | total_tests, src_off, dst_off, |
519 | thread_name, total_tests - 1, | 784 | len, 0); |
520 | src_off, dst_off, len); | ||
521 | } | 785 | } |
522 | } | 786 | } |
523 | 787 | ||
@@ -532,6 +796,8 @@ err_dsts: | |||
532 | err_srcbuf: | 796 | err_srcbuf: |
533 | kfree(thread->srcs); | 797 | kfree(thread->srcs); |
534 | err_srcs: | 798 | err_srcs: |
799 | kfree(pq_coefs); | ||
800 | err_thread_type: | ||
535 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", | 801 | pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", |
536 | thread_name, total_tests, failed_tests, ret); | 802 | thread_name, total_tests, failed_tests, ret); |
537 | 803 | ||
@@ -539,7 +805,9 @@ err_srcs: | |||
539 | if (ret) | 805 | if (ret) |
540 | dmaengine_terminate_all(chan); | 806 | dmaengine_terminate_all(chan); |
541 | 807 | ||
542 | if (iterations > 0) | 808 | thread->done = true; |
809 | |||
810 | if (params->iterations > 0) | ||
543 | while (!kthread_should_stop()) { | 811 | while (!kthread_should_stop()) { |
544 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); | 812 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); |
545 | interruptible_sleep_on(&wait_dmatest_exit); | 813 | interruptible_sleep_on(&wait_dmatest_exit); |
@@ -568,8 +836,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) | |||
568 | kfree(dtc); | 836 | kfree(dtc); |
569 | } | 837 | } |
570 | 838 | ||
571 | static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) | 839 | static int dmatest_add_threads(struct dmatest_info *info, |
840 | struct dmatest_chan *dtc, enum dma_transaction_type type) | ||
572 | { | 841 | { |
842 | struct dmatest_params *params = &info->params; | ||
573 | struct dmatest_thread *thread; | 843 | struct dmatest_thread *thread; |
574 | struct dma_chan *chan = dtc->chan; | 844 | struct dma_chan *chan = dtc->chan; |
575 | char *op; | 845 | char *op; |
@@ -584,7 +854,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty | |||
584 | else | 854 | else |
585 | return -EINVAL; | 855 | return -EINVAL; |
586 | 856 | ||
587 | for (i = 0; i < threads_per_chan; i++) { | 857 | for (i = 0; i < params->threads_per_chan; i++) { |
588 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); | 858 | thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); |
589 | if (!thread) { | 859 | if (!thread) { |
590 | pr_warning("dmatest: No memory for %s-%s%u\n", | 860 | pr_warning("dmatest: No memory for %s-%s%u\n", |
@@ -592,6 +862,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty | |||
592 | 862 | ||
593 | break; | 863 | break; |
594 | } | 864 | } |
865 | thread->info = info; | ||
595 | thread->chan = dtc->chan; | 866 | thread->chan = dtc->chan; |
596 | thread->type = type; | 867 | thread->type = type; |
597 | smp_wmb(); | 868 | smp_wmb(); |
@@ -612,7 +883,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty | |||
612 | return i; | 883 | return i; |
613 | } | 884 | } |
614 | 885 | ||
615 | static int dmatest_add_channel(struct dma_chan *chan) | 886 | static int dmatest_add_channel(struct dmatest_info *info, |
887 | struct dma_chan *chan) | ||
616 | { | 888 | { |
617 | struct dmatest_chan *dtc; | 889 | struct dmatest_chan *dtc; |
618 | struct dma_device *dma_dev = chan->device; | 890 | struct dma_device *dma_dev = chan->device; |
@@ -629,75 +901,418 @@ static int dmatest_add_channel(struct dma_chan *chan) | |||
629 | INIT_LIST_HEAD(&dtc->threads); | 901 | INIT_LIST_HEAD(&dtc->threads); |
630 | 902 | ||
631 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { | 903 | if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { |
632 | cnt = dmatest_add_threads(dtc, DMA_MEMCPY); | 904 | cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); |
633 | thread_count += cnt > 0 ? cnt : 0; | 905 | thread_count += cnt > 0 ? cnt : 0; |
634 | } | 906 | } |
635 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { | 907 | if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { |
636 | cnt = dmatest_add_threads(dtc, DMA_XOR); | 908 | cnt = dmatest_add_threads(info, dtc, DMA_XOR); |
637 | thread_count += cnt > 0 ? cnt : 0; | 909 | thread_count += cnt > 0 ? cnt : 0; |
638 | } | 910 | } |
639 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { | 911 | if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { |
640 | cnt = dmatest_add_threads(dtc, DMA_PQ); | 912 | cnt = dmatest_add_threads(info, dtc, DMA_PQ); |
641 | thread_count += cnt > 0 ? cnt : 0; | 913 | thread_count += cnt > 0 ? cnt : 0; |
642 | } | 914 | } |
643 | 915 | ||
644 | pr_info("dmatest: Started %u threads using %s\n", | 916 | pr_info("dmatest: Started %u threads using %s\n", |
645 | thread_count, dma_chan_name(chan)); | 917 | thread_count, dma_chan_name(chan)); |
646 | 918 | ||
647 | list_add_tail(&dtc->node, &dmatest_channels); | 919 | list_add_tail(&dtc->node, &info->channels); |
648 | nr_channels++; | 920 | info->nr_channels++; |
649 | 921 | ||
650 | return 0; | 922 | return 0; |
651 | } | 923 | } |
652 | 924 | ||
653 | static bool filter(struct dma_chan *chan, void *param) | 925 | static bool filter(struct dma_chan *chan, void *param) |
654 | { | 926 | { |
655 | if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) | 927 | struct dmatest_params *params = param; |
928 | |||
929 | if (!dmatest_match_channel(params, chan) || | ||
930 | !dmatest_match_device(params, chan->device)) | ||
656 | return false; | 931 | return false; |
657 | else | 932 | else |
658 | return true; | 933 | return true; |
659 | } | 934 | } |
660 | 935 | ||
661 | static int __init dmatest_init(void) | 936 | static int __run_threaded_test(struct dmatest_info *info) |
662 | { | 937 | { |
663 | dma_cap_mask_t mask; | 938 | dma_cap_mask_t mask; |
664 | struct dma_chan *chan; | 939 | struct dma_chan *chan; |
940 | struct dmatest_params *params = &info->params; | ||
665 | int err = 0; | 941 | int err = 0; |
666 | 942 | ||
667 | dma_cap_zero(mask); | 943 | dma_cap_zero(mask); |
668 | dma_cap_set(DMA_MEMCPY, mask); | 944 | dma_cap_set(DMA_MEMCPY, mask); |
669 | for (;;) { | 945 | for (;;) { |
670 | chan = dma_request_channel(mask, filter, NULL); | 946 | chan = dma_request_channel(mask, filter, params); |
671 | if (chan) { | 947 | if (chan) { |
672 | err = dmatest_add_channel(chan); | 948 | err = dmatest_add_channel(info, chan); |
673 | if (err) { | 949 | if (err) { |
674 | dma_release_channel(chan); | 950 | dma_release_channel(chan); |
675 | break; /* add_channel failed, punt */ | 951 | break; /* add_channel failed, punt */ |
676 | } | 952 | } |
677 | } else | 953 | } else |
678 | break; /* no more channels available */ | 954 | break; /* no more channels available */ |
679 | if (max_channels && nr_channels >= max_channels) | 955 | if (params->max_channels && |
956 | info->nr_channels >= params->max_channels) | ||
680 | break; /* we have all we need */ | 957 | break; /* we have all we need */ |
681 | } | 958 | } |
682 | |||
683 | return err; | 959 | return err; |
684 | } | 960 | } |
685 | /* when compiled-in wait for drivers to load first */ | ||
686 | late_initcall(dmatest_init); | ||
687 | 961 | ||
688 | static void __exit dmatest_exit(void) | 962 | #ifndef MODULE |
963 | static int run_threaded_test(struct dmatest_info *info) | ||
964 | { | ||
965 | int ret; | ||
966 | |||
967 | mutex_lock(&info->lock); | ||
968 | ret = __run_threaded_test(info); | ||
969 | mutex_unlock(&info->lock); | ||
970 | return ret; | ||
971 | } | ||
972 | #endif | ||
973 | |||
974 | static void __stop_threaded_test(struct dmatest_info *info) | ||
689 | { | 975 | { |
690 | struct dmatest_chan *dtc, *_dtc; | 976 | struct dmatest_chan *dtc, *_dtc; |
691 | struct dma_chan *chan; | 977 | struct dma_chan *chan; |
692 | 978 | ||
693 | list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { | 979 | list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { |
694 | list_del(&dtc->node); | 980 | list_del(&dtc->node); |
695 | chan = dtc->chan; | 981 | chan = dtc->chan; |
696 | dmatest_cleanup_channel(dtc); | 982 | dmatest_cleanup_channel(dtc); |
697 | pr_debug("dmatest: dropped channel %s\n", | 983 | pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); |
698 | dma_chan_name(chan)); | ||
699 | dma_release_channel(chan); | 984 | dma_release_channel(chan); |
700 | } | 985 | } |
986 | |||
987 | info->nr_channels = 0; | ||
988 | } | ||
989 | |||
990 | static void stop_threaded_test(struct dmatest_info *info) | ||
991 | { | ||
992 | mutex_lock(&info->lock); | ||
993 | __stop_threaded_test(info); | ||
994 | mutex_unlock(&info->lock); | ||
995 | } | ||
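
[annotation] One naming convention worth spelling out: the double-underscore helpers (__run_threaded_test(), __stop_threaded_test() and, below, __restart_threaded_test()) expect info->lock to be held already, while the plain-named wrappers take and release it themselves, as in:

        mutex_lock(&info->lock);
        __stop_threaded_test(info);     /* lock held across the helper */
        mutex_unlock(&info->lock);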
996 | |||
997 | static int __restart_threaded_test(struct dmatest_info *info, bool run) | ||
998 | { | ||
999 | struct dmatest_params *params = &info->params; | ||
1000 | int ret; | ||
1001 | |||
1002 | /* Stop any running test first */ | ||
1003 | __stop_threaded_test(info); | ||
1004 | |||
1005 | if (!run) | ||
1006 | return 0; | ||
1007 | |||
1008 | /* Clear results from previous run */ | ||
1009 | result_free(info, NULL); | ||
1010 | |||
1011 | /* Copy test parameters */ | ||
1012 | memcpy(params, &info->dbgfs_params, sizeof(*params)); | ||
1013 | |||
1014 | /* Run test with new parameters */ | ||
1015 | ret = __run_threaded_test(info); | ||
1016 | if (ret) { | ||
1017 | __stop_threaded_test(info); | ||
1018 | pr_err("dmatest: Can't run test\n"); | ||
1019 | } | ||
1020 | |||
1021 | return ret; | ||
1022 | } | ||
1023 | |||
1024 | static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, | ||
1025 | const void __user *from, size_t count) | ||
1026 | { | ||
1027 | char tmp[20]; | ||
1028 | ssize_t len; | ||
1029 | |||
1030 | len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count); | ||
1031 | if (len >= 0) { | ||
1032 | tmp[len] = '\0'; | ||
1033 | strlcpy(to, strim(tmp), available); | ||
1034 | } | ||
1035 | |||
1036 | return len; | ||
1037 | } | ||
1038 | |||
1039 | static ssize_t dtf_read_channel(struct file *file, char __user *buf, | ||
1040 | size_t count, loff_t *ppos) | ||
1041 | { | ||
1042 | struct dmatest_info *info = file->private_data; | ||
1043 | return simple_read_from_buffer(buf, count, ppos, | ||
1044 | info->dbgfs_params.channel, | ||
1045 | strlen(info->dbgfs_params.channel)); | ||
1046 | } | ||
1047 | |||
1048 | static ssize_t dtf_write_channel(struct file *file, const char __user *buf, | ||
1049 | size_t size, loff_t *ppos) | ||
1050 | { | ||
1051 | struct dmatest_info *info = file->private_data; | ||
1052 | return dtf_write_string(info->dbgfs_params.channel, | ||
1053 | sizeof(info->dbgfs_params.channel), | ||
1054 | ppos, buf, size); | ||
1055 | } | ||
1056 | |||
1057 | static const struct file_operations dtf_channel_fops = { | ||
1058 | .read = dtf_read_channel, | ||
1059 | .write = dtf_write_channel, | ||
1060 | .open = simple_open, | ||
1061 | .llseek = default_llseek, | ||
1062 | }; | ||
1063 | |||
1064 | static ssize_t dtf_read_device(struct file *file, char __user *buf, | ||
1065 | size_t count, loff_t *ppos) | ||
1066 | { | ||
1067 | struct dmatest_info *info = file->private_data; | ||
1068 | return simple_read_from_buffer(buf, count, ppos, | ||
1069 | info->dbgfs_params.device, | ||
1070 | strlen(info->dbgfs_params.device)); | ||
1071 | } | ||
1072 | |||
1073 | static ssize_t dtf_write_device(struct file *file, const char __user *buf, | ||
1074 | size_t size, loff_t *ppos) | ||
1075 | { | ||
1076 | struct dmatest_info *info = file->private_data; | ||
1077 | return dtf_write_string(info->dbgfs_params.device, | ||
1078 | sizeof(info->dbgfs_params.device), | ||
1079 | ppos, buf, size); | ||
1080 | } | ||
1081 | |||
1082 | static const struct file_operations dtf_device_fops = { | ||
1083 | .read = dtf_read_device, | ||
1084 | .write = dtf_write_device, | ||
1085 | .open = simple_open, | ||
1086 | .llseek = default_llseek, | ||
1087 | }; | ||
1088 | |||
1089 | static ssize_t dtf_read_run(struct file *file, char __user *user_buf, | ||
1090 | size_t count, loff_t *ppos) | ||
1091 | { | ||
1092 | struct dmatest_info *info = file->private_data; | ||
1093 | char buf[3]; | ||
1094 | struct dmatest_chan *dtc; | ||
1095 | bool alive = false; | ||
1096 | |||
1097 | mutex_lock(&info->lock); | ||
1098 | list_for_each_entry(dtc, &info->channels, node) { | ||
1099 | struct dmatest_thread *thread; | ||
1100 | |||
1101 | list_for_each_entry(thread, &dtc->threads, node) { | ||
1102 | if (!thread->done) { | ||
1103 | alive = true; | ||
1104 | break; | ||
1105 | } | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | if (alive) { | ||
1110 | buf[0] = 'Y'; | ||
1111 | } else { | ||
1112 | __stop_threaded_test(info); | ||
1113 | buf[0] = 'N'; | ||
1114 | } | ||
1115 | |||
1116 | mutex_unlock(&info->lock); | ||
1117 | buf[1] = '\n'; | ||
1118 | buf[2] = 0x00; | ||
1119 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); | ||
1120 | } | ||
1121 | |||
1122 | static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, | ||
1123 | size_t count, loff_t *ppos) | ||
1124 | { | ||
1125 | struct dmatest_info *info = file->private_data; | ||
1126 | char buf[16]; | ||
1127 | bool bv; | ||
1128 | int ret = 0; | ||
1129 | |||
1130 | if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) | ||
1131 | return -EFAULT; | ||
1132 | |||
1133 | if (strtobool(buf, &bv) == 0) { | ||
1134 | mutex_lock(&info->lock); | ||
1135 | ret = __restart_threaded_test(info, bv); | ||
1136 | mutex_unlock(&info->lock); | ||
1137 | } | ||
1138 | |||
1139 | return ret ? ret : count; | ||
1140 | } | ||
1141 | |||
1142 | static const struct file_operations dtf_run_fops = { | ||
1143 | .read = dtf_read_run, | ||
1144 | .write = dtf_write_run, | ||
1145 | .open = simple_open, | ||
1146 | .llseek = default_llseek, | ||
1147 | }; | ||
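
[annotation] dtf_read_run() and dtf_write_run() together give user space a simple start-and-poll protocol. For illustration, a hypothetical user-space program driving it (assuming debugfs is mounted at /sys/kernel/debug) could look like:

        #include <fcntl.h>
        #include <unistd.h>

        int main(void)
        {
                char status[4] = "";
                int fd;

                /* Start a run: equivalent of 'echo Y > .../dmatest/run' */
                fd = open("/sys/kernel/debug/dmatest/run", O_WRONLY);
                if (fd < 0)
                        return 1;
                write(fd, "Y\n", 2);
                close(fd);

                /* Poll until dtf_read_run() reports 'N' (all threads done) */
                do {
                        sleep(1);
                        fd = open("/sys/kernel/debug/dmatest/run", O_RDONLY);
                        if (fd < 0)
                                return 1;
                        read(fd, status, sizeof(status) - 1);
                        close(fd);
                } while (status[0] == 'Y');

                return 0;
        }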
1148 | |||
1149 | static int dtf_results_show(struct seq_file *sf, void *data) | ||
1150 | { | ||
1151 | struct dmatest_info *info = sf->private; | ||
1152 | struct dmatest_result *result; | ||
1153 | struct dmatest_thread_result *tr; | ||
1154 | unsigned int i; | ||
1155 | |||
1156 | mutex_lock(&info->results_lock); | ||
1157 | list_for_each_entry(result, &info->results, node) { | ||
1158 | list_for_each_entry(tr, &result->results, node) { | ||
1159 | seq_printf(sf, "%s\n", | ||
1160 | thread_result_get(result->name, tr)); | ||
1161 | if (tr->type == DMATEST_ET_VERIFY_BUF) { | ||
1162 | for (i = 0; i < tr->vr->error_count; i++) { | ||
1163 | seq_printf(sf, "\t%s\n", | ||
1164 | verify_result_get_one(tr->vr, i)); | ||
1165 | } | ||
1166 | } | ||
1167 | } | ||
1168 | } | ||
1169 | |||
1170 | mutex_unlock(&info->results_lock); | ||
1171 | return 0; | ||
1172 | } | ||
1173 | |||
1174 | static int dtf_results_open(struct inode *inode, struct file *file) | ||
1175 | { | ||
1176 | return single_open(file, dtf_results_show, inode->i_private); | ||
1177 | } | ||
1178 | |||
1179 | static const struct file_operations dtf_results_fops = { | ||
1180 | .open = dtf_results_open, | ||
1181 | .read = seq_read, | ||
1182 | .llseek = seq_lseek, | ||
1183 | .release = single_release, | ||
1184 | }; | ||
1185 | |||
1186 | static int dmatest_register_dbgfs(struct dmatest_info *info) | ||
1187 | { | ||
1188 | struct dentry *d; | ||
1189 | struct dmatest_params *params = &info->dbgfs_params; | ||
1190 | int ret = -ENOMEM; | ||
1191 | |||
1192 | d = debugfs_create_dir("dmatest", NULL); | ||
1193 | if (IS_ERR(d)) | ||
1194 | return PTR_ERR(d); | ||
1195 | if (!d) | ||
1196 | goto err_root; | ||
1197 | |||
1198 | info->root = d; | ||
1199 | |||
1200 | /* Copy initial values */ | ||
1201 | memcpy(params, &info->params, sizeof(*params)); | ||
1202 | |||
1203 | /* Test parameters */ | ||
1204 | |||
1205 | d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root, | ||
1206 | (u32 *)¶ms->buf_size); | ||
1207 | if (IS_ERR_OR_NULL(d)) | ||
1208 | goto err_node; | ||
1209 | |||
1210 | d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root, | ||
1211 | info, &dtf_channel_fops); | ||
1212 | if (IS_ERR_OR_NULL(d)) | ||
1213 | goto err_node; | ||
1214 | |||
1215 | d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root, | ||
1216 | info, &dtf_device_fops); | ||
1217 | if (IS_ERR_OR_NULL(d)) | ||
1218 | goto err_node; | ||
1219 | |||
1220 | d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root, | ||
1221 | (u32 *)¶ms->threads_per_chan); | ||
1222 | if (IS_ERR_OR_NULL(d)) | ||
1223 | goto err_node; | ||
1224 | |||
1225 | d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root, | ||
1226 | (u32 *)¶ms->max_channels); | ||
1227 | if (IS_ERR_OR_NULL(d)) | ||
1228 | goto err_node; | ||
1229 | |||
1230 | d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root, | ||
1231 | (u32 *)¶ms->iterations); | ||
1232 | if (IS_ERR_OR_NULL(d)) | ||
1233 | goto err_node; | ||
1234 | |||
1235 | d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root, | ||
1236 | (u32 *)¶ms->xor_sources); | ||
1237 | if (IS_ERR_OR_NULL(d)) | ||
1238 | goto err_node; | ||
1239 | |||
1240 | d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root, | ||
1241 | (u32 *)¶ms->pq_sources); | ||
1242 | if (IS_ERR_OR_NULL(d)) | ||
1243 | goto err_node; | ||
1244 | |||
1245 | d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root, | ||
1246 | (u32 *)¶ms->timeout); | ||
1247 | if (IS_ERR_OR_NULL(d)) | ||
1248 | goto err_node; | ||
1249 | |||
1250 | /* Run or stop threaded test */ | ||
1251 | d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, | ||
1252 | info, &dtf_run_fops); | ||
1253 | if (IS_ERR_OR_NULL(d)) | ||
1254 | goto err_node; | ||
1255 | |||
1256 | /* Results of test in progress */ | ||
1257 | d = debugfs_create_file("results", S_IRUGO, info->root, info, | ||
1258 | &dtf_results_fops); | ||
1259 | if (IS_ERR_OR_NULL(d)) | ||
1260 | goto err_node; | ||
1261 | |||
1262 | return 0; | ||
1263 | |||
1264 | err_node: | ||
1265 | debugfs_remove_recursive(info->root); | ||
1266 | err_root: | ||
1267 | pr_err("dmatest: Failed to initialize debugfs\n"); | ||
1268 | return ret; | ||
1269 | } | ||
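
[annotation] Assuming debugfs is mounted in the usual place, the nodes created above come out as:

        /sys/kernel/debug/dmatest/
                test_buf_size           (u32, rw)
                channel                 (string, rw)
                device                  (string, rw)
                threads_per_chan        (u32, rw)
                max_channels            (u32, rw)
                iterations              (u32, rw)
                xor_sources             (u32, rw)
                pq_sources              (u32, rw)
                timeout                 (u32, rw)
                run                     (rw)
                results                 (read-only)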
1270 | |||
1271 | static int __init dmatest_init(void) | ||
1272 | { | ||
1273 | struct dmatest_info *info = &test_info; | ||
1274 | struct dmatest_params *params = &info->params; | ||
1275 | int ret; | ||
1276 | |||
1277 | memset(info, 0, sizeof(*info)); | ||
1278 | |||
1279 | mutex_init(&info->lock); | ||
1280 | INIT_LIST_HEAD(&info->channels); | ||
1281 | |||
1282 | mutex_init(&info->results_lock); | ||
1283 | INIT_LIST_HEAD(&info->results); | ||
1284 | |||
1285 | /* Set default parameters */ | ||
1286 | params->buf_size = test_buf_size; | ||
1287 | strlcpy(params->channel, test_channel, sizeof(params->channel)); | ||
1288 | strlcpy(params->device, test_device, sizeof(params->device)); | ||
1289 | params->threads_per_chan = threads_per_chan; | ||
1290 | params->max_channels = max_channels; | ||
1291 | params->iterations = iterations; | ||
1292 | params->xor_sources = xor_sources; | ||
1293 | params->pq_sources = pq_sources; | ||
1294 | params->timeout = timeout; | ||
1295 | |||
1296 | ret = dmatest_register_dbgfs(info); | ||
1297 | if (ret) | ||
1298 | return ret; | ||
1299 | |||
1300 | #ifdef MODULE | ||
1301 | return 0; | ||
1302 | #else | ||
1303 | return run_threaded_test(info); | ||
1304 | #endif | ||
1305 | } | ||
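
[annotation] Note the asymmetry at the end of dmatest_init(): built as a module, it stops after registering debugfs, and a test only starts once user space writes 'Y' to the run node; built into the kernel there is no user space yet at late_initcall() time, so run_threaded_test() is called directly with the module-parameter defaults copied just above.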
1306 | /* when compiled-in, wait for drivers to load first */ | ||
1307 | late_initcall(dmatest_init); | ||
1308 | |||
1309 | static void __exit dmatest_exit(void) | ||
1310 | { | ||
1311 | struct dmatest_info *info = &test_info; | ||
1312 | |||
1313 | debugfs_remove_recursive(info->root); | ||
1314 | stop_threaded_test(info); | ||
1315 | result_free(info, NULL); | ||
701 | } | 1316 | } |
702 | module_exit(dmatest_exit); | 1317 | module_exit(dmatest_exit); |
703 | 1318 | ||
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 43a5329d4483..2e5deaa82b60 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/acpi.h> | ||
29 | #include <linux/acpi_dma.h> | ||
28 | 30 | ||
29 | #include "dw_dmac_regs.h" | 31 | #include "dw_dmac_regs.h" |
30 | #include "dmaengine.h" | 32 | #include "dmaengine.h" |
@@ -49,29 +51,22 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) | |||
49 | return slave ? slave->src_master : 1; | 51 | return slave ? slave->src_master : 1; |
50 | } | 52 | } |
51 | 53 | ||
52 | #define SRC_MASTER 0 | 54 | static inline void dwc_set_masters(struct dw_dma_chan *dwc) |
53 | #define DST_MASTER 1 | ||
54 | |||
55 | static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) | ||
56 | { | 55 | { |
57 | struct dw_dma *dw = to_dw_dma(chan->device); | 56 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
58 | struct dw_dma_slave *dws = chan->private; | 57 | struct dw_dma_slave *dws = dwc->chan.private; |
59 | unsigned int m; | 58 | unsigned char mmax = dw->nr_masters - 1; |
60 | |||
61 | if (master == SRC_MASTER) | ||
62 | m = dwc_get_sms(dws); | ||
63 | else | ||
64 | m = dwc_get_dms(dws); | ||
65 | 59 | ||
66 | return min_t(unsigned int, dw->nr_masters - 1, m); | 60 | if (dwc->request_line == ~0) { |
61 | dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); | ||
62 | dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); | ||
63 | } | ||
67 | } | 64 | } |
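
[annotation] The ~0 check above relies on a sentinel convention that this patch threads through the driver: dwc->request_line starts out as ~0 (set in dw_probe() and reset in dwc_free_chan_resources(), both further down), and is overwritten by the OF/ACPI filter functions or, failing that, by slave_id in set_runtime_config(). dwc_set_masters() therefore only falls back to the dw_dma_slave data when no firmware-provided request line has claimed the channel. In outline:

        /* Lifecycle of dwc->request_line (sketch):
         *
         *   dw_probe():                 dwc->request_line = ~0;
         *   dw_dma_of_filter():         dwc->request_line = fargs->req;
         *   dw_dma_acpi_filter():       dwc->request_line = dma_spec->slave_id;
         *   set_runtime_config():       if still ~0, take sconfig->slave_id;
         *   dwc_free_chan_resources():  dwc->request_line = ~0;
         */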
68 | 65 | ||
69 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ | 66 | #define DWC_DEFAULT_CTLLO(_chan) ({ \ |
70 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ | 67 | struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ |
71 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ | 68 | struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ |
72 | bool _is_slave = is_slave_direction(_dwc->direction); \ | 69 | bool _is_slave = is_slave_direction(_dwc->direction); \ |
73 | int _dms = dwc_get_master(_chan, DST_MASTER); \ | ||
74 | int _sms = dwc_get_master(_chan, SRC_MASTER); \ | ||
75 | u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ | 70 | u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ |
76 | DW_DMA_MSIZE_16; \ | 71 | DW_DMA_MSIZE_16; \ |
77 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ | 72 | u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ |
@@ -81,8 +76,8 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) | |||
81 | | DWC_CTLL_SRC_MSIZE(_smsize) \ | 76 | | DWC_CTLL_SRC_MSIZE(_smsize) \ |
82 | | DWC_CTLL_LLP_D_EN \ | 77 | | DWC_CTLL_LLP_D_EN \ |
83 | | DWC_CTLL_LLP_S_EN \ | 78 | | DWC_CTLL_LLP_S_EN \ |
84 | | DWC_CTLL_DMS(_dms) \ | 79 | | DWC_CTLL_DMS(_dwc->dst_master) \ |
85 | | DWC_CTLL_SMS(_sms)); \ | 80 | | DWC_CTLL_SMS(_dwc->src_master)); \ |
86 | }) | 81 | }) |
87 | 82 | ||
88 | /* | 83 | /* |
@@ -92,13 +87,6 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) | |||
92 | */ | 87 | */ |
93 | #define NR_DESCS_PER_CHANNEL 64 | 88 | #define NR_DESCS_PER_CHANNEL 64 |
94 | 89 | ||
95 | static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master) | ||
96 | { | ||
97 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
98 | |||
99 | return dw->data_width[dwc_get_master(chan, master)]; | ||
100 | } | ||
101 | |||
102 | /*----------------------------------------------------------------------*/ | 90 | /*----------------------------------------------------------------------*/ |
103 | 91 | ||
104 | static struct device *chan2dev(struct dma_chan *chan) | 92 | static struct device *chan2dev(struct dma_chan *chan) |
@@ -172,13 +160,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
172 | if (dwc->initialized == true) | 160 | if (dwc->initialized == true) |
173 | return; | 161 | return; |
174 | 162 | ||
175 | if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) { | 163 | if (dws) { |
176 | /* autoconfigure based on request line from DT */ | ||
177 | if (dwc->direction == DMA_MEM_TO_DEV) | ||
178 | cfghi = DWC_CFGH_DST_PER(dwc->request_line); | ||
179 | else if (dwc->direction == DMA_DEV_TO_MEM) | ||
180 | cfghi = DWC_CFGH_SRC_PER(dwc->request_line); | ||
181 | } else if (dws) { | ||
182 | /* | 164 | /* |
183 | * We need controller-specific data to set up slave | 165 | * We need controller-specific data to set up slave |
184 | * transfers. | 166 | * transfers. |
@@ -189,9 +171,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc) | |||
189 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; | 171 | cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; |
190 | } else { | 172 | } else { |
191 | if (dwc->direction == DMA_MEM_TO_DEV) | 173 | if (dwc->direction == DMA_MEM_TO_DEV) |
192 | cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); | 174 | cfghi = DWC_CFGH_DST_PER(dwc->request_line); |
193 | else if (dwc->direction == DMA_DEV_TO_MEM) | 175 | else if (dwc->direction == DMA_DEV_TO_MEM) |
194 | cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); | 176 | cfghi = DWC_CFGH_SRC_PER(dwc->request_line); |
195 | } | 177 | } |
196 | 178 | ||
197 | channel_writel(dwc, CFG_LO, cfglo); | 179 | channel_writel(dwc, CFG_LO, cfglo); |
@@ -473,16 +455,16 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | |||
473 | (unsigned long long)llp); | 455 | (unsigned long long)llp); |
474 | 456 | ||
475 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 457 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
476 | /* initial residue value */ | 458 | /* Initial residue value */ |
477 | dwc->residue = desc->total_len; | 459 | dwc->residue = desc->total_len; |
478 | 460 | ||
479 | /* check first descriptors addr */ | 461 | /* Check first descriptor's addr */
480 | if (desc->txd.phys == llp) { | 462 | if (desc->txd.phys == llp) { |
481 | spin_unlock_irqrestore(&dwc->lock, flags); | 463 | spin_unlock_irqrestore(&dwc->lock, flags); |
482 | return; | 464 | return; |
483 | } | 465 | } |
484 | 466 | ||
485 | /* check first descriptors llp */ | 467 | /* Check first descriptor's llp */
486 | if (desc->lli.llp == llp) { | 468 | if (desc->lli.llp == llp) { |
487 | /* This one is currently in progress */ | 469 | /* This one is currently in progress */ |
488 | dwc->residue -= dwc_get_sent(dwc); | 470 | dwc->residue -= dwc_get_sent(dwc); |
@@ -588,7 +570,7 @@ inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) | |||
588 | } | 570 | } |
589 | EXPORT_SYMBOL(dw_dma_get_dst_addr); | 571 | EXPORT_SYMBOL(dw_dma_get_dst_addr); |
590 | 572 | ||
591 | /* called with dwc->lock held and all DMAC interrupts disabled */ | 573 | /* Called with dwc->lock held and all DMAC interrupts disabled */ |
592 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | 574 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
593 | u32 status_err, u32 status_xfer) | 575 | u32 status_err, u32 status_xfer) |
594 | { | 576 | { |
@@ -626,7 +608,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | |||
626 | 608 | ||
627 | dwc_chan_disable(dw, dwc); | 609 | dwc_chan_disable(dw, dwc); |
628 | 610 | ||
629 | /* make sure DMA does not restart by loading a new list */ | 611 | /* Make sure DMA does not restart by loading a new list */ |
630 | channel_writel(dwc, LLP, 0); | 612 | channel_writel(dwc, LLP, 0); |
631 | channel_writel(dwc, CTL_LO, 0); | 613 | channel_writel(dwc, CTL_LO, 0); |
632 | channel_writel(dwc, CTL_HI, 0); | 614 | channel_writel(dwc, CTL_HI, 0); |
@@ -745,6 +727,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
745 | size_t len, unsigned long flags) | 727 | size_t len, unsigned long flags) |
746 | { | 728 | { |
747 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 729 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
730 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
748 | struct dw_desc *desc; | 731 | struct dw_desc *desc; |
749 | struct dw_desc *first; | 732 | struct dw_desc *first; |
750 | struct dw_desc *prev; | 733 | struct dw_desc *prev; |
@@ -767,8 +750,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
767 | 750 | ||
768 | dwc->direction = DMA_MEM_TO_MEM; | 751 | dwc->direction = DMA_MEM_TO_MEM; |
769 | 752 | ||
770 | data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER), | 753 | data_width = min_t(unsigned int, dw->data_width[dwc->src_master], |
771 | dwc_get_data_width(chan, DST_MASTER)); | 754 | dw->data_width[dwc->dst_master]); |
772 | 755 | ||
773 | src_width = dst_width = min_t(unsigned int, data_width, | 756 | src_width = dst_width = min_t(unsigned int, data_width, |
774 | dwc_fast_fls(src | dest | len)); | 757 | dwc_fast_fls(src | dest | len)); |
@@ -826,6 +809,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
826 | unsigned long flags, void *context) | 809 | unsigned long flags, void *context) |
827 | { | 810 | { |
828 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 811 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
812 | struct dw_dma *dw = to_dw_dma(chan->device); | ||
829 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; | 813 | struct dma_slave_config *sconfig = &dwc->dma_sconfig; |
830 | struct dw_desc *prev; | 814 | struct dw_desc *prev; |
831 | struct dw_desc *first; | 815 | struct dw_desc *first; |
@@ -859,7 +843,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
859 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : | 843 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : |
860 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); | 844 | DWC_CTLL_FC(DW_DMA_FC_D_M2P); |
861 | 845 | ||
862 | data_width = dwc_get_data_width(chan, SRC_MASTER); | 846 | data_width = dw->data_width[dwc->src_master]; |
863 | 847 | ||
864 | for_each_sg(sgl, sg, sg_len, i) { | 848 | for_each_sg(sgl, sg, sg_len, i) { |
865 | struct dw_desc *desc; | 849 | struct dw_desc *desc; |
@@ -919,7 +903,7 @@ slave_sg_todev_fill_desc: | |||
919 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : | 903 | ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : |
920 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); | 904 | DWC_CTLL_FC(DW_DMA_FC_D_P2M); |
921 | 905 | ||
922 | data_width = dwc_get_data_width(chan, DST_MASTER); | 906 | data_width = dw->data_width[dwc->dst_master]; |
923 | 907 | ||
924 | for_each_sg(sgl, sg, sg_len, i) { | 908 | for_each_sg(sgl, sg, sg_len, i) { |
925 | struct dw_desc *desc; | 909 | struct dw_desc *desc; |
@@ -1001,13 +985,6 @@ static inline void convert_burst(u32 *maxburst) | |||
1001 | *maxburst = 0; | 985 | *maxburst = 0; |
1002 | } | 986 | } |
1003 | 987 | ||
1004 | static inline void convert_slave_id(struct dw_dma_chan *dwc) | ||
1005 | { | ||
1006 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
1007 | |||
1008 | dwc->dma_sconfig.slave_id -= dw->request_line_base; | ||
1009 | } | ||
1010 | |||
1011 | static int | 988 | static int |
1012 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | 989 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
1013 | { | 990 | { |
@@ -1020,9 +997,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
1020 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); | 997 | memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); |
1021 | dwc->direction = sconfig->direction; | 998 | dwc->direction = sconfig->direction; |
1022 | 999 | ||
1000 | /* Take the request line from slave_id member */ | ||
1001 | if (dwc->request_line == ~0) | ||
1002 | dwc->request_line = sconfig->slave_id; | ||
1003 | |||
1023 | convert_burst(&dwc->dma_sconfig.src_maxburst); | 1004 | convert_burst(&dwc->dma_sconfig.src_maxburst); |
1024 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | 1005 | convert_burst(&dwc->dma_sconfig.dst_maxburst); |
1025 | convert_slave_id(dwc); | ||
1026 | 1006 | ||
1027 | return 0; | 1007 | return 0; |
1028 | } | 1008 | } |
@@ -1030,10 +1010,11 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
1030 | static inline void dwc_chan_pause(struct dw_dma_chan *dwc) | 1010 | static inline void dwc_chan_pause(struct dw_dma_chan *dwc) |
1031 | { | 1011 | { |
1032 | u32 cfglo = channel_readl(dwc, CFG_LO); | 1012 | u32 cfglo = channel_readl(dwc, CFG_LO); |
1013 | unsigned int count = 20; /* timeout iterations */ | ||
1033 | 1014 | ||
1034 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); | 1015 | channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); |
1035 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) | 1016 | while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) |
1036 | cpu_relax(); | 1017 | udelay(2); |
1037 | 1018 | ||
1038 | dwc->paused = true; | 1019 | dwc->paused = true; |
1039 | } | 1020 | } |
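
[annotation] The reworked loop above bounds what used to be an open-ended cpu_relax() spin: with count starting at 20 and udelay(2) per iteration, the wait for DWC_CFGL_FIFO_EMPTY is now capped at roughly 20 x 2 usec = 40 usec, after which the channel is marked paused regardless.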
@@ -1169,6 +1150,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) | |||
1169 | * doesn't mean what you think it means), and status writeback. | 1150 | * doesn't mean what you think it means), and status writeback. |
1170 | */ | 1151 | */ |
1171 | 1152 | ||
1153 | dwc_set_masters(dwc); | ||
1154 | |||
1172 | spin_lock_irqsave(&dwc->lock, flags); | 1155 | spin_lock_irqsave(&dwc->lock, flags); |
1173 | i = dwc->descs_allocated; | 1156 | i = dwc->descs_allocated; |
1174 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | 1157 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { |
@@ -1226,6 +1209,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1226 | list_splice_init(&dwc->free_list, &list); | 1209 | list_splice_init(&dwc->free_list, &list); |
1227 | dwc->descs_allocated = 0; | 1210 | dwc->descs_allocated = 0; |
1228 | dwc->initialized = false; | 1211 | dwc->initialized = false; |
1212 | dwc->request_line = ~0; | ||
1229 | 1213 | ||
1230 | /* Disable interrupts */ | 1214 | /* Disable interrupts */ |
1231 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 1215 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
@@ -1241,42 +1225,36 @@ static void dwc_free_chan_resources(struct dma_chan *chan) | |||
1241 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); | 1225 | dev_vdbg(chan2dev(chan), "%s: done\n", __func__); |
1242 | } | 1226 | } |
1243 | 1227 | ||
1244 | struct dw_dma_filter_args { | 1228 | /*----------------------------------------------------------------------*/ |
1229 | |||
1230 | struct dw_dma_of_filter_args { | ||
1245 | struct dw_dma *dw; | 1231 | struct dw_dma *dw; |
1246 | unsigned int req; | 1232 | unsigned int req; |
1247 | unsigned int src; | 1233 | unsigned int src; |
1248 | unsigned int dst; | 1234 | unsigned int dst; |
1249 | }; | 1235 | }; |
1250 | 1236 | ||
1251 | static bool dw_dma_generic_filter(struct dma_chan *chan, void *param) | 1237 | static bool dw_dma_of_filter(struct dma_chan *chan, void *param) |
1252 | { | 1238 | { |
1253 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1239 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1254 | struct dw_dma *dw = to_dw_dma(chan->device); | 1240 | struct dw_dma_of_filter_args *fargs = param; |
1255 | struct dw_dma_filter_args *fargs = param; | ||
1256 | struct dw_dma_slave *dws = &dwc->slave; | ||
1257 | 1241 | ||
1258 | /* ensure the device matches our channel */ | 1242 | /* Ensure the device matches our channel */ |
1259 | if (chan->device != &fargs->dw->dma) | 1243 | if (chan->device != &fargs->dw->dma) |
1260 | return false; | 1244 | return false; |
1261 | 1245 | ||
1262 | dws->dma_dev = dw->dma.dev; | ||
1263 | dws->cfg_hi = ~0; | ||
1264 | dws->cfg_lo = ~0; | ||
1265 | dws->src_master = fargs->src; | ||
1266 | dws->dst_master = fargs->dst; | ||
1267 | |||
1268 | dwc->request_line = fargs->req; | 1246 | dwc->request_line = fargs->req; |
1269 | 1247 | dwc->src_master = fargs->src; | |
1270 | chan->private = dws; | 1248 | dwc->dst_master = fargs->dst; |
1271 | 1249 | ||
1272 | return true; | 1250 | return true; |
1273 | } | 1251 | } |
1274 | 1252 | ||
1275 | static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, | 1253 | static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, |
1276 | struct of_dma *ofdma) | 1254 | struct of_dma *ofdma) |
1277 | { | 1255 | { |
1278 | struct dw_dma *dw = ofdma->of_dma_data; | 1256 | struct dw_dma *dw = ofdma->of_dma_data; |
1279 | struct dw_dma_filter_args fargs = { | 1257 | struct dw_dma_of_filter_args fargs = { |
1280 | .dw = dw, | 1258 | .dw = dw, |
1281 | }; | 1259 | }; |
1282 | dma_cap_mask_t cap; | 1260 | dma_cap_mask_t cap; |
@@ -1297,8 +1275,48 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, | |||
1297 | dma_cap_set(DMA_SLAVE, cap); | 1275 | dma_cap_set(DMA_SLAVE, cap); |
1298 | 1276 | ||
1299 | /* TODO: there should be a simpler way to do this */ | 1277 | /* TODO: there should be a simpler way to do this */ |
1300 | return dma_request_channel(cap, dw_dma_generic_filter, &fargs); | 1278 | return dma_request_channel(cap, dw_dma_of_filter, &fargs); |
1279 | } | ||
1280 | |||
1281 | #ifdef CONFIG_ACPI | ||
1282 | static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) | ||
1283 | { | ||
1284 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | ||
1285 | struct acpi_dma_spec *dma_spec = param; | ||
1286 | |||
1287 | if (chan->device->dev != dma_spec->dev || | ||
1288 | chan->chan_id != dma_spec->chan_id) | ||
1289 | return false; | ||
1290 | |||
1291 | dwc->request_line = dma_spec->slave_id; | ||
1292 | dwc->src_master = dwc_get_sms(NULL); | ||
1293 | dwc->dst_master = dwc_get_dms(NULL); | ||
1294 | |||
1295 | return true; | ||
1296 | } | ||
1297 | |||
1298 | static void dw_dma_acpi_controller_register(struct dw_dma *dw) | ||
1299 | { | ||
1300 | struct device *dev = dw->dma.dev; | ||
1301 | struct acpi_dma_filter_info *info; | ||
1302 | int ret; | ||
1303 | |||
1304 | info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); | ||
1305 | if (!info) | ||
1306 | return; | ||
1307 | |||
1308 | dma_cap_zero(info->dma_cap); | ||
1309 | dma_cap_set(DMA_SLAVE, info->dma_cap); | ||
1310 | info->filter_fn = dw_dma_acpi_filter; | ||
1311 | |||
1312 | ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, | ||
1313 | info); | ||
1314 | if (ret) | ||
1315 | dev_err(dev, "could not register acpi_dma_controller\n"); | ||
1301 | } | 1316 | } |
1317 | #else /* !CONFIG_ACPI */ | ||
1318 | static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} | ||
1319 | #endif /* !CONFIG_ACPI */ | ||
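
[annotation] For context on where the filter's 'param' comes from: acpi_dma_simple_xlate() (in drivers/dma/acpi-dma.c, not part of this hunk) forwards the acpi_dma_spec straight through dma_request_channel() using the capabilities and filter stored at registration time. A rough sketch of that helper, to the best of this reading of the acpi-dma API:

        struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
                                               struct acpi_dma *adma)
        {
                struct acpi_dma_filter_info *info = adma->data;

                if (!info || !info->filter_fn)
                        return NULL;

                return dma_request_channel(info->dma_cap, info->filter_fn,
                                           dma_spec);
        }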
1302 | 1320 | ||
1303 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 1321 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
1304 | 1322 | ||
@@ -1322,7 +1340,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1322 | 1340 | ||
1323 | spin_lock_irqsave(&dwc->lock, flags); | 1341 | spin_lock_irqsave(&dwc->lock, flags); |
1324 | 1342 | ||
1325 | /* assert channel is idle */ | 1343 | /* Assert channel is idle */ |
1326 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 1344 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
1327 | dev_err(chan2dev(&dwc->chan), | 1345 | dev_err(chan2dev(&dwc->chan), |
1328 | "BUG: Attempted to start non-idle channel\n"); | 1346 | "BUG: Attempted to start non-idle channel\n"); |
@@ -1334,7 +1352,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) | |||
1334 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1352 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1335 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1353 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1336 | 1354 | ||
1337 | /* setup DMAC channel registers */ | 1355 | /* Setup DMAC channel registers */ |
1338 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); | 1356 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); |
1339 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | 1357 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
1340 | channel_writel(dwc, CTL_HI, 0); | 1358 | channel_writel(dwc, CTL_HI, 0); |
@@ -1501,7 +1519,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1501 | last = desc; | 1519 | last = desc; |
1502 | } | 1520 | } |
1503 | 1521 | ||
1504 | /* lets make a cyclic list */ | 1522 | /* Let's make a cyclic list */ |
1505 | last->lli.llp = cdesc->desc[0]->txd.phys; | 1523 | last->lli.llp = cdesc->desc[0]->txd.phys; |
1506 | 1524 | ||
1507 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " | 1525 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " |
@@ -1636,7 +1654,6 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
1636 | 1654 | ||
1637 | static int dw_probe(struct platform_device *pdev) | 1655 | static int dw_probe(struct platform_device *pdev) |
1638 | { | 1656 | { |
1639 | const struct platform_device_id *match; | ||
1640 | struct dw_dma_platform_data *pdata; | 1657 | struct dw_dma_platform_data *pdata; |
1641 | struct resource *io; | 1658 | struct resource *io; |
1642 | struct dw_dma *dw; | 1659 | struct dw_dma *dw; |
@@ -1706,7 +1723,7 @@ static int dw_probe(struct platform_device *pdev) | |||
1706 | 1723 | ||
1707 | dw->regs = regs; | 1724 | dw->regs = regs; |
1708 | 1725 | ||
1709 | /* get hardware configuration parameters */ | 1726 | /* Get hardware configuration parameters */ |
1710 | if (autocfg) { | 1727 | if (autocfg) { |
1711 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); | 1728 | max_blk_size = dma_readl(dw, MAX_BLK_SIZE); |
1712 | 1729 | ||
@@ -1720,18 +1737,13 @@ static int dw_probe(struct platform_device *pdev) | |||
1720 | memcpy(dw->data_width, pdata->data_width, 4); | 1737 | memcpy(dw->data_width, pdata->data_width, 4); |
1721 | } | 1738 | } |
1722 | 1739 | ||
1723 | /* Get the base request line if set */ | ||
1724 | match = platform_get_device_id(pdev); | ||
1725 | if (match) | ||
1726 | dw->request_line_base = (unsigned int)match->driver_data; | ||
1727 | |||
1728 | /* Calculate all channel mask before DMA setup */ | 1740 | /* Calculate all channel mask before DMA setup */ |
1729 | dw->all_chan_mask = (1 << nr_channels) - 1; | 1741 | dw->all_chan_mask = (1 << nr_channels) - 1; |
1730 | 1742 | ||
1731 | /* force dma off, just in case */ | 1743 | /* Force dma off, just in case */ |
1732 | dw_dma_off(dw); | 1744 | dw_dma_off(dw); |
1733 | 1745 | ||
1734 | /* disable BLOCK interrupts as well */ | 1746 | /* Disable BLOCK interrupts as well */ |
1735 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 1747 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
1736 | 1748 | ||
1737 | err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, | 1749 | err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, |
@@ -1741,7 +1753,7 @@ static int dw_probe(struct platform_device *pdev) | |||
1741 | 1753 | ||
1742 | platform_set_drvdata(pdev, dw); | 1754 | platform_set_drvdata(pdev, dw); |
1743 | 1755 | ||
1744 | /* create a pool of consistent memory blocks for hardware descriptors */ | 1756 | /* Create a pool of consistent memory blocks for hardware descriptors */ |
1745 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, | 1757 | dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, |
1746 | sizeof(struct dw_desc), 4, 0); | 1758 | sizeof(struct dw_desc), 4, 0); |
1747 | if (!dw->desc_pool) { | 1759 | if (!dw->desc_pool) { |
@@ -1781,8 +1793,9 @@ static int dw_probe(struct platform_device *pdev) | |||
1781 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1793 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1782 | 1794 | ||
1783 | dwc->direction = DMA_TRANS_NONE; | 1795 | dwc->direction = DMA_TRANS_NONE; |
1796 | dwc->request_line = ~0; | ||
1784 | 1797 | ||
1785 | /* hardware configuration */ | 1798 | /* Hardware configuration */ |
1786 | if (autocfg) { | 1799 | if (autocfg) { |
1787 | unsigned int dwc_params; | 1800 | unsigned int dwc_params; |
1788 | 1801 | ||
@@ -1842,12 +1855,15 @@ static int dw_probe(struct platform_device *pdev) | |||
1842 | 1855 | ||
1843 | if (pdev->dev.of_node) { | 1856 | if (pdev->dev.of_node) { |
1844 | err = of_dma_controller_register(pdev->dev.of_node, | 1857 | err = of_dma_controller_register(pdev->dev.of_node, |
1845 | dw_dma_xlate, dw); | 1858 | dw_dma_of_xlate, dw); |
1846 | if (err && err != -ENODEV) | 1859 | if (err) |
1847 | dev_err(&pdev->dev, | 1860 | dev_err(&pdev->dev, |
1848 | "could not register of_dma_controller\n"); | 1861 | "could not register of_dma_controller\n"); |
1849 | } | 1862 | } |
1850 | 1863 | ||
1864 | if (ACPI_HANDLE(&pdev->dev)) | ||
1865 | dw_dma_acpi_controller_register(dw); | ||
1866 | |||
1851 | return 0; | 1867 | return 0; |
1852 | } | 1868 | } |
1853 | 1869 | ||
@@ -1912,18 +1928,19 @@ static const struct dev_pm_ops dw_dev_pm_ops = { | |||
1912 | }; | 1928 | }; |
1913 | 1929 | ||
1914 | #ifdef CONFIG_OF | 1930 | #ifdef CONFIG_OF |
1915 | static const struct of_device_id dw_dma_id_table[] = { | 1931 | static const struct of_device_id dw_dma_of_id_table[] = { |
1916 | { .compatible = "snps,dma-spear1340" }, | 1932 | { .compatible = "snps,dma-spear1340" }, |
1917 | {} | 1933 | {} |
1918 | }; | 1934 | }; |
1919 | MODULE_DEVICE_TABLE(of, dw_dma_id_table); | 1935 | MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); |
1920 | #endif | 1936 | #endif |
1921 | 1937 | ||
1922 | static const struct platform_device_id dw_dma_ids[] = { | 1938 | #ifdef CONFIG_ACPI |
1923 | /* Name, Request Line Base */ | 1939 | static const struct acpi_device_id dw_dma_acpi_id_table[] = { |
1924 | { "INTL9C60", (kernel_ulong_t)16 }, | 1940 | { "INTL9C60", 0 }, |
1925 | { } | 1941 | { } |
1926 | }; | 1942 | }; |
1943 | #endif | ||
1927 | 1944 | ||
1928 | static struct platform_driver dw_driver = { | 1945 | static struct platform_driver dw_driver = { |
1929 | .probe = dw_probe, | 1946 | .probe = dw_probe, |
@@ -1932,9 +1949,9 @@ static struct platform_driver dw_driver = { | |||
1932 | .driver = { | 1949 | .driver = { |
1933 | .name = "dw_dmac", | 1950 | .name = "dw_dmac", |
1934 | .pm = &dw_dev_pm_ops, | 1951 | .pm = &dw_dev_pm_ops, |
1935 | .of_match_table = of_match_ptr(dw_dma_id_table), | 1952 | .of_match_table = of_match_ptr(dw_dma_of_id_table), |
1953 | .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), | ||
1936 | }, | 1954 | }, |
1937 | .id_table = dw_dma_ids, | ||
1938 | }; | 1955 | }; |
1939 | 1956 | ||
1940 | static int __init dw_init(void) | 1957 | static int __init dw_init(void) |
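Note: the hunks above move dw_dmac from a platform id_table carrying a request-line base to proper dual OF/ACPI enumeration. For reference, a minimal sketch of that match-table pattern for a generic platform driver; every name except of_match_ptr() and ACPI_PTR() is illustrative:

	#include <linux/acpi.h>
	#include <linux/mod_devicetable.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	#ifdef CONFIG_OF
	static const struct of_device_id foo_of_ids[] = {
		{ .compatible = "vendor,foo-dma" },	/* hypothetical */
		{ }
	};
	#endif

	#ifdef CONFIG_ACPI
	static const struct acpi_device_id foo_acpi_ids[] = {
		{ "ABCD0001", 0 },			/* hypothetical _HID */
		{ }
	};
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo-dma",
			/* each macro compiles to NULL when that subsystem is off */
			.of_match_table = of_match_ptr(foo_of_ids),
			.acpi_match_table = ACPI_PTR(foo_acpi_ids),
		},
	};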
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 4d02c3669b75..9d417200bd57 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -212,8 +212,11 @@ struct dw_dma_chan { | |||
212 | /* hardware configuration */ | 212 | /* hardware configuration */ |
213 | unsigned int block_size; | 213 | unsigned int block_size; |
214 | bool nollp; | 214 | bool nollp; |
215 | |||
216 | /* custom slave configuration */ | ||
215 | unsigned int request_line; | 217 | unsigned int request_line; |
216 | struct dw_dma_slave slave; | 218 | unsigned char src_master; |
219 | unsigned char dst_master; | ||
217 | 220 | ||
218 | /* configuration passed via DMA_SLAVE_CONFIG */ | 221 | /* configuration passed via DMA_SLAVE_CONFIG */ |
219 | struct dma_slave_config dma_sconfig; | 222 | struct dma_slave_config dma_sconfig; |
@@ -247,7 +250,6 @@ struct dw_dma { | |||
247 | /* hardware configuration */ | 250 | /* hardware configuration */ |
248 | unsigned char nr_masters; | 251 | unsigned char nr_masters; |
249 | unsigned char data_width[4]; | 252 | unsigned char data_width[4]; |
250 | unsigned int request_line_base; | ||
251 | 253 | ||
252 | struct dw_dma_chan chan[0]; | 254 | struct dw_dma_chan chan[0]; |
253 | }; | 255 | }; |
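Note: the new per-channel request_line/src_master/dst_master fields are what an ACPI filter function can match and latch. A minimal sketch of such a filter, assuming acpi_dma_simple_xlate() passes the struct acpi_dma_spec as the filter parameter; the matching policy shown is illustrative, not the driver's exact one:

	static bool foo_acpi_filter(struct dma_chan *chan, void *param)
	{
		struct acpi_dma_spec *dma_spec = param;
		struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

		/* only consider channels of the controller named in the spec */
		if (dma_spec->dev != chan->device->dev)
			return false;

		/* latch the FixedDMA request line for later slave configuration */
		dwc->request_line = dma_spec->slave_id;

		return true;
	}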
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 70b8975d107e..f28583370d00 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -859,8 +859,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
859 | 859 | ||
860 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); | 860 | desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); |
861 | 861 | ||
862 | if (imxdmac->sg_list) | 862 | kfree(imxdmac->sg_list); |
863 | kfree(imxdmac->sg_list); | ||
864 | 863 | ||
865 | imxdmac->sg_list = kcalloc(periods + 1, | 864 | imxdmac->sg_list = kcalloc(periods + 1, |
866 | sizeof(struct scatterlist), GFP_KERNEL); | 865 | sizeof(struct scatterlist), GFP_KERNEL); |
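Note: the hunk above relies on the long-standing guarantee that kfree(NULL) is a no-op, so the explicit NULL check was redundant:

	kfree(imxdmac->sg_list);	/* safe even when sg_list is NULL */
	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);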
@@ -1145,7 +1144,7 @@ err: | |||
1145 | return ret; | 1144 | return ret; |
1146 | } | 1145 | } |
1147 | 1146 | ||
1148 | static int __exit imxdma_remove(struct platform_device *pdev) | 1147 | static int imxdma_remove(struct platform_device *pdev) |
1149 | { | 1148 | { |
1150 | struct imxdma_engine *imxdma = platform_get_drvdata(pdev); | 1149 | struct imxdma_engine *imxdma = platform_get_drvdata(pdev); |
1151 | 1150 | ||
@@ -1162,7 +1161,7 @@ static struct platform_driver imxdma_driver = { | |||
1162 | .name = "imx-dma", | 1161 | .name = "imx-dma", |
1163 | }, | 1162 | }, |
1164 | .id_table = imx_dma_devtype, | 1163 | .id_table = imx_dma_devtype, |
1165 | .remove = __exit_p(imxdma_remove), | 1164 | .remove = imxdma_remove, |
1166 | }; | 1165 | }; |
1167 | 1166 | ||
1168 | static int __init imxdma_module_init(void) | 1167 | static int __init imxdma_module_init(void) |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f082aa3a918c..092867bf795c 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -1462,7 +1462,7 @@ err_irq: | |||
1462 | return ret; | 1462 | return ret; |
1463 | } | 1463 | } |
1464 | 1464 | ||
1465 | static int __exit sdma_remove(struct platform_device *pdev) | 1465 | static int sdma_remove(struct platform_device *pdev) |
1466 | { | 1466 | { |
1467 | return -EBUSY; | 1467 | return -EBUSY; |
1468 | } | 1468 | } |
@@ -1473,7 +1473,7 @@ static struct platform_driver sdma_driver = { | |||
1473 | .of_match_table = sdma_dt_ids, | 1473 | .of_match_table = sdma_dt_ids, |
1474 | }, | 1474 | }, |
1475 | .id_table = sdma_devtypes, | 1475 | .id_table = sdma_devtypes, |
1476 | .remove = __exit_p(sdma_remove), | 1476 | .remove = sdma_remove, |
1477 | }; | 1477 | }; |
1478 | 1478 | ||
1479 | static int __init sdma_module_init(void) | 1479 | static int __init sdma_module_init(void) |
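Note: both imx drivers drop the __exit/__exit_p() annotation because a platform driver's .remove callback can also run on a sysfs-triggered unbind, not only at module exit, so it must not be discarded with the exit section. The corrected pattern, with hypothetical names:

	static int foo_remove(struct platform_device *pdev)
	{
		/* tear down everything acquired in probe */
		return 0;
	}

	static struct platform_driver foo_driver = {
		.remove	= foo_remove,	/* plain pointer, no __exit_p() */
		.driver	= {
			.name = "foo",
		},
	};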
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 1879a5942bfc..17a2393b3e25 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c | |||
@@ -892,7 +892,7 @@ MODULE_PARM_DESC(ioat_interrupt_style, | |||
892 | * ioat_dma_setup_interrupts - setup interrupt handler | 892 | * ioat_dma_setup_interrupts - setup interrupt handler |
893 | * @device: ioat device | 893 | * @device: ioat device |
894 | */ | 894 | */ |
895 | static int ioat_dma_setup_interrupts(struct ioatdma_device *device) | 895 | int ioat_dma_setup_interrupts(struct ioatdma_device *device) |
896 | { | 896 | { |
897 | struct ioat_chan_common *chan; | 897 | struct ioat_chan_common *chan; |
898 | struct pci_dev *pdev = device->pdev; | 898 | struct pci_dev *pdev = device->pdev; |
@@ -941,6 +941,7 @@ msix: | |||
941 | } | 941 | } |
942 | } | 942 | } |
943 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; | 943 | intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; |
944 | device->irq_mode = IOAT_MSIX; | ||
944 | goto done; | 945 | goto done; |
945 | 946 | ||
946 | msix_single_vector: | 947 | msix_single_vector: |
@@ -956,6 +957,7 @@ msix_single_vector: | |||
956 | pci_disable_msix(pdev); | 957 | pci_disable_msix(pdev); |
957 | goto msi; | 958 | goto msi; |
958 | } | 959 | } |
960 | device->irq_mode = IOAT_MSIX_SINGLE; | ||
959 | goto done; | 961 | goto done; |
960 | 962 | ||
961 | msi: | 963 | msi: |
@@ -969,6 +971,7 @@ msi: | |||
969 | pci_disable_msi(pdev); | 971 | pci_disable_msi(pdev); |
970 | goto intx; | 972 | goto intx; |
971 | } | 973 | } |
974 | device->irq_mode = IOAT_MSI; | ||
972 | goto done; | 975 | goto done; |
973 | 976 | ||
974 | intx: | 977 | intx: |
@@ -977,6 +980,7 @@ intx: | |||
977 | if (err) | 980 | if (err) |
978 | goto err_no_irq; | 981 | goto err_no_irq; |
979 | 982 | ||
983 | device->irq_mode = IOAT_INTX; | ||
980 | done: | 984 | done: |
981 | if (device->intr_quirk) | 985 | if (device->intr_quirk) |
982 | device->intr_quirk(device); | 986 | device->intr_quirk(device); |
@@ -987,9 +991,11 @@ done: | |||
987 | err_no_irq: | 991 | err_no_irq: |
988 | /* Disable all interrupt generation */ | 992 | /* Disable all interrupt generation */ |
989 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); | 993 | writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); |
994 | device->irq_mode = IOAT_NOIRQ; | ||
990 | dev_err(dev, "no usable interrupts\n"); | 995 | dev_err(dev, "no usable interrupts\n"); |
991 | return err; | 996 | return err; |
992 | } | 997 | } |
998 | EXPORT_SYMBOL(ioat_dma_setup_interrupts); | ||
993 | 999 | ||
994 | static void ioat_disable_interrupts(struct ioatdma_device *device) | 1000 | static void ioat_disable_interrupts(struct ioatdma_device *device) |
995 | { | 1001 | { |
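Note: recording the interrupt mode lets later teardown mirror exactly what setup did. A condensed sketch of a consumer of the enum introduced above (the full version appears in ioat3_irq_reinit() further down):

	switch (device->irq_mode) {
	case IOAT_MSIX:
		/* free one vector per channel, then pci_disable_msix() */
		break;
	case IOAT_MSI:
		/* free pdev->irq, then pci_disable_msi() */
		break;
	case IOAT_INTX:
		/* free the legacy pdev->irq */
		break;
	default:
		/* IOAT_NOIRQ: nothing to undo */
		break;
	}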
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 53a4cbb78f47..54fb7b9ff9aa 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h | |||
@@ -39,6 +39,7 @@ | |||
39 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) | 39 | #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) |
40 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) | 40 | #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) |
41 | #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) | 41 | #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) |
42 | #define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) | ||
42 | 43 | ||
43 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) | 44 | #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) |
44 | 45 | ||
@@ -48,6 +49,14 @@ | |||
48 | */ | 49 | */ |
49 | #define NULL_DESC_BUFFER_SIZE 1 | 50 | #define NULL_DESC_BUFFER_SIZE 1 |
50 | 51 | ||
52 | enum ioat_irq_mode { | ||
53 | IOAT_NOIRQ = 0, | ||
54 | IOAT_MSIX, | ||
55 | IOAT_MSIX_SINGLE, | ||
56 | IOAT_MSI, | ||
57 | IOAT_INTX | ||
58 | }; | ||
59 | |||
51 | /** | 60 | /** |
52 | * struct ioatdma_device - internal representation of a IOAT device | 61 | * struct ioatdma_device - internal representation of a IOAT device |
53 | * @pdev: PCI-Express device | 62 | * @pdev: PCI-Express device |
@@ -72,11 +81,16 @@ struct ioatdma_device { | |||
72 | void __iomem *reg_base; | 81 | void __iomem *reg_base; |
73 | struct pci_pool *dma_pool; | 82 | struct pci_pool *dma_pool; |
74 | struct pci_pool *completion_pool; | 83 | struct pci_pool *completion_pool; |
84 | #define MAX_SED_POOLS 5 | ||
85 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; | ||
86 | struct kmem_cache *sed_pool; | ||
75 | struct dma_device common; | 87 | struct dma_device common; |
76 | u8 version; | 88 | u8 version; |
77 | struct msix_entry msix_entries[4]; | 89 | struct msix_entry msix_entries[4]; |
78 | struct ioat_chan_common *idx[4]; | 90 | struct ioat_chan_common *idx[4]; |
79 | struct dca_provider *dca; | 91 | struct dca_provider *dca; |
92 | enum ioat_irq_mode irq_mode; | ||
93 | u32 cap; | ||
80 | void (*intr_quirk)(struct ioatdma_device *device); | 94 | void (*intr_quirk)(struct ioatdma_device *device); |
81 | int (*enumerate_channels)(struct ioatdma_device *device); | 95 | int (*enumerate_channels)(struct ioatdma_device *device); |
82 | int (*reset_hw)(struct ioat_chan_common *chan); | 96 | int (*reset_hw)(struct ioat_chan_common *chan); |
@@ -131,6 +145,20 @@ struct ioat_dma_chan { | |||
131 | u16 active; | 145 | u16 active; |
132 | }; | 146 | }; |
133 | 147 | ||
148 | /** | ||
149 | * struct ioat_sed_ent - wrapper around super extended hardware descriptor | ||
150 | * @hw: hardware SED | ||
151 | * @sed_dma: dma address for the SED | ||
152 | * @list: list member | ||
153 | * @parent: point to the dma descriptor that's the parent | ||
154 | */ | ||
155 | struct ioat_sed_ent { | ||
156 | struct ioat_sed_raw_descriptor *hw; | ||
157 | dma_addr_t dma; | ||
158 | struct ioat_ring_ent *parent; | ||
159 | unsigned int hw_pool; | ||
160 | }; | ||
161 | |||
134 | static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) | 162 | static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) |
135 | { | 163 | { |
136 | return container_of(c, struct ioat_chan_common, common); | 164 | return container_of(c, struct ioat_chan_common, common); |
@@ -179,7 +207,7 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, | |||
179 | struct device *dev = to_dev(chan); | 207 | struct device *dev = to_dev(chan); |
180 | 208 | ||
181 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" | 209 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" |
182 | " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, | 210 | " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, |
183 | (unsigned long long) tx->phys, | 211 | (unsigned long long) tx->phys, |
184 | (unsigned long long) hw->next, tx->cookie, tx->flags, | 212 | (unsigned long long) hw->next, tx->cookie, tx->flags, |
185 | hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); | 213 | hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); |
@@ -201,7 +229,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) | |||
201 | return device->idx[index]; | 229 | return device->idx[index]; |
202 | } | 230 | } |
203 | 231 | ||
204 | static inline u64 ioat_chansts(struct ioat_chan_common *chan) | 232 | static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) |
205 | { | 233 | { |
206 | u8 ver = chan->device->version; | 234 | u8 ver = chan->device->version; |
207 | u64 status; | 235 | u64 status; |
@@ -218,6 +246,26 @@ static inline u64 ioat_chansts(struct ioat_chan_common *chan) | |||
218 | return status; | 246 | return status; |
219 | } | 247 | } |
220 | 248 | ||
249 | #if BITS_PER_LONG == 64 | ||
250 | |||
251 | static inline u64 ioat_chansts(struct ioat_chan_common *chan) | ||
252 | { | ||
253 | u8 ver = chan->device->version; | ||
254 | u64 status; | ||
255 | |||
256 | /* With IOAT v3.3 the status register is 64bit. */ | ||
257 | if (ver >= IOAT_VER_3_3) | ||
258 | status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); | ||
259 | else | ||
260 | status = ioat_chansts_32(chan); | ||
261 | |||
262 | return status; | ||
263 | } | ||
264 | |||
265 | #else | ||
266 | #define ioat_chansts ioat_chansts_32 | ||
267 | #endif | ||
268 | |||
221 | static inline void ioat_start(struct ioat_chan_common *chan) | 269 | static inline void ioat_start(struct ioat_chan_common *chan) |
222 | { | 270 | { |
223 | u8 ver = chan->device->version; | 271 | u8 ver = chan->device->version; |
@@ -321,6 +369,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan, | |||
321 | dma_addr_t *phys_complete); | 369 | dma_addr_t *phys_complete); |
322 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); | 370 | void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); |
323 | void ioat_kobject_del(struct ioatdma_device *device); | 371 | void ioat_kobject_del(struct ioatdma_device *device); |
372 | int ioat_dma_setup_interrupts(struct ioatdma_device *device); | ||
324 | extern const struct sysfs_ops ioat_sysfs_ops; | 373 | extern const struct sysfs_ops ioat_sysfs_ops; |
325 | extern struct ioat_sysfs_entry ioat_version_attr; | 374 | extern struct ioat_sysfs_entry ioat_version_attr; |
326 | extern struct ioat_sysfs_entry ioat_cap_attr; | 375 | extern struct ioat_sysfs_entry ioat_cap_attr; |
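Note: with the split above, callers keep using ioat_chansts() unchanged; on 64-bit kernels driving IOAT v3.3+ it becomes a single readq() of the now-64-bit status register instead of two 32-bit reads. A sketched call site using helpers that appear elsewhere in this driver:

	u64 status = ioat_chansts(chan);

	if (is_ioat_halted(status)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
		/* inspect chanerr and decide whether the halt is recoverable */
	}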
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index e100f644e344..29bf9448035d 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h | |||
@@ -137,6 +137,7 @@ struct ioat_ring_ent { | |||
137 | #ifdef DEBUG | 137 | #ifdef DEBUG |
138 | int id; | 138 | int id; |
139 | #endif | 139 | #endif |
140 | struct ioat_sed_ent *sed; | ||
140 | }; | 141 | }; |
141 | 142 | ||
142 | static inline struct ioat_ring_ent * | 143 | static inline struct ioat_ring_ent * |
@@ -157,6 +158,7 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) | |||
157 | 158 | ||
158 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); | 159 | int ioat2_dma_probe(struct ioatdma_device *dev, int dca); |
159 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); | 160 | int ioat3_dma_probe(struct ioatdma_device *dev, int dca); |
161 | void ioat3_dma_remove(struct ioatdma_device *dev); | ||
160 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 162 | struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
161 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); | 163 | struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); |
162 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); | 164 | int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); |
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index e8336cce360b..ca6ea9b3551b 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c | |||
@@ -55,7 +55,7 @@ | |||
55 | /* | 55 | /* |
56 | * Support routines for v3+ hardware | 56 | * Support routines for v3+ hardware |
57 | */ | 57 | */ |
58 | 58 | #include <linux/module.h> | |
59 | #include <linux/pci.h> | 59 | #include <linux/pci.h> |
60 | #include <linux/gfp.h> | 60 | #include <linux/gfp.h> |
61 | #include <linux/dmaengine.h> | 61 | #include <linux/dmaengine.h> |
@@ -70,6 +70,10 @@ | |||
70 | /* ioat hardware assumes at least two sources for raid operations */ | 70 | /* ioat hardware assumes at least two sources for raid operations */ |
71 | #define src_cnt_to_sw(x) ((x) + 2) | 71 | #define src_cnt_to_sw(x) ((x) + 2) |
72 | #define src_cnt_to_hw(x) ((x) - 2) | 72 | #define src_cnt_to_hw(x) ((x) - 2) |
73 | #define ndest_to_sw(x) ((x) + 1) | ||
74 | #define ndest_to_hw(x) ((x) - 1) | ||
75 | #define src16_cnt_to_sw(x) ((x) + 9) | ||
76 | #define src16_cnt_to_hw(x) ((x) - 9) | ||
73 | 77 | ||
74 | /* provide a lookup table for setting the source address in the base or | 78 | /* provide a lookup table for setting the source address in the base or |
75 | * extended descriptor of an xor or pq descriptor | 79 | * extended descriptor of an xor or pq descriptor |
@@ -77,7 +81,20 @@ | |||
77 | static const u8 xor_idx_to_desc = 0xe0; | 81 | static const u8 xor_idx_to_desc = 0xe0; |
78 | static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; | 82 | static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; |
79 | static const u8 pq_idx_to_desc = 0xf8; | 83 | static const u8 pq_idx_to_desc = 0xf8; |
84 | static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, | ||
85 | 2, 2, 2, 2, 2, 2, 2 }; | ||
80 | static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; | 86 | static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; |
87 | static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, | ||
88 | 0, 1, 2, 3, 4, 5, 6 }; | ||
89 | |||
90 | /* | ||
91 | * technically sources 1 and 2 do not require SED, but the op will | ||
92 | * always have at least 9 sources, so a SED is allocated regardless. | ||
93 | */ | ||
94 | static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
95 | 1, 1, 1, 1, 1, 1, 1 }; | ||
96 | |||
97 | static void ioat3_eh(struct ioat2_dma_chan *ioat); | ||
81 | 98 | ||
82 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) | 99 | static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) |
83 | { | 100 | { |
@@ -101,6 +118,13 @@ static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) | |||
101 | return raw->field[pq_idx_to_field[idx]]; | 118 | return raw->field[pq_idx_to_field[idx]]; |
102 | } | 119 | } |
103 | 120 | ||
121 | static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) | ||
122 | { | ||
123 | struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; | ||
124 | |||
125 | return raw->field[pq16_idx_to_field[idx]]; | ||
126 | } | ||
127 | |||
104 | static void pq_set_src(struct ioat_raw_descriptor *descs[2], | 128 | static void pq_set_src(struct ioat_raw_descriptor *descs[2], |
105 | dma_addr_t addr, u32 offset, u8 coef, int idx) | 129 | dma_addr_t addr, u32 offset, u8 coef, int idx) |
106 | { | 130 | { |
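Note: a worked example of the indirection the two lookup tables implement. Source index 10 lives in the second SED block (descs[2]) at field 1, while source index 0 still lives in the base PQ descriptor:

	/*
	 * pq16_get_src(descs, 10):
	 *   pq16_idx_to_desc[10]  == 2  ->  third raw descriptor (SED + 64)
	 *   pq16_idx_to_field[10] == 1  ->  raw->field[1]
	 *
	 * pq16_get_src(descs, 0):
	 *   pq16_idx_to_desc[0]   == 0  ->  base PQ descriptor
	 *   pq16_idx_to_field[0]  == 1  ->  raw->field[1]
	 */
	dma_addr_t src10 = pq16_get_src(descs, 10);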
@@ -111,6 +135,167 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], | |||
111 | pq->coef[idx] = coef; | 135 | pq->coef[idx] = coef; |
112 | } | 136 | } |
113 | 137 | ||
138 | static int sed_get_pq16_pool_idx(int src_cnt) | ||
139 | { | ||
140 | |||
141 | return pq16_idx_to_sed[src_cnt]; | ||
142 | } | ||
143 | |||
144 | static bool is_jf_ioat(struct pci_dev *pdev) | ||
145 | { | ||
146 | switch (pdev->device) { | ||
147 | case PCI_DEVICE_ID_INTEL_IOAT_JSF0: | ||
148 | case PCI_DEVICE_ID_INTEL_IOAT_JSF1: | ||
149 | case PCI_DEVICE_ID_INTEL_IOAT_JSF2: | ||
150 | case PCI_DEVICE_ID_INTEL_IOAT_JSF3: | ||
151 | case PCI_DEVICE_ID_INTEL_IOAT_JSF4: | ||
152 | case PCI_DEVICE_ID_INTEL_IOAT_JSF5: | ||
153 | case PCI_DEVICE_ID_INTEL_IOAT_JSF6: | ||
154 | case PCI_DEVICE_ID_INTEL_IOAT_JSF7: | ||
155 | case PCI_DEVICE_ID_INTEL_IOAT_JSF8: | ||
156 | case PCI_DEVICE_ID_INTEL_IOAT_JSF9: | ||
157 | return true; | ||
158 | default: | ||
159 | return false; | ||
160 | } | ||
161 | } | ||
162 | |||
163 | static bool is_snb_ioat(struct pci_dev *pdev) | ||
164 | { | ||
165 | switch (pdev->device) { | ||
166 | case PCI_DEVICE_ID_INTEL_IOAT_SNB0: | ||
167 | case PCI_DEVICE_ID_INTEL_IOAT_SNB1: | ||
168 | case PCI_DEVICE_ID_INTEL_IOAT_SNB2: | ||
169 | case PCI_DEVICE_ID_INTEL_IOAT_SNB3: | ||
170 | case PCI_DEVICE_ID_INTEL_IOAT_SNB4: | ||
171 | case PCI_DEVICE_ID_INTEL_IOAT_SNB5: | ||
172 | case PCI_DEVICE_ID_INTEL_IOAT_SNB6: | ||
173 | case PCI_DEVICE_ID_INTEL_IOAT_SNB7: | ||
174 | case PCI_DEVICE_ID_INTEL_IOAT_SNB8: | ||
175 | case PCI_DEVICE_ID_INTEL_IOAT_SNB9: | ||
176 | return true; | ||
177 | default: | ||
178 | return false; | ||
179 | } | ||
180 | } | ||
181 | |||
182 | static bool is_ivb_ioat(struct pci_dev *pdev) | ||
183 | { | ||
184 | switch (pdev->device) { | ||
185 | case PCI_DEVICE_ID_INTEL_IOAT_IVB0: | ||
186 | case PCI_DEVICE_ID_INTEL_IOAT_IVB1: | ||
187 | case PCI_DEVICE_ID_INTEL_IOAT_IVB2: | ||
188 | case PCI_DEVICE_ID_INTEL_IOAT_IVB3: | ||
189 | case PCI_DEVICE_ID_INTEL_IOAT_IVB4: | ||
190 | case PCI_DEVICE_ID_INTEL_IOAT_IVB5: | ||
191 | case PCI_DEVICE_ID_INTEL_IOAT_IVB6: | ||
192 | case PCI_DEVICE_ID_INTEL_IOAT_IVB7: | ||
193 | case PCI_DEVICE_ID_INTEL_IOAT_IVB8: | ||
194 | case PCI_DEVICE_ID_INTEL_IOAT_IVB9: | ||
195 | return true; | ||
196 | default: | ||
197 | return false; | ||
198 | } | ||
199 | |||
200 | } | ||
201 | |||
202 | static bool is_hsw_ioat(struct pci_dev *pdev) | ||
203 | { | ||
204 | switch (pdev->device) { | ||
205 | case PCI_DEVICE_ID_INTEL_IOAT_HSW0: | ||
206 | case PCI_DEVICE_ID_INTEL_IOAT_HSW1: | ||
207 | case PCI_DEVICE_ID_INTEL_IOAT_HSW2: | ||
208 | case PCI_DEVICE_ID_INTEL_IOAT_HSW3: | ||
209 | case PCI_DEVICE_ID_INTEL_IOAT_HSW4: | ||
210 | case PCI_DEVICE_ID_INTEL_IOAT_HSW5: | ||
211 | case PCI_DEVICE_ID_INTEL_IOAT_HSW6: | ||
212 | case PCI_DEVICE_ID_INTEL_IOAT_HSW7: | ||
213 | case PCI_DEVICE_ID_INTEL_IOAT_HSW8: | ||
214 | case PCI_DEVICE_ID_INTEL_IOAT_HSW9: | ||
215 | return true; | ||
216 | default: | ||
217 | return false; | ||
218 | } | ||
219 | |||
220 | } | ||
221 | |||
222 | static bool is_xeon_cb32(struct pci_dev *pdev) | ||
223 | { | ||
224 | return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || | ||
225 | is_hsw_ioat(pdev); | ||
226 | } | ||
227 | |||
228 | static bool is_bwd_ioat(struct pci_dev *pdev) | ||
229 | { | ||
230 | switch (pdev->device) { | ||
231 | case PCI_DEVICE_ID_INTEL_IOAT_BWD0: | ||
232 | case PCI_DEVICE_ID_INTEL_IOAT_BWD1: | ||
233 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | ||
234 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | ||
235 | return true; | ||
236 | default: | ||
237 | return false; | ||
238 | } | ||
239 | } | ||
240 | |||
241 | static bool is_bwd_noraid(struct pci_dev *pdev) | ||
242 | { | ||
243 | switch (pdev->device) { | ||
244 | case PCI_DEVICE_ID_INTEL_IOAT_BWD2: | ||
245 | case PCI_DEVICE_ID_INTEL_IOAT_BWD3: | ||
246 | return true; | ||
247 | default: | ||
248 | return false; | ||
249 | } | ||
250 | |||
251 | } | ||
252 | |||
253 | static void pq16_set_src(struct ioat_raw_descriptor *desc[3], | ||
254 | dma_addr_t addr, u32 offset, u8 coef, int idx) | ||
255 | { | ||
256 | struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; | ||
257 | struct ioat_pq16a_descriptor *pq16 = | ||
258 | (struct ioat_pq16a_descriptor *)desc[1]; | ||
259 | struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; | ||
260 | |||
261 | raw->field[pq16_idx_to_field[idx]] = addr + offset; | ||
262 | |||
263 | if (idx < 8) | ||
264 | pq->coef[idx] = coef; | ||
265 | else | ||
266 | pq16->coef[idx - 8] = coef; | ||
267 | } | ||
268 | |||
269 | static struct ioat_sed_ent * | ||
270 | ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) | ||
271 | { | ||
272 | struct ioat_sed_ent *sed; | ||
273 | gfp_t flags = __GFP_ZERO | GFP_ATOMIC; | ||
274 | |||
275 | sed = kmem_cache_alloc(device->sed_pool, flags); | ||
276 | if (!sed) | ||
277 | return NULL; | ||
278 | |||
279 | sed->hw_pool = hw_pool; | ||
280 | sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], | ||
281 | flags, &sed->dma); | ||
282 | if (!sed->hw) { | ||
283 | kmem_cache_free(device->sed_pool, sed); | ||
284 | return NULL; | ||
285 | } | ||
286 | |||
287 | return sed; | ||
288 | } | ||
289 | |||
290 | static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) | ||
291 | { | ||
292 | if (!sed) | ||
293 | return; | ||
294 | |||
295 | dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); | ||
296 | kmem_cache_free(device->sed_pool, sed); | ||
297 | } | ||
298 | |||
114 | static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, | 299 | static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, |
115 | struct ioat_ring_ent *desc, int idx) | 300 | struct ioat_ring_ent *desc, int idx) |
116 | { | 301 | { |
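Note: the SED entries use a two-level allocation, a kmem_cache for the wrapper plus a dma_pool for the hardware block, so alloc and free must stay paired. A minimal usage sketch following the functions above:

	struct ioat_sed_ent *sed;

	sed = ioat3_alloc_sed(device, sed_get_pq16_pool_idx(src_cnt));
	if (!sed)
		return NULL;		/* GFP_ATOMIC allocation may fail */

	pq->sed_addr = sed->dma;	/* point the descriptor at the SED */
	sed->parent = desc;
	desc->sed = sed;

	/* ... later, in __cleanup(), once the descriptor has completed: */
	ioat3_free_sed(device, desc->sed);
	desc->sed = NULL;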
@@ -223,6 +408,54 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, | |||
223 | } | 408 | } |
224 | break; | 409 | break; |
225 | } | 410 | } |
411 | case IOAT_OP_PQ_16S: | ||
412 | case IOAT_OP_PQ_VAL_16S: { | ||
413 | struct ioat_pq_descriptor *pq = desc->pq; | ||
414 | int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); | ||
415 | struct ioat_raw_descriptor *descs[4]; | ||
416 | int i; | ||
417 | |||
418 | /* in the 'continue' case don't unmap the dests as sources */ | ||
419 | if (dmaf_p_disabled_continue(flags)) | ||
420 | src_cnt--; | ||
421 | else if (dmaf_continue(flags)) | ||
422 | src_cnt -= 3; | ||
423 | |||
424 | if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
425 | descs[0] = (struct ioat_raw_descriptor *)pq; | ||
426 | descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); | ||
427 | descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); | ||
428 | for (i = 0; i < src_cnt; i++) { | ||
429 | dma_addr_t src = pq16_get_src(descs, i); | ||
430 | |||
431 | ioat_unmap(pdev, src - offset, len, | ||
432 | PCI_DMA_TODEVICE, flags, 0); | ||
433 | } | ||
434 | |||
435 | /* the dests are sources in pq validate operations */ | ||
436 | if (pq->ctl_f.op == IOAT_OP_PQ_VAL_16S) { | ||
437 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
438 | ioat_unmap(pdev, pq->p_addr - offset, | ||
439 | len, PCI_DMA_TODEVICE, | ||
440 | flags, 0); | ||
441 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
442 | ioat_unmap(pdev, pq->q_addr - offset, | ||
443 | len, PCI_DMA_TODEVICE, | ||
444 | flags, 0); | ||
445 | break; | ||
446 | } | ||
447 | } | ||
448 | |||
449 | if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
450 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | ||
451 | ioat_unmap(pdev, pq->p_addr - offset, len, | ||
452 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
453 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | ||
454 | ioat_unmap(pdev, pq->q_addr - offset, len, | ||
455 | PCI_DMA_BIDIRECTIONAL, flags, 1); | ||
456 | } | ||
457 | break; | ||
458 | } | ||
226 | default: | 459 | default: |
227 | dev_err(&pdev->dev, "%s: unknown op type: %#x\n", | 460 | dev_err(&pdev->dev, "%s: unknown op type: %#x\n", |
228 | __func__, desc->hw->ctl_f.op); | 461 | __func__, desc->hw->ctl_f.op); |
@@ -250,6 +483,63 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) | |||
250 | return false; | 483 | return false; |
251 | } | 484 | } |
252 | 485 | ||
486 | static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) | ||
487 | { | ||
488 | u64 phys_complete; | ||
489 | u64 completion; | ||
490 | |||
491 | completion = *chan->completion; | ||
492 | phys_complete = ioat_chansts_to_addr(completion); | ||
493 | |||
494 | dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, | ||
495 | (unsigned long long) phys_complete); | ||
496 | |||
497 | return phys_complete; | ||
498 | } | ||
499 | |||
500 | static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, | ||
501 | u64 *phys_complete) | ||
502 | { | ||
503 | *phys_complete = ioat3_get_current_completion(chan); | ||
504 | if (*phys_complete == chan->last_completion) | ||
505 | return false; | ||
506 | |||
507 | clear_bit(IOAT_COMPLETION_ACK, &chan->state); | ||
508 | mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); | ||
509 | |||
510 | return true; | ||
511 | } | ||
512 | |||
513 | static void | ||
514 | desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) | ||
515 | { | ||
516 | struct ioat_dma_descriptor *hw = desc->hw; | ||
517 | |||
518 | switch (hw->ctl_f.op) { | ||
519 | case IOAT_OP_PQ_VAL: | ||
520 | case IOAT_OP_PQ_VAL_16S: | ||
521 | { | ||
522 | struct ioat_pq_descriptor *pq = desc->pq; | ||
523 | |||
524 | /* check if there's error written */ | ||
525 | if (!pq->dwbes_f.wbes) | ||
526 | return; | ||
527 | |||
528 | /* TODO: latch a chanerr value here so it can be cleared later */ | ||
529 | |||
530 | if (pq->dwbes_f.p_val_err) | ||
531 | *desc->result |= SUM_CHECK_P_RESULT; | ||
532 | |||
533 | if (pq->dwbes_f.q_val_err) | ||
534 | *desc->result |= SUM_CHECK_Q_RESULT; | ||
535 | |||
536 | return; | ||
537 | } | ||
538 | default: | ||
539 | return; | ||
540 | } | ||
541 | } | ||
542 | |||
253 | /** | 543 | /** |
254 | * __cleanup - reclaim used descriptors | 544 | * __cleanup - reclaim used descriptors |
255 | * @ioat: channel (ring) to clean | 545 | * @ioat: channel (ring) to clean |
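Note: with descriptor write back error status (DWBES) the validate result lands directly in *desc->result; from a client's perspective nothing changes. A sketch of the client side, omitting submission and completion-wait details:

	enum sum_check_flags pqres = 0;
	struct dma_async_tx_descriptor *tx;

	tx = dma->device_prep_dma_pq_val(chan, pq, src, src_cnt, scf,
					 len, &pqres, flags);
	/* ... submit tx, then wait for completion ... */

	if (pqres & SUM_CHECK_P_RESULT)
		/* P (parity) did not verify */;
	if (pqres & SUM_CHECK_Q_RESULT)
		/* Q (RS syndrome) did not verify */;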
@@ -260,6 +550,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) | |||
260 | static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | 550 | static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) |
261 | { | 551 | { |
262 | struct ioat_chan_common *chan = &ioat->base; | 552 | struct ioat_chan_common *chan = &ioat->base; |
553 | struct ioatdma_device *device = chan->device; | ||
263 | struct ioat_ring_ent *desc; | 554 | struct ioat_ring_ent *desc; |
264 | bool seen_current = false; | 555 | bool seen_current = false; |
265 | int idx = ioat->tail, i; | 556 | int idx = ioat->tail, i; |
@@ -268,6 +559,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
268 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", | 559 | dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", |
269 | __func__, ioat->head, ioat->tail, ioat->issued); | 560 | __func__, ioat->head, ioat->tail, ioat->issued); |
270 | 561 | ||
562 | /* | ||
563 | * At restart of the channel, the completion address and the | ||
564 | * channel status will be 0 due to starting a new chain. Since | ||
565 | * it's new chain and the first descriptor "fails", there is | ||
566 | * nothing to clean up. We do not want to reap the entire submitted | ||
567 | * chain due to this 0 address value and then BUG. | ||
568 | */ | ||
569 | if (!phys_complete) | ||
570 | return; | ||
571 | |||
271 | active = ioat2_ring_active(ioat); | 572 | active = ioat2_ring_active(ioat); |
272 | for (i = 0; i < active && !seen_current; i++) { | 573 | for (i = 0; i < active && !seen_current; i++) { |
273 | struct dma_async_tx_descriptor *tx; | 574 | struct dma_async_tx_descriptor *tx; |
@@ -276,6 +577,11 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
276 | prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); | 577 | prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); |
277 | desc = ioat2_get_ring_ent(ioat, idx + i); | 578 | desc = ioat2_get_ring_ent(ioat, idx + i); |
278 | dump_desc_dbg(ioat, desc); | 579 | dump_desc_dbg(ioat, desc); |
580 | |||
581 | /* set err stat if we are using dwbes */ | ||
582 | if (device->cap & IOAT_CAP_DWBES) | ||
583 | desc_get_errstat(ioat, desc); | ||
584 | |||
279 | tx = &desc->txd; | 585 | tx = &desc->txd; |
280 | if (tx->cookie) { | 586 | if (tx->cookie) { |
281 | dma_cookie_complete(tx); | 587 | dma_cookie_complete(tx); |
@@ -294,6 +600,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
294 | BUG_ON(i + 1 >= active); | 600 | BUG_ON(i + 1 >= active); |
295 | i++; | 601 | i++; |
296 | } | 602 | } |
603 | |||
604 | /* cleanup super extended descriptors */ | ||
605 | if (desc->sed) { | ||
606 | ioat3_free_sed(device, desc->sed); | ||
607 | desc->sed = NULL; | ||
608 | } | ||
297 | } | 609 | } |
298 | smp_mb(); /* finish all descriptor reads before incrementing tail */ | 610 | smp_mb(); /* finish all descriptor reads before incrementing tail */ |
299 | ioat->tail = idx + i; | 611 | ioat->tail = idx + i; |
@@ -314,11 +626,22 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) | |||
314 | static void ioat3_cleanup(struct ioat2_dma_chan *ioat) | 626 | static void ioat3_cleanup(struct ioat2_dma_chan *ioat) |
315 | { | 627 | { |
316 | struct ioat_chan_common *chan = &ioat->base; | 628 | struct ioat_chan_common *chan = &ioat->base; |
317 | dma_addr_t phys_complete; | 629 | u64 phys_complete; |
318 | 630 | ||
319 | spin_lock_bh(&chan->cleanup_lock); | 631 | spin_lock_bh(&chan->cleanup_lock); |
320 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 632 | |
633 | if (ioat3_cleanup_preamble(chan, &phys_complete)) | ||
321 | __cleanup(ioat, phys_complete); | 634 | __cleanup(ioat, phys_complete); |
635 | |||
636 | if (is_ioat_halted(*chan->completion)) { | ||
637 | u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
638 | |||
639 | if (chanerr & IOAT_CHANERR_HANDLE_MASK) { | ||
640 | mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); | ||
641 | ioat3_eh(ioat); | ||
642 | } | ||
643 | } | ||
644 | |||
322 | spin_unlock_bh(&chan->cleanup_lock); | 645 | spin_unlock_bh(&chan->cleanup_lock); |
323 | } | 646 | } |
324 | 647 | ||
@@ -333,15 +656,78 @@ static void ioat3_cleanup_event(unsigned long data) | |||
333 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) | 656 | static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) |
334 | { | 657 | { |
335 | struct ioat_chan_common *chan = &ioat->base; | 658 | struct ioat_chan_common *chan = &ioat->base; |
336 | dma_addr_t phys_complete; | 659 | u64 phys_complete; |
337 | 660 | ||
338 | ioat2_quiesce(chan, 0); | 661 | ioat2_quiesce(chan, 0); |
339 | if (ioat_cleanup_preamble(chan, &phys_complete)) | 662 | if (ioat3_cleanup_preamble(chan, &phys_complete)) |
340 | __cleanup(ioat, phys_complete); | 663 | __cleanup(ioat, phys_complete); |
341 | 664 | ||
342 | __ioat2_restart_chan(ioat); | 665 | __ioat2_restart_chan(ioat); |
343 | } | 666 | } |
344 | 667 | ||
668 | static void ioat3_eh(struct ioat2_dma_chan *ioat) | ||
669 | { | ||
670 | struct ioat_chan_common *chan = &ioat->base; | ||
671 | struct pci_dev *pdev = to_pdev(chan); | ||
672 | struct ioat_dma_descriptor *hw; | ||
673 | u64 phys_complete; | ||
674 | struct ioat_ring_ent *desc; | ||
675 | u32 err_handled = 0; | ||
676 | u32 chanerr_int; | ||
677 | u32 chanerr; | ||
678 | |||
679 | /* cleanup so tail points to descriptor that caused the error */ | ||
680 | if (ioat3_cleanup_preamble(chan, &phys_complete)) | ||
681 | __cleanup(ioat, phys_complete); | ||
682 | |||
683 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | ||
684 | pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); | ||
685 | |||
686 | dev_dbg(to_dev(chan), "%s: error = %x:%x\n", | ||
687 | __func__, chanerr, chanerr_int); | ||
688 | |||
689 | desc = ioat2_get_ring_ent(ioat, ioat->tail); | ||
690 | hw = desc->hw; | ||
691 | dump_desc_dbg(ioat, desc); | ||
692 | |||
693 | switch (hw->ctl_f.op) { | ||
694 | case IOAT_OP_XOR_VAL: | ||
695 | if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { | ||
696 | *desc->result |= SUM_CHECK_P_RESULT; | ||
697 | err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; | ||
698 | } | ||
699 | break; | ||
700 | case IOAT_OP_PQ_VAL: | ||
701 | case IOAT_OP_PQ_VAL_16S: | ||
702 | if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { | ||
703 | *desc->result |= SUM_CHECK_P_RESULT; | ||
704 | err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; | ||
705 | } | ||
706 | if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { | ||
707 | *desc->result |= SUM_CHECK_Q_RESULT; | ||
708 | err_handled |= IOAT_CHANERR_XOR_Q_ERR; | ||
709 | } | ||
710 | break; | ||
711 | } | ||
712 | |||
713 | /* fault on unhandled error or spurious halt */ | ||
714 | if (chanerr ^ err_handled || chanerr == 0) { | ||
715 | dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", | ||
716 | __func__, chanerr, err_handled); | ||
717 | BUG(); | ||
718 | } | ||
719 | |||
720 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | ||
721 | pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); | ||
722 | |||
723 | /* mark faulting descriptor as complete */ | ||
724 | *chan->completion = desc->txd.phys; | ||
725 | |||
726 | spin_lock_bh(&ioat->prep_lock); | ||
727 | ioat3_restart_channel(ioat); | ||
728 | spin_unlock_bh(&ioat->prep_lock); | ||
729 | } | ||
730 | |||
345 | static void check_active(struct ioat2_dma_chan *ioat) | 731 | static void check_active(struct ioat2_dma_chan *ioat) |
346 | { | 732 | { |
347 | struct ioat_chan_common *chan = &ioat->base; | 733 | struct ioat_chan_common *chan = &ioat->base; |
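Note: ioat3_eh() only recovers from the error bits it knows how to fold into a validate result; anything else is treated as fatal. The gate in ioat3_cleanup() uses a mask that presumably covers exactly the two handled bits; the definition below is an assumption (it lives in the driver's registers header, not shown in this patch):

	/* assumed definition, from drivers/dma/ioat/registers.h */
	#define IOAT_CHANERR_HANDLE_MASK \
		(IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR)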
@@ -605,7 +991,8 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct | |||
605 | int i; | 991 | int i; |
606 | 992 | ||
607 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | 993 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" |
608 | " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", | 994 | " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" |
995 | " src_cnt: %d)\n", | ||
609 | desc_id(desc), (unsigned long long) desc->txd.phys, | 996 | desc_id(desc), (unsigned long long) desc->txd.phys, |
610 | (unsigned long long) (pq_ex ? pq_ex->next : pq->next), | 997 | (unsigned long long) (pq_ex ? pq_ex->next : pq->next), |
611 | desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, | 998 | desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, |
@@ -617,6 +1004,42 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct | |||
617 | (unsigned long long) pq_get_src(descs, i), pq->coef[i]); | 1004 | (unsigned long long) pq_get_src(descs, i), pq->coef[i]); |
618 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | 1005 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); |
619 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | 1006 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); |
1007 | dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); | ||
1008 | } | ||
1009 | |||
1010 | static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, | ||
1011 | struct ioat_ring_ent *desc) | ||
1012 | { | ||
1013 | struct device *dev = to_dev(&ioat->base); | ||
1014 | struct ioat_pq_descriptor *pq = desc->pq; | ||
1015 | struct ioat_raw_descriptor *descs[] = { (void *)pq, | ||
1016 | (void *)pq, | ||
1017 | (void *)pq }; | ||
1018 | int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); | ||
1019 | int i; | ||
1020 | |||
1021 | if (desc->sed) { | ||
1022 | descs[1] = (void *)desc->sed->hw; | ||
1023 | descs[2] = (void *)desc->sed->hw + 64; | ||
1024 | } | ||
1025 | |||
1026 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" | ||
1027 | " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" | ||
1028 | " src_cnt: %d)\n", | ||
1029 | desc_id(desc), (unsigned long long) desc->txd.phys, | ||
1030 | (unsigned long long) pq->next, | ||
1031 | desc->txd.flags, pq->size, pq->ctl, | ||
1032 | pq->ctl_f.op, pq->ctl_f.int_en, | ||
1033 | pq->ctl_f.compl_write, | ||
1034 | pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", | ||
1035 | pq->ctl_f.src_cnt); | ||
1036 | for (i = 0; i < src_cnt; i++) { | ||
1037 | dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, | ||
1038 | (unsigned long long) pq16_get_src(descs, i), | ||
1039 | pq->coef[i]); | ||
1040 | } | ||
1041 | dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); | ||
1042 | dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); | ||
620 | } | 1043 | } |
621 | 1044 | ||
622 | static struct dma_async_tx_descriptor * | 1045 | static struct dma_async_tx_descriptor * |
@@ -627,6 +1050,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
627 | { | 1050 | { |
628 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | 1051 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); |
629 | struct ioat_chan_common *chan = &ioat->base; | 1052 | struct ioat_chan_common *chan = &ioat->base; |
1053 | struct ioatdma_device *device = chan->device; | ||
630 | struct ioat_ring_ent *compl_desc; | 1054 | struct ioat_ring_ent *compl_desc; |
631 | struct ioat_ring_ent *desc; | 1055 | struct ioat_ring_ent *desc; |
632 | struct ioat_ring_ent *ext; | 1056 | struct ioat_ring_ent *ext; |
@@ -637,6 +1061,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
637 | u32 offset = 0; | 1061 | u32 offset = 0; |
638 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; | 1062 | u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; |
639 | int i, s, idx, with_ext, num_descs; | 1063 | int i, s, idx, with_ext, num_descs; |
1064 | int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0; | ||
640 | 1065 | ||
641 | dev_dbg(to_dev(chan), "%s\n", __func__); | 1066 | dev_dbg(to_dev(chan), "%s\n", __func__); |
642 | /* the engine requires at least two sources (we provide | 1067 | /* the engine requires at least two sources (we provide |
@@ -662,7 +1087,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
662 | * order. | 1087 | * order. |
663 | */ | 1088 | */ |
664 | if (likely(num_descs) && | 1089 | if (likely(num_descs) && |
665 | ioat2_check_space_lock(ioat, num_descs+1) == 0) | 1090 | ioat2_check_space_lock(ioat, num_descs + cb32) == 0) |
666 | idx = ioat->head; | 1091 | idx = ioat->head; |
667 | else | 1092 | else |
668 | return NULL; | 1093 | return NULL; |
@@ -700,6 +1125,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
700 | pq->q_addr = dst[1] + offset; | 1125 | pq->q_addr = dst[1] + offset; |
701 | pq->ctl = 0; | 1126 | pq->ctl = 0; |
702 | pq->ctl_f.op = op; | 1127 | pq->ctl_f.op = op; |
1128 | /* we turn on descriptor write back error status */ | ||
1129 | if (device->cap & IOAT_CAP_DWBES) | ||
1130 | pq->ctl_f.wb_en = result ? 1 : 0; | ||
703 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); | 1131 | pq->ctl_f.src_cnt = src_cnt_to_hw(s); |
704 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | 1132 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); |
705 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | 1133 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); |
@@ -716,26 +1144,140 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, | |||
716 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | 1144 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); |
717 | dump_pq_desc_dbg(ioat, desc, ext); | 1145 | dump_pq_desc_dbg(ioat, desc, ext); |
718 | 1146 | ||
719 | /* completion descriptor carries interrupt bit */ | 1147 | if (!cb32) { |
720 | compl_desc = ioat2_get_ring_ent(ioat, idx + i); | 1148 | pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); |
721 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; | 1149 | pq->ctl_f.compl_write = 1; |
722 | hw = compl_desc->hw; | 1150 | compl_desc = desc; |
723 | hw->ctl = 0; | 1151 | } else { |
724 | hw->ctl_f.null = 1; | 1152 | /* completion descriptor carries interrupt bit */ |
725 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | 1153 | compl_desc = ioat2_get_ring_ent(ioat, idx + i); |
726 | hw->ctl_f.compl_write = 1; | 1154 | compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; |
727 | hw->size = NULL_DESC_BUFFER_SIZE; | 1155 | hw = compl_desc->hw; |
728 | dump_desc_dbg(ioat, compl_desc); | 1156 | hw->ctl = 0; |
1157 | hw->ctl_f.null = 1; | ||
1158 | hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
1159 | hw->ctl_f.compl_write = 1; | ||
1160 | hw->size = NULL_DESC_BUFFER_SIZE; | ||
1161 | dump_desc_dbg(ioat, compl_desc); | ||
1162 | } | ||
1163 | |||
729 | 1164 | ||
730 | /* we leave the channel locked to ensure in order submission */ | 1165 | /* we leave the channel locked to ensure in order submission */ |
731 | return &compl_desc->txd; | 1166 | return &compl_desc->txd; |
732 | } | 1167 | } |
733 | 1168 | ||
734 | static struct dma_async_tx_descriptor * | 1169 | static struct dma_async_tx_descriptor * |
1170 | __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, | ||
1171 | const dma_addr_t *dst, const dma_addr_t *src, | ||
1172 | unsigned int src_cnt, const unsigned char *scf, | ||
1173 | size_t len, unsigned long flags) | ||
1174 | { | ||
1175 | struct ioat2_dma_chan *ioat = to_ioat2_chan(c); | ||
1176 | struct ioat_chan_common *chan = &ioat->base; | ||
1177 | struct ioatdma_device *device = chan->device; | ||
1178 | struct ioat_ring_ent *desc; | ||
1179 | size_t total_len = len; | ||
1180 | struct ioat_pq_descriptor *pq; | ||
1181 | u32 offset = 0; | ||
1182 | u8 op; | ||
1183 | int i, s, idx, num_descs; | ||
1184 | |||
1185 | /* this function only handles src_cnt 9 - 16 */ | ||
1186 | BUG_ON(src_cnt < 9); | ||
1187 | |||
1188 | /* select the 16-source opcode variant */ | ||
1189 | op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; | ||
1190 | |||
1191 | dev_dbg(to_dev(chan), "%s\n", __func__); | ||
1192 | |||
1193 | num_descs = ioat2_xferlen_to_descs(ioat, len); | ||
1194 | |||
1195 | /* | ||
1196 | * 16 source pq is only available on cb3.3 and has no completion | ||
1197 | * write hw bug. | ||
1198 | */ | ||
1199 | if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) | ||
1200 | idx = ioat->head; | ||
1201 | else | ||
1202 | return NULL; | ||
1203 | |||
1204 | i = 0; | ||
1205 | |||
1206 | do { | ||
1207 | struct ioat_raw_descriptor *descs[4]; | ||
1208 | size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); | ||
1209 | |||
1210 | desc = ioat2_get_ring_ent(ioat, idx + i); | ||
1211 | pq = desc->pq; | ||
1212 | |||
1213 | descs[0] = (struct ioat_raw_descriptor *) pq; | ||
1214 | |||
1215 | desc->sed = ioat3_alloc_sed(device, | ||
1216 | sed_get_pq16_pool_idx(src_cnt)); | ||
1217 | if (!desc->sed) { | ||
1218 | dev_err(to_dev(chan), | ||
1219 | "%s: no free sed entries\n", __func__); | ||
1220 | return NULL; | ||
1221 | } | ||
1222 | |||
1223 | pq->sed_addr = desc->sed->dma; | ||
1224 | desc->sed->parent = desc; | ||
1225 | |||
1226 | descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; | ||
1227 | descs[2] = (void *)descs[1] + 64; | ||
1228 | |||
1229 | for (s = 0; s < src_cnt; s++) | ||
1230 | pq16_set_src(descs, src[s], offset, scf[s], s); | ||
1231 | |||
1232 | /* see the comment for dma_maxpq in include/linux/dmaengine.h */ | ||
1233 | if (dmaf_p_disabled_continue(flags)) | ||
1234 | pq16_set_src(descs, dst[1], offset, 1, s++); | ||
1235 | else if (dmaf_continue(flags)) { | ||
1236 | pq16_set_src(descs, dst[0], offset, 0, s++); | ||
1237 | pq16_set_src(descs, dst[1], offset, 1, s++); | ||
1238 | pq16_set_src(descs, dst[1], offset, 0, s++); | ||
1239 | } | ||
1240 | |||
1241 | pq->size = xfer_size; | ||
1242 | pq->p_addr = dst[0] + offset; | ||
1243 | pq->q_addr = dst[1] + offset; | ||
1244 | pq->ctl = 0; | ||
1245 | pq->ctl_f.op = op; | ||
1246 | pq->ctl_f.src_cnt = src16_cnt_to_hw(s); | ||
1247 | /* we turn on descriptor write back error status */ | ||
1248 | if (device->cap & IOAT_CAP_DWBES) | ||
1249 | pq->ctl_f.wb_en = result ? 1 : 0; | ||
1250 | pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); | ||
1251 | pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); | ||
1252 | |||
1253 | len -= xfer_size; | ||
1254 | offset += xfer_size; | ||
1255 | } while (++i < num_descs); | ||
1256 | |||
1257 | /* last pq descriptor carries the unmap parameters and fence bit */ | ||
1258 | desc->txd.flags = flags; | ||
1259 | desc->len = total_len; | ||
1260 | if (result) | ||
1261 | desc->result = result; | ||
1262 | pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); | ||
1263 | |||
1264 | /* with cb3.3 we should be able to do completion w/o a null desc */ | ||
1265 | pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); | ||
1266 | pq->ctl_f.compl_write = 1; | ||
1267 | |||
1268 | dump_pq16_desc_dbg(ioat, desc); | ||
1269 | |||
1270 | /* we leave the channel locked to ensure in order submission */ | ||
1271 | return &desc->txd; | ||
1272 | } | ||
1273 | |||
1274 | static struct dma_async_tx_descriptor * | ||
735 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | 1275 | ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, |
736 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 1276 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
737 | unsigned long flags) | 1277 | unsigned long flags) |
738 | { | 1278 | { |
1279 | struct dma_device *dma = chan->device; | ||
1280 | |||
739 | /* specify valid address for disabled result */ | 1281 | /* specify valid address for disabled result */ |
740 | if (flags & DMA_PREP_PQ_DISABLE_P) | 1282 | if (flags & DMA_PREP_PQ_DISABLE_P) |
741 | dst[0] = dst[1]; | 1283 | dst[0] = dst[1]; |
@@ -755,11 +1297,20 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |||
755 | single_source_coef[0] = scf[0]; | 1297 | single_source_coef[0] = scf[0]; |
756 | single_source_coef[1] = 0; | 1298 | single_source_coef[1] = 0; |
757 | 1299 | ||
758 | return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, | 1300 | return (src_cnt > 8) && (dma->max_pq > 8) ? |
759 | single_source_coef, len, flags); | 1301 | __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, |
760 | } else | 1302 | 2, single_source_coef, len, |
761 | return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, | 1303 | flags) : |
762 | len, flags); | 1304 | __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, |
1305 | single_source_coef, len, flags); | ||
1306 | |||
1307 | } else { | ||
1308 | return (src_cnt > 8) && (dma->max_pq > 8) ? | ||
1309 | __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, | ||
1310 | scf, len, flags) : | ||
1311 | __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, | ||
1312 | scf, len, flags); | ||
1313 | } | ||
763 | } | 1314 | } |
764 | 1315 | ||
765 | struct dma_async_tx_descriptor * | 1316 | struct dma_async_tx_descriptor * |
@@ -767,6 +1318,8 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
767 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 1318 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
768 | enum sum_check_flags *pqres, unsigned long flags) | 1319 | enum sum_check_flags *pqres, unsigned long flags) |
769 | { | 1320 | { |
1321 | struct dma_device *dma = chan->device; | ||
1322 | |||
770 | /* specify valid address for disabled result */ | 1323 | /* specify valid address for disabled result */ |
771 | if (flags & DMA_PREP_PQ_DISABLE_P) | 1324 | if (flags & DMA_PREP_PQ_DISABLE_P) |
772 | pq[0] = pq[1]; | 1325 | pq[0] = pq[1]; |
@@ -778,14 +1331,18 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |||
778 | */ | 1331 | */ |
779 | *pqres = 0; | 1332 | *pqres = 0; |
780 | 1333 | ||
781 | return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | 1334 | return (src_cnt > 8) && (dma->max_pq > 8) ? |
782 | flags); | 1335 | __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, |
1336 | flags) : | ||
1337 | __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, | ||
1338 | flags); | ||
783 | } | 1339 | } |
784 | 1340 | ||
785 | static struct dma_async_tx_descriptor * | 1341 | static struct dma_async_tx_descriptor * |
786 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | 1342 | ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, |
787 | unsigned int src_cnt, size_t len, unsigned long flags) | 1343 | unsigned int src_cnt, size_t len, unsigned long flags) |
788 | { | 1344 | { |
1345 | struct dma_device *dma = chan->device; | ||
789 | unsigned char scf[src_cnt]; | 1346 | unsigned char scf[src_cnt]; |
790 | dma_addr_t pq[2]; | 1347 | dma_addr_t pq[2]; |
791 | 1348 | ||
@@ -794,8 +1351,11 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | |||
794 | flags |= DMA_PREP_PQ_DISABLE_Q; | 1351 | flags |= DMA_PREP_PQ_DISABLE_Q; |
795 | pq[1] = dst; /* specify valid address for disabled result */ | 1352 | pq[1] = dst; /* specify valid address for disabled result */ |
796 | 1353 | ||
797 | return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | 1354 | return (src_cnt > 8) && (dma->max_pq > 8) ? |
798 | flags); | 1355 | __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, |
1356 | flags) : | ||
1357 | __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, | ||
1358 | flags); | ||
799 | } | 1359 | } |
800 | 1360 | ||
801 | struct dma_async_tx_descriptor * | 1361 | struct dma_async_tx_descriptor * |
@@ -803,6 +1363,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
803 | unsigned int src_cnt, size_t len, | 1363 | unsigned int src_cnt, size_t len, |
804 | enum sum_check_flags *result, unsigned long flags) | 1364 | enum sum_check_flags *result, unsigned long flags) |
805 | { | 1365 | { |
1366 | struct dma_device *dma = chan->device; | ||
806 | unsigned char scf[src_cnt]; | 1367 | unsigned char scf[src_cnt]; |
807 | dma_addr_t pq[2]; | 1368 | dma_addr_t pq[2]; |
808 | 1369 | ||
@@ -816,8 +1377,12 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |||
816 | flags |= DMA_PREP_PQ_DISABLE_Q; | 1377 | flags |= DMA_PREP_PQ_DISABLE_Q; |
817 | pq[1] = pq[0]; /* specify valid address for disabled result */ | 1378 | pq[1] = pq[0]; /* specify valid address for disabled result */ |
818 | 1379 | ||
819 | return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, | 1380 | |
820 | len, flags); | 1381 | return (src_cnt > 8) && (dma->max_pq > 8) ? |
1382 | __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, | ||
1383 | scf, len, flags) : | ||
1384 | __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, | ||
1385 | scf, len, flags); | ||
821 | } | 1386 | } |
822 | 1387 | ||
823 | static struct dma_async_tx_descriptor * | 1388 | static struct dma_async_tx_descriptor * |
@@ -1167,6 +1732,56 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) | |||
1167 | return 0; | 1732 | return 0; |
1168 | } | 1733 | } |
1169 | 1734 | ||
1735 | static int ioat3_irq_reinit(struct ioatdma_device *device) | ||
1736 | { | ||
1737 | int msixcnt = device->common.chancnt; | ||
1738 | struct pci_dev *pdev = device->pdev; | ||
1739 | int i; | ||
1740 | struct msix_entry *msix; | ||
1741 | struct ioat_chan_common *chan; | ||
1742 | int err = 0; | ||
1743 | |||
1744 | switch (device->irq_mode) { | ||
1745 | case IOAT_MSIX: | ||
1746 | |||
1747 | for (i = 0; i < msixcnt; i++) { | ||
1748 | msix = &device->msix_entries[i]; | ||
1749 | chan = ioat_chan_by_index(device, i); | ||
1750 | devm_free_irq(&pdev->dev, msix->vector, chan); | ||
1751 | } | ||
1752 | |||
1753 | pci_disable_msix(pdev); | ||
1754 | break; | ||
1755 | |||
1756 | case IOAT_MSIX_SINGLE: | ||
1757 | msix = &device->msix_entries[0]; | ||
1758 | chan = ioat_chan_by_index(device, 0); | ||
1759 | devm_free_irq(&pdev->dev, msix->vector, chan); | ||
1760 | pci_disable_msix(pdev); | ||
1761 | break; | ||
1762 | |||
1763 | case IOAT_MSI: | ||
1764 | chan = ioat_chan_by_index(device, 0); | ||
1765 | devm_free_irq(&pdev->dev, pdev->irq, chan); | ||
1766 | pci_disable_msi(pdev); | ||
1767 | break; | ||
1768 | |||
1769 | case IOAT_INTX: | ||
1770 | chan = ioat_chan_by_index(device, 0); | ||
1771 | devm_free_irq(&pdev->dev, pdev->irq, chan); | ||
1772 | break; | ||
1773 | |||
1774 | default: | ||
1775 | return 0; | ||
1776 | } | ||
1777 | |||
1778 | device->irq_mode = IOAT_NOIRQ; | ||
1779 | |||
1780 | err = ioat_dma_setup_interrupts(device); | ||
1781 | |||
1782 | return err; | ||
1783 | } | ||
1784 | |||
1170 | static int ioat3_reset_hw(struct ioat_chan_common *chan) | 1785 | static int ioat3_reset_hw(struct ioat_chan_common *chan) |
1171 | { | 1786 | { |
1172 | /* throw away whatever the channel was doing and get it | 1787 | /* throw away whatever the channel was doing and get it |
@@ -1183,80 +1798,65 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) | |||
1183 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); | 1798 | chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); |
1184 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); | 1799 | writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); |
1185 | 1800 | ||
1186 | /* clear any pending errors */ | 1801 | if (device->version < IOAT_VER_3_3) { |
1187 | err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | 1802 | /* clear any pending errors */ |
1803 | err = pci_read_config_dword(pdev, | ||
1804 | IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); | ||
1805 | if (err) { | ||
1806 | dev_err(&pdev->dev, | ||
1807 | "channel error register unreachable\n"); | ||
1808 | return err; | ||
1809 | } | ||
1810 | pci_write_config_dword(pdev, | ||
1811 | IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | ||
1812 | |||
1813 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | ||
1814 | * (workaround for spurious config parity error after restart) | ||
1815 | */ | ||
1816 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
1817 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { | ||
1818 | pci_write_config_dword(pdev, | ||
1819 | IOAT_PCI_DMAUNCERRSTS_OFFSET, | ||
1820 | 0x10); | ||
1821 | } | ||
1822 | } | ||
1823 | |||
1824 | err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); | ||
1188 | if (err) { | 1825 | if (err) { |
1189 | dev_err(&pdev->dev, "channel error register unreachable\n"); | 1826 | dev_err(&pdev->dev, "Failed to reset!\n"); |
1190 | return err; | 1827 | return err; |
1191 | } | 1828 | } |
1192 | pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); | ||
1193 | 1829 | ||
1194 | /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit | 1830 | if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) |
1195 | * (workaround for spurious config parity error after restart) | 1831 | err = ioat3_irq_reinit(device); |
1196 | */ | ||
1197 | pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); | ||
1198 | if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) | ||
1199 | pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); | ||
1200 | 1832 | ||
1201 | return ioat2_reset_sync(chan, msecs_to_jiffies(200)); | 1833 | return err; |
1202 | } | 1834 | } |
1203 | 1835 | ||
1204 | static bool is_jf_ioat(struct pci_dev *pdev) | 1836 | static void ioat3_intr_quirk(struct ioatdma_device *device) |
1205 | { | 1837 | { |
1206 | switch (pdev->device) { | 1838 | struct dma_device *dma; |
1207 | case PCI_DEVICE_ID_INTEL_IOAT_JSF0: | 1839 | struct dma_chan *c; |
1208 | case PCI_DEVICE_ID_INTEL_IOAT_JSF1: | 1840 | struct ioat_chan_common *chan; |
1209 | case PCI_DEVICE_ID_INTEL_IOAT_JSF2: | 1841 | u32 errmask; |
1210 | case PCI_DEVICE_ID_INTEL_IOAT_JSF3: | ||
1211 | case PCI_DEVICE_ID_INTEL_IOAT_JSF4: | ||
1212 | case PCI_DEVICE_ID_INTEL_IOAT_JSF5: | ||
1213 | case PCI_DEVICE_ID_INTEL_IOAT_JSF6: | ||
1214 | case PCI_DEVICE_ID_INTEL_IOAT_JSF7: | ||
1215 | case PCI_DEVICE_ID_INTEL_IOAT_JSF8: | ||
1216 | case PCI_DEVICE_ID_INTEL_IOAT_JSF9: | ||
1217 | return true; | ||
1218 | default: | ||
1219 | return false; | ||
1220 | } | ||
1221 | } | ||
1222 | 1842 | ||
1223 | static bool is_snb_ioat(struct pci_dev *pdev) | 1843 | dma = &device->common; |
1224 | { | ||
1225 | switch (pdev->device) { | ||
1226 | case PCI_DEVICE_ID_INTEL_IOAT_SNB0: | ||
1227 | case PCI_DEVICE_ID_INTEL_IOAT_SNB1: | ||
1228 | case PCI_DEVICE_ID_INTEL_IOAT_SNB2: | ||
1229 | case PCI_DEVICE_ID_INTEL_IOAT_SNB3: | ||
1230 | case PCI_DEVICE_ID_INTEL_IOAT_SNB4: | ||
1231 | case PCI_DEVICE_ID_INTEL_IOAT_SNB5: | ||
1232 | case PCI_DEVICE_ID_INTEL_IOAT_SNB6: | ||
1233 | case PCI_DEVICE_ID_INTEL_IOAT_SNB7: | ||
1234 | case PCI_DEVICE_ID_INTEL_IOAT_SNB8: | ||
1235 | case PCI_DEVICE_ID_INTEL_IOAT_SNB9: | ||
1236 | return true; | ||
1237 | default: | ||
1238 | return false; | ||
1239 | } | ||
1240 | } | ||
1241 | 1844 | ||
1242 | static bool is_ivb_ioat(struct pci_dev *pdev) | 1845 | /* |
1243 | { | 1846 | * if we have descriptor write back error status, we mask the |
1244 | switch (pdev->device) { | 1847 | * error interrupts |
1245 | case PCI_DEVICE_ID_INTEL_IOAT_IVB0: | 1848 | */ |
1246 | case PCI_DEVICE_ID_INTEL_IOAT_IVB1: | 1849 | if (device->cap & IOAT_CAP_DWBES) { |
1247 | case PCI_DEVICE_ID_INTEL_IOAT_IVB2: | 1850 | list_for_each_entry(c, &dma->channels, device_node) { |
1248 | case PCI_DEVICE_ID_INTEL_IOAT_IVB3: | 1851 | chan = to_chan_common(c); |
1249 | case PCI_DEVICE_ID_INTEL_IOAT_IVB4: | 1852 | errmask = readl(chan->reg_base + |
1250 | case PCI_DEVICE_ID_INTEL_IOAT_IVB5: | 1853 | IOAT_CHANERR_MASK_OFFSET); |
1251 | case PCI_DEVICE_ID_INTEL_IOAT_IVB6: | 1854 | errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | |
1252 | case PCI_DEVICE_ID_INTEL_IOAT_IVB7: | 1855 | IOAT_CHANERR_XOR_Q_ERR; |
1253 | case PCI_DEVICE_ID_INTEL_IOAT_IVB8: | 1856 | writel(errmask, chan->reg_base + |
1254 | case PCI_DEVICE_ID_INTEL_IOAT_IVB9: | 1857 | IOAT_CHANERR_MASK_OFFSET); |
1255 | return true; | 1858 | } |
1256 | default: | ||
1257 | return false; | ||
1258 | } | 1859 | } |
1259 | |||
1260 | } | 1860 | } |
1261 | 1861 | ||
1262 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) | 1862 | int ioat3_dma_probe(struct ioatdma_device *device, int dca) |
@@ -1268,30 +1868,33 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1268 | struct ioat_chan_common *chan; | 1868 | struct ioat_chan_common *chan; |
1269 | bool is_raid_device = false; | 1869 | bool is_raid_device = false; |
1270 | int err; | 1870 | int err; |
1271 | u32 cap; | ||
1272 | 1871 | ||
1273 | device->enumerate_channels = ioat2_enumerate_channels; | 1872 | device->enumerate_channels = ioat2_enumerate_channels; |
1274 | device->reset_hw = ioat3_reset_hw; | 1873 | device->reset_hw = ioat3_reset_hw; |
1275 | device->self_test = ioat3_dma_self_test; | 1874 | device->self_test = ioat3_dma_self_test; |
1875 | device->intr_quirk = ioat3_intr_quirk; | ||
1276 | dma = &device->common; | 1876 | dma = &device->common; |
1277 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; | 1877 | dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; |
1278 | dma->device_issue_pending = ioat2_issue_pending; | 1878 | dma->device_issue_pending = ioat2_issue_pending; |
1279 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; | 1879 | dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; |
1280 | dma->device_free_chan_resources = ioat2_free_chan_resources; | 1880 | dma->device_free_chan_resources = ioat2_free_chan_resources; |
1281 | 1881 | ||
1282 | if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev)) | 1882 | if (is_xeon_cb32(pdev)) |
1283 | dma->copy_align = 6; | 1883 | dma->copy_align = 6; |
1284 | 1884 | ||
1285 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); | 1885 | dma_cap_set(DMA_INTERRUPT, dma->cap_mask); |
1286 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; | 1886 | dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; |
1287 | 1887 | ||
1288 | cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); | 1888 | device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); |
1889 | |||
1890 | if (is_bwd_noraid(pdev)) | ||
1891 | device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); | ||
1289 | 1892 | ||
1290 | /* dca is incompatible with raid operations */ | 1893 | /* dca is incompatible with raid operations */ |
1291 | if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) | 1894 | if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) |
1292 | cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); | 1895 | device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); |
1293 | 1896 | ||
1294 | if (cap & IOAT_CAP_XOR) { | 1897 | if (device->cap & IOAT_CAP_XOR) { |
1295 | is_raid_device = true; | 1898 | is_raid_device = true; |
1296 | dma->max_xor = 8; | 1899 | dma->max_xor = 8; |
1297 | dma->xor_align = 6; | 1900 | dma->xor_align = 6; |
@@ -1302,53 +1905,86 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1302 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | 1905 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); |
1303 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; | 1906 | dma->device_prep_dma_xor_val = ioat3_prep_xor_val; |
1304 | } | 1907 | } |
1305 | if (cap & IOAT_CAP_PQ) { | 1908 | |
1909 | if (device->cap & IOAT_CAP_PQ) { | ||
1306 | is_raid_device = true; | 1910 | is_raid_device = true; |
1307 | dma_set_maxpq(dma, 8, 0); | ||
1308 | dma->pq_align = 6; | ||
1309 | 1911 | ||
1310 | dma_cap_set(DMA_PQ, dma->cap_mask); | ||
1311 | dma->device_prep_dma_pq = ioat3_prep_pq; | 1912 | dma->device_prep_dma_pq = ioat3_prep_pq; |
1312 | |||
1313 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | ||
1314 | dma->device_prep_dma_pq_val = ioat3_prep_pq_val; | 1913 | dma->device_prep_dma_pq_val = ioat3_prep_pq_val; |
1914 | dma_cap_set(DMA_PQ, dma->cap_mask); | ||
1915 | dma_cap_set(DMA_PQ_VAL, dma->cap_mask); | ||
1315 | 1916 | ||
1316 | if (!(cap & IOAT_CAP_XOR)) { | 1917 | if (device->cap & IOAT_CAP_RAID16SS) { |
1317 | dma->max_xor = 8; | 1918 | dma_set_maxpq(dma, 16, 0); |
1318 | dma->xor_align = 6; | 1919 | dma->pq_align = 0; |
1920 | } else { | ||
1921 | dma_set_maxpq(dma, 8, 0); | ||
1922 | if (is_xeon_cb32(pdev)) | ||
1923 | dma->pq_align = 6; | ||
1924 | else | ||
1925 | dma->pq_align = 0; | ||
1926 | } | ||
1319 | 1927 | ||
1320 | dma_cap_set(DMA_XOR, dma->cap_mask); | 1928 | if (!(device->cap & IOAT_CAP_XOR)) { |
1321 | dma->device_prep_dma_xor = ioat3_prep_pqxor; | 1929 | dma->device_prep_dma_xor = ioat3_prep_pqxor; |
1322 | |||
1323 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1324 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; | 1930 | dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; |
1931 | dma_cap_set(DMA_XOR, dma->cap_mask); | ||
1932 | dma_cap_set(DMA_XOR_VAL, dma->cap_mask); | ||
1933 | |||
1934 | if (device->cap & IOAT_CAP_RAID16SS) { | ||
1935 | dma->max_xor = 16; | ||
1936 | dma->xor_align = 0; | ||
1937 | } else { | ||
1938 | dma->max_xor = 8; | ||
1939 | if (is_xeon_cb32(pdev)) | ||
1940 | dma->xor_align = 6; | ||
1941 | else | ||
1942 | dma->xor_align = 0; | ||
1943 | } | ||
1325 | } | 1944 | } |
1326 | } | 1945 | } |
1327 | if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { | 1946 | |
1947 | if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { | ||
1328 | dma_cap_set(DMA_MEMSET, dma->cap_mask); | 1948 | dma_cap_set(DMA_MEMSET, dma->cap_mask); |
1329 | dma->device_prep_dma_memset = ioat3_prep_memset_lock; | 1949 | dma->device_prep_dma_memset = ioat3_prep_memset_lock; |
1330 | } | 1950 | } |
1331 | 1951 | ||
1332 | 1952 | ||
1333 | if (is_raid_device) { | 1953 | dma->device_tx_status = ioat3_tx_status; |
1334 | dma->device_tx_status = ioat3_tx_status; | 1954 | device->cleanup_fn = ioat3_cleanup_event; |
1335 | device->cleanup_fn = ioat3_cleanup_event; | 1955 | device->timer_fn = ioat3_timer_event; |
1336 | device->timer_fn = ioat3_timer_event; | 1956 | |
1337 | } else { | 1957 | if (is_xeon_cb32(pdev)) { |
1338 | dma->device_tx_status = ioat_dma_tx_status; | 1958 | dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); |
1339 | device->cleanup_fn = ioat2_cleanup_event; | 1959 | dma->device_prep_dma_xor_val = NULL; |
1340 | device->timer_fn = ioat2_timer_event; | 1960 | |
1961 | dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); | ||
1962 | dma->device_prep_dma_pq_val = NULL; | ||
1341 | } | 1963 | } |
1342 | 1964 | ||
1343 | #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA | 1965 | /* starting with CB3.3 super extended descriptors are supported */ |
1344 | dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); | 1966 | if (device->cap & IOAT_CAP_RAID16SS) { |
1345 | dma->device_prep_dma_pq_val = NULL; | 1967 | char pool_name[14]; |
1346 | #endif | 1968 | int i; |
1969 | |||
1970 | /* allocate sw descriptor pool for SED */ | ||
1971 | device->sed_pool = kmem_cache_create("ioat_sed", | ||
1972 | sizeof(struct ioat_sed_ent), 0, 0, NULL); | ||
1973 | if (!device->sed_pool) | ||
1974 | return -ENOMEM; | ||
1975 | |||
1976 | for (i = 0; i < MAX_SED_POOLS; i++) { | ||
1977 | snprintf(pool_name, 14, "ioat_hw%d_sed", i); | ||
1347 | 1978 | ||
1348 | #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA | 1979 | /* allocate SED DMA pool */ |
1349 | dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); | 1980 | device->sed_hw_pool[i] = dma_pool_create(pool_name, |
1350 | dma->device_prep_dma_xor_val = NULL; | 1981 | &pdev->dev, |
1351 | #endif | 1982 | SED_SIZE * (i + 1), 64, 0); |
1983 | if (!device->sed_hw_pool[i]) | ||
1984 | goto sed_pool_cleanup; | ||
1985 | |||
1986 | } | ||
1987 | } | ||
1352 | 1988 | ||
1353 | err = ioat_probe(device); | 1989 | err = ioat_probe(device); |
1354 | if (err) | 1990 | if (err) |
@@ -1371,4 +2007,28 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) | |||
1371 | device->dca = ioat3_dca_init(pdev, device->reg_base); | 2007 | device->dca = ioat3_dca_init(pdev, device->reg_base); |
1372 | 2008 | ||
1373 | return 0; | 2009 | return 0; |
2010 | |||
2011 | sed_pool_cleanup: | ||
2012 | if (device->sed_pool) { | ||
2013 | int i; | ||
2014 | kmem_cache_destroy(device->sed_pool); | ||
2015 | |||
2016 | for (i = 0; i < MAX_SED_POOLS; i++) | ||
2017 | if (device->sed_hw_pool[i]) | ||
2018 | dma_pool_destroy(device->sed_hw_pool[i]); | ||
2019 | } | ||
2020 | |||
2021 | return -ENOMEM; | ||
2022 | } | ||
2023 | |||
2024 | void ioat3_dma_remove(struct ioatdma_device *device) | ||
2025 | { | ||
2026 | if (device->sed_pool) { | ||
2027 | int i; | ||
2028 | kmem_cache_destroy(device->sed_pool); | ||
2029 | |||
2030 | for (i = 0; i < MAX_SED_POOLS; i++) | ||
2031 | if (device->sed_hw_pool[i]) | ||
2032 | dma_pool_destroy(device->sed_hw_pool[i]); | ||
2033 | } | ||
1374 | } | 2034 | } |
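
A note on the PQ dispatch pattern above: every prep path repeats the same selection, using the 16-source super extended descriptor only when both the request and the device exceed the classic 8-source limit (on pre-3.3 parts dma_set_maxpq(dma, 8, 0) already caps requests at 8, so the max_pq test is defensive). A minimal standalone sketch of that selection, with stub types and prep functions that are not the ioat API:

	#include <stdio.h>

	struct fake_dma_device {
		unsigned short max_pq;		/* 8 on CB3.2, 16 with RAID16SS */
	};

	/* stand-ins for __ioat3_prep_pq_lock()/__ioat3_prep_pq16_lock() */
	static const char *prep_pq8(void)  { return "8-source descriptor"; }
	static const char *prep_pq16(void) { return "16-source (SED) descriptor"; }

	static const char *prep_pq(const struct fake_dma_device *dma,
				   unsigned int src_cnt)
	{
		return (src_cnt > 8) && (dma->max_pq > 8) ?
			prep_pq16() : prep_pq8();
	}

	int main(void)
	{
		const struct fake_dma_device cb32 = { .max_pq = 8 };
		const struct fake_dma_device cb33 = { .max_pq = 16 };

		printf("CB3.2, 8 sources:  %s\n", prep_pq(&cb32, 8));
		printf("CB3.3, 12 sources: %s\n", prep_pq(&cb33, 12));
		return 0;
	}
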
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 7cb74c62c719..5ee57d402a6e 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h | |||
@@ -30,11 +30,6 @@ | |||
30 | #define IOAT_PCI_DID_SCNB 0x65FF | 30 | #define IOAT_PCI_DID_SCNB 0x65FF |
31 | #define IOAT_PCI_DID_SNB 0x402F | 31 | #define IOAT_PCI_DID_SNB 0x402F |
32 | 32 | ||
33 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | ||
34 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | ||
35 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | ||
36 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ | ||
37 | |||
38 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 | 33 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 |
39 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 | 34 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 |
40 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 | 35 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 |
@@ -46,6 +41,29 @@ | |||
46 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e | 41 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e |
47 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f | 42 | #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f |
48 | 43 | ||
44 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20 | ||
45 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21 | ||
46 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22 | ||
47 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23 | ||
48 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24 | ||
49 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25 | ||
50 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26 | ||
51 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27 | ||
52 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e | ||
53 | #define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f | ||
54 | |||
55 | #define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50 | ||
56 | #define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51 | ||
57 | #define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 | ||
58 | #define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 | ||
59 | |||
60 | #define IOAT_VER_1_2 0x12 /* Version 1.2 */ | ||
61 | #define IOAT_VER_2_0 0x20 /* Version 2.0 */ | ||
62 | #define IOAT_VER_3_0 0x30 /* Version 3.0 */ | ||
63 | #define IOAT_VER_3_2 0x32 /* Version 3.2 */ | ||
64 | #define IOAT_VER_3_3 0x33 /* Version 3.3 */ | ||
65 | |||
66 | |||
49 | int system_has_dca_enabled(struct pci_dev *pdev); | 67 | int system_has_dca_enabled(struct pci_dev *pdev); |
50 | 68 | ||
51 | struct ioat_dma_descriptor { | 69 | struct ioat_dma_descriptor { |
@@ -147,7 +165,17 @@ struct ioat_xor_ext_descriptor { | |||
147 | }; | 165 | }; |
148 | 166 | ||
149 | struct ioat_pq_descriptor { | 167 | struct ioat_pq_descriptor { |
150 | uint32_t size; | 168 | union { |
169 | uint32_t size; | ||
170 | uint32_t dwbes; | ||
171 | struct { | ||
172 | unsigned int rsvd:25; | ||
173 | unsigned int p_val_err:1; | ||
174 | unsigned int q_val_err:1; | ||
175 | unsigned int rsvd1:4; | ||
176 | unsigned int wbes:1; | ||
177 | } dwbes_f; | ||
178 | }; | ||
151 | union { | 179 | union { |
152 | uint32_t ctl; | 180 | uint32_t ctl; |
153 | struct { | 181 | struct { |
@@ -162,9 +190,14 @@ struct ioat_pq_descriptor { | |||
162 | unsigned int hint:1; | 190 | unsigned int hint:1; |
163 | unsigned int p_disable:1; | 191 | unsigned int p_disable:1; |
164 | unsigned int q_disable:1; | 192 | unsigned int q_disable:1; |
165 | unsigned int rsvd:11; | 193 | unsigned int rsvd2:2; |
194 | unsigned int wb_en:1; | ||
195 | unsigned int prl_en:1; | ||
196 | unsigned int rsvd3:7; | ||
166 | #define IOAT_OP_PQ 0x89 | 197 | #define IOAT_OP_PQ 0x89 |
167 | #define IOAT_OP_PQ_VAL 0x8a | 198 | #define IOAT_OP_PQ_VAL 0x8a |
199 | #define IOAT_OP_PQ_16S 0xa0 | ||
200 | #define IOAT_OP_PQ_VAL_16S 0xa1 | ||
168 | unsigned int op:8; | 201 | unsigned int op:8; |
169 | } ctl_f; | 202 | } ctl_f; |
170 | }; | 203 | }; |
@@ -172,7 +205,10 @@ struct ioat_pq_descriptor { | |||
172 | uint64_t p_addr; | 205 | uint64_t p_addr; |
173 | uint64_t next; | 206 | uint64_t next; |
174 | uint64_t src_addr2; | 207 | uint64_t src_addr2; |
175 | uint64_t src_addr3; | 208 | union { |
209 | uint64_t src_addr3; | ||
210 | uint64_t sed_addr; | ||
211 | }; | ||
176 | uint8_t coef[8]; | 212 | uint8_t coef[8]; |
177 | uint64_t q_addr; | 213 | uint64_t q_addr; |
178 | }; | 214 | }; |
@@ -221,4 +257,40 @@ struct ioat_pq_update_descriptor { | |||
221 | struct ioat_raw_descriptor { | 257 | struct ioat_raw_descriptor { |
222 | uint64_t field[8]; | 258 | uint64_t field[8]; |
223 | }; | 259 | }; |
260 | |||
261 | struct ioat_pq16a_descriptor { | ||
262 | uint8_t coef[8]; | ||
263 | uint64_t src_addr3; | ||
264 | uint64_t src_addr4; | ||
265 | uint64_t src_addr5; | ||
266 | uint64_t src_addr6; | ||
267 | uint64_t src_addr7; | ||
268 | uint64_t src_addr8; | ||
269 | uint64_t src_addr9; | ||
270 | }; | ||
271 | |||
272 | struct ioat_pq16b_descriptor { | ||
273 | uint64_t src_addr10; | ||
274 | uint64_t src_addr11; | ||
275 | uint64_t src_addr12; | ||
276 | uint64_t src_addr13; | ||
277 | uint64_t src_addr14; | ||
278 | uint64_t src_addr15; | ||
279 | uint64_t src_addr16; | ||
280 | uint64_t rsvd; | ||
281 | }; | ||
282 | |||
283 | union ioat_sed_pq_descriptor { | ||
284 | struct ioat_pq16a_descriptor a; | ||
285 | struct ioat_pq16b_descriptor b; | ||
286 | }; | ||
287 | |||
288 | #define SED_SIZE 64 | ||
289 | |||
290 | struct ioat_sed_raw_descriptor { | ||
291 | uint64_t a[8]; | ||
292 | uint64_t b[8]; | ||
293 | uint64_t c[8]; | ||
294 | }; | ||
295 | |||
224 | #endif | 296 | #endif |
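
The size/dwbes union added to struct ioat_pq_descriptor above lets software write the transfer size into the first dword and, when descriptor write back is enabled, re-read the same dword as completion status. A standalone model of that overlay follows; C bitfield ordering is implementation-defined, so this illustrates the union trick, not the exact hardware layout:

	#include <stdint.h>
	#include <stdio.h>

	struct pq_head {
		union {
			uint32_t size;		/* written by software before submit */
			uint32_t dwbes;		/* re-read as status after write back */
			struct {
				unsigned int rsvd:25;
				unsigned int p_val_err:1;
				unsigned int q_val_err:1;
				unsigned int rsvd1:4;
				unsigned int wbes:1;
			} dwbes_f;
		};
	};

	int main(void)
	{
		struct pq_head d = { .size = 4096 };

		/* pretend the engine wrote status back into the same dword */
		d.dwbes = 0;
		d.dwbes_f.wbes = 1;
		d.dwbes_f.q_val_err = 1;

		printf("write back valid: %u, Q error: %u\n",
		       d.dwbes_f.wbes, d.dwbes_f.q_val_err);
		return 0;
	}
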
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 71c7ecd80fac..2c8d560e6334 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c | |||
@@ -94,6 +94,23 @@ static struct pci_device_id ioat_pci_tbl[] = { | |||
94 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, | 94 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, |
95 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, | 95 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, |
96 | 96 | ||
97 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, | ||
98 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, | ||
99 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, | ||
100 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, | ||
101 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, | ||
102 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, | ||
103 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, | ||
104 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, | ||
105 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, | ||
106 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, | ||
107 | |||
108 | /* I/OAT v3.3 platforms */ | ||
109 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, | ||
110 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, | ||
111 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, | ||
112 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, | ||
113 | |||
97 | { 0, } | 114 | { 0, } |
98 | }; | 115 | }; |
99 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); | 116 | MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); |
@@ -190,6 +207,9 @@ static void ioat_remove(struct pci_dev *pdev) | |||
190 | if (!device) | 207 | if (!device) |
191 | return; | 208 | return; |
192 | 209 | ||
210 | if (device->version >= IOAT_VER_3_0) | ||
211 | ioat3_dma_remove(device); | ||
212 | |||
193 | dev_err(&pdev->dev, "Removing dma and dca services\n"); | 213 | dev_err(&pdev->dev, "Removing dma and dca services\n"); |
194 | if (device->dca) { | 214 | if (device->dca) { |
195 | unregister_dca_provider(device->dca, &pdev->dev); | 215 | unregister_dca_provider(device->dca, &pdev->dev); |
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 1391798542b6..2f1cfa0f1f47 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h | |||
@@ -79,6 +79,8 @@ | |||
79 | #define IOAT_CAP_APIC 0x00000080 | 79 | #define IOAT_CAP_APIC 0x00000080 |
80 | #define IOAT_CAP_XOR 0x00000100 | 80 | #define IOAT_CAP_XOR 0x00000100 |
81 | #define IOAT_CAP_PQ 0x00000200 | 81 | #define IOAT_CAP_PQ 0x00000200 |
82 | #define IOAT_CAP_DWBES 0x00002000 | ||
83 | #define IOAT_CAP_RAID16SS 0x00020000 | ||
82 | 84 | ||
83 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ | 85 | #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ |
84 | 86 | ||
@@ -93,6 +95,8 @@ | |||
93 | #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 | 95 | #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 |
94 | #define IOAT_CHANCTRL_INT_REARM 0x0001 | 96 | #define IOAT_CHANCTRL_INT_REARM 0x0001 |
95 | #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ | 97 | #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ |
98 | IOAT_CHANCTRL_ERR_INT_EN |\ | ||
99 | IOAT_CHANCTRL_ERR_COMPLETION_EN |\ | ||
96 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN) | 100 | IOAT_CHANCTRL_ANY_ERR_ABORT_EN) |
97 | 101 | ||
98 | #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ | 102 | #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ |
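
The two new capability bits are consumed in ioat3_dma_probe() above: DWBES gates the error-mask quirk and RAID16SS gates the 16-source descriptor setup. A trivial standalone check against a hypothetical value read from IOAT_DMA_CAP_OFFSET:

	#include <stdint.h>
	#include <stdio.h>

	#define IOAT_CAP_DWBES		0x00002000
	#define IOAT_CAP_RAID16SS	0x00020000

	int main(void)
	{
		uint32_t cap = 0x00022000;	/* made-up readl() result */

		if (cap & IOAT_CAP_DWBES)
			printf("descriptor write back error status supported\n");
		if (cap & IOAT_CAP_RAID16SS)
			printf("16-source super extended descriptors supported\n");
		return 0;
	}
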
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 8c61d17a86bf..d39c2cd0795d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -1642,7 +1642,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) | |||
1642 | return dma_async_device_register(&idmac->dma); | 1642 | return dma_async_device_register(&idmac->dma); |
1643 | } | 1643 | } |
1644 | 1644 | ||
1645 | static void __exit ipu_idmac_exit(struct ipu *ipu) | 1645 | static void ipu_idmac_exit(struct ipu *ipu) |
1646 | { | 1646 | { |
1647 | int i; | 1647 | int i; |
1648 | struct idmac *idmac = &ipu->idmac; | 1648 | struct idmac *idmac = &ipu->idmac; |
@@ -1756,7 +1756,7 @@ err_noirq: | |||
1756 | return ret; | 1756 | return ret; |
1757 | } | 1757 | } |
1758 | 1758 | ||
1759 | static int __exit ipu_remove(struct platform_device *pdev) | 1759 | static int ipu_remove(struct platform_device *pdev) |
1760 | { | 1760 | { |
1761 | struct ipu *ipu = platform_get_drvdata(pdev); | 1761 | struct ipu *ipu = platform_get_drvdata(pdev); |
1762 | 1762 | ||
@@ -1781,7 +1781,7 @@ static struct platform_driver ipu_platform_driver = { | |||
1781 | .name = "ipu-core", | 1781 | .name = "ipu-core", |
1782 | .owner = THIS_MODULE, | 1782 | .owner = THIS_MODULE, |
1783 | }, | 1783 | }, |
1784 | .remove = __exit_p(ipu_remove), | 1784 | .remove = ipu_remove, |
1785 | }; | 1785 | }; |
1786 | 1786 | ||
1787 | static int __init ipu_init(void) | 1787 | static int __init ipu_init(void) |
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index 69d04d28b1ef..7aa0864cd487 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c | |||
@@ -13,43 +13,31 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/rculist.h> | 16 | #include <linux/mutex.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/of.h> | 18 | #include <linux/of.h> |
19 | #include <linux/of_dma.h> | 19 | #include <linux/of_dma.h> |
20 | 20 | ||
21 | static LIST_HEAD(of_dma_list); | 21 | static LIST_HEAD(of_dma_list); |
22 | static DEFINE_SPINLOCK(of_dma_lock); | 22 | static DEFINE_MUTEX(of_dma_lock); |
23 | 23 | ||
24 | /** | 24 | /** |
25 | * of_dma_get_controller - Get a DMA controller in DT DMA helpers list | 25 | * of_dma_find_controller - Get a DMA controller in DT DMA helpers list |
26 | * @dma_spec: pointer to DMA specifier as found in the device tree | 26 | * @dma_spec: pointer to DMA specifier as found in the device tree |
27 | * | 27 | * |
28 | * Finds a DMA controller with matching device node and number of dma cells | 28 | * Finds a DMA controller with matching device node and number of dma cells |
29 | * in a list of registered DMA controllers. If a match is found the use_count | 29 | * in a list of registered DMA controllers. If a match is found a valid pointer |
30 | variable is increased and a valid pointer to the DMA data stored is returned. | 30 | to the DMA data stored is returned. A NULL pointer is returned if no match is |
31 | * A NULL pointer is returned if no match is found. | 31 | * found. |
32 | */ | 32 | */ |
33 | static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) | 33 | static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) |
34 | { | 34 | { |
35 | struct of_dma *ofdma; | 35 | struct of_dma *ofdma; |
36 | 36 | ||
37 | spin_lock(&of_dma_lock); | ||
38 | |||
39 | if (list_empty(&of_dma_list)) { | ||
40 | spin_unlock(&of_dma_lock); | ||
41 | return NULL; | ||
42 | } | ||
43 | |||
44 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) | 37 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) |
45 | if ((ofdma->of_node == dma_spec->np) && | 38 | if ((ofdma->of_node == dma_spec->np) && |
46 | (ofdma->of_dma_nbcells == dma_spec->args_count)) { | 39 | (ofdma->of_dma_nbcells == dma_spec->args_count)) |
47 | ofdma->use_count++; | ||
48 | spin_unlock(&of_dma_lock); | ||
49 | return ofdma; | 40 | return ofdma; |
50 | } | ||
51 | |||
52 | spin_unlock(&of_dma_lock); | ||
53 | 41 | ||
54 | pr_debug("%s: can't find DMA controller %s\n", __func__, | 42 | pr_debug("%s: can't find DMA controller %s\n", __func__, |
55 | dma_spec->np->full_name); | 43 | dma_spec->np->full_name); |
@@ -58,22 +46,6 @@ static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) | |||
58 | } | 46 | } |
59 | 47 | ||
60 | /** | 48 | /** |
61 | * of_dma_put_controller - Decrement use count for a registered DMA controller | ||
62 | * @of_dma: pointer to DMA controller data | ||
63 | * | ||
64 | * Decrements the use_count variable in the DMA data structure. This function | ||
65 | * should be called only when a valid pointer is returned from | ||
66 | * of_dma_get_controller() and no further accesses to data referenced by that | ||
67 | * pointer are needed. | ||
68 | */ | ||
69 | static void of_dma_put_controller(struct of_dma *ofdma) | ||
70 | { | ||
71 | spin_lock(&of_dma_lock); | ||
72 | ofdma->use_count--; | ||
73 | spin_unlock(&of_dma_lock); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * of_dma_controller_register - Register a DMA controller to DT DMA helpers | 49 | * of_dma_controller_register - Register a DMA controller to DT DMA helpers |
78 | * @np: device node of DMA controller | 50 | * @np: device node of DMA controller |
79 | * @of_dma_xlate: translation function which converts a phandle | 51 | * @of_dma_xlate: translation function which converts a phandle |
@@ -93,6 +65,7 @@ int of_dma_controller_register(struct device_node *np, | |||
93 | { | 65 | { |
94 | struct of_dma *ofdma; | 66 | struct of_dma *ofdma; |
95 | int nbcells; | 67 | int nbcells; |
68 | const __be32 *prop; | ||
96 | 69 | ||
97 | if (!np || !of_dma_xlate) { | 70 | if (!np || !of_dma_xlate) { |
98 | pr_err("%s: not enough information provided\n", __func__); | 71 | pr_err("%s: not enough information provided\n", __func__); |
@@ -103,8 +76,11 @@ int of_dma_controller_register(struct device_node *np, | |||
103 | if (!ofdma) | 76 | if (!ofdma) |
104 | return -ENOMEM; | 77 | return -ENOMEM; |
105 | 78 | ||
106 | nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL)); | 79 | prop = of_get_property(np, "#dma-cells", NULL); |
107 | if (!nbcells) { | 80 | if (prop) |
81 | nbcells = be32_to_cpup(prop); | ||
82 | |||
83 | if (!prop || !nbcells) { | ||
108 | pr_err("%s: #dma-cells property is missing or invalid\n", | 84 | pr_err("%s: #dma-cells property is missing or invalid\n", |
109 | __func__); | 85 | __func__); |
110 | kfree(ofdma); | 86 | kfree(ofdma); |
@@ -115,12 +91,11 @@ int of_dma_controller_register(struct device_node *np, | |||
115 | ofdma->of_dma_nbcells = nbcells; | 91 | ofdma->of_dma_nbcells = nbcells; |
116 | ofdma->of_dma_xlate = of_dma_xlate; | 92 | ofdma->of_dma_xlate = of_dma_xlate; |
117 | ofdma->of_dma_data = data; | 93 | ofdma->of_dma_data = data; |
118 | ofdma->use_count = 0; | ||
119 | 94 | ||
120 | /* Now queue of_dma controller structure in list */ | 95 | /* Now queue of_dma controller structure in list */ |
121 | spin_lock(&of_dma_lock); | 96 | mutex_lock(&of_dma_lock); |
122 | list_add_tail(&ofdma->of_dma_controllers, &of_dma_list); | 97 | list_add_tail(&ofdma->of_dma_controllers, &of_dma_list); |
123 | spin_unlock(&of_dma_lock); | 98 | mutex_unlock(&of_dma_lock); |
124 | 99 | ||
125 | return 0; | 100 | return 0; |
126 | } | 101 | } |
@@ -132,32 +107,20 @@ EXPORT_SYMBOL_GPL(of_dma_controller_register); | |||
132 | * | 107 | * |
133 | * Memory allocated by of_dma_controller_register() is freed here. | 108 | * Memory allocated by of_dma_controller_register() is freed here. |
134 | */ | 109 | */ |
135 | int of_dma_controller_free(struct device_node *np) | 110 | void of_dma_controller_free(struct device_node *np) |
136 | { | 111 | { |
137 | struct of_dma *ofdma; | 112 | struct of_dma *ofdma; |
138 | 113 | ||
139 | spin_lock(&of_dma_lock); | 114 | mutex_lock(&of_dma_lock); |
140 | |||
141 | if (list_empty(&of_dma_list)) { | ||
142 | spin_unlock(&of_dma_lock); | ||
143 | return -ENODEV; | ||
144 | } | ||
145 | 115 | ||
146 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) | 116 | list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) |
147 | if (ofdma->of_node == np) { | 117 | if (ofdma->of_node == np) { |
148 | if (ofdma->use_count) { | ||
149 | spin_unlock(&of_dma_lock); | ||
150 | return -EBUSY; | ||
151 | } | ||
152 | |||
153 | list_del(&ofdma->of_dma_controllers); | 118 | list_del(&ofdma->of_dma_controllers); |
154 | spin_unlock(&of_dma_lock); | ||
155 | kfree(ofdma); | 119 | kfree(ofdma); |
156 | return 0; | 120 | break; |
157 | } | 121 | } |
158 | 122 | ||
159 | spin_unlock(&of_dma_lock); | 123 | mutex_unlock(&of_dma_lock); |
160 | return -ENODEV; | ||
161 | } | 124 | } |
162 | EXPORT_SYMBOL_GPL(of_dma_controller_free); | 125 | EXPORT_SYMBOL_GPL(of_dma_controller_free); |
163 | 126 | ||
@@ -172,8 +135,8 @@ EXPORT_SYMBOL_GPL(of_dma_controller_free); | |||
172 | * specifiers, matches the name provided. Returns 0 if the name matches and | 135 | * specifiers, matches the name provided. Returns 0 if the name matches and |
173 | * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV. | 136 | * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV. |
174 | */ | 137 | */ |
175 | static int of_dma_match_channel(struct device_node *np, char *name, int index, | 138 | static int of_dma_match_channel(struct device_node *np, const char *name, |
176 | struct of_phandle_args *dma_spec) | 139 | int index, struct of_phandle_args *dma_spec) |
177 | { | 140 | { |
178 | const char *s; | 141 | const char *s; |
179 | 142 | ||
@@ -198,7 +161,7 @@ static int of_dma_match_channel(struct device_node *np, char *name, int index, | |||
198 | * Returns pointer to appropriate dma channel on success or NULL on error. | 161 | * Returns pointer to appropriate dma channel on success or NULL on error. |
199 | */ | 162 | */ |
200 | struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 163 | struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
201 | char *name) | 164 | const char *name) |
202 | { | 165 | { |
203 | struct of_phandle_args dma_spec; | 166 | struct of_phandle_args dma_spec; |
204 | struct of_dma *ofdma; | 167 | struct of_dma *ofdma; |
@@ -220,14 +183,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | |||
220 | if (of_dma_match_channel(np, name, i, &dma_spec)) | 183 | if (of_dma_match_channel(np, name, i, &dma_spec)) |
221 | continue; | 184 | continue; |
222 | 185 | ||
223 | ofdma = of_dma_get_controller(&dma_spec); | 186 | mutex_lock(&of_dma_lock); |
224 | 187 | ofdma = of_dma_find_controller(&dma_spec); | |
225 | if (!ofdma) | ||
226 | continue; | ||
227 | 188 | ||
228 | chan = ofdma->of_dma_xlate(&dma_spec, ofdma); | 189 | if (ofdma) |
190 | chan = ofdma->of_dma_xlate(&dma_spec, ofdma); | ||
191 | else | ||
192 | chan = NULL; | ||
229 | 193 | ||
230 | of_dma_put_controller(ofdma); | 194 | mutex_unlock(&of_dma_lock); |
231 | 195 | ||
232 | of_node_put(dma_spec.np); | 196 | of_node_put(dma_spec.np); |
233 | 197 | ||
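
The of-dma rework above drops the per-controller use_count in favour of one mutex held across both the list lookup and the xlate callback; a mutex rather than a spinlock because xlate may sleep. That is what made of_dma_put_controller() removable. A userspace model of the resulting shape, with invented names:

	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	struct controller {
		const char *name;
		const char *(*xlate)(struct controller *c);
		struct controller *next;
	};

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct controller *controllers;

	static const char *request_channel(const char *name)
	{
		struct controller *c;
		const char *chan = NULL;

		pthread_mutex_lock(&list_lock);
		for (c = controllers; c; c = c->next) {
			if (strcmp(c->name, name) == 0) {
				/* xlate runs under the lock, so the entry
				 * cannot be unregistered and freed underneath
				 * it; no use counting is needed */
				chan = c->xlate(c);
				break;
			}
		}
		pthread_mutex_unlock(&list_lock);
		return chan;
	}

	static const char *dummy_xlate(struct controller *c)
	{
		(void)c;
		return "chan0";
	}

	int main(void)
	{
		struct controller dma0 = { "dma0", dummy_xlate, NULL };
		const char *chan;

		controllers = &dma0;
		chan = request_channel("dma0");
		printf("dma0 -> %s\n", chan ? chan : "(none)");
		chan = request_channel("dma1");
		printf("dma1 -> %s\n", chan ? chan : "(none)");
		return 0;
	}
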
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 08b43bf37158..ec3fc4fd9160 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/of_dma.h> | ||
20 | #include <linux/of_device.h> | ||
19 | 21 | ||
20 | #include "virt-dma.h" | 22 | #include "virt-dma.h" |
21 | 23 | ||
@@ -67,6 +69,10 @@ static const unsigned es_bytes[] = { | |||
67 | [OMAP_DMA_DATA_TYPE_S32] = 4, | 69 | [OMAP_DMA_DATA_TYPE_S32] = 4, |
68 | }; | 70 | }; |
69 | 71 | ||
72 | static struct of_dma_filter_info omap_dma_info = { | ||
73 | .filter_fn = omap_dma_filter_fn, | ||
74 | }; | ||
75 | |||
70 | static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) | 76 | static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) |
71 | { | 77 | { |
72 | return container_of(d, struct omap_dmadev, ddev); | 78 | return container_of(d, struct omap_dmadev, ddev); |
@@ -629,8 +635,22 @@ static int omap_dma_probe(struct platform_device *pdev) | |||
629 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", | 635 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", |
630 | rc); | 636 | rc); |
631 | omap_dma_free(od); | 637 | omap_dma_free(od); |
632 | } else { | 638 | return rc; |
633 | platform_set_drvdata(pdev, od); | 639 | } |
640 | |||
641 | platform_set_drvdata(pdev, od); | ||
642 | |||
643 | if (pdev->dev.of_node) { | ||
644 | omap_dma_info.dma_cap = od->ddev.cap_mask; | ||
645 | |||
646 | /* Device-tree DMA controller registration */ | ||
647 | rc = of_dma_controller_register(pdev->dev.of_node, | ||
648 | of_dma_simple_xlate, &omap_dma_info); | ||
649 | if (rc) { | ||
650 | pr_warn("OMAP-DMA: failed to register DMA controller\n"); | ||
651 | dma_async_device_unregister(&od->ddev); | ||
652 | omap_dma_free(od); | ||
653 | } | ||
634 | } | 654 | } |
635 | 655 | ||
636 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); | 656 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); |
@@ -642,18 +662,32 @@ static int omap_dma_remove(struct platform_device *pdev) | |||
642 | { | 662 | { |
643 | struct omap_dmadev *od = platform_get_drvdata(pdev); | 663 | struct omap_dmadev *od = platform_get_drvdata(pdev); |
644 | 664 | ||
665 | if (pdev->dev.of_node) | ||
666 | of_dma_controller_free(pdev->dev.of_node); | ||
667 | |||
645 | dma_async_device_unregister(&od->ddev); | 668 | dma_async_device_unregister(&od->ddev); |
646 | omap_dma_free(od); | 669 | omap_dma_free(od); |
647 | 670 | ||
648 | return 0; | 671 | return 0; |
649 | } | 672 | } |
650 | 673 | ||
674 | static const struct of_device_id omap_dma_match[] = { | ||
675 | { .compatible = "ti,omap2420-sdma", }, | ||
676 | { .compatible = "ti,omap2430-sdma", }, | ||
677 | { .compatible = "ti,omap3430-sdma", }, | ||
678 | { .compatible = "ti,omap3630-sdma", }, | ||
679 | { .compatible = "ti,omap4430-sdma", }, | ||
680 | {}, | ||
681 | }; | ||
682 | MODULE_DEVICE_TABLE(of, omap_dma_match); | ||
683 | |||
651 | static struct platform_driver omap_dma_driver = { | 684 | static struct platform_driver omap_dma_driver = { |
652 | .probe = omap_dma_probe, | 685 | .probe = omap_dma_probe, |
653 | .remove = omap_dma_remove, | 686 | .remove = omap_dma_remove, |
654 | .driver = { | 687 | .driver = { |
655 | .name = "omap-dma-engine", | 688 | .name = "omap-dma-engine", |
656 | .owner = THIS_MODULE, | 689 | .owner = THIS_MODULE, |
690 | .of_match_table = of_match_ptr(omap_dma_match), | ||
657 | }, | 691 | }, |
658 | }; | 692 | }; |
659 | 693 | ||
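
With the controller registered via of_dma_controller_register() as above, DT-based clients stop open-coding omap_dma_filter_fn and request channels by the names listed in their node's "dma-names" property. A trimmed client-side fragment (the surrounding driver context is assumed):

	struct dma_chan *rx, *tx;

	rx = dma_request_slave_channel(&pdev->dev, "rx");
	tx = dma_request_slave_channel(&pdev->dev, "tx");
	if (!rx || !tx)
		return -ENODEV;	/* no usable "dmas"/"dma-names" entries */
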
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index d01faeb0f27c..ce3dc3e9688c 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) | |||
476 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); | 476 | dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); |
477 | 477 | ||
478 | if (!ret) { | 478 | if (!ret) { |
479 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); | 479 | ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); |
480 | if (ret) { | 480 | if (ret) { |
481 | spin_lock(&pd_chan->lock); | 481 | spin_lock(&pd_chan->lock); |
482 | pd_chan->descs_allocated++; | 482 | pd_chan->descs_allocated++; |
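
On the GFP_NOIO to GFP_ATOMIC change above: pdc_desc_get() can be reached from contexts that must not sleep, and GFP_NOIO allocations may sleep while GFP_ATOMIC ones never do. The general rule, as an illustrative fragment:

	/* inside spin_lock() or IRQ/tasklet context, only GFP_ATOMIC
	 * is safe; GFP_NOIO and GFP_KERNEL may sleep */
	spin_lock(&chan->lock);
	desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
	spin_unlock(&chan->lock);
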
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 5dbc5946c4c3..a17553f7c028 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
27 | #include <linux/of.h> | 27 | #include <linux/of.h> |
28 | #include <linux/of_dma.h> | 28 | #include <linux/of_dma.h> |
29 | #include <linux/err.h> | ||
29 | 30 | ||
30 | #include "dmaengine.h" | 31 | #include "dmaengine.h" |
31 | #define PL330_MAX_CHAN 8 | 32 | #define PL330_MAX_CHAN 8 |
@@ -2288,13 +2289,12 @@ static inline void fill_queue(struct dma_pl330_chan *pch) | |||
2288 | 2289 | ||
2289 | /* If already submitted */ | 2290 | /* If already submitted */ |
2290 | if (desc->status == BUSY) | 2291 | if (desc->status == BUSY) |
2291 | break; | 2292 | continue; |
2292 | 2293 | ||
2293 | ret = pl330_submit_req(pch->pl330_chid, | 2294 | ret = pl330_submit_req(pch->pl330_chid, |
2294 | &desc->req); | 2295 | &desc->req); |
2295 | if (!ret) { | 2296 | if (!ret) { |
2296 | desc->status = BUSY; | 2297 | desc->status = BUSY; |
2297 | break; | ||
2298 | } else if (ret == -EAGAIN) { | 2298 | } else if (ret == -EAGAIN) { |
2299 | /* QFull or DMAC Dying */ | 2299 | /* QFull or DMAC Dying */ |
2300 | break; | 2300 | break; |
@@ -2904,9 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2904 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; | 2904 | pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; |
2905 | 2905 | ||
2906 | res = &adev->res; | 2906 | res = &adev->res; |
2907 | pi->base = devm_request_and_ioremap(&adev->dev, res); | 2907 | pi->base = devm_ioremap_resource(&adev->dev, res); |
2908 | if (!pi->base) | 2908 | if (IS_ERR(pi->base)) |
2909 | return -ENXIO; | 2909 | return PTR_ERR(pi->base); |
2910 | 2910 | ||
2911 | amba_set_drvdata(adev, pdmac); | 2911 | amba_set_drvdata(adev, pdmac); |
2912 | 2912 | ||
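
The pl330 hunk also converts devm_request_and_ioremap(), which returns NULL on failure, to devm_ioremap_resource(), which returns an ERR_PTR() carrying the specific error and logs the failure itself. Callers follow this pattern:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* helper already printed the error */
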
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig new file mode 100644 index 000000000000..5c1dee20c13e --- /dev/null +++ b/drivers/dma/sh/Kconfig | |||
@@ -0,0 +1,24 @@ | |||
1 | # | ||
2 | # DMA engine configuration for sh | ||
3 | # | ||
4 | |||
5 | config SH_DMAE_BASE | ||
6 | bool "Renesas SuperH DMA Engine support" | ||
7 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) | ||
8 | depends on !SH_DMA_API | ||
9 | default y | ||
10 | select DMA_ENGINE | ||
11 | help | ||
12 | Enable support for the Renesas SuperH DMA controllers. | ||
13 | |||
14 | config SH_DMAE | ||
15 | tristate "Renesas SuperH DMAC support" | ||
16 | depends on SH_DMAE_BASE | ||
17 | help | ||
18 | Enable support for the Renesas SuperH DMA controllers. | ||
19 | |||
20 | config SUDMAC | ||
21 | tristate "Renesas SUDMAC support" | ||
22 | depends on SH_DMAE_BASE | ||
23 | help | ||
24 | Enable support for the Renesas SUDMAC controllers. | ||
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 54ae9572b0ac..c07ca4612e46 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile | |||
@@ -1,2 +1,3 @@ | |||
1 | obj-$(CONFIG_SH_DMAE) += shdma-base.o | 1 | obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o |
2 | obj-$(CONFIG_SH_DMAE) += shdma.o | 2 | obj-$(CONFIG_SH_DMAE) += shdma.o |
3 | obj-$(CONFIG_SUDMAC) += sudmac.o | ||
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c new file mode 100644 index 000000000000..e7c94bbddb53 --- /dev/null +++ b/drivers/dma/sh/sudmac.c | |||
@@ -0,0 +1,428 @@ | |||
1 | /* | ||
2 | * Renesas SUDMAC support | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
5 | * | ||
6 | * based on drivers/dma/sh/shdma.c: | ||
7 | * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> | ||
8 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | ||
9 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | ||
10 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | ||
11 | * | ||
12 | * This is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of version 2 of the GNU General Public License as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/init.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/dmaengine.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/sudmac.h> | ||
24 | |||
25 | struct sudmac_chan { | ||
26 | struct shdma_chan shdma_chan; | ||
27 | void __iomem *base; | ||
28 | char dev_id[16]; /* unique per-DMAC channel name */ | ||
29 | |||
30 | u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ | ||
31 | u32 cfg; | ||
32 | u32 dint_end_bit; | ||
33 | }; | ||
34 | |||
35 | struct sudmac_device { | ||
36 | struct shdma_dev shdma_dev; | ||
37 | struct sudmac_pdata *pdata; | ||
38 | void __iomem *chan_reg; | ||
39 | }; | ||
40 | |||
41 | struct sudmac_regs { | ||
42 | u32 base_addr; | ||
43 | u32 base_byte_count; | ||
44 | }; | ||
45 | |||
46 | struct sudmac_desc { | ||
47 | struct sudmac_regs hw; | ||
48 | struct shdma_desc shdma_desc; | ||
49 | }; | ||
50 | |||
51 | #define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) | ||
52 | #define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) | ||
53 | #define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ | ||
54 | struct sudmac_device, shdma_dev.dma_dev) | ||
55 | |||
56 | /* SUDMAC register */ | ||
57 | #define SUDMAC_CH0CFG 0x00 | ||
58 | #define SUDMAC_CH0BA 0x10 | ||
59 | #define SUDMAC_CH0BBC 0x18 | ||
60 | #define SUDMAC_CH0CA 0x20 | ||
61 | #define SUDMAC_CH0CBC 0x28 | ||
62 | #define SUDMAC_CH0DEN 0x30 | ||
63 | #define SUDMAC_DSTSCLR 0x38 | ||
64 | #define SUDMAC_DBUFCTRL 0x3C | ||
65 | #define SUDMAC_DINTCTRL 0x40 | ||
66 | #define SUDMAC_DINTSTS 0x44 | ||
67 | #define SUDMAC_DINTSTSCLR 0x48 | ||
68 | #define SUDMAC_CH0SHCTRL 0x50 | ||
69 | |||
70 | /* Definitions for the sudmac_channel.config */ | ||
71 | #define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ | ||
72 | #define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ | ||
73 | #define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ | ||
74 | |||
75 | /* Definitions for the sudmac_channel.dint_end_bit */ | ||
76 | #define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ | ||
77 | #define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ | ||
78 | |||
79 | #define SUDMAC_DRV_NAME "sudmac" | ||
80 | |||
81 | static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) | ||
82 | { | ||
83 | iowrite32(data, sc->base + reg); | ||
84 | } | ||
85 | |||
86 | static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) | ||
87 | { | ||
88 | return ioread32(sc->base + reg); | ||
89 | } | ||
90 | |||
91 | static bool sudmac_is_busy(struct sudmac_chan *sc) | ||
92 | { | ||
93 | u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); | ||
94 | |||
95 | if (den) | ||
96 | return true; /* working */ | ||
97 | |||
98 | return false; /* waiting */ | ||
99 | } | ||
100 | |||
101 | static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, | ||
102 | struct shdma_desc *sdesc) | ||
103 | { | ||
104 | sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); | ||
105 | sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); | ||
106 | sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); | ||
107 | } | ||
108 | |||
109 | static void sudmac_start(struct sudmac_chan *sc) | ||
110 | { | ||
111 | u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); | ||
112 | |||
113 | sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); | ||
114 | sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); | ||
115 | } | ||
116 | |||
117 | static void sudmac_start_xfer(struct shdma_chan *schan, | ||
118 | struct shdma_desc *sdesc) | ||
119 | { | ||
120 | struct sudmac_chan *sc = to_chan(schan); | ||
121 | struct sudmac_desc *sd = to_desc(sdesc); | ||
122 | |||
123 | sudmac_set_reg(sc, &sd->hw, sdesc); | ||
124 | sudmac_start(sc); | ||
125 | } | ||
126 | |||
127 | static bool sudmac_channel_busy(struct shdma_chan *schan) | ||
128 | { | ||
129 | struct sudmac_chan *sc = to_chan(schan); | ||
130 | |||
131 | return sudmac_is_busy(sc); | ||
132 | } | ||
133 | |||
134 | static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static const struct sudmac_slave_config *sudmac_find_slave( | ||
139 | struct sudmac_chan *sc, int slave_id) | ||
140 | { | ||
141 | struct sudmac_device *sdev = to_sdev(sc); | ||
142 | struct sudmac_pdata *pdata = sdev->pdata; | ||
143 | const struct sudmac_slave_config *cfg; | ||
144 | int i; | ||
145 | |||
146 | for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) | ||
147 | if (cfg->slave_id == slave_id) | ||
148 | return cfg; | ||
149 | |||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try) | ||
154 | { | ||
155 | struct sudmac_chan *sc = to_chan(schan); | ||
156 | const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); | ||
157 | |||
158 | if (!cfg) | ||
159 | return -ENODEV; | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static inline void sudmac_dma_halt(struct sudmac_chan *sc) | ||
165 | { | ||
166 | u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); | ||
167 | |||
168 | sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); | ||
169 | sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); | ||
170 | sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); | ||
171 | } | ||
172 | |||
173 | static int sudmac_desc_setup(struct shdma_chan *schan, | ||
174 | struct shdma_desc *sdesc, | ||
175 | dma_addr_t src, dma_addr_t dst, size_t *len) | ||
176 | { | ||
177 | struct sudmac_chan *sc = to_chan(schan); | ||
178 | struct sudmac_desc *sd = to_desc(sdesc); | ||
179 | |||
180 | dev_dbg(sc->shdma_chan.dev, "%s: src=%llx, dst=%llx, len=%zu\n", | ||
181 | __func__, (unsigned long long)src, (unsigned long long)dst, *len); | ||
182 | |||
183 | if (*len > schan->max_xfer_len) | ||
184 | *len = schan->max_xfer_len; | ||
185 | |||
186 | if (dst) | ||
187 | sd->hw.base_addr = dst; | ||
188 | else if (src) | ||
189 | sd->hw.base_addr = src; | ||
190 | sd->hw.base_byte_count = *len; | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static void sudmac_halt(struct shdma_chan *schan) | ||
196 | { | ||
197 | struct sudmac_chan *sc = to_chan(schan); | ||
198 | |||
199 | sudmac_dma_halt(sc); | ||
200 | } | ||
201 | |||
202 | static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) | ||
203 | { | ||
204 | struct sudmac_chan *sc = to_chan(schan); | ||
205 | u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); | ||
206 | |||
207 | if (!(dintsts & sc->dint_end_bit)) | ||
208 | return false; | ||
209 | |||
210 | /* DMA stop */ | ||
211 | sudmac_dma_halt(sc); | ||
212 | |||
213 | return true; | ||
214 | } | ||
215 | |||
216 | static size_t sudmac_get_partial(struct shdma_chan *schan, | ||
217 | struct shdma_desc *sdesc) | ||
218 | { | ||
219 | struct sudmac_chan *sc = to_chan(schan); | ||
220 | struct sudmac_desc *sd = to_desc(sdesc); | ||
221 | u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); | ||
222 | |||
223 | return sd->hw.base_byte_count - current_byte_count; | ||
224 | } | ||
225 | |||
226 | static bool sudmac_desc_completed(struct shdma_chan *schan, | ||
227 | struct shdma_desc *sdesc) | ||
228 | { | ||
229 | struct sudmac_chan *sc = to_chan(schan); | ||
230 | struct sudmac_desc *sd = to_desc(sdesc); | ||
231 | u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); | ||
232 | |||
233 | return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; | ||
234 | } | ||
235 | |||
236 | static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, | ||
237 | unsigned long flags) | ||
238 | { | ||
239 | struct shdma_dev *sdev = &su_dev->shdma_dev; | ||
240 | struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); | ||
241 | struct sudmac_chan *sc; | ||
242 | struct shdma_chan *schan; | ||
243 | int err; | ||
244 | |||
245 | sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); | ||
246 | if (!sc) { | ||
247 | dev_err(sdev->dma_dev.dev, | ||
248 | "No free memory for allocating dma channels!\n"); | ||
249 | return -ENOMEM; | ||
250 | } | ||
251 | |||
252 | schan = &sc->shdma_chan; | ||
253 | schan->max_xfer_len = 64 * 1024 * 1024 - 1; | ||
254 | |||
255 | shdma_chan_probe(sdev, schan, id); | ||
256 | |||
257 | sc->base = su_dev->chan_reg; | ||
258 | |||
259 | /* get platform_data */ | ||
260 | sc->offset = su_dev->pdata->channel->offset; | ||
261 | if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) | ||
262 | sc->cfg |= SUDMAC_SENDBUFM; | ||
263 | if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) | ||
264 | sc->cfg |= SUDMAC_RCVENDM; | ||
265 | sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; | ||
266 | |||
267 | if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) | ||
268 | sc->dint_end_bit |= SUDMAC_CH0ENDE; | ||
269 | if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) | ||
270 | sc->dint_end_bit |= SUDMAC_CH1ENDE; | ||
271 | |||
272 | /* set up channel irq */ | ||
273 | if (pdev->id >= 0) | ||
274 | snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", | ||
275 | pdev->id, id); | ||
276 | else | ||
277 | snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); | ||
278 | |||
279 | err = shdma_request_irq(schan, irq, flags, sc->dev_id); | ||
280 | if (err) { | ||
281 | dev_err(sdev->dma_dev.dev, | ||
282 | "DMA channel %d request_irq failed %d\n", id, err); | ||
283 | goto err_no_irq; | ||
284 | } | ||
285 | |||
286 | return 0; | ||
287 | |||
288 | err_no_irq: | ||
289 | /* remove from dmaengine device node */ | ||
290 | shdma_chan_remove(schan); | ||
291 | return err; | ||
292 | } | ||
293 | |||
294 | static void sudmac_chan_remove(struct sudmac_device *su_dev) | ||
295 | { | ||
296 | struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; | ||
297 | struct shdma_chan *schan; | ||
298 | int i; | ||
299 | |||
300 | shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { | ||
301 | struct sudmac_chan *sc = to_chan(schan); | ||
302 | |||
303 | BUG_ON(!schan); | ||
304 | |||
305 | shdma_free_irq(&sc->shdma_chan); | ||
306 | shdma_chan_remove(schan); | ||
307 | } | ||
308 | dma_dev->chancnt = 0; | ||
309 | } | ||
310 | |||
311 | static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) | ||
312 | { | ||
313 | /* SUDMAC doesn't need the address */ | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) | ||
318 | { | ||
319 | return &((struct sudmac_desc *)buf)[i].shdma_desc; | ||
320 | } | ||
321 | |||
322 | static const struct shdma_ops sudmac_shdma_ops = { | ||
323 | .desc_completed = sudmac_desc_completed, | ||
324 | .halt_channel = sudmac_halt, | ||
325 | .channel_busy = sudmac_channel_busy, | ||
326 | .slave_addr = sudmac_slave_addr, | ||
327 | .desc_setup = sudmac_desc_setup, | ||
328 | .set_slave = sudmac_set_slave, | ||
329 | .setup_xfer = sudmac_setup_xfer, | ||
330 | .start_xfer = sudmac_start_xfer, | ||
331 | .embedded_desc = sudmac_embedded_desc, | ||
332 | .chan_irq = sudmac_chan_irq, | ||
333 | .get_partial = sudmac_get_partial, | ||
334 | }; | ||
335 | |||
336 | static int sudmac_probe(struct platform_device *pdev) | ||
337 | { | ||
338 | struct sudmac_pdata *pdata = pdev->dev.platform_data; | ||
339 | int err, i; | ||
340 | struct sudmac_device *su_dev; | ||
341 | struct dma_device *dma_dev; | ||
342 | struct resource *chan, *irq_res; | ||
343 | |||
344 | /* get platform data */ | ||
345 | if (!pdata) | ||
346 | return -ENODEV; | ||
347 | |||
348 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
349 | irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
350 | if (!chan || !irq_res) | ||
351 | return -ENODEV; | ||
352 | |||
353 | err = -ENOMEM; | ||
354 | su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), | ||
355 | GFP_KERNEL); | ||
356 | if (!su_dev) { | ||
357 | dev_err(&pdev->dev, "Not enough memory\n"); | ||
358 | return err; | ||
359 | } | ||
360 | |||
361 | dma_dev = &su_dev->shdma_dev.dma_dev; | ||
362 | |||
363 | su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan); | ||
364 | if (!su_dev->chan_reg) | ||
365 | return err; | ||
366 | |||
367 | dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); | ||
368 | |||
369 | su_dev->shdma_dev.ops = &sudmac_shdma_ops; | ||
370 | su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); | ||
371 | err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); | ||
372 | if (err < 0) | ||
373 | return err; | ||
374 | |||
375 | /* platform data */ | ||
376 | su_dev->pdata = pdev->dev.platform_data; | ||
377 | |||
378 | platform_set_drvdata(pdev, su_dev); | ||
379 | |||
380 | /* Create DMA Channel */ | ||
381 | for (i = 0; i < pdata->channel_num; i++) { | ||
382 | err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); | ||
383 | if (err) | ||
384 | goto chan_probe_err; | ||
385 | } | ||
386 | |||
387 | err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); | ||
388 | if (err < 0) | ||
389 | goto chan_probe_err; | ||
390 | |||
391 | return err; | ||
392 | |||
393 | chan_probe_err: | ||
394 | sudmac_chan_remove(su_dev); | ||
395 | |||
396 | platform_set_drvdata(pdev, NULL); | ||
397 | shdma_cleanup(&su_dev->shdma_dev); | ||
398 | |||
399 | return err; | ||
400 | } | ||
401 | |||
402 | static int sudmac_remove(struct platform_device *pdev) | ||
403 | { | ||
404 | struct sudmac_device *su_dev = platform_get_drvdata(pdev); | ||
405 | struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; | ||
406 | |||
407 | dma_async_device_unregister(dma_dev); | ||
408 | sudmac_chan_remove(su_dev); | ||
409 | shdma_cleanup(&su_dev->shdma_dev); | ||
410 | platform_set_drvdata(pdev, NULL); | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | static struct platform_driver sudmac_driver = { | ||
416 | .driver = { | ||
417 | .owner = THIS_MODULE, | ||
418 | .name = SUDMAC_DRV_NAME, | ||
419 | }, | ||
420 | .probe = sudmac_probe, | ||
421 | .remove = sudmac_remove, | ||
422 | }; | ||
423 | module_platform_driver(sudmac_driver); | ||
424 | |||
425 | MODULE_AUTHOR("Yoshihiro Shimoda"); | ||
426 | MODULE_DESCRIPTION("Renesas SUDMAC driver"); | ||
427 | MODULE_LICENSE("GPL v2"); | ||
428 | MODULE_ALIAS("platform:" SUDMAC_DRV_NAME); | ||
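
The new driver registers its channels through the shdma-base library, so a slave client would obtain one of them via the generic dmaengine filter mechanism rather than any SUDMAC-specific API. A minimal sketch, assuming the usual shdma convention of passing the slave ID as the filter parameter (all names and values here are illustrative, not part of the patch):

    #include <linux/dmaengine.h>
    #include <linux/shdma-base.h>

    static struct dma_chan *example_request_sudmac_chan(int slave_id)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* shdma_chan_filter() matches a channel against the slave ID */
            return dma_request_channel(mask, shdma_chan_filter,
                                       (void *)(unsigned long)slave_id);
    }
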
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 1d627e2391f4..1765a0a2736d 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/of_address.h> | 16 | #include <linux/of_address.h> |
17 | #include <linux/of_device.h> | 17 | #include <linux/of_device.h> |
18 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
19 | #include <linux/clk.h> | ||
19 | #include <linux/sirfsoc_dma.h> | 20 | #include <linux/sirfsoc_dma.h> |
20 | 21 | ||
21 | #include "dmaengine.h" | 22 | #include "dmaengine.h" |
@@ -78,6 +79,7 @@ struct sirfsoc_dma { | |||
78 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; | 79 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; |
79 | void __iomem *base; | 80 | void __iomem *base; |
80 | int irq; | 81 | int irq; |
82 | struct clk *clk; | ||
81 | bool is_marco; | 83 | bool is_marco; |
82 | }; | 84 | }; |
83 | 85 | ||
@@ -639,6 +641,12 @@ static int sirfsoc_dma_probe(struct platform_device *op) | |||
639 | return -EINVAL; | 641 | return -EINVAL; |
640 | } | 642 | } |
641 | 643 | ||
644 | sdma->clk = devm_clk_get(dev, NULL); | ||
645 | if (IS_ERR(sdma->clk)) { | ||
646 | dev_err(dev, "failed to get a clock.\n"); | ||
647 | return PTR_ERR(sdma->clk); | ||
648 | } | ||
649 | |||
642 | ret = of_address_to_resource(dn, 0, &res); | 650 | ret = of_address_to_resource(dn, 0, &res); |
643 | if (ret) { | 651 | if (ret) { |
644 | dev_err(dev, "Error parsing memory region!\n"); | 652 | dev_err(dev, "Error parsing memory region!\n"); |
@@ -698,6 +706,8 @@ static int sirfsoc_dma_probe(struct platform_device *op) | |||
698 | 706 | ||
699 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); | 707 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); |
700 | 708 | ||
709 | clk_prepare_enable(sdma->clk); | ||
710 | |||
701 | /* Register DMA engine */ | 711 | /* Register DMA engine */ |
702 | dev_set_drvdata(dev, sdma); | 712 | dev_set_drvdata(dev, sdma); |
703 | ret = dma_async_device_register(dma); | 713 | ret = dma_async_device_register(dma); |
@@ -720,6 +730,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) | |||
720 | struct device *dev = &op->dev; | 730 | struct device *dev = &op->dev; |
721 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | 731 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); |
722 | 732 | ||
733 | clk_disable_unprepare(sdma->clk); | ||
723 | dma_async_device_unregister(&sdma->dma); | 734 | dma_async_device_unregister(&sdma->dma); |
724 | free_irq(sdma->irq, sdma); | 735 | free_irq(sdma->irq, sdma); |
725 | irq_dispose_mapping(sdma->irq); | 736 | irq_dispose_mapping(sdma->irq); |
@@ -742,7 +753,18 @@ static struct platform_driver sirfsoc_dma_driver = { | |||
742 | }, | 753 | }, |
743 | }; | 754 | }; |
744 | 755 | ||
745 | module_platform_driver(sirfsoc_dma_driver); | 756 | static __init int sirfsoc_dma_init(void) |
757 | { | ||
758 | return platform_driver_register(&sirfsoc_dma_driver); | ||
759 | } | ||
760 | |||
761 | static void __exit sirfsoc_dma_exit(void) | ||
762 | { | ||
763 | platform_driver_unregister(&sirfsoc_dma_driver); | ||
764 | } | ||
765 | |||
766 | subsys_initcall(sirfsoc_dma_init); | ||
767 | module_exit(sirfsoc_dma_exit); | ||
746 | 768 | ||
747 | MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " | 769 | MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " |
748 | "Barry Song <baohua.song@csr.com>"); | 770 | "Barry Song <baohua.song@csr.com>"); |
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index fcee27eae1f6..ce193409ebd3 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/of.h> | 30 | #include <linux/of.h> |
31 | #include <linux/of_device.h> | 31 | #include <linux/of_device.h> |
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/pm.h> | ||
33 | #include <linux/pm_runtime.h> | 34 | #include <linux/pm_runtime.h> |
34 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
35 | #include <linux/clk/tegra.h> | 36 | #include <linux/clk/tegra.h> |
@@ -199,6 +200,7 @@ struct tegra_dma_channel { | |||
199 | 200 | ||
200 | /* Channel-slave specific configuration */ | 201 | /* Channel-slave specific configuration */ |
201 | struct dma_slave_config dma_sconfig; | 202 | struct dma_slave_config dma_sconfig; |
203 | struct tegra_dma_channel_regs channel_reg; | ||
202 | }; | 204 | }; |
203 | 205 | ||
204 | /* tegra_dma: Tegra DMA specific information */ | 206 | /* tegra_dma: Tegra DMA specific information */ |
@@ -1213,7 +1215,6 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { | |||
1213 | .support_channel_pause = false, | 1215 | .support_channel_pause = false, |
1214 | }; | 1216 | }; |
1215 | 1217 | ||
1216 | #if defined(CONFIG_OF) | ||
1217 | /* Tegra30 specific DMA controller information */ | 1218 | /* Tegra30 specific DMA controller information */ |
1218 | static const struct tegra_dma_chip_data tegra30_dma_chip_data = { | 1219 | static const struct tegra_dma_chip_data tegra30_dma_chip_data = { |
1219 | .nr_channels = 32, | 1220 | .nr_channels = 32, |
@@ -1243,7 +1244,6 @@ static const struct of_device_id tegra_dma_of_match[] = { | |||
1243 | }, | 1244 | }, |
1244 | }; | 1245 | }; |
1245 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); | 1246 | MODULE_DEVICE_TABLE(of, tegra_dma_of_match); |
1246 | #endif | ||
1247 | 1247 | ||
1248 | static int tegra_dma_probe(struct platform_device *pdev) | 1248 | static int tegra_dma_probe(struct platform_device *pdev) |
1249 | { | 1249 | { |
@@ -1252,20 +1252,14 @@ static int tegra_dma_probe(struct platform_device *pdev) | |||
1252 | int ret; | 1252 | int ret; |
1253 | int i; | 1253 | int i; |
1254 | const struct tegra_dma_chip_data *cdata = NULL; | 1254 | const struct tegra_dma_chip_data *cdata = NULL; |
1255 | const struct of_device_id *match; | ||
1255 | 1256 | ||
1256 | if (pdev->dev.of_node) { | 1257 | match = of_match_device(tegra_dma_of_match, &pdev->dev); |
1257 | const struct of_device_id *match; | 1258 | if (!match) { |
1258 | match = of_match_device(of_match_ptr(tegra_dma_of_match), | 1259 | dev_err(&pdev->dev, "Error: No device match found\n"); |
1259 | &pdev->dev); | 1260 | return -ENODEV; |
1260 | if (!match) { | ||
1261 | dev_err(&pdev->dev, "Error: No device match found\n"); | ||
1262 | return -ENODEV; | ||
1263 | } | ||
1264 | cdata = match->data; | ||
1265 | } else { | ||
1266 | /* If no device tree then fallback to tegra20 */ | ||
1267 | cdata = &tegra20_dma_chip_data; | ||
1268 | } | 1261 | } |
1262 | cdata = match->data; | ||
1269 | 1263 | ||
1270 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * | 1264 | tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * |
1271 | sizeof(struct tegra_dma_channel), GFP_KERNEL); | 1265 | sizeof(struct tegra_dma_channel), GFP_KERNEL); |
@@ -1448,11 +1442,74 @@ static int tegra_dma_runtime_resume(struct device *dev) | |||
1448 | return 0; | 1442 | return 0; |
1449 | } | 1443 | } |
1450 | 1444 | ||
1445 | #ifdef CONFIG_PM_SLEEP | ||
1446 | static int tegra_dma_pm_suspend(struct device *dev) | ||
1447 | { | ||
1448 | struct tegra_dma *tdma = dev_get_drvdata(dev); | ||
1449 | int i; | ||
1450 | int ret; | ||
1451 | |||
1452 | /* Enable clock before accessing register */ | ||
1453 | ret = tegra_dma_runtime_resume(dev); | ||
1454 | if (ret < 0) | ||
1455 | return ret; | ||
1456 | |||
1457 | tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); | ||
1458 | for (i = 0; i < tdma->chip_data->nr_channels; i++) { | ||
1459 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1460 | struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; | ||
1461 | |||
1462 | ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); | ||
1463 | ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); | ||
1464 | ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); | ||
1465 | ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); | ||
1466 | ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); | ||
1467 | } | ||
1468 | |||
1469 | /* Disable clock */ | ||
1470 | tegra_dma_runtime_suspend(dev); | ||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | static int tegra_dma_pm_resume(struct device *dev) | ||
1475 | { | ||
1476 | struct tegra_dma *tdma = dev_get_drvdata(dev); | ||
1477 | int i; | ||
1478 | int ret; | ||
1479 | |||
1480 | /* Enable clock before accessing register */ | ||
1481 | ret = tegra_dma_runtime_resume(dev); | ||
1482 | if (ret < 0) | ||
1483 | return ret; | ||
1484 | |||
1485 | tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); | ||
1486 | tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); | ||
1487 | tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); | ||
1488 | |||
1489 | for (i = 0; i < tdma->chip_data->nr_channels; i++) { | ||
1490 | struct tegra_dma_channel *tdc = &tdma->channels[i]; | ||
1491 | struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; | ||
1492 | |||
1493 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); | ||
1494 | tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); | ||
1495 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); | ||
1496 | tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr); | ||
1497 | tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, | ||
1498 | (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); | ||
1499 | } | ||
1500 | |||
1501 | /* Disable clock */ | ||
1502 | tegra_dma_runtime_suspend(dev); | ||
1503 | return 0; | ||
1504 | } | ||
1505 | #endif | ||
1506 | |||
1451 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { | 1507 | static const struct dev_pm_ops tegra_dma_dev_pm_ops = { |
1452 | #ifdef CONFIG_PM_RUNTIME | 1508 | #ifdef CONFIG_PM_RUNTIME |
1453 | .runtime_suspend = tegra_dma_runtime_suspend, | 1509 | .runtime_suspend = tegra_dma_runtime_suspend, |
1454 | .runtime_resume = tegra_dma_runtime_resume, | 1510 | .runtime_resume = tegra_dma_runtime_resume, |
1455 | #endif | 1511 | #endif |
1512 | SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) | ||
1456 | }; | 1513 | }; |
1457 | 1514 | ||
1458 | static struct platform_driver tegra_dmac_driver = { | 1515 | static struct platform_driver tegra_dmac_driver = { |
@@ -1460,7 +1517,7 @@ static struct platform_driver tegra_dmac_driver = { | |||
1460 | .name = "tegra-apbdma", | 1517 | .name = "tegra-apbdma", |
1461 | .owner = THIS_MODULE, | 1518 | .owner = THIS_MODULE, |
1462 | .pm = &tegra_dma_dev_pm_ops, | 1519 | .pm = &tegra_dma_dev_pm_ops, |
1463 | .of_match_table = of_match_ptr(tegra_dma_of_match), | 1520 | .of_match_table = tegra_dma_of_match, |
1464 | }, | 1521 | }, |
1465 | .probe = tegra_dma_probe, | 1522 | .probe = tegra_dma_probe, |
1466 | .remove = tegra_dma_remove, | 1523 | .remove = tegra_dma_remove, |
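
The new tegra20-apb-dma sleep hooks reuse the driver's runtime-PM callbacks purely as clock gates: runtime resume ungates the clock, the general and per-channel registers are saved or restored, then runtime suspend gates the clock again, and CSR is restored with TEGRA_APBDMA_CSR_ENB masked off so no channel restarts on its own. The shape of that pattern, as a sketch with illustrative names:

    static int example_runtime_resume(struct device *dev)
    {
            /* ungate the controller clock */
            return 0;
    }

    static int example_runtime_suspend(struct device *dev)
    {
            /* gate the controller clock */
            return 0;
    }

    static int example_pm_suspend(struct device *dev)
    {
            int ret = example_runtime_resume(dev);  /* clock on */

            if (ret < 0)
                    return ret;

            /* ... save controller and per-channel registers ... */

            return example_runtime_suspend(dev);    /* clock off */
    }
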
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 952f823901a6..26107ba6edb3 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -823,7 +823,7 @@ static struct platform_driver td_driver = { | |||
823 | .owner = THIS_MODULE, | 823 | .owner = THIS_MODULE, |
824 | }, | 824 | }, |
825 | .probe = td_probe, | 825 | .probe = td_probe, |
826 | .remove = __exit_p(td_remove), | 826 | .remove = td_remove, |
827 | }; | 827 | }; |
828 | 828 | ||
829 | module_platform_driver(td_driver); | 829 | module_platform_driver(td_driver); |
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 913f55c76c99..a59fb4841d4c 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -1190,7 +1190,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) | |||
1190 | return 0; | 1190 | return 0; |
1191 | } | 1191 | } |
1192 | 1192 | ||
1193 | static int __exit txx9dmac_chan_remove(struct platform_device *pdev) | 1193 | static int txx9dmac_chan_remove(struct platform_device *pdev) |
1194 | { | 1194 | { |
1195 | struct txx9dmac_chan *dc = platform_get_drvdata(pdev); | 1195 | struct txx9dmac_chan *dc = platform_get_drvdata(pdev); |
1196 | 1196 | ||
@@ -1252,7 +1252,7 @@ static int __init txx9dmac_probe(struct platform_device *pdev) | |||
1252 | return 0; | 1252 | return 0; |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | static int __exit txx9dmac_remove(struct platform_device *pdev) | 1255 | static int txx9dmac_remove(struct platform_device *pdev) |
1256 | { | 1256 | { |
1257 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); | 1257 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); |
1258 | 1258 | ||
@@ -1299,14 +1299,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = { | |||
1299 | }; | 1299 | }; |
1300 | 1300 | ||
1301 | static struct platform_driver txx9dmac_chan_driver = { | 1301 | static struct platform_driver txx9dmac_chan_driver = { |
1302 | .remove = __exit_p(txx9dmac_chan_remove), | 1302 | .remove = txx9dmac_chan_remove, |
1303 | .driver = { | 1303 | .driver = { |
1304 | .name = "txx9dmac-chan", | 1304 | .name = "txx9dmac-chan", |
1305 | }, | 1305 | }, |
1306 | }; | 1306 | }; |
1307 | 1307 | ||
1308 | static struct platform_driver txx9dmac_driver = { | 1308 | static struct platform_driver txx9dmac_driver = { |
1309 | .remove = __exit_p(txx9dmac_remove), | 1309 | .remove = txx9dmac_remove, |
1310 | .shutdown = txx9dmac_shutdown, | 1310 | .shutdown = txx9dmac_shutdown, |
1311 | .driver = { | 1311 | .driver = { |
1312 | .name = "txx9dmac", | 1312 | .name = "txx9dmac", |
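
The timb_dma and txx9dmac changes share one rationale: a platform driver's remove() callback can run whenever the device is unbound (for example via sysfs), not only at module exit. When the driver is built in, __exit code is discarded and __exit_p(fn) evaluates to NULL, so the old annotations silently turned unbind into a no-op that leaked resources. The corrected pattern, sketched:

    static int example_probe(struct platform_device *pdev)
    {
            /* acquire resources */
            return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
            /* release everything probe acquired */
            return 0;
    }

    static struct platform_driver example_driver = {
            .probe  = example_probe,
            .remove = example_remove,       /* not __exit_p(example_remove) */
            .driver = {
                    .name = "example",
            },
    };
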
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h new file mode 100644 index 000000000000..d09deabc7bf6 --- /dev/null +++ b/include/linux/acpi_dma.h | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * ACPI helpers for DMA request / controller | ||
3 | * | ||
4 | * Based on of_dma.h | ||
5 | * | ||
6 | * Copyright (C) 2013, Intel Corporation | ||
7 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #ifndef __LINUX_ACPI_DMA_H | ||
15 | #define __LINUX_ACPI_DMA_H | ||
16 | |||
17 | #include <linux/list.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/dmaengine.h> | ||
20 | |||
21 | /** | ||
22 | * struct acpi_dma_spec - slave device DMA resources | ||
23 | * @chan_id: channel unique id | ||
24 | * @slave_id: request line unique id | ||
25 | * @dev: struct device of the DMA controller to be used in the filter | ||
26 | * function | ||
27 | */ | ||
28 | struct acpi_dma_spec { | ||
29 | int chan_id; | ||
30 | int slave_id; | ||
31 | struct device *dev; | ||
32 | }; | ||
33 | |||
34 | /** | ||
35 | * struct acpi_dma - representation of the registered DMAC | ||
36 | * @dma_controllers: linked list node | ||
37 | * @dev: struct device of this controller | ||
38 | * @acpi_dma_xlate: callback function to find a suitable channel | ||
39 | * @data: private data used by a callback function | ||
40 | */ | ||
41 | struct acpi_dma { | ||
42 | struct list_head dma_controllers; | ||
43 | struct device *dev; | ||
44 | struct dma_chan *(*acpi_dma_xlate) | ||
45 | (struct acpi_dma_spec *, struct acpi_dma *); | ||
46 | void *data; | ||
47 | }; | ||
48 | |||
49 | /* Used with acpi_dma_simple_xlate() */ | ||
50 | struct acpi_dma_filter_info { | ||
51 | dma_cap_mask_t dma_cap; | ||
52 | dma_filter_fn filter_fn; | ||
53 | }; | ||
54 | |||
55 | #ifdef CONFIG_DMA_ACPI | ||
56 | |||
57 | int acpi_dma_controller_register(struct device *dev, | ||
58 | struct dma_chan *(*acpi_dma_xlate) | ||
59 | (struct acpi_dma_spec *, struct acpi_dma *), | ||
60 | void *data); | ||
61 | int acpi_dma_controller_free(struct device *dev); | ||
62 | int devm_acpi_dma_controller_register(struct device *dev, | ||
63 | struct dma_chan *(*acpi_dma_xlate) | ||
64 | (struct acpi_dma_spec *, struct acpi_dma *), | ||
65 | void *data); | ||
66 | void devm_acpi_dma_controller_free(struct device *dev); | ||
67 | |||
68 | struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, | ||
69 | size_t index); | ||
70 | struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, | ||
71 | const char *name); | ||
72 | |||
73 | struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, | ||
74 | struct acpi_dma *adma); | ||
75 | #else | ||
76 | |||
77 | static inline int acpi_dma_controller_register(struct device *dev, | ||
78 | struct dma_chan *(*acpi_dma_xlate) | ||
79 | (struct acpi_dma_spec *, struct acpi_dma *), | ||
80 | void *data) | ||
81 | { | ||
82 | return -ENODEV; | ||
83 | } | ||
84 | static inline int acpi_dma_controller_free(struct device *dev) | ||
85 | { | ||
86 | return -ENODEV; | ||
87 | } | ||
88 | static inline int devm_acpi_dma_controller_register(struct device *dev, | ||
89 | struct dma_chan *(*acpi_dma_xlate) | ||
90 | (struct acpi_dma_spec *, struct acpi_dma *), | ||
91 | void *data) | ||
92 | { | ||
93 | return -ENODEV; | ||
94 | } | ||
95 | static inline void devm_acpi_dma_controller_free(struct device *dev) | ||
96 | { | ||
97 | } | ||
98 | |||
99 | static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( | ||
100 | struct device *dev, size_t index) | ||
101 | { | ||
102 | return NULL; | ||
103 | } | ||
104 | static inline struct dma_chan *acpi_dma_request_slave_chan_by_name( | ||
105 | struct device *dev, const char *name) | ||
106 | { | ||
107 | return NULL; | ||
108 | } | ||
109 | |||
110 | #define acpi_dma_simple_xlate NULL | ||
111 | |||
112 | #endif | ||
113 | |||
114 | #define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index | ||
115 | |||
116 | #endif /* __LINUX_ACPI_DMA_H */ | ||
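
Controllers that have no need for a custom translation can pair acpi_dma_simple_xlate() with a struct acpi_dma_filter_info holding the capability mask and a filter callback; the xlate helper then requests a channel through the ordinary dmaengine filter path. A sketch, assuming acpi_dma_simple_xlate() hands the struct acpi_dma_spec to the filter as its parameter (all names are illustrative):

    #include <linux/acpi_dma.h>

    static bool example_filter(struct dma_chan *chan, void *param)
    {
            struct acpi_dma_spec *dma_spec = param;

            /* match the owning controller and the requested channel */
            return chan->device->dev == dma_spec->dev &&
                   chan->chan_id == dma_spec->chan_id;
    }

    static int example_register_controller(struct device *dev)
    {
            static struct acpi_dma_filter_info info;

            dma_cap_zero(info.dma_cap);
            dma_cap_set(DMA_SLAVE, info.dma_cap);
            info.filter_fn = example_filter;

            return devm_acpi_dma_controller_register(dev,
                            acpi_dma_simple_xlate, &info);
    }
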
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 91ac8da25020..96d3e4ab11a9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -967,8 +967,9 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | |||
967 | #ifdef CONFIG_DMA_ENGINE | 967 | #ifdef CONFIG_DMA_ENGINE |
968 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 968 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
969 | void dma_issue_pending_all(void); | 969 | void dma_issue_pending_all(void); |
970 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | 970 | struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
971 | struct dma_chan *dma_request_slave_channel(struct device *dev, char *name); | 971 | dma_filter_fn fn, void *fn_param); |
972 | struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); | ||
972 | void dma_release_channel(struct dma_chan *chan); | 973 | void dma_release_channel(struct dma_chan *chan); |
973 | #else | 974 | #else |
974 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 975 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
@@ -978,13 +979,13 @@ static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descript | |||
978 | static inline void dma_issue_pending_all(void) | 979 | static inline void dma_issue_pending_all(void) |
979 | { | 980 | { |
980 | } | 981 | } |
981 | static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, | 982 | static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, |
982 | dma_filter_fn fn, void *fn_param) | 983 | dma_filter_fn fn, void *fn_param) |
983 | { | 984 | { |
984 | return NULL; | 985 | return NULL; |
985 | } | 986 | } |
986 | static inline struct dma_chan *dma_request_slave_channel(struct device *dev, | 987 | static inline struct dma_chan *dma_request_slave_channel(struct device *dev, |
987 | char *name) | 988 | const char *name) |
988 | { | 989 | { |
989 | return NULL; | 990 | return NULL; |
990 | } | 991 | } |
@@ -1005,9 +1006,9 @@ struct dma_chan *net_dma_find_channel(void); | |||
1005 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) | 1006 | __dma_request_slave_channel_compat(&(mask), x, y, dev, name) |
1006 | 1007 | ||
1007 | static inline struct dma_chan | 1008 | static inline struct dma_chan |
1008 | *__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn, | 1009 | *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, |
1009 | void *fn_param, struct device *dev, | 1010 | dma_filter_fn fn, void *fn_param, |
1010 | char *name) | 1011 | struct device *dev, char *name) |
1011 | { | 1012 | { |
1012 | struct dma_chan *chan; | 1013 | struct dma_chan *chan; |
1013 | 1014 | ||
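
Alongside the constified prototypes sits the dma_request_slave_channel_compat() helper, which lets a client try the named slave-channel lookup (device tree or ACPI) first and fall back to a legacy filter function if that finds nothing. Illustrative client code (the filter and its parameter are assumptions):

    static struct dma_chan *example_request_tx(struct device *dev,
                                               dma_filter_fn filter,
                                               void *filter_param)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* try dev's "tx" slave channel, else fall back to the filter */
            return dma_request_slave_channel_compat(mask, filter,
                                                    filter_param, dev, "tx");
    }
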
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index d15073e080dd..364dda734877 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
@@ -25,7 +25,6 @@ struct of_dma { | |||
25 | struct dma_chan *(*of_dma_xlate) | 25 | struct dma_chan *(*of_dma_xlate) |
26 | (struct of_phandle_args *, struct of_dma *); | 26 | (struct of_phandle_args *, struct of_dma *); |
27 | void *of_dma_data; | 27 | void *of_dma_data; |
28 | int use_count; | ||
29 | }; | 28 | }; |
30 | 29 | ||
31 | struct of_dma_filter_info { | 30 | struct of_dma_filter_info { |
@@ -38,9 +37,9 @@ extern int of_dma_controller_register(struct device_node *np, | |||
38 | struct dma_chan *(*of_dma_xlate) | 37 | struct dma_chan *(*of_dma_xlate) |
39 | (struct of_phandle_args *, struct of_dma *), | 38 | (struct of_phandle_args *, struct of_dma *), |
40 | void *data); | 39 | void *data); |
41 | extern int of_dma_controller_free(struct device_node *np); | 40 | extern void of_dma_controller_free(struct device_node *np); |
42 | extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 41 | extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
43 | char *name); | 42 | const char *name); |
44 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, | 43 | extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, |
45 | struct of_dma *ofdma); | 44 | struct of_dma *ofdma); |
46 | #else | 45 | #else |
@@ -52,13 +51,12 @@ static inline int of_dma_controller_register(struct device_node *np, | |||
52 | return -ENODEV; | 51 | return -ENODEV; |
53 | } | 52 | } |
54 | 53 | ||
55 | static inline int of_dma_controller_free(struct device_node *np) | 54 | static inline void of_dma_controller_free(struct device_node *np) |
56 | { | 55 | { |
57 | return -ENODEV; | ||
58 | } | 56 | } |
59 | 57 | ||
60 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, | 58 | static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, |
61 | char *name) | 59 | const char *name) |
62 | { | 60 | { |
63 | return NULL; | 61 | return NULL; |
64 | } | 62 | } |
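
With the use_count field gone, of_dma_controller_free() can no longer fail and now returns void, so callers need no error handling on the unregistration path. A minimal sketch of the register/free pairing (names illustrative):

    #include <linux/of_dma.h>

    static struct of_dma_filter_info example_info;  /* dma_cap + filter_fn */

    static int example_register(struct platform_device *pdev)
    {
            return of_dma_controller_register(pdev->dev.of_node,
                                              of_dma_simple_xlate,
                                              &example_info);
    }

    static void example_unregister(struct platform_device *pdev)
    {
            /* returns void now: nothing to check */
            of_dma_controller_free(pdev->dev.of_node);
    }
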
diff --git a/include/linux/sudmac.h b/include/linux/sudmac.h new file mode 100644 index 000000000000..377b8a5788fa --- /dev/null +++ b/include/linux/sudmac.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Header for the SUDMAC driver | ||
3 | * | ||
4 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
5 | * | ||
6 | * This is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of version 2 of the GNU General Public License as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #ifndef SUDMAC_H | ||
11 | #define SUDMAC_H | ||
12 | |||
13 | #include <linux/dmaengine.h> | ||
14 | #include <linux/shdma-base.h> | ||
15 | #include <linux/types.h> | ||
16 | |||
17 | /* Used by slave DMA clients to request DMA to/from a specific peripheral */ | ||
18 | struct sudmac_slave { | ||
19 | struct shdma_slave shdma_slave; /* Set by the platform */ | ||
20 | }; | ||
21 | |||
22 | /* | ||
23 | * Supplied by platforms to specify how a DMA channel has to be configured for | ||
24 | * a certain peripheral | ||
25 | */ | ||
26 | struct sudmac_slave_config { | ||
27 | int slave_id; | ||
28 | }; | ||
29 | |||
30 | struct sudmac_channel { | ||
31 | unsigned long offset; | ||
32 | unsigned long config; | ||
33 | unsigned long wait; /* The configurable range is 0 to 3 */ | ||
34 | unsigned long dint_end_bit; | ||
35 | }; | ||
36 | |||
37 | struct sudmac_pdata { | ||
38 | const struct sudmac_slave_config *slave; | ||
39 | int slave_num; | ||
40 | const struct sudmac_channel *channel; | ||
41 | int channel_num; | ||
42 | }; | ||
43 | |||
44 | /* Definitions for the sudmac_channel.config */ | ||
45 | #define SUDMAC_TX_BUFFER_MODE BIT(0) | ||
46 | #define SUDMAC_RX_END_MODE BIT(1) | ||
47 | |||
48 | /* Definitions for the sudmac_channel.dint_end_bit */ | ||
49 | #define SUDMAC_DMA_BIT_CH0 BIT(0) | ||
50 | #define SUDMAC_DMA_BIT_CH1 BIT(1) | ||
51 | |||
52 | #endif | ||
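
Tying the header back to sudmac_probe() above: the driver iterates pdata->channel for channel_num entries, so a platform would describe its channels along these lines (hypothetical board data; every value is illustrative):

    #include <linux/kernel.h>
    #include <linux/sudmac.h>

    static const struct sudmac_slave_config example_slaves[] = {
            { .slave_id = 0 },
            { .slave_id = 1 },
    };

    static const struct sudmac_channel example_channels[] = {
            {
                    .offset       = 0x0000,
                    .config       = SUDMAC_TX_BUFFER_MODE,
                    .wait         = 3,      /* valid range is 0 to 3 */
                    .dint_end_bit = SUDMAC_DMA_BIT_CH0,
            },
            {
                    .offset       = 0x0020,
                    .config       = SUDMAC_RX_END_MODE,
                    .wait         = 3,
                    .dint_end_bit = SUDMAC_DMA_BIT_CH1,
            },
    };

    static struct sudmac_pdata example_pdata = {
            .slave       = example_slaves,
            .slave_num   = ARRAY_SIZE(example_slaves),
            .channel     = example_channels,
            .channel_num = ARRAY_SIZE(example_channels),
    };
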