73 files changed, 1872 insertions(+), 530 deletions(-)
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
new file mode 100644
index 000000000000..3c046ee6e8b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/atmel-dma.txt
@@ -0,0 +1,14 @@
+* Atmel Direct Memory Access Controller (DMA)
+
+Required properties:
+- compatible: Should be "atmel,<chip>-dma"
+- reg: Should contain DMA registers location and length
+- interrupts: Should contain DMA interrupt
+
+Examples:
+
+dma@ffffec00 {
+	compatible = "atmel,at91sam9g45-dma";
+	reg = <0xffffec00 0x200>;
+	interrupts = <21>;
+};
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine.txt
index 94b7e0f96b38..bbe6cb3d1856 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine.txt
@@ -75,6 +75,10 @@ The slave DMA usage consists of following steps:
    slave_sg	- DMA a list of scatter gather buffers from/to a peripheral
    dma_cyclic	- Perform a cyclic DMA operation from/to a peripheral till the
 		operation is explicitly stopped.
+   interleaved_dma - This is common to Slave as well as M2M clients. For slave,
+		the address of the device's fifo could already be known to the
+		driver. Various types of operations could be expressed by setting
+		appropriate values in the 'dma_interleaved_template' members.
 
    A non-NULL return of this transfer API represents a "descriptor" for
    the given transaction.
@@ -89,6 +93,10 @@ The slave DMA usage consists of following steps:
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_data_direction direction);
 
+	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+		struct dma_chan *chan, struct dma_interleaved_template *xt,
+		unsigned long flags);
+
    The peripheral driver is expected to have mapped the scatterlist for
    the DMA operation prior to calling device_prep_slave_sg, and must
    keep the scatterlist mapped until the DMA operation has completed.
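
The template-based callback above is easiest to see with a concrete call. As a rough illustration only (not part of any patch in this series), a mem-to-mem client could describe a single strided frame like this; the channel, DMA addresses, transfer length and stride are assumed to be set up elsewhere:

    struct dma_interleaved_template *xt;
    struct dma_async_tx_descriptor *desc;

    /* one frame made of one chunk, with a gap before the next chunk */
    xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
    xt->src_start = src;                /* assumed dma_addr_t, already mapped */
    xt->dst_start = dst;
    xt->dir = DMA_MEM_TO_MEM;
    xt->src_inc = true;
    xt->dst_inc = true;
    xt->src_sgl = false;
    xt->dst_sgl = true;                 /* apply the inter-chunk gap on the destination */
    xt->numf = 1;
    xt->frame_size = 1;
    xt->sgl[0].size = len;
    xt->sgl[0].icg = stride - len;

    desc = chan->device->device_prep_interleaved_dma(chan, xt,
                                                     DMA_PREP_INTERRUPT);
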
diff --git a/MAINTAINERS b/MAINTAINERS
index ece8935025e3..341dee3b02c6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -745,6 +745,7 @@ M:	Barry Song <baohua.song@csr.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-prima2/
+F:	drivers/dma/sirf-dma*
 
 ARM/EBSA110 MACHINE SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 46d4d876e6fb..e82c642fa53c 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -37,7 +37,7 @@
  */
 struct ep93xx_dma_data {
 	int			port;
-	enum dma_data_direction	direction;
+	enum dma_transfer_direction direction;
 	const char		*name;
 };
 
@@ -80,14 +80,14 @@ static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
  * channel supports given DMA direction. Only M2P channels have such
  * limitation, for M2M channels the direction is configurable.
  */
-static inline enum dma_data_direction
+static inline enum dma_transfer_direction
 ep93xx_dma_chan_direction(struct dma_chan *chan)
 {
 	if (!ep93xx_dma_chan_is_m2p(chan))
 		return DMA_NONE;
 
 	/* even channels are for TX, odd for RX */
-	return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 }
 
 #endif /* __ASM_ARCH_DMA_H */
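
The conversions that follow are mechanical: the dma-mapping enum dma_data_direction gives way to the dmaengine-specific enum dma_transfer_direction, with DMA_TO_DEVICE becoming DMA_MEM_TO_DEV and DMA_FROM_DEVICE becoming DMA_DEV_TO_MEM. For a client driver the rename is visible mainly in dma_slave_config; a minimal sketch of a receive channel, assuming a peripheral FIFO at fifo_phys:

    struct dma_slave_config cfg = {
        .direction      = DMA_DEV_TO_MEM,       /* was DMA_FROM_DEVICE */
        .src_addr       = fifo_phys,
        .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .src_maxburst   = 1,
    };

    dmaengine_slave_config(chan, &cfg);
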
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 1ea89be63e29..6fcf304d3cdf 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -445,31 +445,39 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
 	},
 };
 
+#define SH7372_CHCLR 0x220
+
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
 	{
 		.offset = 0,
 		.dmars = 0,
 		.dmars_bit = 0,
+		.chclr_offset = SH7372_CHCLR + 0,
 	}, {
 		.offset = 0x10,
 		.dmars = 0,
 		.dmars_bit = 8,
+		.chclr_offset = SH7372_CHCLR + 0x10,
 	}, {
 		.offset = 0x20,
 		.dmars = 4,
 		.dmars_bit = 0,
+		.chclr_offset = SH7372_CHCLR + 0x20,
 	}, {
 		.offset = 0x30,
 		.dmars = 4,
 		.dmars_bit = 8,
+		.chclr_offset = SH7372_CHCLR + 0x30,
 	}, {
 		.offset = 0x50,
 		.dmars = 8,
 		.dmars_bit = 0,
+		.chclr_offset = SH7372_CHCLR + 0x50,
 	}, {
 		.offset = 0x60,
 		.dmars = 8,
 		.dmars_bit = 8,
+		.chclr_offset = SH7372_CHCLR + 0x60,
 	}
 };
 
@@ -487,6 +495,7 @@ static struct sh_dmae_pdata dma_platform_data = {
 	.ts_shift	= ts_shift,
 	.ts_shift_num	= ARRAY_SIZE(ts_shift),
 	.dmaor_init	= DMAOR_DME,
+	.chclr_present	= 1,
 };
 
 /* Resource order important! */
@@ -494,7 +503,7 @@ static struct resource sh7372_dmae0_resources[] = {
 	{
 		/* Channel registers and DMAOR */
 		.start	= 0xfe008020,
-		.end	= 0xfe00808f,
+		.end	= 0xfe00828f,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
@@ -522,7 +531,7 @@ static struct resource sh7372_dmae1_resources[] = {
 	{
 		/* Channel registers and DMAOR */
 		.start	= 0xfe018020,
-		.end	= 0xfe01808f,
+		.end	= 0xfe01828f,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
@@ -550,7 +559,7 @@ static struct resource sh7372_dmae2_resources[] = {
 	{
 		/* Channel registers and DMAOR */
 		.start	= 0xfe028020,
-		.end	= 0xfe02808f,
+		.end	= 0xfe02828f,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
diff --git a/arch/arm/plat-mxc/include/mach/mx3fb.h b/arch/arm/plat-mxc/include/mach/mx3fb.h
index ac24c5c4bc83..fdbe60001542 100644
--- a/arch/arm/plat-mxc/include/mach/mx3fb.h
+++ b/arch/arm/plat-mxc/include/mach/mx3fb.h
@@ -22,6 +22,20 @@
 #define FB_SYNC_SWAP_RGB	0x04000000
 #define FB_SYNC_CLK_SEL_EN	0x02000000
 
+/*
+ * Specify the way your display is connected. The IPU can arbitrarily
+ * map the internal colors to the external data lines. We only support
+ * the following mappings at the moment.
+ */
+enum disp_data_mapping {
+	/* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
+	IPU_DISP_DATA_MAPPING_RGB666,
+	/* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
+	IPU_DISP_DATA_MAPPING_RGB565,
+	/* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
+	IPU_DISP_DATA_MAPPING_RGB888,
+};
+
 /**
  * struct mx3fb_platform_data - mx3fb platform data
  *
@@ -33,6 +47,7 @@ struct mx3fb_platform_data {
 	const char		*name;
 	const struct fb_videomode *mode;
 	int			num_modes;
+	enum disp_data_mapping	disp_data_fmt;
 };
 
 #endif
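
To show how the new field is meant to be used, here is a hypothetical board-file fragment; the panel name and the fb_modedb array are made up for illustration:

    static struct mx3fb_platform_data mx3fb_pdata = {
        .name          = "example-panel",           /* hypothetical */
        .mode          = fb_modedb,                 /* assumed defined elsewhere */
        .num_modes     = ARRAY_SIZE(fb_modedb),
        .disp_data_fmt = IPU_DISP_DATA_MAPPING_RGB666,
    };
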
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 685c78716d95..fd0ee84c45d1 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -113,7 +113,8 @@ struct stedma40_half_channel_info {
  * @dst_dev_type: Dst device type
  * @src_info: Parameters for dst half channel
  * @dst_info: Parameters for dst half channel
- *
+ * @use_fixed_channel: if true, use physical channel specified by phy_channel
+ * @phy_channel: physical channel to use, only if use_fixed_channel is true
  *
  * This structure has to be filled by the client drivers.
  * It is recommended to do all dma configurations for clients in the machine.
@@ -129,6 +130,9 @@ struct stedma40_chan_cfg {
 	int					 dst_dev_type;
 	struct stedma40_half_channel_info	 src_info;
 	struct stedma40_half_channel_info	 dst_info;
+
+	bool					 use_fixed_channel;
+	int					 phy_channel;
 };
 
 /**
@@ -153,6 +157,7 @@ struct stedma40_platform_data {
 	struct stedma40_chan_cfg	*memcpy_conf_phy;
 	struct stedma40_chan_cfg	*memcpy_conf_log;
 	int				 disabled_channels[STEDMA40_MAX_PHYS];
+	bool				 use_esram_lcla;
 };
 
 #ifdef CONFIG_STE_DMA40
@@ -187,7 +192,7 @@ static inline struct
 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
 					    dma_addr_t addr,
 					    unsigned int size,
-					    enum dma_data_direction direction,
+					    enum dma_transfer_direction direction,
 					    unsigned long flags)
 {
 	struct scatterlist sg;
@@ -209,7 +214,7 @@ static inline struct
 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
 					    dma_addr_t addr,
 					    unsigned int size,
-					    enum dma_data_direction direction,
+					    enum dma_transfer_direction direction,
 					    unsigned long flags)
 {
 	return NULL;
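
A client that needs a guaranteed physical channel would set the two new fields in its channel configuration. A minimal sketch, with the rest of the configuration and the reserved channel number assumed to come from the machine code:

    static struct stedma40_chan_cfg example_dma_cfg = {
        /* ... src_info/dst_info and device types set up as before ... */
        .use_fixed_channel = true,
        .phy_channel       = 0,    /* assumed reserved for this client */
    };
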
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index 2cded872f22b..0747c77a2fd5 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -37,14 +37,14 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
 							(void *)dma_ch;
 	chan = dma_request_channel(mask, pl330_filter, filter_param);
 
-	if (info->direction == DMA_FROM_DEVICE) {
+	if (info->direction == DMA_DEV_TO_MEM) {
 		memset(&slave_config, 0, sizeof(struct dma_slave_config));
 		slave_config.direction = info->direction;
 		slave_config.src_addr = info->fifo;
 		slave_config.src_addr_width = info->width;
 		slave_config.src_maxburst = 1;
 		dmaengine_slave_config(chan, &slave_config);
-	} else if (info->direction == DMA_TO_DEVICE) {
+	} else if (info->direction == DMA_MEM_TO_DEV) {
 		memset(&slave_config, 0, sizeof(struct dma_slave_config));
 		slave_config.direction = info->direction;
 		slave_config.dst_addr = info->fifo;
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
index 22eafc310bd7..70b6325edb99 100644
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -17,7 +17,7 @@
 
 struct samsung_dma_prep_info {
 	enum dma_transaction_type cap;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	dma_addr_t buf;
 	unsigned long period;
 	unsigned long len;
@@ -27,7 +27,7 @@ struct samsung_dma_prep_info {
 
 struct samsung_dma_info {
 	enum dma_transaction_type cap;
-	enum dma_data_direction direction;
+	enum dma_transfer_direction direction;
 	enum dma_slave_buswidth width;
 	dma_addr_t fifo;
 	struct s3c2410_dma_client *client;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5a99bb3f255a..f1a274994bb1 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
-	depends on SOC_IMX31 || SOC_IMX35
+	depends on ARCH_MXC
 	select DMA_ENGINE
 	default y
 	help
@@ -187,6 +187,13 @@ config TIMB_DMA
 	help
 	  Enable support for the Timberdale FPGA DMA engine.
 
+config SIRF_DMA
+	tristate "CSR SiRFprimaII DMA support"
+	depends on ARCH_PRIMA2
+	select DMA_ENGINE
+	help
+	  Enable support for the CSR SiRFprimaII DMA engine.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	bool
 
@@ -201,26 +208,26 @@ config PL330_DMA
 	  platform_data for a dma-pl330 device.
 
 config PCH_DMA
-	tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+	tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
 	depends on PCI && X86
 	select DMA_ENGINE
 	help
 	  Enable support for Intel EG20T PCH DMA engine.
 
-	  This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-	  Output Hub), ML7213 and ML7223.
-	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-	  for MP(Media Phone) use.
-	  ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-	  ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+	  This driver also can be used for LAPIS Semiconductor IOH(Input/
+	  Output Hub), ML7213, ML7223 and ML7831.
+	  ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+	  for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+	  ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+	  ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 config IMX_SDMA
 	tristate "i.MX SDMA support"
-	depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+	depends on ARCH_MXC
 	select DMA_ENGINE
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
-	  Freescale i.MX25/31/35/51 chips.
+	  Freescale i.MX25/31/35/51/53 chips.
 
 config IMX_DMA
 	tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1f0c5c..009a222e8283 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 0698695e8bf9..8a281584458b 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 	int ret;
 
 	/* Check if we already have a channel */
-	if (plchan->phychan)
-		return 0;
+	if (plchan->phychan) {
+		ch = plchan->phychan;
+		goto got_channel;
+	}
 
 	ch = pl08x_get_phy_channel(pl08x, plchan);
 	if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
 			return -EBUSY;
 		}
 		ch->signal = ret;
-
-		/* Assign the flow control signal to this channel */
-		if (txd->direction == DMA_TO_DEVICE)
-			txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
-		else if (txd->direction == DMA_FROM_DEVICE)
-			txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
 	}
 
+	plchan->phychan = ch;
 	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
 		 ch->id,
 		 ch->signal,
 		 plchan->name);
 
+got_channel:
+	/* Assign the flow control signal to this channel */
+	if (txd->direction == DMA_MEM_TO_DEV)
+		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+	else if (txd->direction == DMA_DEV_TO_MEM)
+		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
 	plchan->phychan_hold++;
-	plchan->phychan = ch;
 
 	return 0;
 }
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 
 	/* Transfer direction */
 	plchan->runtime_direction = config->direction;
-	if (config->direction == DMA_TO_DEVICE) {
+	if (config->direction == DMA_MEM_TO_DEV) {
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
-	} else if (config->direction == DMA_FROM_DEVICE) {
+	} else if (config->direction == DMA_DEV_TO_MEM) {
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
 	} else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-	if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
 		plchan->src_addr = config->src_addr;
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
 			pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 		"configured channel %s (%s) for %s, data width %d, "
 		"maxburst %d words, LE, CCTL=0x%08x\n",
 		dma_chan_name(chan), plchan->name,
-		(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+		(config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
 		addr_width,
 		maxburst,
 		cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	 */
 	txd->direction = direction;
 
-	if (direction == DMA_TO_DEVICE) {
+	if (direction == DMA_MEM_TO_DEV) {
 		txd->cctl = plchan->dst_cctl;
 		slave_addr = plchan->dst_addr;
-	} else if (direction == DMA_FROM_DEVICE) {
+	} else if (direction == DMA_DEV_TO_MEM) {
 		txd->cctl = plchan->src_cctl;
 		slave_addr = plchan->src_addr;
 	} else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 	}
 
 	if (plchan->cd->device_fc)
-		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
 	else
-		tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
 			PL080_FLOW_PER2MEM;
 
 	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		list_add_tail(&dsg->node, &txd->dsg_list);
 
 		dsg->len = sg_dma_len(sg);
-		if (direction == DMA_TO_DEVICE) {
+		if (direction == DMA_MEM_TO_DEV) {
 			dsg->src_addr = sg_phys(sg);
 			dsg->dst_addr = slave_addr;
 		} else {
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8b5c59..97f87b29b9f3 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "at_hdmac_regs.h"
 
@@ -660,7 +662,7 @@ err_desc_get:
  */
 static struct dma_async_tx_descriptor *
 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
 			sg_len,
-			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
 			flags);
 
 	if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	ctrlb = ATC_IEN;
 
 	switch (direction) {
-	case DMA_TO_DEVICE:
+	case DMA_MEM_TO_DEV:
 		ctrla |= ATC_DST_WIDTH(reg_width);
 		ctrlb |= ATC_DST_ADDR_MODE_FIXED
 			| ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			total_len += len;
 		}
 		break;
-	case DMA_FROM_DEVICE:
+	case DMA_DEV_TO_MEM:
 		ctrla |= ATC_SRC_WIDTH(reg_width);
 		ctrlb |= ATC_DST_ADDR_MODE_INCR
 			| ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
  */
 static int
 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	if (period_len > (ATC_BTSIZE_MAX << reg_width))
 		goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
 		goto err_out;
 	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
 		goto err_out;
-	if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
 		goto err_out;
 
 	return 0;
@@ -810,7 +812,7 @@ err_out:
 static int
 atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 		unsigned int period_index, dma_addr_t buf_addr,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	u32		ctrla;
 	unsigned int	reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 		| period_len >> reg_width;
 
 	switch (direction) {
-	case DMA_TO_DEVICE:
+	case DMA_MEM_TO_DEV:
 		desc->lli.saddr = buf_addr + (period_len * period_index);
 		desc->lli.daddr = atslave->tx_reg;
 		desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 			| ATC_DIF(AT_DMA_PER_IF);
 		break;
 
-	case DMA_FROM_DEVICE:
+	case DMA_DEV_TO_MEM:
 		desc->lli.saddr = atslave->rx_reg;
 		desc->lli.daddr = buf_addr + (period_len * period_index);
 		desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
  */
 static struct dma_async_tx_descriptor *
 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-		size_t period_len, enum dma_data_direction direction)
+		size_t period_len, enum dma_transfer_direction direction)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma_slave	*atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	unsigned int		i;
 
 	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
-			direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
 			buf_addr,
 			periods, buf_len, period_len);
 
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 
 /*--  Module Management  -----------------------------------------------*/
 
+/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
+static struct at_dma_platform_data at91sam9rl_config = {
+	.nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+	.nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+	{
+		.compatible = "atmel,at91sam9rl-dma",
+		.data = &at91sam9rl_config,
+	}, {
+		.compatible = "atmel,at91sam9g45-dma",
+		.data = &at91sam9g45_config,
+	}, {
+		/* sentinel */
+	}
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+	{
+		.name = "at91sam9rl_dma",
+		.driver_data = (unsigned long) &at91sam9rl_config,
+	}, {
+		.name = "at91sam9g45_dma",
+		.driver_data = (unsigned long) &at91sam9g45_config,
+	}, {
+		/* sentinel */
+	}
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+						struct platform_device *pdev)
+{
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+		if (match == NULL)
+			return NULL;
+		return match->data;
+	}
+	return (struct at_dma_platform_data *)
+			platform_get_device_id(pdev)->driver_data;
+}
+
 /**
  * at_dma_off - disable DMA controller
  * @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
 
 static int __init at_dma_probe(struct platform_device *pdev)
 {
-	struct at_dma_platform_data *pdata;
 	struct resource		*io;
 	struct at_dma		*atdma;
 	size_t			size;
 	int			irq;
 	int			err;
 	int			i;
+	struct at_dma_platform_data *plat_dat;
 
-	/* get DMA Controller parameters from platform */
-	pdata = pdev->dev.platform_data;
-	if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
-		return -EINVAL;
+	/* setup platform data for each SoC */
+	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+	/* get DMA parameters from controller type */
+	plat_dat = at_dma_get_driver_data(pdev);
+	if (!plat_dat)
+		return -ENODEV;
 
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		return irq;
 
 	size = sizeof(struct at_dma);
-	size += pdata->nr_channels * sizeof(struct at_dma_chan);
+	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
 	atdma = kzalloc(size, GFP_KERNEL);
 	if (!atdma)
 		return -ENOMEM;
 
-	/* discover transaction capabilites from the platform data */
-	atdma->dma_common.cap_mask = pdata->cap_mask;
-	atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+	/* discover transaction capabilities */
+	atdma->dma_common.cap_mask = plat_dat->cap_mask;
+	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
 
 	size = resource_size(io);
 	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	/* initialize channels related values */
 	INIT_LIST_HEAD(&atdma->dma_common.channels);
-	for (i = 0; i < pdata->nr_channels; i++) {
+	for (i = 0; i < plat_dat->nr_channels; i++) {
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
 		atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
 	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-	  pdata->nr_channels);
+	  plat_dat->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
 
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
 static struct platform_driver at_dma_driver = {
 	.remove		= __exit_p(at_dma_remove),
 	.shutdown	= at_dma_shutdown,
+	.id_table	= atdma_devtypes,
 	.driver = {
 		.name	= "at_hdmac",
 		.pm	= &at_dma_dev_pm_ops,
+		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
 	},
 };
 
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9aebab7c..dcaedfc181cf 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 /**
  * struct at_dma - internal representation of an Atmel HDMA Controller
  * @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
  * @ch_regs: memory mapped register base
  * @clk: dma controller clock
  * @save_imr: interrupt mask register that is saved on suspend/resume cycle
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f416ef11..d65a718c0f9b 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
 	struct scatterlist *sg;
 	unsigned int sg_len;
 	struct coh901318_lli *lli;
-	enum dma_data_direction dir;
+	enum dma_transfer_direction dir;
 	unsigned long flags;
 	u32 head_config;
 	u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-			unsigned int sg_len, enum dma_data_direction direction,
+			unsigned int sg_len, enum dma_transfer_direction direction,
 			unsigned long flags)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	ctrl_last |= cohc->runtime_ctrl;
 	ctrl |= cohc->runtime_ctrl;
 
-	if (direction == DMA_TO_DEVICE) {
+	if (direction == DMA_MEM_TO_DEV) {
 		u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
 			COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
 
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		ctrl_chained |= tx_flags;
 		ctrl_last |= tx_flags;
 		ctrl |= tx_flags;
-	} else if (direction == DMA_FROM_DEVICE) {
+	} else if (direction == DMA_DEV_TO_MEM) {
 		u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
 			COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
 	int i = 0;
 
 	/* We only support mem to per or per to mem transfers */
-	if (config->direction == DMA_FROM_DEVICE) {
+	if (config->direction == DMA_DEV_TO_MEM) {
 		addr = config->src_addr;
 		addr_width = config->src_addr_width;
 		maxburst = config->src_maxburst;
-	} else if (config->direction == DMA_TO_DEVICE) {
+	} else if (config->direction == DMA_MEM_TO_DEV) {
 		addr = config->dst_addr;
 		addr_width = config->dst_addr_width;
 		maxburst = config->dst_maxburst;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a7eea..6c0e2d4c6682 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
  * Author: Per Friden <per.friden@stericsson.com>
  */
 
-#include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
-#include <linux/dmapool.h>
 #include <linux/memory.h>
 #include <linux/gfp.h>
+#include <linux/dmapool.h>
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 			  struct coh901318_lli *lli,
 			  dma_addr_t buf, unsigned int size,
 			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
-			  enum dma_data_direction dir)
+			  enum dma_transfer_direction dir)
 {
 	int s = size;
 	dma_addr_t src;
 	dma_addr_t dst;
 
 
-	if (dir == DMA_TO_DEVICE) {
+	if (dir == DMA_MEM_TO_DEV) {
 		src = buf;
 		dst = dev_addr;
 
-	} else if (dir == DMA_FROM_DEVICE) {
+	} else if (dir == DMA_DEV_TO_MEM) {
 
 		src = dev_addr;
 		dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 
 		lli = coh901318_lli_next(lli);
 
-		if (dir == DMA_TO_DEVICE)
+		if (dir == DMA_MEM_TO_DEV)
 			src += block_size;
-		else if (dir == DMA_FROM_DEVICE)
+		else if (dir == DMA_DEV_TO_MEM)
 			dst += block_size;
 	}
 
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		      struct scatterlist *sgl, unsigned int nents,
 		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
 		      u32 ctrl_last,
-		      enum dma_data_direction dir, u32 ctrl_irq_mask)
+		      enum dma_transfer_direction dir, u32 ctrl_irq_mask)
 {
 	int i;
 	struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
 	spin_lock(&pool->lock);
 
-	if (dir == DMA_TO_DEVICE)
+	if (dir == DMA_MEM_TO_DEV)
 		dst = dev_addr;
-	else if (dir == DMA_FROM_DEVICE)
+	else if (dir == DMA_DEV_TO_MEM)
 		src = dev_addr;
 	else
 		goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 			ctrl_sg = ctrl ? ctrl : ctrl_last;
 
 
-			if (dir == DMA_TO_DEVICE)
+			if (dir == DMA_MEM_TO_DEV)
 				/* increment source address */
 				src = sg_phys(sg);
 			else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 			lli->src_addr = src;
 			lli->dst_addr = dst;
 
-			if (dir == DMA_FROM_DEVICE)
+			if (dir == DMA_DEV_TO_MEM)
 				dst += elem_size;
 			else
 				src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c80990e9e..abff3714fdda 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 			  struct coh901318_lli *lli,
 			  dma_addr_t buf, unsigned int size,
 			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
-			  enum dma_data_direction dir);
+			  enum dma_transfer_direction dir);
 
 /**
  * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 		      struct scatterlist *sg, unsigned int nents,
 		      dma_addr_t dev_addr, u32 ctrl_chained,
 		      u32 ctrl, u32 ctrl_last,
-		      enum dma_data_direction dir, u32 ctrl_irq_mask);
+		      enum dma_transfer_direction dir, u32 ctrl_irq_mask);
 
 #endif /* COH901318_LLI_H */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b499da..a6c6051ec858 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_interrupt);
 	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 		!device->device_prep_dma_sg);
-	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 		!device->device_prep_dma_cyclic);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_control);
+	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+		!device->device_prep_interleaved_dma);
 
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
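
These registration-time checks pair each advertised capability with its mandatory callback: DMA_SLAVE now only requires device_control (an interleaved-capable device can service slave transfers without device_prep_slave_sg), while DMA_INTERLEAVE requires device_prep_interleaved_dma. A hypothetical probe fragment, with dd as the driver's struct dma_device and foo_* standing in for its own callbacks:

    dma_cap_set(DMA_SLAVE, dd->cap_mask);
    dma_cap_set(DMA_INTERLEAVE, dd->cap_mask);
    dd->device_control = foo_control;                       /* required by DMA_SLAVE */
    dd->device_prep_interleaved_dma = foo_prep_interleaved; /* required by DMA_INTERLEAVE */
    dd->device_prep_slave_sg = foo_prep_slave_sg;           /* no longer checked here */
    dd->device_alloc_chan_resources = foo_alloc;
    dd->device_free_chan_resources = foo_free;
    err = dma_async_device_register(dd);
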
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d360718..9b592b02b5f4 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
 	return cookie;
 }
 
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	struct dw_dma_slave *dws = dwc->chan.private;
+	u32 cfghi = DWC_CFGH_FIFO_MODE;
+	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+	if (dwc->initialized == true)
+		return;
+
+	if (dws) {
+		/*
+		 * We need controller-specific data to set up slave
+		 * transfers.
+		 */
+		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+		cfghi = dws->cfg_hi;
+		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+	}
+
+	channel_writel(dwc, CFG_LO, cfglo);
+	channel_writel(dwc, CFG_HI, cfghi);
+
+	/* Enable interrupts */
+	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+	channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+	dwc->initialized = true;
+}
+
 /*----------------------------------------------------------------------*/
 
 /* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 		return;
 	}
 
+	dwc_initialize(dwc);
+
 	channel_writel(dwc, LLP, first->txd.phys);
 	channel_writel(dwc, CTL_LO,
 			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
 
 static struct dma_async_tx_descriptor *
 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-		unsigned int sg_len, enum dma_data_direction direction,
+		unsigned int sg_len, enum dma_transfer_direction direction,
 		unsigned long flags)
 {
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	prev = first = NULL;
 
 	switch (direction) {
-	case DMA_TO_DEVICE:
+	case DMA_MEM_TO_DEV:
 		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
 				| DWC_CTLL_DST_WIDTH(reg_width)
 				| DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
 				goto slave_sg_todev_fill_desc;
 		}
 		break;
-	case DMA_FROM_DEVICE:
+	case DMA_DEV_TO_MEM:
 		ctllo = (DWC_DEFAULT_CTLLO(chan->private)
 				| DWC_CTLL_SRC_WIDTH(reg_width)
 				| DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
 	struct dw_dma		*dw = to_dw_dma(chan->device);
 	struct dw_desc		*desc;
-	struct dw_dma_slave	*dws;
 	int			i;
-	u32			cfghi;
-	u32			cfglo;
 	unsigned long		flags;
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
 	dwc->completed = chan->cookie = 1;
 
-	cfghi = DWC_CFGH_FIFO_MODE;
-	cfglo = 0;
-
-	dws = chan->private;
-	if (dws) {
-		/*
-		 * We need controller-specific data to set up slave
-		 * transfers.
-		 */
-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-		cfghi = dws->cfg_hi;
-		cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
-	}
-
-	cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
-	channel_writel(dwc, CFG_LO, cfglo);
-	channel_writel(dwc, CFG_HI, cfghi);
-
 	/*
 	 * NOTE: some controllers may have additional features that we
 	 * need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 		i = ++dwc->descs_allocated;
 	}
 
-	/* Enable interrupts */
-	channel_set_bit(dw, MASK.XFER, dwc->mask);
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-	channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
+	dwc->initialized = false;
 
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
  */
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_data_direction direction)
+		enum dma_transfer_direction direction)
 {
 	struct dw_dma_chan		*dwc = to_dw_dma_chan(chan);
 	struct dw_cyclic_desc		*cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1206 | goto out_err; | 1213 | goto out_err; |
1207 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 1214 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
1208 | goto out_err; | 1215 | goto out_err; |
1209 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) | 1216 | if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) |
1210 | goto out_err; | 1217 | goto out_err; |
1211 | 1218 | ||
1212 | retval = ERR_PTR(-ENOMEM); | 1219 | retval = ERR_PTR(-ENOMEM); |
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1228 | goto out_err_desc_get; | 1235 | goto out_err_desc_get; |
1229 | 1236 | ||
1230 | switch (direction) { | 1237 | switch (direction) { |
1231 | case DMA_TO_DEVICE: | 1238 | case DMA_MEM_TO_DEV: |
1232 | desc->lli.dar = dws->tx_reg; | 1239 | desc->lli.dar = dws->tx_reg; |
1233 | desc->lli.sar = buf_addr + (period_len * i); | 1240 | desc->lli.sar = buf_addr + (period_len * i); |
1234 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) | 1241 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | |||
1239 | | DWC_CTLL_FC(dws->fc) | 1246 | | DWC_CTLL_FC(dws->fc) |
1240 | | DWC_CTLL_INT_EN); | 1247 | | DWC_CTLL_INT_EN); |
1241 | break; | 1248 | break; |
1242 | case DMA_FROM_DEVICE: | 1249 | case DMA_DEV_TO_MEM: |
1243 | desc->lli.dar = buf_addr + (period_len * i); | 1250 | desc->lli.dar = buf_addr + (period_len * i); |
1244 | desc->lli.sar = dws->rx_reg; | 1251 | desc->lli.sar = dws->rx_reg; |
1245 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) | 1252 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) |
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free); | |||
1335 | 1342 | ||
1336 | static void dw_dma_off(struct dw_dma *dw) | 1343 | static void dw_dma_off(struct dw_dma *dw) |
1337 | { | 1344 | { |
1345 | int i; | ||
1346 | |||
1338 | dma_writel(dw, CFG, 0); | 1347 | dma_writel(dw, CFG, 0); |
1339 | 1348 | ||
1340 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 1349 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw) | |||
1345 | 1354 | ||
1346 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | 1355 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) |
1347 | cpu_relax(); | 1356 | cpu_relax(); |
1357 | |||
1358 | for (i = 0; i < dw->dma.chancnt; i++) | ||
1359 | dw->chan[i].initialized = false; | ||
1348 | } | 1360 | } |
1349 | 1361 | ||
1350 | static int __init dw_probe(struct platform_device *pdev) | 1362 | static int __init dw_probe(struct platform_device *pdev) |
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev) | |||
1533 | 1545 | ||
1534 | dw_dma_off(platform_get_drvdata(pdev)); | 1546 | dw_dma_off(platform_get_drvdata(pdev)); |
1535 | clk_disable(dw->clk); | 1547 | clk_disable(dw->clk); |
1548 | |||
1536 | return 0; | 1549 | return 0; |
1537 | } | 1550 | } |
1538 | 1551 | ||
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index c3419518d701..5eef6946a367 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -140,6 +140,7 @@ struct dw_dma_chan { | |||
140 | u8 mask; | 140 | u8 mask; |
141 | u8 priority; | 141 | u8 priority; |
142 | bool paused; | 142 | bool paused; |
143 | bool initialized; | ||
143 | 144 | ||
144 | spinlock_t lock; | 145 | spinlock_t lock; |
145 | 146 | ||
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index b47e2b803faf..59e7a965772b 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, | |||
246 | static struct ep93xx_dma_desc * | 246 | static struct ep93xx_dma_desc * |
247 | ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) | 247 | ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) |
248 | { | 248 | { |
249 | if (list_empty(&edmac->active)) | ||
250 | return NULL; | ||
251 | |||
249 | return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); | 252 | return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node); |
250 | } | 253 | } |
251 | 254 | ||
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac) | |||
263 | */ | 266 | */ |
264 | static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) | 267 | static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac) |
265 | { | 268 | { |
269 | struct ep93xx_dma_desc *desc; | ||
270 | |||
266 | list_rotate_left(&edmac->active); | 271 | list_rotate_left(&edmac->active); |
267 | 272 | ||
268 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) | 273 | if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) |
269 | return true; | 274 | return true; |
270 | 275 | ||
276 | desc = ep93xx_dma_get_active(edmac); | ||
277 | if (!desc) | ||
278 | return false; | ||
279 | |||
271 | /* | 280 | /* |
272 | * If txd.cookie is set it means that we are back in the first | 281 | * If txd.cookie is set it means that we are back in the first |
273 | * descriptor in the chain and hence done with it. | 282 | * descriptor in the chain and hence done with it. |
274 | */ | 283 | */ |
275 | return !ep93xx_dma_get_active(edmac)->txd.cookie; | 284 | return !desc->txd.cookie; |
276 | } | 285 | } |
277 | 286 | ||
278 | /* | 287 | /* |
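ep93xx_dma_get_active() now returns NULL on an empty active list instead of handing back a bogus list_first_entry(), and ep93xx_dma_advance_active() fetches the descriptor once before dereferencing it. Every caller below grows the same guard, roughly:

    /* Defensive caller pattern added throughout ep93xx_dma.c:
     * DMA_TERMINATE_ALL can empty the active list between the
     * interrupt and the tasklet, so NULL here is not a bug. */
    desc = ep93xx_dma_get_active(edmac);
    if (!desc) {
            dev_warn(chan2dev(edmac), "empty descriptor list\n");
            return;
    }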
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | |||
327 | 336 | ||
328 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) | 337 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) |
329 | { | 338 | { |
330 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | 339 | struct ep93xx_dma_desc *desc; |
331 | u32 bus_addr; | 340 | u32 bus_addr; |
332 | 341 | ||
333 | if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) | 342 | desc = ep93xx_dma_get_active(edmac); |
343 | if (!desc) { | ||
344 | dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n"); | ||
345 | return; | ||
346 | } | ||
347 | |||
348 | if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) | ||
334 | bus_addr = desc->src_addr; | 349 | bus_addr = desc->src_addr; |
335 | else | 350 | else |
336 | bus_addr = desc->dst_addr; | 351 | bus_addr = desc->dst_addr; |
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | |||
443 | control = (5 << M2M_CONTROL_PWSC_SHIFT); | 458 | control = (5 << M2M_CONTROL_PWSC_SHIFT); |
444 | control |= M2M_CONTROL_NO_HDSK; | 459 | control |= M2M_CONTROL_NO_HDSK; |
445 | 460 | ||
446 | if (data->direction == DMA_TO_DEVICE) { | 461 | if (data->direction == DMA_MEM_TO_DEV) { |
447 | control |= M2M_CONTROL_DAH; | 462 | control |= M2M_CONTROL_DAH; |
448 | control |= M2M_CONTROL_TM_TX; | 463 | control |= M2M_CONTROL_TM_TX; |
449 | control |= M2M_CONTROL_RSS_SSPTX; | 464 | control |= M2M_CONTROL_RSS_SSPTX; |
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | |||
459 | * This IDE part is totally untested. Values below are taken | 474 | * This IDE part is totally untested. Values below are taken |
460 | * from the EP93xx Users's Guide and might not be correct. | 475 | * from the EP93xx Users's Guide and might not be correct. |
461 | */ | 476 | */ |
462 | control |= M2M_CONTROL_NO_HDSK; | 477 | if (data->direction == DMA_MEM_TO_DEV) { |
463 | control |= M2M_CONTROL_RSS_IDE; | ||
464 | control |= M2M_CONTROL_PW_16; | ||
465 | |||
466 | if (data->direction == DMA_TO_DEVICE) { | ||
467 | /* Worst case from the UG */ | 478 | /* Worst case from the UG */ |
468 | control = (3 << M2M_CONTROL_PWSC_SHIFT); | 479 | control = (3 << M2M_CONTROL_PWSC_SHIFT); |
469 | control |= M2M_CONTROL_DAH; | 480 | control |= M2M_CONTROL_DAH; |
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) | |||
473 | control |= M2M_CONTROL_SAH; | 484 | control |= M2M_CONTROL_SAH; |
474 | control |= M2M_CONTROL_TM_RX; | 485 | control |= M2M_CONTROL_TM_RX; |
475 | } | 486 | } |
487 | |||
488 | control |= M2M_CONTROL_NO_HDSK; | ||
489 | control |= M2M_CONTROL_RSS_IDE; | ||
490 | control |= M2M_CONTROL_PW_16; | ||
476 | break; | 491 | break; |
477 | 492 | ||
478 | default: | 493 | default: |
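The reordering above fixes bit clobbering in the untested IDE path: the DMA_MEM_TO_DEV branch assigns control = (3 << M2M_CONTROL_PWSC_SHIFT) outright, which used to discard the NO_HDSK/RSS_IDE/PW_16 bits OR'd in beforehand. Moving the common bits after the direction-specific block preserves them:

    control = (3 << M2M_CONTROL_PWSC_SHIFT);  /* '=' resets the mask */
    control |= M2M_CONTROL_DAH;
    control |= M2M_CONTROL_TM_TX;
    /* ... */
    control |= M2M_CONTROL_NO_HDSK;           /* common bits OR'd last */
    control |= M2M_CONTROL_RSS_IDE;
    control |= M2M_CONTROL_PW_16;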
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac) | |||
491 | 506 | ||
492 | static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) | 507 | static void m2m_fill_desc(struct ep93xx_dma_chan *edmac) |
493 | { | 508 | { |
494 | struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); | 509 | struct ep93xx_dma_desc *desc; |
510 | |||
511 | desc = ep93xx_dma_get_active(edmac); | ||
512 | if (!desc) { | ||
513 | dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n"); | ||
514 | return; | ||
515 | } | ||
495 | 516 | ||
496 | if (edmac->buffer == 0) { | 517 | if (edmac->buffer == 0) { |
497 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); | 518 | writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); |
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
669 | { | 690 | { |
670 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; | 691 | struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data; |
671 | struct ep93xx_dma_desc *desc, *d; | 692 | struct ep93xx_dma_desc *desc, *d; |
672 | dma_async_tx_callback callback; | 693 | dma_async_tx_callback callback = NULL; |
673 | void *callback_param; | 694 | void *callback_param = NULL; |
674 | LIST_HEAD(list); | 695 | LIST_HEAD(list); |
675 | 696 | ||
676 | spin_lock_irq(&edmac->lock); | 697 | spin_lock_irq(&edmac->lock); |
698 | /* | ||
699 | * If dma_terminate_all() was called before we get to run, the active | ||
700 | * list has become empty. If that happens we aren't supposed to do | ||
701 | * anything more than call ep93xx_dma_advance_work(). | ||
702 | */ | ||
677 | desc = ep93xx_dma_get_active(edmac); | 703 | desc = ep93xx_dma_get_active(edmac); |
678 | if (desc->complete) { | 704 | if (desc) { |
679 | edmac->last_completed = desc->txd.cookie; | 705 | if (desc->complete) { |
680 | list_splice_init(&edmac->active, &list); | 706 | edmac->last_completed = desc->txd.cookie; |
707 | list_splice_init(&edmac->active, &list); | ||
708 | } | ||
709 | callback = desc->txd.callback; | ||
710 | callback_param = desc->txd.callback_param; | ||
681 | } | 711 | } |
682 | spin_unlock_irq(&edmac->lock); | 712 | spin_unlock_irq(&edmac->lock); |
683 | 713 | ||
684 | /* Pick up the next descriptor from the queue */ | 714 | /* Pick up the next descriptor from the queue */ |
685 | ep93xx_dma_advance_work(edmac); | 715 | ep93xx_dma_advance_work(edmac); |
686 | 716 | ||
687 | callback = desc->txd.callback; | ||
688 | callback_param = desc->txd.callback_param; | ||
689 | |||
690 | /* Now we can release all the chained descriptors */ | 717 | /* Now we can release all the chained descriptors */ |
691 | list_for_each_entry_safe(desc, d, &list, node) { | 718 | list_for_each_entry_safe(desc, d, &list, node) { |
692 | /* | 719 | /* |
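The tasklet rework snapshots the callback and its argument while holding the lock, and only when a descriptor is still active; if dma_terminate_all() emptied the list first, the tasklet confines itself to ep93xx_dma_advance_work(). The general shape of the snapshot-under-lock idiom:

    dma_async_tx_callback callback = NULL;
    void *callback_param = NULL;

    spin_lock_irq(&edmac->lock);
    desc = ep93xx_dma_get_active(edmac);
    if (desc) {
            callback = desc->txd.callback;       /* copy under the lock */
            callback_param = desc->txd.callback_param;
    }
    spin_unlock_irq(&edmac->lock);

    if (callback)                                /* invoke outside it */
            callback(callback_param);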
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data) | |||
706 | static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) | 733 | static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id) |
707 | { | 734 | { |
708 | struct ep93xx_dma_chan *edmac = dev_id; | 735 | struct ep93xx_dma_chan *edmac = dev_id; |
736 | struct ep93xx_dma_desc *desc; | ||
709 | irqreturn_t ret = IRQ_HANDLED; | 737 | irqreturn_t ret = IRQ_HANDLED; |
710 | 738 | ||
711 | spin_lock(&edmac->lock); | 739 | spin_lock(&edmac->lock); |
712 | 740 | ||
741 | desc = ep93xx_dma_get_active(edmac); | ||
742 | if (!desc) { | ||
743 | dev_warn(chan2dev(edmac), | ||
744 | "got interrupt while active list is empty\n"); | ||
745 | spin_unlock(&edmac->lock); | ||
746 | return IRQ_NONE; | ||
747 | } | ||
748 | |||
713 | switch (edmac->edma->hw_interrupt(edmac)) { | 749 | switch (edmac->edma->hw_interrupt(edmac)) { |
714 | case INTERRUPT_DONE: | 750 | case INTERRUPT_DONE: |
715 | ep93xx_dma_get_active(edmac)->complete = true; | 751 | desc->complete = true; |
716 | tasklet_schedule(&edmac->tasklet); | 752 | tasklet_schedule(&edmac->tasklet); |
717 | break; | 753 | break; |
718 | 754 | ||
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) | |||
803 | switch (data->port) { | 839 | switch (data->port) { |
804 | case EP93XX_DMA_SSP: | 840 | case EP93XX_DMA_SSP: |
805 | case EP93XX_DMA_IDE: | 841 | case EP93XX_DMA_IDE: |
806 | if (data->direction != DMA_TO_DEVICE && | 842 | if (data->direction != DMA_MEM_TO_DEV && |
807 | data->direction != DMA_FROM_DEVICE) | 843 | data->direction != DMA_DEV_TO_MEM) |
808 | return -EINVAL; | 844 | return -EINVAL; |
809 | break; | 845 | break; |
810 | default: | 846 | default: |
@@ -952,7 +988,7 @@ fail: | |||
952 | */ | 988 | */ |
953 | static struct dma_async_tx_descriptor * | 989 | static struct dma_async_tx_descriptor * |
954 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 990 | ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
955 | unsigned int sg_len, enum dma_data_direction dir, | 991 | unsigned int sg_len, enum dma_transfer_direction dir, |
956 | unsigned long flags) | 992 | unsigned long flags) |
957 | { | 993 | { |
958 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 994 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
988 | goto fail; | 1024 | goto fail; |
989 | } | 1025 | } |
990 | 1026 | ||
991 | if (dir == DMA_TO_DEVICE) { | 1027 | if (dir == DMA_MEM_TO_DEV) { |
992 | desc->src_addr = sg_dma_address(sg); | 1028 | desc->src_addr = sg_dma_address(sg); |
993 | desc->dst_addr = edmac->runtime_addr; | 1029 | desc->dst_addr = edmac->runtime_addr; |
994 | } else { | 1030 | } else { |
@@ -1032,7 +1068,7 @@ fail: | |||
1032 | static struct dma_async_tx_descriptor * | 1068 | static struct dma_async_tx_descriptor * |
1033 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 1069 | ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
1034 | size_t buf_len, size_t period_len, | 1070 | size_t buf_len, size_t period_len, |
1035 | enum dma_data_direction dir) | 1071 | enum dma_transfer_direction dir) |
1036 | { | 1072 | { |
1037 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | 1073 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); |
1038 | struct ep93xx_dma_desc *desc, *first; | 1074 | struct ep93xx_dma_desc *desc, *first; |
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | |||
1065 | goto fail; | 1101 | goto fail; |
1066 | } | 1102 | } |
1067 | 1103 | ||
1068 | if (dir == DMA_TO_DEVICE) { | 1104 | if (dir == DMA_MEM_TO_DEV) { |
1069 | desc->src_addr = dma_addr + offset; | 1105 | desc->src_addr = dma_addr + offset; |
1070 | desc->dst_addr = edmac->runtime_addr; | 1106 | desc->dst_addr = edmac->runtime_addr; |
1071 | } else { | 1107 | } else { |
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, | |||
1133 | return -EINVAL; | 1169 | return -EINVAL; |
1134 | 1170 | ||
1135 | switch (config->direction) { | 1171 | switch (config->direction) { |
1136 | case DMA_FROM_DEVICE: | 1172 | case DMA_DEV_TO_MEM: |
1137 | width = config->src_addr_width; | 1173 | width = config->src_addr_width; |
1138 | addr = config->src_addr; | 1174 | addr = config->src_addr; |
1139 | break; | 1175 | break; |
1140 | 1176 | ||
1141 | case DMA_TO_DEVICE: | 1177 | case DMA_MEM_TO_DEV: |
1142 | width = config->dst_addr_width; | 1178 | width = config->dst_addr_width; |
1143 | addr = config->dst_addr; | 1179 | addr = config->dst_addr; |
1144 | break; | 1180 | break; |
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 8a781540590c..b98070c33ca9 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
@@ -772,7 +772,7 @@ fail: | |||
772 | */ | 772 | */ |
773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | 773 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | 774 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
775 | enum dma_data_direction direction, unsigned long flags) | 775 | enum dma_transfer_direction direction, unsigned long flags) |
776 | { | 776 | { |
777 | /* | 777 | /* |
778 | * This operation is not supported on the Freescale DMA controller | 778 | * This operation is not supported on the Freescale DMA controller |
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, | |||
819 | return -ENXIO; | 819 | return -ENXIO; |
820 | 820 | ||
821 | /* we set the controller burst size depending on direction */ | 821 | /* we set the controller burst size depending on direction */ |
822 | if (config->direction == DMA_TO_DEVICE) | 822 | if (config->direction == DMA_MEM_TO_DEV) |
823 | size = config->dst_addr_width * config->dst_maxburst; | 823 | size = config->dst_addr_width * config->dst_maxburst; |
824 | else | 824 | else |
825 | size = config->src_addr_width * config->src_maxburst; | 825 | size = config->src_addr_width * config->src_maxburst; |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 4be55f9bb6c1..e4383ee2c9ac 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
107 | imx_dma_disable(imxdmac->imxdma_channel); | 107 | imx_dma_disable(imxdmac->imxdma_channel); |
108 | return 0; | 108 | return 0; |
109 | case DMA_SLAVE_CONFIG: | 109 | case DMA_SLAVE_CONFIG: |
110 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | 110 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
111 | imxdmac->per_address = dmaengine_cfg->src_addr; | 111 | imxdmac->per_address = dmaengine_cfg->src_addr; |
112 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; | 112 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; |
113 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | 113 | imxdmac->word_size = dmaengine_cfg->src_addr_width; |
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan) | |||
224 | 224 | ||
225 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | 225 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( |
226 | struct dma_chan *chan, struct scatterlist *sgl, | 226 | struct dma_chan *chan, struct scatterlist *sgl, |
227 | unsigned int sg_len, enum dma_data_direction direction, | 227 | unsigned int sg_len, enum dma_transfer_direction direction, |
228 | unsigned long flags) | 228 | unsigned long flags) |
229 | { | 229 | { |
230 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 230 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
241 | dma_length += sg->length; | 241 | dma_length += sg->length; |
242 | } | 242 | } |
243 | 243 | ||
244 | if (direction == DMA_FROM_DEVICE) | 244 | if (direction == DMA_DEV_TO_MEM) |
245 | dmamode = DMA_MODE_READ; | 245 | dmamode = DMA_MODE_READ; |
246 | else | 246 | else |
247 | dmamode = DMA_MODE_WRITE; | 247 | dmamode = DMA_MODE_WRITE; |
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | |||
271 | 271 | ||
272 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | 272 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( |
273 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 273 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
274 | size_t period_len, enum dma_data_direction direction) | 274 | size_t period_len, enum dma_transfer_direction direction) |
275 | { | 275 | { |
276 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | 276 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
277 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 277 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | |||
317 | imxdmac->sg_list[periods].page_link = | 317 | imxdmac->sg_list[periods].page_link = |
318 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; | 318 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; |
319 | 319 | ||
320 | if (direction == DMA_FROM_DEVICE) | 320 | if (direction == DMA_DEV_TO_MEM) |
321 | dmamode = DMA_MODE_READ; | 321 | dmamode = DMA_MODE_READ; |
322 | else | 322 | else |
323 | dmamode = DMA_MODE_WRITE; | 323 | dmamode = DMA_MODE_WRITE; |
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index f993955a640c..a8af379680c1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c | |||
@@ -247,7 +247,7 @@ struct sdma_engine; | |||
247 | struct sdma_channel { | 247 | struct sdma_channel { |
248 | struct sdma_engine *sdma; | 248 | struct sdma_engine *sdma; |
249 | unsigned int channel; | 249 | unsigned int channel; |
250 | enum dma_data_direction direction; | 250 | enum dma_transfer_direction direction; |
251 | enum sdma_peripheral_type peripheral_type; | 251 | enum sdma_peripheral_type peripheral_type; |
252 | unsigned int event_id0; | 252 | unsigned int event_id0; |
253 | unsigned int event_id1; | 253 | unsigned int event_id1; |
@@ -268,6 +268,8 @@ struct sdma_channel { | |||
268 | struct dma_async_tx_descriptor desc; | 268 | struct dma_async_tx_descriptor desc; |
269 | dma_cookie_t last_completed; | 269 | dma_cookie_t last_completed; |
270 | enum dma_status status; | 270 | enum dma_status status; |
271 | unsigned int chn_count; | ||
272 | unsigned int chn_real_count; | ||
271 | }; | 273 | }; |
272 | 274 | ||
273 | #define IMX_DMA_SG_LOOP (1 << 0) | 275 | #define IMX_DMA_SG_LOOP (1 << 0) |
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
503 | struct sdma_buffer_descriptor *bd; | 505 | struct sdma_buffer_descriptor *bd; |
504 | int i, error = 0; | 506 | int i, error = 0; |
505 | 507 | ||
508 | sdmac->chn_real_count = 0; | ||
506 | /* | 509 | /* |
507 | * non loop mode. Iterate over all descriptors, collect | 510 | * non loop mode. Iterate over all descriptors, collect |
508 | * errors and call callback function | 511 | * errors and call callback function |
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
512 | 515 | ||
513 | if (bd->mode.status & (BD_DONE | BD_RROR)) | 516 | if (bd->mode.status & (BD_DONE | BD_RROR)) |
514 | error = -EIO; | 517 | error = -EIO; |
518 | sdmac->chn_real_count += bd->mode.count; | ||
515 | } | 519 | } |
516 | 520 | ||
517 | if (error) | 521 | if (error) |
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | |||
519 | else | 523 | else |
520 | sdmac->status = DMA_SUCCESS; | 524 | sdmac->status = DMA_SUCCESS; |
521 | 525 | ||
526 | sdmac->last_completed = sdmac->desc.cookie; | ||
522 | if (sdmac->desc.callback) | 527 | if (sdmac->desc.callback) |
523 | sdmac->desc.callback(sdmac->desc.callback_param); | 528 | sdmac->desc.callback(sdmac->desc.callback_param); |
524 | sdmac->last_completed = sdmac->desc.cookie; | ||
525 | } | 529 | } |
526 | 530 | ||
527 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) | 531 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) |
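Moving the last_completed update ahead of the callback matters because a completion callback may itself query the channel: publishing the cookie first lets dma_async_is_complete() report the transfer being called back as done. In outline:

    /* Publish completion before running the callback, so the
     * callback observes its own cookie as complete. */
    sdmac->last_completed = sdmac->desc.cookie;
    if (sdmac->desc.callback)
            sdmac->desc.callback(sdmac->desc.callback_param);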
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac) | |||
650 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | 654 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; |
651 | int ret; | 655 | int ret; |
652 | 656 | ||
653 | if (sdmac->direction == DMA_FROM_DEVICE) { | 657 | if (sdmac->direction == DMA_DEV_TO_MEM) { |
654 | load_address = sdmac->pc_from_device; | 658 | load_address = sdmac->pc_from_device; |
655 | } else { | 659 | } else { |
656 | load_address = sdmac->pc_to_device; | 660 | load_address = sdmac->pc_to_device; |
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | |||
832 | 836 | ||
833 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | 837 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) |
834 | { | 838 | { |
839 | unsigned long flags; | ||
835 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | 840 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); |
836 | struct sdma_engine *sdma = sdmac->sdma; | 841 | struct sdma_engine *sdma = sdmac->sdma; |
837 | dma_cookie_t cookie; | 842 | dma_cookie_t cookie; |
838 | 843 | ||
839 | spin_lock_irq(&sdmac->lock); | 844 | spin_lock_irqsave(&sdmac->lock, flags); |
840 | 845 | ||
841 | cookie = sdma_assign_cookie(sdmac); | 846 | cookie = sdma_assign_cookie(sdmac); |
842 | 847 | ||
843 | sdma_enable_channel(sdma, sdmac->channel); | 848 | sdma_enable_channel(sdma, sdmac->channel); |
844 | 849 | ||
845 | spin_unlock_irq(&sdmac->lock); | 850 | spin_unlock_irqrestore(&sdmac->lock, flags); |
846 | 851 | ||
847 | return cookie; | 852 | return cookie; |
848 | } | 853 | } |
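sdma_tx_submit() switches from spin_lock_irq() to spin_lock_irqsave() so it is correct whatever the caller's interrupt state; the unconditional variant would re-enable interrupts on unlock even for a caller that had them disabled. The converted shape:

    unsigned long flags;

    spin_lock_irqsave(&sdmac->lock, flags);   /* preserve IRQ state */
    cookie = sdma_assign_cookie(sdmac);
    sdma_enable_channel(sdma, sdmac->channel);
    spin_unlock_irqrestore(&sdmac->lock, flags);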
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) | |||
911 | 916 | ||
912 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | 917 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( |
913 | struct dma_chan *chan, struct scatterlist *sgl, | 918 | struct dma_chan *chan, struct scatterlist *sgl, |
914 | unsigned int sg_len, enum dma_data_direction direction, | 919 | unsigned int sg_len, enum dma_transfer_direction direction, |
915 | unsigned long flags) | 920 | unsigned long flags) |
916 | { | 921 | { |
917 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 922 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
941 | goto err_out; | 946 | goto err_out; |
942 | } | 947 | } |
943 | 948 | ||
949 | sdmac->chn_count = 0; | ||
944 | for_each_sg(sgl, sg, sg_len, i) { | 950 | for_each_sg(sgl, sg, sg_len, i) { |
945 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | 951 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; |
946 | int param; | 952 | int param; |
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | |||
957 | } | 963 | } |
958 | 964 | ||
959 | bd->mode.count = count; | 965 | bd->mode.count = count; |
966 | sdmac->chn_count += count; | ||
960 | 967 | ||
961 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { | 968 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { |
962 | ret = -EINVAL; | 969 | ret = -EINVAL; |
@@ -1008,7 +1015,7 @@ err_out: | |||
1008 | 1015 | ||
1009 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | 1016 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( |
1010 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 1017 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
1011 | size_t period_len, enum dma_data_direction direction) | 1018 | size_t period_len, enum dma_transfer_direction direction) |
1012 | { | 1019 | { |
1013 | struct sdma_channel *sdmac = to_sdma_chan(chan); | 1020 | struct sdma_channel *sdmac = to_sdma_chan(chan); |
1014 | struct sdma_engine *sdma = sdmac->sdma; | 1021 | struct sdma_engine *sdma = sdmac->sdma; |
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1093 | sdma_disable_channel(sdmac); | 1100 | sdma_disable_channel(sdmac); |
1094 | return 0; | 1101 | return 0; |
1095 | case DMA_SLAVE_CONFIG: | 1102 | case DMA_SLAVE_CONFIG: |
1096 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | 1103 | if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { |
1097 | sdmac->per_address = dmaengine_cfg->src_addr; | 1104 | sdmac->per_address = dmaengine_cfg->src_addr; |
1098 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; | 1105 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; |
1099 | sdmac->word_size = dmaengine_cfg->src_addr_width; | 1106 | sdmac->word_size = dmaengine_cfg->src_addr_width; |
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
1102 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; | 1109 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; |
1103 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | 1110 | sdmac->word_size = dmaengine_cfg->dst_addr_width; |
1104 | } | 1111 | } |
1112 | sdmac->direction = dmaengine_cfg->direction; | ||
1105 | return sdma_config_channel(sdmac); | 1113 | return sdma_config_channel(sdmac); |
1106 | default: | 1114 | default: |
1107 | return -ENOSYS; | 1115 | return -ENOSYS; |
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, | |||
1119 | 1127 | ||
1120 | last_used = chan->cookie; | 1128 | last_used = chan->cookie; |
1121 | 1129 | ||
1122 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | 1130 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, |
1131 | sdmac->chn_count - sdmac->chn_real_count); | ||
1123 | 1132 | ||
1124 | return sdmac->status; | 1133 | return sdmac->status; |
1125 | } | 1134 | } |
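Together, chn_count (bytes requested, summed at prep time) and chn_real_count (bytes the hardware reported done, summed at completion) let sdma_tx_status() report a real residue instead of the hard-coded 0 it returned before. Condensing the three hunks, with num_bd standing in for the driver's per-channel descriptor count:

    /* prep: accumulate the requested byte total */
    sdmac->chn_count = 0;
    for_each_sg(sgl, sg, sg_len, i)
            sdmac->chn_count += count;

    /* completion: accumulate what the hardware actually moved */
    sdmac->chn_real_count = 0;
    for (i = 0; i < num_bd; i++)
            sdmac->chn_real_count += sdmac->bd[i].mode.count;

    /* status: residue is the difference */
    dma_set_tx_state(txstate, sdmac->last_completed, last_used,
                     sdmac->chn_count - sdmac->chn_real_count);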
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 19a0c64d45d3..74f70aadf9e4 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
280 | * callbacks but must be called with the lock held. | 280 | * callbacks but must be called with the lock held. |
281 | */ | 281 | */ |
282 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | 282 | static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, |
283 | struct intel_mid_dma_desc *desc) | 283 | struct intel_mid_dma_desc *desc) |
284 | __releases(&midc->lock) __acquires(&midc->lock) | ||
284 | { | 285 | { |
285 | struct dma_async_tx_descriptor *txd = &desc->txd; | 286 | struct dma_async_tx_descriptor *txd = &desc->txd; |
286 | dma_async_tx_callback callback_txd = NULL; | 287 | dma_async_tx_callback callback_txd = NULL; |
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
311 | pci_pool_free(desc->lli_pool, desc->lli, | 312 | pci_pool_free(desc->lli_pool, desc->lli, |
312 | desc->lli_phys); | 313 | desc->lli_phys); |
313 | pci_pool_destroy(desc->lli_pool); | 314 | pci_pool_destroy(desc->lli_pool); |
315 | desc->lli = NULL; | ||
314 | } | 316 | } |
315 | list_move(&desc->desc_node, &midc->free_list); | 317 | list_move(&desc->desc_node, &midc->free_list); |
316 | midc->busy = false; | 318 | midc->busy = false; |
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | |||
395 | midc->dma->block_size); | 397 | midc->dma->block_size); |
396 | /*Populate SAR and DAR values*/ | 398 | /*Populate SAR and DAR values*/ |
397 | sg_phy_addr = sg_phys(sg); | 399 | sg_phy_addr = sg_phys(sg); |
398 | if (desc->dirn == DMA_TO_DEVICE) { | 400 | if (desc->dirn == DMA_MEM_TO_DEV) { |
399 | lli_bloc_desc->sar = sg_phy_addr; | 401 | lli_bloc_desc->sar = sg_phy_addr; |
400 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | 402 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; |
401 | } else if (desc->dirn == DMA_FROM_DEVICE) { | 403 | } else if (desc->dirn == DMA_DEV_TO_MEM) { |
402 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | 404 | lli_bloc_desc->sar = mids->dma_slave.src_addr; |
403 | lli_bloc_desc->dar = sg_phy_addr; | 405 | lli_bloc_desc->dar = sg_phy_addr; |
404 | } | 406 | } |
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
490 | 492 | ||
491 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 493 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
492 | if (ret != DMA_SUCCESS) { | 494 | if (ret != DMA_SUCCESS) { |
495 | spin_lock_bh(&midc->lock); | ||
493 | midc_scan_descriptors(to_middma_device(chan->device), midc); | 496 | midc_scan_descriptors(to_middma_device(chan->device), midc); |
497 | spin_unlock_bh(&midc->lock); | ||
494 | 498 | ||
495 | last_complete = midc->completed; | 499 | last_complete = midc->completed; |
496 | last_used = chan->cookie; | 500 | last_used = chan->cookie; |
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, | |||
566 | pci_pool_free(desc->lli_pool, desc->lli, | 570 | pci_pool_free(desc->lli_pool, desc->lli, |
567 | desc->lli_phys); | 571 | desc->lli_phys); |
568 | pci_pool_destroy(desc->lli_pool); | 572 | pci_pool_destroy(desc->lli_pool); |
573 | desc->lli = NULL; | ||
569 | } | 574 | } |
570 | list_move(&desc->desc_node, &midc->free_list); | 575 | list_move(&desc->desc_node, &midc->free_list); |
571 | } | 576 | } |
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
632 | if (midc->dma->pimr_mask) { | 637 | if (midc->dma->pimr_mask) { |
633 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | 638 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ |
634 | cfg_hi.cfgx.fifo_mode = 1; | 639 | cfg_hi.cfgx.fifo_mode = 1; |
635 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { | 640 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { |
636 | cfg_hi.cfgx.src_per = 0; | 641 | cfg_hi.cfgx.src_per = 0; |
637 | if (mids->device_instance == 0) | 642 | if (mids->device_instance == 0) |
638 | cfg_hi.cfgx.dst_per = 3; | 643 | cfg_hi.cfgx.dst_per = 3; |
639 | if (mids->device_instance == 1) | 644 | if (mids->device_instance == 1) |
640 | cfg_hi.cfgx.dst_per = 1; | 645 | cfg_hi.cfgx.dst_per = 1; |
641 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { | 646 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { |
642 | if (mids->device_instance == 0) | 647 | if (mids->device_instance == 0) |
643 | cfg_hi.cfgx.src_per = 2; | 648 | cfg_hi.cfgx.src_per = 2; |
644 | if (mids->device_instance == 1) | 649 | if (mids->device_instance == 1) |
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
682 | ctl_lo.ctlx.sinc = 0; | 687 | ctl_lo.ctlx.sinc = 0; |
683 | ctl_lo.ctlx.dinc = 0; | 688 | ctl_lo.ctlx.dinc = 0; |
684 | } else { | 689 | } else { |
685 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { | 690 | if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { |
686 | ctl_lo.ctlx.sinc = 0; | 691 | ctl_lo.ctlx.sinc = 0; |
687 | ctl_lo.ctlx.dinc = 2; | 692 | ctl_lo.ctlx.dinc = 2; |
688 | ctl_lo.ctlx.tt_fc = 1; | 693 | ctl_lo.ctlx.tt_fc = 1; |
689 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { | 694 | } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { |
690 | ctl_lo.ctlx.sinc = 2; | 695 | ctl_lo.ctlx.sinc = 2; |
691 | ctl_lo.ctlx.dinc = 0; | 696 | ctl_lo.ctlx.dinc = 0; |
692 | ctl_lo.ctlx.tt_fc = 2; | 697 | ctl_lo.ctlx.tt_fc = 2; |
@@ -732,7 +737,7 @@ err_desc_get: | |||
732 | */ | 737 | */ |
733 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | 738 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( |
734 | struct dma_chan *chan, struct scatterlist *sgl, | 739 | struct dma_chan *chan, struct scatterlist *sgl, |
735 | unsigned int sg_len, enum dma_data_direction direction, | 740 | unsigned int sg_len, enum dma_transfer_direction direction, |
736 | unsigned long flags) | 741 | unsigned long flags) |
737 | { | 742 | { |
738 | struct intel_mid_dma_chan *midc = NULL; | 743 | struct intel_mid_dma_chan *midc = NULL; |
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
868 | pm_runtime_get_sync(&mid->pdev->dev); | 873 | pm_runtime_get_sync(&mid->pdev->dev); |
869 | 874 | ||
870 | if (mid->state == SUSPENDED) { | 875 | if (mid->state == SUSPENDED) { |
871 | if (dma_resume(mid->pdev)) { | 876 | if (dma_resume(&mid->pdev->dev)) { |
872 | pr_err("ERR_MDMA: resume failed"); | 877 | pr_err("ERR_MDMA: resume failed"); |
873 | return -EFAULT; | 878 | return -EFAULT; |
874 | } | 879 | } |
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1099 | LNW_PERIPHRAL_MASK_SIZE); | 1104 | LNW_PERIPHRAL_MASK_SIZE); |
1100 | if (dma->mask_reg == NULL) { | 1105 | if (dma->mask_reg == NULL) { |
1101 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); | 1106 | pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); |
1102 | return -ENOMEM; | 1107 | err = -ENOMEM; |
1108 | goto err_ioremap; | ||
1103 | } | 1109 | } |
1104 | } else | 1110 | } else |
1105 | dma->mask_reg = NULL; | 1111 | dma->mask_reg = NULL; |
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
1196 | err_engine: | 1202 | err_engine: |
1197 | free_irq(pdev->irq, dma); | 1203 | free_irq(pdev->irq, dma); |
1198 | err_irq: | 1204 | err_irq: |
1205 | if (dma->mask_reg) | ||
1206 | iounmap(dma->mask_reg); | ||
1207 | err_ioremap: | ||
1199 | pci_pool_destroy(dma->dma_pool); | 1208 | pci_pool_destroy(dma->dma_pool); |
1200 | err_dma_pool: | 1209 | err_dma_pool: |
1201 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); | 1210 | pr_err("ERR_MDMA:setup_dma failed: %d\n", err); |
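Returning -ENOMEM straight from the failed ioremap leaked the pci_pool allocated earlier in mid_setup_dma(); the new err_ioremap label makes the failure unwind in reverse order of setup, joining the existing goto chain. Sketching the resulting idiom, with mask_base standing in for the peripheral-mask register address:

    dma->mask_reg = ioremap(mask_base, LNW_PERIPHRAL_MASK_SIZE);
    if (dma->mask_reg == NULL) {
            err = -ENOMEM;
            goto err_ioremap;        /* pool exists, mapping does not */
    }
    /* ... */
    err_irq:
            if (dma->mask_reg)
                    iounmap(dma->mask_reg);
    err_ioremap:
            pci_pool_destroy(dma->dma_pool);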
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | |||
1337 | * | 1346 | * |
1338 | * This function is called by OS when a power event occurs | 1347 | * This function is called by OS when a power event occurs |
1339 | */ | 1348 | */ |
1340 | int dma_suspend(struct pci_dev *pci, pm_message_t state) | 1349 | static int dma_suspend(struct device *dev) |
1341 | { | 1350 | { |
1351 | struct pci_dev *pci = to_pci_dev(dev); | ||
1342 | int i; | 1352 | int i; |
1343 | struct middma_device *device = pci_get_drvdata(pci); | 1353 | struct middma_device *device = pci_get_drvdata(pci); |
1344 | pr_debug("MDMA: dma_suspend called\n"); | 1354 | pr_debug("MDMA: dma_suspend called\n"); |
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state) | |||
1362 | * | 1372 | * |
1363 | * This function is called by OS when a power event occurs | 1373 | * This function is called by OS when a power event occurs |
1364 | */ | 1374 | */ |
1365 | int dma_resume(struct pci_dev *pci) | 1375 | int dma_resume(struct device *dev) |
1366 | { | 1376 | { |
1377 | struct pci_dev *pci = to_pci_dev(dev); | ||
1367 | int ret; | 1378 | int ret; |
1368 | struct middma_device *device = pci_get_drvdata(pci); | 1379 | struct middma_device *device = pci_get_drvdata(pci); |
1369 | 1380 | ||
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = { | |||
1429 | .runtime_suspend = dma_runtime_suspend, | 1440 | .runtime_suspend = dma_runtime_suspend, |
1430 | .runtime_resume = dma_runtime_resume, | 1441 | .runtime_resume = dma_runtime_resume, |
1431 | .runtime_idle = dma_runtime_idle, | 1442 | .runtime_idle = dma_runtime_idle, |
1443 | .suspend = dma_suspend, | ||
1444 | .resume = dma_resume, | ||
1432 | }; | 1445 | }; |
1433 | 1446 | ||
1434 | static struct pci_driver intel_mid_dma_pci_driver = { | 1447 | static struct pci_driver intel_mid_dma_pci_driver = { |
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = { | |||
1437 | .probe = intel_mid_dma_probe, | 1450 | .probe = intel_mid_dma_probe, |
1438 | .remove = __devexit_p(intel_mid_dma_remove), | 1451 | .remove = __devexit_p(intel_mid_dma_remove), |
1439 | #ifdef CONFIG_PM | 1452 | #ifdef CONFIG_PM |
1440 | .suspend = dma_suspend, | ||
1441 | .resume = dma_resume, | ||
1442 | .driver = { | 1453 | .driver = { |
1443 | .pm = &intel_mid_dma_pm, | 1454 | .pm = &intel_mid_dma_pm, |
1444 | }, | 1455 | }, |
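The power-management conversion drops the legacy pci_driver .suspend/.resume hooks in favor of dev_pm_ops: dma_suspend() and dma_resume() now take a struct device, recover the pci_dev via to_pci_dev(), and sit in the same ops table as the runtime-PM callbacks. (dma_resume() is also what alloc_chan_resources above calls when it finds the controller SUSPENDED.) The converted shape:

    static int dma_suspend(struct device *dev)
    {
            struct pci_dev *pci = to_pci_dev(dev);
            struct middma_device *device = pci_get_drvdata(pci);
            /* ... verify channels idle, mask interrupts, save state ... */
            return 0;
    }

    static const struct dev_pm_ops intel_mid_dma_pm = {
            .runtime_suspend = dma_runtime_suspend,
            .runtime_resume  = dma_runtime_resume,
            .suspend = dma_suspend,
            .resume  = dma_resume,
    };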
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index aea5ee88ce03..c83d35b97bd8 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc { | |||
262 | unsigned int lli_length; | 262 | unsigned int lli_length; |
263 | unsigned int current_lli; | 263 | unsigned int current_lli; |
264 | dma_addr_t next; | 264 | dma_addr_t next; |
265 | enum dma_data_direction dirn; | 265 | enum dma_transfer_direction dirn; |
266 | enum dma_status status; | 266 | enum dma_status status; |
267 | enum dma_slave_buswidth width; /*width of DMA txn*/ | 267 | enum dma_slave_buswidth width; /*width of DMA txn*/ |
268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | 268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ |
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | |||
296 | } | 296 | } |
297 | 297 | ||
298 | 298 | ||
299 | int dma_resume(struct pci_dev *pci); | 299 | int dma_resume(struct device *dev); |
300 | 300 | ||
301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | 301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ |
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index e03f811a83dd..04be90b645b8 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c | |||
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) | |||
1735 | spin_unlock_bh(&iop_chan->lock); | 1735 | spin_unlock_bh(&iop_chan->lock); |
1736 | } | 1736 | } |
1737 | 1737 | ||
1738 | MODULE_ALIAS("platform:iop-adma"); | ||
1739 | |||
1740 | static struct platform_driver iop_adma_driver = { | 1738 | static struct platform_driver iop_adma_driver = { |
1741 | .probe = iop_adma_probe, | 1739 | .probe = iop_adma_probe, |
1742 | .remove = __devexit_p(iop_adma_remove), | 1740 | .remove = __devexit_p(iop_adma_remove), |
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = { | |||
1746 | }, | 1744 | }, |
1747 | }; | 1745 | }; |
1748 | 1746 | ||
1749 | static int __init iop_adma_init (void) | 1747 | module_platform_driver(iop_adma_driver); |
1750 | { | ||
1751 | return platform_driver_register(&iop_adma_driver); | ||
1752 | } | ||
1753 | |||
1754 | static void __exit iop_adma_exit (void) | ||
1755 | { | ||
1756 | platform_driver_unregister(&iop_adma_driver); | ||
1757 | return; | ||
1758 | } | ||
1759 | module_exit(iop_adma_exit); | ||
1760 | module_init(iop_adma_init); | ||
1761 | 1748 | ||
1762 | MODULE_AUTHOR("Intel Corporation"); | 1749 | MODULE_AUTHOR("Intel Corporation"); |
1763 | MODULE_DESCRIPTION("IOP ADMA Engine Driver"); | 1750 | MODULE_DESCRIPTION("IOP ADMA Engine Driver"); |
1764 | MODULE_LICENSE("GPL"); | 1751 | MODULE_LICENSE("GPL"); |
1752 | MODULE_ALIAS("platform:iop-adma"); | ||
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 0e5ef33f90a1..6212b16e8cf2 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c | |||
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
312 | case IPU_PIX_FMT_RGB565: | 312 | case IPU_PIX_FMT_RGB565: |
313 | params->ip.bpp = 2; | 313 | params->ip.bpp = 2; |
314 | params->ip.pfs = 4; | 314 | params->ip.pfs = 4; |
315 | params->ip.npb = 7; | 315 | params->ip.npb = 15; |
316 | params->ip.sat = 2; /* SAT = 32-bit access */ | 316 | params->ip.sat = 2; /* SAT = 32-bit access */ |
317 | params->ip.ofs0 = 0; /* Red bit offset */ | 317 | params->ip.ofs0 = 0; /* Red bit offset */ |
318 | params->ip.ofs1 = 5; /* Green bit offset */ | 318 | params->ip.ofs1 = 5; /* Green bit offset */ |
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, | |||
422 | params->pp.nsb = 1; | 422 | params->pp.nsb = 1; |
423 | } | 423 | } |
424 | 424 | ||
425 | static void ipu_ch_param_set_burst_size(union chan_param_mem *params, | ||
426 | uint16_t burst_pixels) | ||
427 | { | ||
428 | params->pp.npb = burst_pixels - 1; | ||
429 | } | ||
430 | |||
431 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, | 425 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, |
432 | dma_addr_t buf0, dma_addr_t buf1) | 426 | dma_addr_t buf0, dma_addr_t buf1) |
433 | { | 427 | { |
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan, | |||
690 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); | 684 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); |
691 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); | 685 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); |
692 | ipu_ch_param_set_rotation(¶ms, rot_mode); | 686 | ipu_ch_param_set_rotation(¶ms, rot_mode); |
693 | /* Some channels (rotation) have restriction on burst length */ | ||
694 | switch (channel) { | ||
695 | case IDMAC_IC_7: /* Hangs with burst 8, 16, other values | ||
696 | invalid - Table 44-30 */ | ||
697 | /* | ||
698 | ipu_ch_param_set_burst_size(¶ms, 8); | ||
699 | */ | ||
700 | break; | ||
701 | case IDMAC_SDC_0: | ||
702 | case IDMAC_SDC_1: | ||
703 | /* In original code only IPU_PIX_FMT_RGB565 was setting burst */ | ||
704 | ipu_ch_param_set_burst_size(¶ms, 16); | ||
705 | break; | ||
706 | case IDMAC_IC_0: | ||
707 | default: | ||
708 | break; | ||
709 | } | ||
710 | 687 | ||
711 | spin_lock_irqsave(&ipu->lock, flags); | 688 | spin_lock_irqsave(&ipu->lock, flags); |
712 | 689 | ||
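The removed ipu_ch_param_set_burst_size() documented the encoding: npb holds the burst length in pixels minus one. The removed switch forced a 16-pixel burst on the IDMAC_SDC_0/1 display channels, and that value is now folded straight into the RGB565 parameter table:

    /* npb = burst_pixels - 1; the old per-channel override of 16
     * pixels for the SDC channels becomes a table constant. */
    params->ip.npb = 16 - 1;    /* was 7, i.e. an 8-pixel burst */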
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg) | |||
1364 | /* Allocate and initialise a transfer descriptor. */ | 1341 | /* Allocate and initialise a transfer descriptor. */ |
1365 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, | 1342 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, |
1366 | struct scatterlist *sgl, unsigned int sg_len, | 1343 | struct scatterlist *sgl, unsigned int sg_len, |
1367 | enum dma_data_direction direction, unsigned long tx_flags) | 1344 | enum dma_transfer_direction direction, unsigned long tx_flags) |
1368 | { | 1345 | { |
1369 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1346 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1370 | struct idmac_tx_desc *desc = NULL; | 1347 | struct idmac_tx_desc *desc = NULL; |
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan | |||
1376 | chan->chan_id != IDMAC_IC_7) | 1353 | chan->chan_id != IDMAC_IC_7) |
1377 | return NULL; | 1354 | return NULL; |
1378 | 1355 | ||
1379 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { | 1356 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { |
1380 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); | 1357 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); |
1381 | return NULL; | 1358 | return NULL; |
1382 | } | 1359 | } |
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 8ba4edc6185e..4d6d4cf66949 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c | |||
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = { | |||
835 | }, | 835 | }, |
836 | }; | 836 | }; |
837 | 837 | ||
838 | static int __init mpc_dma_init(void) | 838 | module_platform_driver(mpc_dma_driver); |
839 | { | ||
840 | return platform_driver_register(&mpc_dma_driver); | ||
841 | } | ||
842 | module_init(mpc_dma_init); | ||
843 | |||
844 | static void __exit mpc_dma_exit(void) | ||
845 | { | ||
846 | platform_driver_unregister(&mpc_dma_driver); | ||
847 | } | ||
848 | module_exit(mpc_dma_exit); | ||
849 | 839 | ||
850 | MODULE_LICENSE("GPL"); | 840 | MODULE_LICENSE("GPL"); |
851 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); | 841 | MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); |
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index fc903c0ed234..b06cd4ca626f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #define HW_APBHX_CTRL0 0x000 | 44 | #define HW_APBHX_CTRL0 0x000 |
45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) | 45 | #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) |
46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) | 46 | #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) |
47 | #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 | ||
48 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 | 47 | #define BP_APBH_CTRL0_RESET_CHANNEL 16 |
49 | #define HW_APBHX_CTRL1 0x010 | 48 | #define HW_APBHX_CTRL1 0x010 |
50 | #define HW_APBHX_CTRL2 0x020 | 49 | #define HW_APBHX_CTRL2 0x020 |
@@ -111,6 +110,7 @@ struct mxs_dma_chan { | |||
111 | int chan_irq; | 110 | int chan_irq; |
112 | struct mxs_dma_ccw *ccw; | 111 | struct mxs_dma_ccw *ccw; |
113 | dma_addr_t ccw_phys; | 112 | dma_addr_t ccw_phys; |
113 | int desc_count; | ||
114 | dma_cookie_t last_completed; | 114 | dma_cookie_t last_completed; |
115 | enum dma_status status; | 115 | enum dma_status status; |
116 | unsigned int flags; | 116 | unsigned int flags; |
@@ -130,23 +130,6 @@ struct mxs_dma_engine { | |||
130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; | 130 | struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable) | ||
134 | { | ||
135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | ||
136 | int chan_id = mxs_chan->chan.chan_id; | ||
137 | int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR; | ||
138 | |||
139 | /* enable apbh channel clock */ | ||
140 | if (dma_is_apbh()) { | ||
141 | if (apbh_is_old()) | ||
142 | writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), | ||
143 | mxs_dma->base + HW_APBHX_CTRL0 + set_clr); | ||
144 | else | ||
145 | writel(1 << chan_id, | ||
146 | mxs_dma->base + HW_APBHX_CTRL0 + set_clr); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) | 133 | static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) |
151 | { | 134 | { |
152 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 135 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
165 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 148 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
166 | int chan_id = mxs_chan->chan.chan_id; | 149 | int chan_id = mxs_chan->chan.chan_id; |
167 | 150 | ||
168 | /* clkgate needs to be enabled before writing other registers */ | ||
169 | mxs_dma_clkgate(mxs_chan, 1); | ||
170 | |||
171 | /* set cmd_addr up */ | 151 | /* set cmd_addr up */ |
172 | writel(mxs_chan->ccw_phys, | 152 | writel(mxs_chan->ccw_phys, |
173 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); | 153 | mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); |
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) | |||
178 | 158 | ||
179 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) | 159 | static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) |
180 | { | 160 | { |
181 | /* disable apbh channel clock */ | ||
182 | mxs_dma_clkgate(mxs_chan, 0); | ||
183 | |||
184 | mxs_chan->status = DMA_SUCCESS; | 161 | mxs_chan->status = DMA_SUCCESS; |
185 | } | 162 | } |
186 | 163 | ||
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) | |||
268 | /* | 245 | /* |
269 | * When both completion and error of termination bits set at the | 246 | * When both completion and error of termination bits set at the |
270 | * same time, we do not take it as an error. IOW, it only becomes | 247 | * same time, we do not take it as an error. IOW, it only becomes |
271 | * an error we need to handler here in case of ether it's (1) an bus | 248 | * an error we need to handle here in case of either it's (1) a bus |
272 | * error or (2) a termination error with no completion. | 249 | * error or (2) a termination error with no completion. |
273 | */ | 250 | */ |
274 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ | 251 | stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ |
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) | |||
338 | if (ret) | 315 | if (ret) |
339 | goto err_clk; | 316 | goto err_clk; |
340 | 317 | ||
341 | /* clkgate needs to be enabled for reset to finish */ | ||
342 | mxs_dma_clkgate(mxs_chan, 1); | ||
343 | mxs_dma_reset_chan(mxs_chan); | 318 | mxs_dma_reset_chan(mxs_chan); |
344 | mxs_dma_clkgate(mxs_chan, 0); | ||
345 | 319 | ||
346 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); | 320 | dma_async_tx_descriptor_init(&mxs_chan->desc, chan); |
347 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; | 321 | mxs_chan->desc.tx_submit = mxs_dma_tx_submit; |
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
377 | 351 | ||
378 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | 352 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( |
379 | struct dma_chan *chan, struct scatterlist *sgl, | 353 | struct dma_chan *chan, struct scatterlist *sgl, |
380 | unsigned int sg_len, enum dma_data_direction direction, | 354 | unsigned int sg_len, enum dma_transfer_direction direction, |
381 | unsigned long append) | 355 | unsigned long append) |
382 | { | 356 | { |
383 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 357 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
386 | struct scatterlist *sg; | 360 | struct scatterlist *sg; |
387 | int i, j; | 361 | int i, j; |
388 | u32 *pio; | 362 | u32 *pio; |
389 | static int idx; | 363 | int idx = append ? mxs_chan->desc_count : 0; |
390 | 364 | ||
391 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) | 365 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) |
392 | return NULL; | 366 | return NULL; |
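The function-scoped static idx was shared by every channel and every caller of the prep routine, so back-to-back or concurrent preparations could corrupt each other's descriptor chains. It becomes a per-call index seeded from the new per-channel desc_count when appending, and both prep paths store the final index back:

    /* Per-channel descriptor indexing: an append continues after the
     * last prepared descriptor, a fresh chain restarts at zero. */
    int idx = append ? mxs_chan->desc_count : 0;
    /* ... build ccw[idx++] entries ... */
    mxs_chan->desc_count = idx;   /* the cyclic variant stores i */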
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
417 | idx = 0; | 391 | idx = 0; |
418 | } | 392 | } |
419 | 393 | ||
420 | if (direction == DMA_NONE) { | 394 | if (direction == DMA_TRANS_NONE) { |
421 | ccw = &mxs_chan->ccw[idx++]; | 395 | ccw = &mxs_chan->ccw[idx++]; |
422 | pio = (u32 *) sgl; | 396 | pio = (u32 *) sgl; |
423 | 397 | ||
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
450 | ccw->bits |= CCW_CHAIN; | 424 | ccw->bits |= CCW_CHAIN; |
451 | ccw->bits |= CCW_HALT_ON_TERM; | 425 | ccw->bits |= CCW_HALT_ON_TERM; |
452 | ccw->bits |= CCW_TERM_FLUSH; | 426 | ccw->bits |= CCW_TERM_FLUSH; |
453 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | 427 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
454 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, | 428 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, |
455 | COMMAND); | 429 | COMMAND); |
456 | 430 | ||
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
462 | } | 436 | } |
463 | } | 437 | } |
464 | } | 438 | } |
439 | mxs_chan->desc_count = idx; | ||
465 | 440 | ||
466 | return &mxs_chan->desc; | 441 | return &mxs_chan->desc; |
467 | 442 | ||
@@ -472,7 +447,7 @@ err_out: | |||
472 | 447 | ||
473 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | 448 | static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( |
474 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | 449 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
475 | size_t period_len, enum dma_data_direction direction) | 450 | size_t period_len, enum dma_transfer_direction direction) |
476 | { | 451 | { |
477 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 452 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
478 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 453 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
515 | ccw->bits |= CCW_IRQ; | 490 | ccw->bits |= CCW_IRQ; |
516 | ccw->bits |= CCW_HALT_ON_TERM; | 491 | ccw->bits |= CCW_HALT_ON_TERM; |
517 | ccw->bits |= CCW_TERM_FLUSH; | 492 | ccw->bits |= CCW_TERM_FLUSH; |
518 | ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? | 493 | ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
519 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); | 494 | MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); |
520 | 495 | ||
521 | dma_addr += period_len; | 496 | dma_addr += period_len; |
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( | |||
523 | 498 | ||
524 | i++; | 499 | i++; |
525 | } | 500 | } |
501 | mxs_chan->desc_count = i; | ||
526 | 502 | ||
527 | return &mxs_chan->desc; | 503 | return &mxs_chan->desc; |
528 | 504 | ||
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | |||
539 | 515 | ||
540 | switch (cmd) { | 516 | switch (cmd) { |
541 | case DMA_TERMINATE_ALL: | 517 | case DMA_TERMINATE_ALL: |
542 | mxs_dma_disable_chan(mxs_chan); | ||
543 | mxs_dma_reset_chan(mxs_chan); | 518 | mxs_dma_reset_chan(mxs_chan); |
519 | mxs_dma_disable_chan(mxs_chan); | ||
544 | break; | 520 | break; |
545 | case DMA_PAUSE: | 521 | case DMA_PAUSE: |
546 | mxs_dma_pause_chan(mxs_chan); | 522 | mxs_dma_pause_chan(mxs_chan); |
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | |||
580 | 556 | ||
581 | ret = clk_prepare_enable(mxs_dma->clk); | 557 | ret = clk_prepare_enable(mxs_dma->clk); |
582 | if (ret) | 558 | if (ret) |
583 | goto err_out; | 559 | return ret; |
584 | 560 | ||
585 | ret = mxs_reset_block(mxs_dma->base); | 561 | ret = mxs_reset_block(mxs_dma->base); |
586 | if (ret) | 562 | if (ret) |
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) | |||
604 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, | 580 | writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, |
605 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); | 581 | mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); |
606 | 582 | ||
607 | clk_disable_unprepare(mxs_dma->clk); | ||
608 | |||
609 | return 0; | ||
610 | |||
611 | err_out: | 583 | err_out: |
584 | clk_disable_unprepare(mxs_dma->clk); | ||
612 | return ret; | 585 | return ret; |
613 | } | 586 | } |
614 | 587 | ||
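The prep-callback signature changes above swap enum dma_data_direction for the new enum dma_transfer_direction throughout the slave API. A minimal client-side sketch using the new values; the channel, scatterlist and flags handling are hypothetical (note that mxs-dma interprets the last argument as "append" rather than as generic flags):

#include <linux/dmaengine.h>

static int sketch_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
			   unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	/* DMA_MEM_TO_DEV replaces the old DMA_TO_DEVICE for slave TX */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}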
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index a6d0e3dbed07..823f58179f9d 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Topcliff PCH DMA controller driver | 2 | * Topcliff PCH DMA controller driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD. | 4 | * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -99,7 +99,7 @@ struct pch_dma_desc { | |||
99 | struct pch_dma_chan { | 99 | struct pch_dma_chan { |
100 | struct dma_chan chan; | 100 | struct dma_chan chan; |
101 | void __iomem *membase; | 101 | void __iomem *membase; |
102 | enum dma_data_direction dir; | 102 | enum dma_transfer_direction dir; |
103 | struct tasklet_struct tasklet; | 103 | struct tasklet_struct tasklet; |
104 | unsigned long err_status; | 104 | unsigned long err_status; |
105 | 105 | ||
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | 224 | mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << |
225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); | 225 | (DMA_CTL0_BITS_PER_CH * chan->chan_id)); |
226 | val &= mask_mode; | 226 | val &= mask_mode; |
227 | if (pd_chan->dir == DMA_TO_DEVICE) | 227 | if (pd_chan->dir == DMA_MEM_TO_DEV) |
228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + | 228 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + |
229 | DMA_CTL0_DIR_SHIFT_BITS); | 229 | DMA_CTL0_DIR_SHIFT_BITS); |
230 | else | 230 | else |
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan) | |||
242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << | 242 | mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << |
243 | (DMA_CTL0_BITS_PER_CH * ch)); | 243 | (DMA_CTL0_BITS_PER_CH * ch)); |
244 | val &= mask_mode; | 244 | val &= mask_mode; |
245 | if (pd_chan->dir == DMA_TO_DEVICE) | 245 | if (pd_chan->dir == DMA_MEM_TO_DEV) |
246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + | 246 | val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + |
247 | DMA_CTL0_DIR_SHIFT_BITS); | 247 | DMA_CTL0_DIR_SHIFT_BITS); |
248 | else | 248 | else |
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan) | |||
607 | 607 | ||
608 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | 608 | static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, |
609 | struct scatterlist *sgl, unsigned int sg_len, | 609 | struct scatterlist *sgl, unsigned int sg_len, |
610 | enum dma_data_direction direction, unsigned long flags) | 610 | enum dma_transfer_direction direction, unsigned long flags) |
611 | { | 611 | { |
612 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); | 612 | struct pch_dma_chan *pd_chan = to_pd_chan(chan); |
613 | struct pch_dma_slave *pd_slave = chan->private; | 613 | struct pch_dma_slave *pd_slave = chan->private; |
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, | |||
623 | return NULL; | 623 | return NULL; |
624 | } | 624 | } |
625 | 625 | ||
626 | if (direction == DMA_FROM_DEVICE) | 626 | if (direction == DMA_DEV_TO_MEM) |
627 | reg = pd_slave->rx_reg; | 627 | reg = pd_slave->rx_reg; |
628 | else if (direction == DMA_TO_DEVICE) | 628 | else if (direction == DMA_MEM_TO_DEV) |
629 | reg = pd_slave->tx_reg; | 629 | reg = pd_slave->tx_reg; |
630 | else | 630 | else |
631 | return NULL; | 631 | return NULL; |
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev) | |||
1018 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E | 1018 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E |
1019 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 | 1019 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 |
1020 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B | 1020 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B |
1021 | #define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810 | ||
1022 | #define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815 | ||
1021 | 1023 | ||
1022 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | 1024 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { |
1023 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | 1025 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { | |||
1030 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ | 1032 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ |
1031 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ | 1033 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ |
1032 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ | 1034 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ |
1035 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */ | ||
1036 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */ | ||
1033 | { 0, }, | 1037 | { 0, }, |
1034 | }; | 1038 | }; |
1035 | 1039 | ||
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void) | |||
1057 | module_init(pch_dma_init); | 1061 | module_init(pch_dma_init); |
1058 | module_exit(pch_dma_exit); | 1062 | module_exit(pch_dma_exit); |
1059 | 1063 | ||
1060 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " | 1064 | MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH " |
1061 | "DMA controller driver"); | 1065 | "DMA controller driver"); |
1062 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 1066 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
1063 | MODULE_LICENSE("GPL v2"); | 1067 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 09adcfcd953e..b8ec03ee8e22 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned | |||
350 | case DMA_SLAVE_CONFIG: | 350 | case DMA_SLAVE_CONFIG: |
351 | slave_config = (struct dma_slave_config *)arg; | 351 | slave_config = (struct dma_slave_config *)arg; |
352 | 352 | ||
353 | if (slave_config->direction == DMA_TO_DEVICE) { | 353 | if (slave_config->direction == DMA_MEM_TO_DEV) { |
354 | if (slave_config->dst_addr) | 354 | if (slave_config->dst_addr) |
355 | pch->fifo_addr = slave_config->dst_addr; | 355 | pch->fifo_addr = slave_config->dst_addr; |
356 | if (slave_config->dst_addr_width) | 356 | if (slave_config->dst_addr_width) |
357 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | 357 | pch->burst_sz = __ffs(slave_config->dst_addr_width); |
358 | if (slave_config->dst_maxburst) | 358 | if (slave_config->dst_maxburst) |
359 | pch->burst_len = slave_config->dst_maxburst; | 359 | pch->burst_len = slave_config->dst_maxburst; |
360 | } else if (slave_config->direction == DMA_FROM_DEVICE) { | 360 | } else if (slave_config->direction == DMA_DEV_TO_MEM) { |
361 | if (slave_config->src_addr) | 361 | if (slave_config->src_addr) |
362 | pch->fifo_addr = slave_config->src_addr; | 362 | pch->fifo_addr = slave_config->src_addr; |
363 | if (slave_config->src_addr_width) | 363 | if (slave_config->src_addr_width) |
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |||
621 | 621 | ||
622 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | 622 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( |
623 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | 623 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, |
624 | size_t period_len, enum dma_data_direction direction) | 624 | size_t period_len, enum dma_transfer_direction direction) |
625 | { | 625 | { |
626 | struct dma_pl330_desc *desc; | 626 | struct dma_pl330_desc *desc; |
627 | struct dma_pl330_chan *pch = to_pchan(chan); | 627 | struct dma_pl330_chan *pch = to_pchan(chan); |
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |||
636 | } | 636 | } |
637 | 637 | ||
638 | switch (direction) { | 638 | switch (direction) { |
639 | case DMA_TO_DEVICE: | 639 | case DMA_MEM_TO_DEV: |
640 | desc->rqcfg.src_inc = 1; | 640 | desc->rqcfg.src_inc = 1; |
641 | desc->rqcfg.dst_inc = 0; | 641 | desc->rqcfg.dst_inc = 0; |
642 | desc->req.rqtype = MEMTODEV; | 642 | desc->req.rqtype = MEMTODEV; |
643 | src = dma_addr; | 643 | src = dma_addr; |
644 | dst = pch->fifo_addr; | 644 | dst = pch->fifo_addr; |
645 | break; | 645 | break; |
646 | case DMA_FROM_DEVICE: | 646 | case DMA_DEV_TO_MEM: |
647 | desc->rqcfg.src_inc = 0; | 647 | desc->rqcfg.src_inc = 0; |
648 | desc->rqcfg.dst_inc = 1; | 648 | desc->rqcfg.dst_inc = 1; |
649 | desc->req.rqtype = DEVTOMEM; | 649 | desc->req.rqtype = DEVTOMEM; |
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |||
710 | 710 | ||
711 | static struct dma_async_tx_descriptor * | 711 | static struct dma_async_tx_descriptor * |
712 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 712 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
713 | unsigned int sg_len, enum dma_data_direction direction, | 713 | unsigned int sg_len, enum dma_transfer_direction direction, |
714 | unsigned long flg) | 714 | unsigned long flg) |
715 | { | 715 | { |
716 | struct dma_pl330_desc *first, *desc = NULL; | 716 | struct dma_pl330_desc *first, *desc = NULL; |
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
759 | else | 759 | else |
760 | list_add_tail(&desc->node, &first->node); | 760 | list_add_tail(&desc->node, &first->node); |
761 | 761 | ||
762 | if (direction == DMA_TO_DEVICE) { | 762 | if (direction == DMA_MEM_TO_DEV) { |
763 | desc->rqcfg.src_inc = 1; | 763 | desc->rqcfg.src_inc = 1; |
764 | desc->rqcfg.dst_inc = 0; | 764 | desc->rqcfg.dst_inc = 0; |
765 | desc->req.rqtype = MEMTODEV; | 765 | desc->req.rqtype = MEMTODEV; |
@@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
834 | 834 | ||
835 | amba_set_drvdata(adev, pdmac); | 835 | amba_set_drvdata(adev, pdmac); |
836 | 836 | ||
837 | #ifdef CONFIG_PM_RUNTIME | 837 | #ifndef CONFIG_PM_RUNTIME |
838 | /* to use the runtime PM helper functions */ | ||
839 | pm_runtime_enable(&adev->dev); | ||
840 | |||
841 | /* enable the power domain */ | ||
842 | if (pm_runtime_get_sync(&adev->dev)) { | ||
843 | dev_err(&adev->dev, "failed to get runtime pm\n"); | ||
844 | ret = -ENODEV; | ||
845 | goto probe_err1; | ||
846 | } | ||
847 | #else | ||
848 | /* enable dma clk */ | 838 | /* enable dma clk */ |
849 | clk_enable(pdmac->clk); | 839 | clk_enable(pdmac->clk); |
850 | #endif | 840 | #endif |
@@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev) | |||
977 | res = &adev->res; | 967 | res = &adev->res; |
978 | release_mem_region(res->start, resource_size(res)); | 968 | release_mem_region(res->start, resource_size(res)); |
979 | 969 | ||
980 | #ifdef CONFIG_PM_RUNTIME | 970 | #ifndef CONFIG_PM_RUNTIME |
981 | pm_runtime_put(&adev->dev); | ||
982 | pm_runtime_disable(&adev->dev); | ||
983 | #else | ||
984 | clk_disable(pdmac->clk); | 971 | clk_disable(pdmac->clk); |
985 | #endif | 972 | #endif |
986 | 973 | ||
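The DMA_SLAVE_CONFIG branch above selects the dst_* or src_* half of struct dma_slave_config based on the new direction values. A minimal sketch of a client filling that structure for memory-to-device transfers; the FIFO address and burst length are placeholder values:

#include <linux/dmaengine.h>

static int sketch_config_tx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};

	/* pl330_control() reads the dst_* fields for DMA_MEM_TO_DEV */
	return dmaengine_slave_config(chan, &cfg);
}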
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 81809c2b46ab..54043cd831c8 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/dmaengine.h> | 24 | #include <linux/dmaengine.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
28 | #include <linux/pm_runtime.h> | 27 | #include <linux/pm_runtime.h> |
29 | #include <linux/sh_dma.h> | 28 | #include <linux/sh_dma.h> |
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices); | |||
57 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; | 56 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; |
58 | 57 | ||
59 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 58 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
59 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); | ||
60 | |||
61 | static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) | ||
62 | { | ||
63 | struct sh_dmae_device *shdev = to_sh_dev(sh_dc); | ||
64 | |||
65 | __raw_writel(data, shdev->chan_reg + | ||
66 | shdev->pdata->channel[sh_dc->id].chclr_offset); | ||
67 | } | ||
60 | 68 | ||
61 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 69 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
62 | { | 70 | { |
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) | |||
129 | 137 | ||
130 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); | 138 | dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); |
131 | 139 | ||
140 | if (shdev->pdata->chclr_present) { | ||
141 | int i; | ||
142 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
143 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
144 | if (sh_chan) | ||
145 | chclr_write(sh_chan, 0); | ||
146 | } | ||
147 | } | ||
148 | |||
132 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); | 149 | dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); |
133 | 150 | ||
134 | dmaor = dmaor_read(shdev); | 151 | dmaor = dmaor_read(shdev); |
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev) | |||
139 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); | 156 | dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); |
140 | return -EIO; | 157 | return -EIO; |
141 | } | 158 | } |
159 | if (shdev->pdata->dmaor_init & ~dmaor) | ||
160 | dev_warn(shdev->common.dev, | ||
161 | "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", | ||
162 | dmaor, shdev->pdata->dmaor_init); | ||
142 | return 0; | 163 | return 0; |
143 | } | 164 | } |
144 | 165 | ||
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | |||
259 | return 0; | 280 | return 0; |
260 | } | 281 | } |
261 | 282 | ||
262 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); | ||
263 | |||
264 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | 283 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) |
265 | { | 284 | { |
266 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; | 285 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; |
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | |||
340 | sh_chan_xfer_ld_queue(sh_chan); | 359 | sh_chan_xfer_ld_queue(sh_chan); |
341 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; | 360 | sh_chan->pm_state = DMAE_PM_ESTABLISHED; |
342 | } | 361 | } |
362 | } else { | ||
363 | sh_chan->pm_state = DMAE_PM_PENDING; | ||
343 | } | 364 | } |
344 | 365 | ||
345 | spin_unlock_irq(&sh_chan->desc_lock); | 366 | spin_unlock_irq(&sh_chan->desc_lock); |
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan) | |||
479 | * @sh_chan: DMA channel | 500 | * @sh_chan: DMA channel |
480 | * @flags: DMA transfer flags | 501 | * @flags: DMA transfer flags |
481 | * @dest: destination DMA address, incremented when direction equals | 502 | * @dest: destination DMA address, incremented when direction equals |
482 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL | 503 | * DMA_DEV_TO_MEM |
483 | * @src: source DMA address, incremented when direction equals | 504 | * @src: source DMA address, incremented when direction equals |
484 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL | 505 | * DMA_MEM_TO_DEV |
485 | * @len: DMA transfer length | 506 | * @len: DMA transfer length |
486 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | 507 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY |
487 | * @direction: needed for slave DMA to decide which address to keep constant, | 508 | * @direction: needed for slave DMA to decide which address to keep constant, |
488 | * equals DMA_BIDIRECTIONAL for MEMCPY | 509 | * equals DMA_MEM_TO_MEM for MEMCPY |
489 | * Returns 0 or an error | 510 | * Returns 0 or an error |
490 | * Locks: called with desc_lock held | 511 | * Locks: called with desc_lock held |
491 | */ | 512 | */ |
492 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | 513 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, |
493 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | 514 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, |
494 | struct sh_desc **first, enum dma_data_direction direction) | 515 | struct sh_desc **first, enum dma_transfer_direction direction) |
495 | { | 516 | { |
496 | struct sh_desc *new; | 517 | struct sh_desc *new; |
497 | size_t copy_size; | 518 | size_t copy_size; |
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | |||
531 | new->direction = direction; | 552 | new->direction = direction; |
532 | 553 | ||
533 | *len -= copy_size; | 554 | *len -= copy_size; |
534 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) | 555 | if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) |
535 | *src += copy_size; | 556 | *src += copy_size; |
536 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) | 557 | if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) |
537 | *dest += copy_size; | 558 | *dest += copy_size; |
538 | 559 | ||
539 | return new; | 560 | return new; |
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | |||
546 | * converted to scatter-gather to guarantee consistent locking and a correct | 567 | * converted to scatter-gather to guarantee consistent locking and a correct |
547 | * list manipulation. For slave DMA direction carries the usual meaning, and, | 568 | * list manipulation. For slave DMA direction carries the usual meaning, and, |
548 | * logically, the SG list is RAM and the addr variable contains slave address, | 569 | * logically, the SG list is RAM and the addr variable contains slave address, |
549 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL | 570 | * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM |
550 | * and the SG list contains only one element and points at the source buffer. | 571 | * and the SG list contains only one element and points at the source buffer. |
551 | */ | 572 | */ |
552 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | 573 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, |
553 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | 574 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, |
554 | enum dma_data_direction direction, unsigned long flags) | 575 | enum dma_transfer_direction direction, unsigned long flags) |
555 | { | 576 | { |
556 | struct scatterlist *sg; | 577 | struct scatterlist *sg; |
557 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | 578 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; |
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c | |||
592 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", | 613 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", |
593 | i, sg, len, (unsigned long long)sg_addr); | 614 | i, sg, len, (unsigned long long)sg_addr); |
594 | 615 | ||
595 | if (direction == DMA_FROM_DEVICE) | 616 | if (direction == DMA_DEV_TO_MEM) |
596 | new = sh_dmae_add_desc(sh_chan, flags, | 617 | new = sh_dmae_add_desc(sh_chan, flags, |
597 | &sg_addr, addr, &len, &first, | 618 | &sg_addr, addr, &len, &first, |
598 | direction); | 619 | direction); |
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | |||
646 | sg_dma_address(&sg) = dma_src; | 667 | sg_dma_address(&sg) = dma_src; |
647 | sg_dma_len(&sg) = len; | 668 | sg_dma_len(&sg) = len; |
648 | 669 | ||
649 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, | 670 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, |
650 | flags); | 671 | flags); |
651 | } | 672 | } |
652 | 673 | ||
653 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | 674 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( |
654 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 675 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
655 | enum dma_data_direction direction, unsigned long flags) | 676 | enum dma_transfer_direction direction, unsigned long flags) |
656 | { | 677 | { |
657 | struct sh_dmae_slave *param; | 678 | struct sh_dmae_slave *param; |
658 | struct sh_dmae_chan *sh_chan; | 679 | struct sh_dmae_chan *sh_chan; |
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data) | |||
996 | spin_lock_irq(&sh_chan->desc_lock); | 1017 | spin_lock_irq(&sh_chan->desc_lock); |
997 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 1018 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
998 | if (desc->mark == DESC_SUBMITTED && | 1019 | if (desc->mark == DESC_SUBMITTED && |
999 | ((desc->direction == DMA_FROM_DEVICE && | 1020 | ((desc->direction == DMA_DEV_TO_MEM && |
1000 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | 1021 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || |
1001 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | 1022 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { |
1002 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | 1023 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", |
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1225 | 1246 | ||
1226 | platform_set_drvdata(pdev, shdev); | 1247 | platform_set_drvdata(pdev, shdev); |
1227 | 1248 | ||
1249 | shdev->common.dev = &pdev->dev; | ||
1250 | |||
1228 | pm_runtime_enable(&pdev->dev); | 1251 | pm_runtime_enable(&pdev->dev); |
1229 | pm_runtime_get_sync(&pdev->dev); | 1252 | pm_runtime_get_sync(&pdev->dev); |
1230 | 1253 | ||
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev) | |||
1254 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | 1277 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; |
1255 | shdev->common.device_control = sh_dmae_control; | 1278 | shdev->common.device_control = sh_dmae_control; |
1256 | 1279 | ||
1257 | shdev->common.dev = &pdev->dev; | ||
1258 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 1280 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
1259 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | 1281 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; |
1260 | 1282 | ||
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev) | |||
1435 | #ifdef CONFIG_PM | 1457 | #ifdef CONFIG_PM |
1436 | static int sh_dmae_suspend(struct device *dev) | 1458 | static int sh_dmae_suspend(struct device *dev) |
1437 | { | 1459 | { |
1438 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | ||
1439 | int i; | ||
1440 | |||
1441 | for (i = 0; i < shdev->pdata->channel_num; i++) { | ||
1442 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | ||
1443 | if (sh_chan->descs_allocated) | ||
1444 | sh_chan->pm_error = pm_runtime_put_sync(dev); | ||
1445 | } | ||
1446 | |||
1447 | return 0; | 1460 | return 0; |
1448 | } | 1461 | } |
1449 | 1462 | ||
1450 | static int sh_dmae_resume(struct device *dev) | 1463 | static int sh_dmae_resume(struct device *dev) |
1451 | { | 1464 | { |
1452 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); | 1465 | struct sh_dmae_device *shdev = dev_get_drvdata(dev); |
1453 | int i; | 1466 | int i, ret; |
1467 | |||
1468 | ret = sh_dmae_rst(shdev); | ||
1469 | if (ret < 0) | ||
1470 | dev_err(dev, "Failed to reset!\n"); | ||
1454 | 1471 | ||
1455 | for (i = 0; i < shdev->pdata->channel_num; i++) { | 1472 | for (i = 0; i < shdev->pdata->channel_num; i++) { |
1456 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 1473 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev) | |||
1459 | if (!sh_chan->descs_allocated) | 1476 | if (!sh_chan->descs_allocated) |
1460 | continue; | 1477 | continue; |
1461 | 1478 | ||
1462 | if (!sh_chan->pm_error) | ||
1463 | pm_runtime_get_sync(dev); | ||
1464 | |||
1465 | if (param) { | 1479 | if (param) { |
1466 | const struct sh_dmae_slave_config *cfg = param->config; | 1480 | const struct sh_dmae_slave_config *cfg = param->config; |
1467 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 1481 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
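The new chclr_write() helper above is only exercised when platform data sets chclr_present, with each channel carrying its own chclr_offset. A sketch of platform data wired up for it; the offsets and channel count are made-up values:

#include <linux/sh_dma.h>

static const struct sh_dmae_channel sketch_channels[] = {
	{ .offset = 0x00, .chclr_offset = 0x100 },
	{ .offset = 0x10, .chclr_offset = 0x104 },
};

static struct sh_dmae_pdata sketch_pdata = {
	.channel	= sketch_channels,
	.channel_num	= ARRAY_SIZE(sketch_channels),
	.chclr_present	= 1,	/* makes sh_dmae_rst() clear each channel */
};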
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c new file mode 100644 index 000000000000..2333810d1688 --- /dev/null +++ b/drivers/dma/sirf-dma.c | |||
@@ -0,0 +1,707 @@ | |||
1 | /* | ||
2 | * DMA controller driver for CSR SiRFprimaII | ||
3 | * | ||
4 | * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. | ||
5 | * | ||
6 | * Licensed under GPLv2 or later. | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/dmaengine.h> | ||
11 | #include <linux/dma-mapping.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/of_irq.h> | ||
16 | #include <linux/of_address.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/of_platform.h> | ||
19 | #include <linux/sirfsoc_dma.h> | ||
20 | |||
21 | #define SIRFSOC_DMA_DESCRIPTORS 16 | ||
22 | #define SIRFSOC_DMA_CHANNELS 16 | ||
23 | |||
24 | #define SIRFSOC_DMA_CH_ADDR 0x00 | ||
25 | #define SIRFSOC_DMA_CH_XLEN 0x04 | ||
26 | #define SIRFSOC_DMA_CH_YLEN 0x08 | ||
27 | #define SIRFSOC_DMA_CH_CTRL 0x0C | ||
28 | |||
29 | #define SIRFSOC_DMA_WIDTH_0 0x100 | ||
30 | #define SIRFSOC_DMA_CH_VALID 0x140 | ||
31 | #define SIRFSOC_DMA_CH_INT 0x144 | ||
32 | #define SIRFSOC_DMA_INT_EN 0x148 | ||
33 | #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 | ||
34 | |||
35 | #define SIRFSOC_DMA_MODE_CTRL_BIT 4 | ||
36 | #define SIRFSOC_DMA_DIR_CTRL_BIT 5 | ||
37 | |||
38 | /* the xlen and dma_width registers are on a 4-byte boundary */ | ||
39 | #define SIRFSOC_DMA_WORD_LEN 4 | ||
40 | |||
41 | struct sirfsoc_dma_desc { | ||
42 | struct dma_async_tx_descriptor desc; | ||
43 | struct list_head node; | ||
44 | |||
45 | /* SiRFprimaII 2D-DMA parameters */ | ||
46 | |||
47 | int xlen; /* DMA xlen */ | ||
48 | int ylen; /* DMA ylen */ | ||
49 | int width; /* DMA width */ | ||
50 | int dir; | ||
51 | bool cyclic; /* is loop DMA? */ | ||
52 | u32 addr; /* DMA buffer address */ | ||
53 | }; | ||
54 | |||
55 | struct sirfsoc_dma_chan { | ||
56 | struct dma_chan chan; | ||
57 | struct list_head free; | ||
58 | struct list_head prepared; | ||
59 | struct list_head queued; | ||
60 | struct list_head active; | ||
61 | struct list_head completed; | ||
62 | dma_cookie_t completed_cookie; | ||
63 | unsigned long happened_cyclic; | ||
64 | unsigned long completed_cyclic; | ||
65 | |||
66 | /* Lock for this structure */ | ||
67 | spinlock_t lock; | ||
68 | |||
69 | int mode; | ||
70 | }; | ||
71 | |||
72 | struct sirfsoc_dma { | ||
73 | struct dma_device dma; | ||
74 | struct tasklet_struct tasklet; | ||
75 | struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; | ||
76 | void __iomem *base; | ||
77 | int irq; | ||
78 | }; | ||
79 | |||
80 | #define DRV_NAME "sirfsoc_dma" | ||
81 | |||
82 | /* Convert struct dma_chan to struct sirfsoc_dma_chan */ | ||
83 | static inline | ||
84 | struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c) | ||
85 | { | ||
86 | return container_of(c, struct sirfsoc_dma_chan, chan); | ||
87 | } | ||
88 | |||
89 | /* Convert struct dma_chan to struct sirfsoc_dma */ | ||
90 | static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c) | ||
91 | { | ||
92 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c); | ||
93 | return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); | ||
94 | } | ||
95 | |||
96 | /* Execute all queued DMA descriptors */ | ||
97 | static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) | ||
98 | { | ||
99 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
100 | int cid = schan->chan.chan_id; | ||
101 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
102 | |||
103 | /* | ||
104 | * the lock is already held by the functions calling this one, | ||
105 | * so we don't take it again | ||
106 | */ | ||
107 | |||
108 | sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, | ||
109 | node); | ||
110 | /* Move the first queued descriptor to active list */ | ||
111 | list_move_tail(&sdesc->node, &schan->active); | ||
112 | |||
113 | /* Start the DMA transfer */ | ||
114 | writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + | ||
115 | cid * 4); | ||
116 | writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) | | ||
117 | (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), | ||
118 | sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); | ||
119 | writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + | ||
120 | SIRFSOC_DMA_CH_XLEN); | ||
121 | writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + | ||
122 | SIRFSOC_DMA_CH_YLEN); | ||
123 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | | ||
124 | (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
125 | |||
126 | /* | ||
127 | * writel has an implicit memory write barrier to make sure data is | ||
128 | * flushed into memory before starting DMA | ||
129 | */ | ||
130 | writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); | ||
131 | |||
132 | if (sdesc->cyclic) { | ||
133 | writel((1 << cid) | 1 << (cid + 16) | | ||
134 | readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL), | ||
135 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
136 | schan->happened_cyclic = schan->completed_cyclic = 0; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | /* Interrupt handler */ | ||
141 | static irqreturn_t sirfsoc_dma_irq(int irq, void *data) | ||
142 | { | ||
143 | struct sirfsoc_dma *sdma = data; | ||
144 | struct sirfsoc_dma_chan *schan; | ||
145 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
146 | u32 is; | ||
147 | int ch; | ||
148 | |||
149 | is = readl(sdma->base + SIRFSOC_DMA_CH_INT); | ||
150 | while ((ch = fls(is) - 1) >= 0) { | ||
151 | is &= ~(1 << ch); | ||
152 | writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); | ||
153 | schan = &sdma->channels[ch]; | ||
154 | |||
155 | spin_lock(&schan->lock); | ||
156 | |||
157 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
158 | node); | ||
159 | if (!sdesc->cyclic) { | ||
160 | /* Execute queued descriptors */ | ||
161 | list_splice_tail_init(&schan->active, &schan->completed); | ||
162 | if (!list_empty(&schan->queued)) | ||
163 | sirfsoc_dma_execute(schan); | ||
164 | } else | ||
165 | schan->happened_cyclic++; | ||
166 | |||
167 | spin_unlock(&schan->lock); | ||
168 | } | ||
169 | |||
170 | /* Schedule tasklet */ | ||
171 | tasklet_schedule(&sdma->tasklet); | ||
172 | |||
173 | return IRQ_HANDLED; | ||
174 | } | ||
175 | |||
176 | /* process completed descriptors */ | ||
177 | static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma) | ||
178 | { | ||
179 | dma_cookie_t last_cookie = 0; | ||
180 | struct sirfsoc_dma_chan *schan; | ||
181 | struct sirfsoc_dma_desc *sdesc; | ||
182 | struct dma_async_tx_descriptor *desc; | ||
183 | unsigned long flags; | ||
184 | unsigned long happened_cyclic; | ||
185 | LIST_HEAD(list); | ||
186 | int i; | ||
187 | |||
188 | for (i = 0; i < sdma->dma.chancnt; i++) { | ||
189 | schan = &sdma->channels[i]; | ||
190 | |||
191 | /* Get all completed descriptors */ | ||
192 | spin_lock_irqsave(&schan->lock, flags); | ||
193 | if (!list_empty(&schan->completed)) { | ||
194 | list_splice_tail_init(&schan->completed, &list); | ||
195 | spin_unlock_irqrestore(&schan->lock, flags); | ||
196 | |||
197 | /* Execute callbacks and run dependencies */ | ||
198 | list_for_each_entry(sdesc, &list, node) { | ||
199 | desc = &sdesc->desc; | ||
200 | |||
201 | if (desc->callback) | ||
202 | desc->callback(desc->callback_param); | ||
203 | |||
204 | last_cookie = desc->cookie; | ||
205 | dma_run_dependencies(desc); | ||
206 | } | ||
207 | |||
208 | /* Free descriptors */ | ||
209 | spin_lock_irqsave(&schan->lock, flags); | ||
210 | list_splice_tail_init(&list, &schan->free); | ||
211 | schan->completed_cookie = last_cookie; | ||
212 | spin_unlock_irqrestore(&schan->lock, flags); | ||
213 | } else { | ||
214 | /* for cyclic channel, desc is always in active list */ | ||
215 | sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, | ||
216 | node); | ||
217 | |||
218 | if (!sdesc || !sdesc->cyclic) { | ||
219 | /* without active cyclic DMA */ | ||
220 | spin_unlock_irqrestore(&schan->lock, flags); | ||
221 | continue; | ||
222 | } | ||
223 | |||
224 | /* cyclic DMA */ | ||
225 | happened_cyclic = schan->happened_cyclic; | ||
226 | spin_unlock_irqrestore(&schan->lock, flags); | ||
227 | |||
228 | desc = &sdesc->desc; | ||
229 | while (happened_cyclic != schan->completed_cyclic) { | ||
230 | if (desc->callback) | ||
231 | desc->callback(desc->callback_param); | ||
232 | schan->completed_cyclic++; | ||
233 | } | ||
234 | } | ||
235 | } | ||
236 | } | ||
237 | |||
238 | /* DMA Tasklet */ | ||
239 | static void sirfsoc_dma_tasklet(unsigned long data) | ||
240 | { | ||
241 | struct sirfsoc_dma *sdma = (void *)data; | ||
242 | |||
243 | sirfsoc_dma_process_completed(sdma); | ||
244 | } | ||
245 | |||
246 | /* Submit descriptor to hardware */ | ||
247 | static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) | ||
248 | { | ||
249 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan); | ||
250 | struct sirfsoc_dma_desc *sdesc; | ||
251 | unsigned long flags; | ||
252 | dma_cookie_t cookie; | ||
253 | |||
254 | sdesc = container_of(txd, struct sirfsoc_dma_desc, desc); | ||
255 | |||
256 | spin_lock_irqsave(&schan->lock, flags); | ||
257 | |||
258 | /* Move descriptor to queue */ | ||
259 | list_move_tail(&sdesc->node, &schan->queued); | ||
260 | |||
261 | /* Update cookie */ | ||
262 | cookie = schan->chan.cookie + 1; | ||
263 | if (cookie <= 0) | ||
264 | cookie = 1; | ||
265 | |||
266 | schan->chan.cookie = cookie; | ||
267 | sdesc->desc.cookie = cookie; | ||
268 | |||
269 | spin_unlock_irqrestore(&schan->lock, flags); | ||
270 | |||
271 | return cookie; | ||
272 | } | ||
273 | |||
274 | static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, | ||
275 | struct dma_slave_config *config) | ||
276 | { | ||
277 | unsigned long flags; | ||
278 | |||
279 | if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
280 | (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
281 | return -EINVAL; | ||
282 | |||
283 | spin_lock_irqsave(&schan->lock, flags); | ||
284 | schan->mode = (config->src_maxburst == 4 ? 1 : 0); | ||
285 | spin_unlock_irqrestore(&schan->lock, flags); | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
290 | static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) | ||
291 | { | ||
292 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); | ||
293 | int cid = schan->chan.chan_id; | ||
294 | unsigned long flags; | ||
295 | |||
296 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & | ||
297 | ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); | ||
298 | writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); | ||
299 | |||
300 | writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) | ||
301 | & ~((1 << cid) | 1 << (cid + 16)), | ||
302 | sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); | ||
303 | |||
304 | spin_lock_irqsave(&schan->lock, flags); | ||
305 | list_splice_tail_init(&schan->active, &schan->free); | ||
306 | list_splice_tail_init(&schan->queued, &schan->free); | ||
307 | spin_unlock_irqrestore(&schan->lock, flags); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
313 | unsigned long arg) | ||
314 | { | ||
315 | struct dma_slave_config *config; | ||
316 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
317 | |||
318 | switch (cmd) { | ||
319 | case DMA_TERMINATE_ALL: | ||
320 | return sirfsoc_dma_terminate_all(schan); | ||
321 | case DMA_SLAVE_CONFIG: | ||
322 | config = (struct dma_slave_config *)arg; | ||
323 | return sirfsoc_dma_slave_config(schan, config); | ||
324 | |||
325 | default: | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | return -ENOSYS; | ||
330 | } | ||
331 | |||
332 | /* Alloc channel resources */ | ||
333 | static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) | ||
334 | { | ||
335 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
336 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
337 | struct sirfsoc_dma_desc *sdesc; | ||
338 | unsigned long flags; | ||
339 | LIST_HEAD(descs); | ||
340 | int i; | ||
341 | |||
342 | /* Alloc descriptors for this channel */ | ||
343 | for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) { | ||
344 | sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL); | ||
345 | if (!sdesc) { | ||
346 | dev_notice(sdma->dma.dev, "Memory allocation error. " | ||
347 | "Allocated only %u descriptors\n", i); | ||
348 | break; | ||
349 | } | ||
350 | |||
351 | dma_async_tx_descriptor_init(&sdesc->desc, chan); | ||
352 | sdesc->desc.flags = DMA_CTRL_ACK; | ||
353 | sdesc->desc.tx_submit = sirfsoc_dma_tx_submit; | ||
354 | |||
355 | list_add_tail(&sdesc->node, &descs); | ||
356 | } | ||
357 | |||
358 | /* Return error only if no descriptors were allocated */ | ||
359 | if (i == 0) | ||
360 | return -ENOMEM; | ||
361 | |||
362 | spin_lock_irqsave(&schan->lock, flags); | ||
363 | |||
364 | list_splice_tail_init(&descs, &schan->free); | ||
365 | spin_unlock_irqrestore(&schan->lock, flags); | ||
366 | |||
367 | return i; | ||
368 | } | ||
369 | |||
370 | /* Free channel resources */ | ||
371 | static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan) | ||
372 | { | ||
373 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
374 | struct sirfsoc_dma_desc *sdesc, *tmp; | ||
375 | unsigned long flags; | ||
376 | LIST_HEAD(descs); | ||
377 | |||
378 | spin_lock_irqsave(&schan->lock, flags); | ||
379 | |||
380 | /* Channel must be idle */ | ||
381 | BUG_ON(!list_empty(&schan->prepared)); | ||
382 | BUG_ON(!list_empty(&schan->queued)); | ||
383 | BUG_ON(!list_empty(&schan->active)); | ||
384 | BUG_ON(!list_empty(&schan->completed)); | ||
385 | |||
386 | /* Move data */ | ||
387 | list_splice_tail_init(&schan->free, &descs); | ||
388 | |||
389 | spin_unlock_irqrestore(&schan->lock, flags); | ||
390 | |||
391 | /* Free descriptors */ | ||
392 | list_for_each_entry_safe(sdesc, tmp, &descs, node) | ||
393 | kfree(sdesc); | ||
394 | } | ||
395 | |||
396 | /* Send pending descriptor to hardware */ | ||
397 | static void sirfsoc_dma_issue_pending(struct dma_chan *chan) | ||
398 | { | ||
399 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
400 | unsigned long flags; | ||
401 | |||
402 | spin_lock_irqsave(&schan->lock, flags); | ||
403 | |||
404 | if (list_empty(&schan->active) && !list_empty(&schan->queued)) | ||
405 | sirfsoc_dma_execute(schan); | ||
406 | |||
407 | spin_unlock_irqrestore(&schan->lock, flags); | ||
408 | } | ||
409 | |||
410 | /* Check request completion status */ | ||
411 | static enum dma_status | ||
412 | sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | ||
413 | struct dma_tx_state *txstate) | ||
414 | { | ||
415 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
416 | unsigned long flags; | ||
417 | dma_cookie_t last_used; | ||
418 | dma_cookie_t last_complete; | ||
419 | |||
420 | spin_lock_irqsave(&schan->lock, flags); | ||
421 | last_used = schan->chan.cookie; | ||
422 | last_complete = schan->completed_cookie; | ||
423 | spin_unlock_irqrestore(&schan->lock, flags); | ||
424 | |||
425 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
426 | return dma_async_is_complete(cookie, last_complete, last_used); | ||
427 | } | ||
428 | |||
429 | static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved( | ||
430 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
431 | unsigned long flags) | ||
432 | { | ||
433 | struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan); | ||
434 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
435 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
436 | unsigned long iflags; | ||
437 | int ret; | ||
438 | |||
439 | if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) { | ||
440 | ret = -EINVAL; | ||
441 | goto err_dir; | ||
442 | } | ||
443 | |||
444 | /* Get free descriptor */ | ||
445 | spin_lock_irqsave(&schan->lock, iflags); | ||
446 | if (!list_empty(&schan->free)) { | ||
447 | sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, | ||
448 | node); | ||
449 | list_del(&sdesc->node); | ||
450 | } | ||
451 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
452 | |||
453 | if (!sdesc) { | ||
454 | /* try to free completed descriptors */ | ||
455 | sirfsoc_dma_process_completed(sdma); | ||
456 | ret = 0; | ||
457 | goto no_desc; | ||
458 | } | ||
459 | |||
460 | /* Place descriptor in prepared list */ | ||
461 | spin_lock_irqsave(&schan->lock, iflags); | ||
462 | |||
463 | /* | ||
464 | * Number of chunks in a frame can only be 1 for prima2 | ||
465 | * and ylen (number of frames - 1) must be at least 0 | ||
466 | */ | ||
467 | if ((xt->frame_size == 1) && (xt->numf > 0)) { | ||
468 | sdesc->cyclic = 0; | ||
469 | sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN; | ||
470 | sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) / | ||
471 | SIRFSOC_DMA_WORD_LEN; | ||
472 | sdesc->ylen = xt->numf - 1; | ||
473 | if (xt->dir == DMA_MEM_TO_DEV) { | ||
474 | sdesc->addr = xt->src_start; | ||
475 | sdesc->dir = 1; | ||
476 | } else { | ||
477 | sdesc->addr = xt->dst_start; | ||
478 | sdesc->dir = 0; | ||
479 | } | ||
480 | |||
481 | list_add_tail(&sdesc->node, &schan->prepared); | ||
482 | } else { | ||
483 | pr_err("sirfsoc DMA: invalid xfer\n"); | ||
484 | ret = -EINVAL; | ||
485 | goto err_xfer; | ||
486 | } | ||
487 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
488 | |||
489 | return &sdesc->desc; | ||
490 | err_xfer: | ||
491 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
492 | no_desc: | ||
493 | err_dir: | ||
494 | return ERR_PTR(ret); | ||
495 | } | ||
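/*
 * Illustrative sketch (hypothetical client code, not part of this
 * driver): filling a dma_interleaved_template for the 2D transfer
 * above. prima2 only accepts frame_size == 1; all names and sizes
 * below are assumptions.
 */
static struct dma_async_tx_descriptor *sketch_prep_2d(struct dma_chan *chan,
		dma_addr_t buf, size_t line_len, size_t stride, size_t lines)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *xt;

	/* sgl[] is a flexible array: allocate room for one data_chunk */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = buf;
	xt->dir = DMA_MEM_TO_DEV;
	xt->numf = lines;			/* ylen becomes lines - 1 */
	xt->frame_size = 1;			/* one chunk per frame */
	xt->sgl[0].size = line_len;		/* xlen, in bytes */
	xt->sgl[0].icg = stride - line_len;	/* size + icg -> width */

	desc = chan->device->device_prep_interleaved_dma(chan, xt, 0);
	kfree(xt);	/* the driver copied the parameters it needs */
	return desc;
}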
496 | |||
497 | static struct dma_async_tx_descriptor * | ||
498 | sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr, | ||
499 | size_t buf_len, size_t period_len, | ||
500 | enum dma_transfer_direction direction) | ||
501 | { | ||
502 | struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); | ||
503 | struct sirfsoc_dma_desc *sdesc = NULL; | ||
504 | unsigned long iflags; | ||
505 | |||
506 | /* | ||
507 | * we only support cyclic transfers with two periods. | ||
508 | * If the X-length is set to 0, the channel works in loop mode. | ||
509 | * The DMA address keeps increasing until reaching the end of a loop | ||
510 | * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then | ||
511 | * the DMA address goes back to the beginning of this area. | ||
512 | * In loop mode, the DMA data region is divided into two parts, BUFA | ||
513 | * and BUFB. DMA controller generates interrupts twice in each loop: | ||
514 | * when the DMA address reaches the end of BUFA or the end of | ||
515 | * BUFB. | ||
516 | */ | ||
517 | if (buf_len != 2 * period_len) | ||
518 | return ERR_PTR(-EINVAL); | ||
519 | |||
520 | /* Get free descriptor */ | ||
521 | spin_lock_irqsave(&schan->lock, iflags); | ||
522 | if (!list_empty(&schan->free)) { | ||
523 | sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc, | ||
524 | node); | ||
525 | list_del(&sdesc->node); | ||
526 | } | ||
527 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
528 | |||
529 | if (!sdesc) | ||
530 | return NULL; | ||
531 | |||
532 | /* Place descriptor in prepared list */ | ||
533 | spin_lock_irqsave(&schan->lock, iflags); | ||
534 | sdesc->addr = addr; | ||
535 | sdesc->cyclic = 1; | ||
536 | sdesc->xlen = 0; | ||
537 | sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1; | ||
538 | sdesc->width = 1; | ||
539 | list_add_tail(&sdesc->node, &schan->prepared); | ||
540 | spin_unlock_irqrestore(&schan->lock, iflags); | ||
541 | |||
542 | return &sdesc->desc; | ||
543 | } | ||
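/*
 * Illustrative sketch (hypothetical client code): loop mode per the
 * comment above. The buffer must be exactly two periods, so the
 * completion callback runs at the end of BUFA and again at BUFB.
 */
static struct dma_async_tx_descriptor *sketch_prep_loop(struct dma_chan *chan,
		dma_addr_t buf, size_t buf_len)
{
	/* e.g. an 8 KiB ring serviced in two 4 KiB halves */
	return chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						    buf_len / 2,
						    DMA_DEV_TO_MEM);
}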
544 | |||
545 | /* | ||
546 | * The DMA controller consists of 16 independent DMA channels. | ||
547 | * Each channel is allocated to a different function | ||
548 | */ | ||
549 | bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id) | ||
550 | { | ||
551 | unsigned int ch_nr = (unsigned int) chan_id; | ||
552 | |||
553 | if (ch_nr == chan->chan_id + | ||
554 | chan->device->dev_id * SIRFSOC_DMA_CHANNELS) | ||
555 | return true; | ||
556 | |||
557 | return false; | ||
558 | } | ||
559 | EXPORT_SYMBOL(sirfsoc_dma_filter_id); | ||
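/*
 * Illustrative sketch (hypothetical client code): requesting a specific
 * channel through the filter above; channel number 12 is arbitrary.
 */
static struct dma_chan *sketch_request_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
}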
560 | |||
561 | static int __devinit sirfsoc_dma_probe(struct platform_device *op) | ||
562 | { | ||
563 | struct device_node *dn = op->dev.of_node; | ||
564 | struct device *dev = &op->dev; | ||
565 | struct dma_device *dma; | ||
566 | struct sirfsoc_dma *sdma; | ||
567 | struct sirfsoc_dma_chan *schan; | ||
568 | struct resource res; | ||
569 | ulong regs_start, regs_size; | ||
570 | u32 id; | ||
571 | int ret, i; | ||
572 | |||
573 | sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL); | ||
574 | if (!sdma) { | ||
575 | dev_err(dev, "Memory exhausted!\n"); | ||
576 | return -ENOMEM; | ||
577 | } | ||
578 | |||
579 | if (of_property_read_u32(dn, "cell-index", &id)) { | ||
580 | dev_err(dev, "Failed to get DMAC index\n"); | ||
581 | ret = -ENODEV; | ||
582 | goto free_mem; | ||
583 | } | ||
584 | |||
585 | sdma->irq = irq_of_parse_and_map(dn, 0); | ||
586 | if (sdma->irq == NO_IRQ) { | ||
587 | dev_err(dev, "Error mapping IRQ!\n"); | ||
588 | ret = -EINVAL; | ||
589 | goto free_mem; | ||
590 | } | ||
591 | |||
592 | ret = of_address_to_resource(dn, 0, &res); | ||
593 | if (ret) { | ||
594 | dev_err(dev, "Error parsing memory region!\n"); | ||
595 | goto free_mem; | ||
596 | } | ||
597 | |||
598 | regs_start = res.start; | ||
599 | regs_size = resource_size(&res); | ||
600 | |||
601 | sdma->base = devm_ioremap(dev, regs_start, regs_size); | ||
602 | if (!sdma->base) { | ||
603 | dev_err(dev, "Error mapping memory region!\n"); | ||
604 | ret = -ENOMEM; | ||
605 | goto irq_dispose; | ||
606 | } | ||
607 | |||
608 | ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, | ||
609 | sdma); | ||
610 | if (ret) { | ||
611 | dev_err(dev, "Error requesting IRQ!\n"); | ||
612 | ret = -EINVAL; | ||
613 | goto unmap_mem; | ||
614 | } | ||
615 | |||
616 | dma = &sdma->dma; | ||
617 | dma->dev = dev; | ||
618 | dma->chancnt = SIRFSOC_DMA_CHANNELS; | ||
619 | |||
620 | dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; | ||
621 | dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; | ||
622 | dma->device_issue_pending = sirfsoc_dma_issue_pending; | ||
623 | dma->device_control = sirfsoc_dma_control; | ||
624 | dma->device_tx_status = sirfsoc_dma_tx_status; | ||
625 | dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; | ||
626 | dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; | ||
627 | |||
628 | INIT_LIST_HEAD(&dma->channels); | ||
629 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | ||
630 | dma_cap_set(DMA_CYCLIC, dma->cap_mask); | ||
631 | dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); | ||
632 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | ||
633 | |||
634 | for (i = 0; i < dma->chancnt; i++) { | ||
635 | schan = &sdma->channels[i]; | ||
636 | |||
637 | schan->chan.device = dma; | ||
638 | schan->chan.cookie = 1; | ||
639 | schan->completed_cookie = schan->chan.cookie; | ||
640 | |||
641 | INIT_LIST_HEAD(&schan->free); | ||
642 | INIT_LIST_HEAD(&schan->prepared); | ||
643 | INIT_LIST_HEAD(&schan->queued); | ||
644 | INIT_LIST_HEAD(&schan->active); | ||
645 | INIT_LIST_HEAD(&schan->completed); | ||
646 | |||
647 | spin_lock_init(&schan->lock); | ||
648 | list_add_tail(&schan->chan.device_node, &dma->channels); | ||
649 | } | ||
650 | |||
651 | tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); | ||
652 | |||
653 | /* Register DMA engine */ | ||
654 | dev_set_drvdata(dev, sdma); | ||
655 | ret = dma_async_device_register(dma); | ||
656 | if (ret) | ||
657 | goto free_irq; | ||
658 | |||
659 | dev_info(dev, "initialized SIRFSOC DMAC driver\n"); | ||
660 | |||
661 | return 0; | ||
662 | |||
663 | free_irq: | ||
664 | devm_free_irq(dev, sdma->irq, sdma); | ||
665 | irq_dispose: | ||
666 | irq_dispose_mapping(sdma->irq); | ||
667 | unmap_mem: | ||
668 | iounmap(sdma->base); | ||
669 | free_mem: | ||
670 | devm_kfree(dev, sdma); | ||
671 | return ret; | ||
672 | } | ||
673 | |||
674 | static int __devexit sirfsoc_dma_remove(struct platform_device *op) | ||
675 | { | ||
676 | struct device *dev = &op->dev; | ||
677 | struct sirfsoc_dma *sdma = dev_get_drvdata(dev); | ||
678 | |||
679 | dma_async_device_unregister(&sdma->dma); | ||
680 | devm_free_irq(dev, sdma->irq, sdma); | ||
681 | irq_dispose_mapping(sdma->irq); | ||
682 | iounmap(sdma->base); | ||
683 | devm_kfree(dev, sdma); | ||
684 | return 0; | ||
685 | } | ||
686 | |||
687 | static struct of_device_id sirfsoc_dma_match[] = { | ||
688 | { .compatible = "sirf,prima2-dmac", }, | ||
689 | {}, | ||
690 | }; | ||
691 | |||
692 | static struct platform_driver sirfsoc_dma_driver = { | ||
693 | .probe = sirfsoc_dma_probe, | ||
694 | .remove = __devexit_p(sirfsoc_dma_remove), | ||
695 | .driver = { | ||
696 | .name = DRV_NAME, | ||
697 | .owner = THIS_MODULE, | ||
698 | .of_match_table = sirfsoc_dma_match, | ||
699 | }, | ||
700 | }; | ||
701 | |||
702 | module_platform_driver(sirfsoc_dma_driver); | ||
703 | |||
704 | MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " | ||
705 | "Barry Song <baohua.song@csr.com>"); | ||
706 | MODULE_DESCRIPTION("SIRFSOC DMA control driver"); | ||
707 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 13259cad0ceb..cc5ecbc067a3 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/pm.h> | ||
18 | #include <linux/pm_runtime.h> | ||
17 | #include <linux/err.h> | 19 | #include <linux/err.h> |
18 | #include <linux/amba/bus.h> | 20 | #include <linux/amba/bus.h> |
19 | 21 | ||
@@ -32,6 +34,9 @@ | |||
32 | /* Maximum iterations taken before giving up suspending a channel */ | 34 | /* Maximum iterations taken before giving up suspending a channel */ |
33 | #define D40_SUSPEND_MAX_IT 500 | 35 | #define D40_SUSPEND_MAX_IT 500 |
34 | 36 | ||
37 | /* Milliseconds */ | ||
38 | #define DMA40_AUTOSUSPEND_DELAY 100 | ||
39 | |||
35 | /* Hardware requirement on LCLA alignment */ | 40 | /* Hardware requirement on LCLA alignment */ |
36 | #define LCLA_ALIGNMENT 0x40000 | 41 | #define LCLA_ALIGNMENT 0x40000 |
37 | 42 | ||
@@ -62,6 +67,55 @@ enum d40_command { | |||
62 | D40_DMA_SUSPENDED = 3 | 67 | D40_DMA_SUSPENDED = 3 |
63 | }; | 68 | }; |
64 | 69 | ||
70 | /* | ||
71 | * These are the registers that have to be saved and later restored | ||
72 | * when the DMA hw is powered off. | ||
73 | * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works. | ||
74 | */ | ||
75 | static u32 d40_backup_regs[] = { | ||
76 | D40_DREG_LCPA, | ||
77 | D40_DREG_LCLA, | ||
78 | D40_DREG_PRMSE, | ||
79 | D40_DREG_PRMSO, | ||
80 | D40_DREG_PRMOE, | ||
81 | D40_DREG_PRMOO, | ||
82 | }; | ||
83 | |||
84 | #define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs) | ||
85 | |||
86 | /* TODO: Check if all these registers have to be saved/restored on dma40 v3 */ | ||
87 | static u32 d40_backup_regs_v3[] = { | ||
88 | D40_DREG_PSEG1, | ||
89 | D40_DREG_PSEG2, | ||
90 | D40_DREG_PSEG3, | ||
91 | D40_DREG_PSEG4, | ||
92 | D40_DREG_PCEG1, | ||
93 | D40_DREG_PCEG2, | ||
94 | D40_DREG_PCEG3, | ||
95 | D40_DREG_PCEG4, | ||
96 | D40_DREG_RSEG1, | ||
97 | D40_DREG_RSEG2, | ||
98 | D40_DREG_RSEG3, | ||
99 | D40_DREG_RSEG4, | ||
100 | D40_DREG_RCEG1, | ||
101 | D40_DREG_RCEG2, | ||
102 | D40_DREG_RCEG3, | ||
103 | D40_DREG_RCEG4, | ||
104 | }; | ||
105 | |||
106 | #define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3) | ||
107 | |||
108 | static u32 d40_backup_regs_chan[] = { | ||
109 | D40_CHAN_REG_SSCFG, | ||
110 | D40_CHAN_REG_SSELT, | ||
111 | D40_CHAN_REG_SSPTR, | ||
112 | D40_CHAN_REG_SSLNK, | ||
113 | D40_CHAN_REG_SDCFG, | ||
114 | D40_CHAN_REG_SDELT, | ||
115 | D40_CHAN_REG_SDPTR, | ||
116 | D40_CHAN_REG_SDLNK, | ||
117 | }; | ||
118 | |||
65 | /** | 119 | /** |
66 | * struct d40_lli_pool - Structure for keeping LLIs in memory | 120 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
67 | * | 121 | * |
@@ -96,7 +150,7 @@ struct d40_lli_pool { | |||
96 | * during a transfer. | 150 | * during a transfer. |
97 | * @node: List entry. | 151 | * @node: List entry. |
98 | * @is_in_client_list: true if the client owns this descriptor. | 152 | * @is_in_client_list: true if the client owns this descriptor. |
99 | * the previous one. | 153 | * @cyclic: true if this is a cyclic job |
100 | * | 154 | * |
101 | * This descriptor is used for both logical and physical transfers. | 155 | * This descriptor is used for both logical and physical transfers. |
102 | */ | 156 | */ |
@@ -143,6 +197,7 @@ struct d40_lcla_pool { | |||
143 | * channels. | 197 | * channels. |
144 | * | 198 | * |
145 | * @lock: A lock protecting this entity. | 199 | * @lock: A lock protecting this entity.
200 | * @reserved: True if the channel is reserved, by the secure world or otherwise. | ||
146 | * @num: The physical channel number of this entity. | 201 | * @num: The physical channel number of this entity. |
147 | * @allocated_src: Bit mapped to show which src event line's are mapped to | 202 | * @allocated_src: Bit mapped to show which src event line's are mapped to |
148 | * this physical channel. Can also be free or physically allocated. | 203 | * this physical channel. Can also be free or physically allocated. |
@@ -152,6 +207,7 @@ struct d40_lcla_pool { | |||
152 | */ | 207 | */ |
153 | struct d40_phy_res { | 208 | struct d40_phy_res { |
154 | spinlock_t lock; | 209 | spinlock_t lock; |
210 | bool reserved; | ||
155 | int num; | 211 | int num; |
156 | u32 allocated_src; | 212 | u32 allocated_src; |
157 | u32 allocated_dst; | 213 | u32 allocated_dst; |
@@ -185,7 +241,6 @@ struct d40_base; | |||
185 | * @src_def_cfg: Default cfg register setting for src. | 241 | * @src_def_cfg: Default cfg register setting for src. |
186 | * @dst_def_cfg: Default cfg register setting for dst. | 242 | * @dst_def_cfg: Default cfg register setting for dst. |
187 | * @log_def: Default logical channel settings. | 243 | * @log_def: Default logical channel settings. |
188 | * @lcla: Space for one dst src pair for logical channel transfers. | ||
189 | * @lcpa: Pointer to dst and src lcpa settings. | 244 | * @lcpa: Pointer to dst and src lcpa settings. |
190 | * @runtime_addr: runtime configured address. | 245 | * @runtime_addr: runtime configured address. |
191 | * @runtime_direction: runtime configured direction. | 246 | * @runtime_direction: runtime configured direction. |
@@ -217,7 +272,7 @@ struct d40_chan { | |||
217 | struct d40_log_lli_full *lcpa; | 272 | struct d40_log_lli_full *lcpa; |
218 | /* Runtime reconfiguration */ | 273 | /* Runtime reconfiguration */ |
219 | dma_addr_t runtime_addr; | 274 | dma_addr_t runtime_addr; |
220 | enum dma_data_direction runtime_direction; | 275 | enum dma_transfer_direction runtime_direction; |
221 | }; | 276 | }; |
222 | 277 | ||
223 | /** | 278 | /** |
@@ -241,6 +296,7 @@ struct d40_chan { | |||
241 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 296 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
242 | * @dma_slave: dma_device channels that can only do slave transfers. | 297 | * @dma_slave: dma_device channels that can only do slave transfers.
243 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. | 298 | * @dma_memcpy: dma_device channels that can only do memcpy transfers.
299 | * @phy_chans: Room for all possible physical channels in system. | ||
244 | * @log_chans: Room for all possible logical channels in system. | 300 | * @log_chans: Room for all possible logical channels in system. |
245 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 301 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
246 | * to log_chans entries. | 302 | * to log_chans entries. |
@@ -248,12 +304,20 @@ struct d40_chan { | |||
248 | * to phy_chans entries. | 304 | * to phy_chans entries. |
249 | * @plat_data: Pointer to provided platform_data which is the driver | 305 | * @plat_data: Pointer to provided platform_data which is the driver |
250 | * configuration. | 306 | * configuration. |
307 | * @lcpa_regulator: Regulator for the ESRAM bank used by lcla. | ||
251 | * @phy_res: Vector containing all physical channels. | 308 | * @phy_res: Vector containing all physical channels. |
252 | * @lcla_pool: lcla pool settings and data. | 309 | * @lcla_pool: lcla pool settings and data. |
253 | * @lcpa_base: The virtual mapped address of LCPA. | 310 | * @lcpa_base: The virtual mapped address of LCPA. |
254 | * @phy_lcpa: The physical address of the LCPA. | 311 | * @phy_lcpa: The physical address of the LCPA. |
255 | * @lcpa_size: The size of the LCPA area. | 312 | * @lcpa_size: The size of the LCPA area. |
256 | * @desc_slab: cache for descriptors. | 313 | * @desc_slab: cache for descriptors. |
314 | * @reg_val_backup: Values of some hardware registers, stored here before | ||
315 | * the DMA is powered off and restored when the power is back on. | ||
316 | * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and | ||
317 | * later. | ||
318 | * @reg_val_backup_chan: Backup data for standard channel parameter registers. | ||
319 | * @gcc_pwr_off_mask: Mask of clock-enable bits to keep set while powered off. | ||
320 | * @initialized: true if the DMA controller has been initialized | ||
257 | */ | 321 | */ |
258 | struct d40_base { | 322 | struct d40_base { |
259 | spinlock_t interrupt_lock; | 323 | spinlock_t interrupt_lock; |
@@ -275,6 +339,7 @@ struct d40_base { | |||
275 | struct d40_chan **lookup_log_chans; | 339 | struct d40_chan **lookup_log_chans; |
276 | struct d40_chan **lookup_phy_chans; | 340 | struct d40_chan **lookup_phy_chans; |
277 | struct stedma40_platform_data *plat_data; | 341 | struct stedma40_platform_data *plat_data; |
342 | struct regulator *lcpa_regulator; | ||
278 | /* Physical half channels */ | 343 | /* Physical half channels */ |
279 | struct d40_phy_res *phy_res; | 344 | struct d40_phy_res *phy_res; |
280 | struct d40_lcla_pool lcla_pool; | 345 | struct d40_lcla_pool lcla_pool; |
@@ -282,6 +347,11 @@ struct d40_base { | |||
282 | dma_addr_t phy_lcpa; | 347 | dma_addr_t phy_lcpa; |
283 | resource_size_t lcpa_size; | 348 | resource_size_t lcpa_size; |
284 | struct kmem_cache *desc_slab; | 349 | struct kmem_cache *desc_slab; |
350 | u32 reg_val_backup[BACKUP_REGS_SZ]; | ||
351 | u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3]; | ||
352 | u32 *reg_val_backup_chan; | ||
353 | u16 gcc_pwr_off_mask; | ||
354 | bool initialized; | ||
285 | }; | 355 | }; |
286 | 356 | ||
287 | /** | 357 | /** |
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |||
479 | struct d40_desc *d; | 549 | struct d40_desc *d; |
480 | struct d40_desc *_d; | 550 | struct d40_desc *_d; |
481 | 551 | ||
482 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 552 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
483 | if (async_tx_test_ack(&d->txd)) { | 553 | if (async_tx_test_ack(&d->txd)) { |
484 | d40_desc_remove(d); | 554 | d40_desc_remove(d); |
485 | desc = d; | 555 | desc = d; |
486 | memset(desc, 0, sizeof(*desc)); | 556 | memset(desc, 0, sizeof(*desc)); |
487 | break; | 557 | break; |
488 | } | 558 | } |
559 | } | ||
489 | } | 560 | } |
490 | 561 | ||
491 | if (!desc) | 562 | if (!desc) |
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
536 | bool cyclic = desc->cyclic; | 607 | bool cyclic = desc->cyclic; |
537 | int curr_lcla = -EINVAL; | 608 | int curr_lcla = -EINVAL; |
538 | int first_lcla = 0; | 609 | int first_lcla = 0; |
610 | bool use_esram_lcla = chan->base->plat_data->use_esram_lcla; | ||
539 | bool linkback; | 611 | bool linkback; |
540 | 612 | ||
541 | /* | 613 | /* |
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) | |||
608 | &lli->src[lli_current], | 680 | &lli->src[lli_current], |
609 | next_lcla, flags); | 681 | next_lcla, flags); |
610 | 682 | ||
611 | dma_sync_single_range_for_device(chan->base->dev, | 683 | /* |
612 | pool->dma_addr, lcla_offset, | 684 | * Cache maintenance is not needed if lcla is |
613 | 2 * sizeof(struct d40_log_lli), | 685 | * mapped in esram |
614 | DMA_TO_DEVICE); | 686 | */ |
615 | 687 | if (!use_esram_lcla) { | |
688 | dma_sync_single_range_for_device(chan->base->dev, | ||
689 | pool->dma_addr, lcla_offset, | ||
690 | 2 * sizeof(struct d40_log_lli), | ||
691 | DMA_TO_DEVICE); | ||
692 | } | ||
616 | curr_lcla = next_lcla; | 693 | curr_lcla = next_lcla; |
617 | 694 | ||
618 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { | 695 | if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { |
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len, | |||
740 | return len; | 817 | return len; |
741 | } | 818 | } |
742 | 819 | ||
743 | /* Support functions for logical channels */ | 820 | |
821 | #ifdef CONFIG_PM | ||
822 | static void dma40_backup(void __iomem *baseaddr, u32 *backup, | ||
823 | u32 *regaddr, int num, bool save) | ||
824 | { | ||
825 | int i; | ||
826 | |||
827 | for (i = 0; i < num; i++) { | ||
828 | void __iomem *addr = baseaddr + regaddr[i]; | ||
829 | |||
830 | if (save) | ||
831 | backup[i] = readl_relaxed(addr); | ||
832 | else | ||
833 | writel_relaxed(backup[i], addr); | ||
834 | } | ||
835 | } | ||
836 | |||
837 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
838 | { | ||
839 | int i; | ||
840 | |||
841 | /* Save/Restore channel-specific registers */ | ||
842 | for (i = 0; i < base->num_phy_chans; i++) { | ||
843 | void __iomem *addr; | ||
844 | int idx; | ||
845 | |||
846 | if (base->phy_res[i].reserved) | ||
847 | continue; | ||
848 | |||
849 | addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA; | ||
850 | idx = i * ARRAY_SIZE(d40_backup_regs_chan); | ||
851 | |||
852 | dma40_backup(addr, &base->reg_val_backup_chan[idx], | ||
853 | d40_backup_regs_chan, | ||
854 | ARRAY_SIZE(d40_backup_regs_chan), | ||
855 | save); | ||
856 | } | ||
857 | |||
858 | /* Save/Restore global registers */ | ||
859 | dma40_backup(base->virtbase, base->reg_val_backup, | ||
860 | d40_backup_regs, ARRAY_SIZE(d40_backup_regs), | ||
861 | save); | ||
862 | |||
863 | /* Save/Restore registers only existing on dma40 v3 and later */ | ||
864 | if (base->rev >= 3) | ||
865 | dma40_backup(base->virtbase, base->reg_val_backup_v3, | ||
866 | d40_backup_regs_v3, | ||
867 | ARRAY_SIZE(d40_backup_regs_v3), | ||
868 | save); | ||
869 | } | ||
870 | #else | ||
871 | static void d40_save_restore_registers(struct d40_base *base, bool save) | ||
872 | { | ||
873 | } | ||
874 | #endif | ||
744 | 875 | ||
745 | static int d40_channel_execute_command(struct d40_chan *d40c, | 876 | static int d40_channel_execute_command(struct d40_chan *d40c, |
746 | enum d40_command command) | 877 | enum d40_command command) |
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c) | |||
973 | /* Set LIDX for lcla */ | 1104 | /* Set LIDX for lcla */ |
974 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); | 1105 | writel(lidx, chanbase + D40_CHAN_REG_SSELT); |
975 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); | 1106 | writel(lidx, chanbase + D40_CHAN_REG_SDELT); |
1107 | |||
1108 | /* Clear LNK which will be used by d40_chan_has_events() */ | ||
1109 | writel(0, chanbase + D40_CHAN_REG_SSLNK); | ||
1110 | writel(0, chanbase + D40_CHAN_REG_SDLNK); | ||
976 | } | 1111 | } |
977 | } | 1112 | } |
978 | 1113 | ||
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c) | |||
1013 | if (!d40c->busy) | 1148 | if (!d40c->busy) |
1014 | return 0; | 1149 | return 0; |
1015 | 1150 | ||
1151 | pm_runtime_get_sync(d40c->base->dev); | ||
1016 | spin_lock_irqsave(&d40c->lock, flags); | 1152 | spin_lock_irqsave(&d40c->lock, flags); |
1017 | 1153 | ||
1018 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1154 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c) | |||
1025 | D40_DMA_RUN); | 1161 | D40_DMA_RUN); |
1026 | } | 1162 | } |
1027 | } | 1163 | } |
1028 | 1164 | pm_runtime_mark_last_busy(d40c->base->dev); | |
1165 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1029 | spin_unlock_irqrestore(&d40c->lock, flags); | 1166 | spin_unlock_irqrestore(&d40c->lock, flags); |
1030 | return res; | 1167 | return res; |
1031 | } | 1168 | } |
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c) | |||
1039 | return 0; | 1176 | return 0; |
1040 | 1177 | ||
1041 | spin_lock_irqsave(&d40c->lock, flags); | 1178 | spin_lock_irqsave(&d40c->lock, flags); |
1042 | 1179 | pm_runtime_get_sync(d40c->base->dev); | |
1043 | if (d40c->base->rev == 0) | 1180 | if (d40c->base->rev == 0) |
1044 | if (chan_is_logical(d40c)) { | 1181 | if (chan_is_logical(d40c)) { |
1045 | res = d40_channel_execute_command(d40c, | 1182 | res = d40_channel_execute_command(d40c, |
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c) | |||
1057 | } | 1194 | } |
1058 | 1195 | ||
1059 | no_suspend: | 1196 | no_suspend: |
1197 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1198 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1060 | spin_unlock_irqrestore(&d40c->lock, flags); | 1199 | spin_unlock_irqrestore(&d40c->lock, flags); |
1061 | return res; | 1200 | return res; |
1062 | } | 1201 | } |
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
1129 | d40d = d40_first_queued(d40c); | 1268 | d40d = d40_first_queued(d40c); |
1130 | 1269 | ||
1131 | if (d40d != NULL) { | 1270 | if (d40d != NULL) { |
1132 | d40c->busy = true; | 1271 | if (!d40c->busy) |
1272 | d40c->busy = true; | ||
1273 | |||
1274 | pm_runtime_get_sync(d40c->base->dev); | ||
1133 | 1275 | ||
1134 | /* Remove from queue */ | 1276 | /* Remove from queue */ |
1135 | d40_desc_remove(d40d); | 1277 | d40_desc_remove(d40d); |
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
1190 | 1332 | ||
1191 | if (d40_queue_start(d40c) == NULL) | 1333 | if (d40_queue_start(d40c) == NULL) |
1192 | d40c->busy = false; | 1334 | d40c->busy = false; |
1335 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1336 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1193 | } | 1337 | } |
1194 | 1338 | ||
1195 | d40c->pending_tx++; | 1339 | d40c->pending_tx++; |
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c, | |||
1405 | return res; | 1549 | return res; |
1406 | } | 1550 | } |
1407 | 1551 | ||
1408 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, | 1552 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, |
1409 | int log_event_line, bool is_log) | 1553 | bool is_src, int log_event_line, bool is_log, |
1554 | bool *first_user) | ||
1410 | { | 1555 | { |
1411 | unsigned long flags; | 1556 | unsigned long flags; |
1412 | spin_lock_irqsave(&phy->lock, flags); | 1557 | spin_lock_irqsave(&phy->lock, flags); |
1558 | |||
1559 | *first_user = ((phy->allocated_src | phy->allocated_dst) | ||
1560 | == D40_ALLOC_FREE); | ||
1561 | |||
1413 | if (!is_log) { | 1562 | if (!is_log) { |
1414 | /* Physical interrupts are masked per physical full channel */ | 1563 | /* Physical interrupts are masked per physical full channel */ |
1415 | if (phy->allocated_src == D40_ALLOC_FREE && | 1564 | if (phy->allocated_src == D40_ALLOC_FREE && |
@@ -1490,7 +1639,7 @@ out: | |||
1490 | return is_free; | 1639 | return is_free; |
1491 | } | 1640 | } |
1492 | 1641 | ||
1493 | static int d40_allocate_channel(struct d40_chan *d40c) | 1642 | static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user) |
1494 | { | 1643 | { |
1495 | int dev_type; | 1644 | int dev_type; |
1496 | int event_group; | 1645 | int event_group; |
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1526 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | 1675 | for (i = 0; i < d40c->base->num_phy_chans; i++) { |
1527 | 1676 | ||
1528 | if (d40_alloc_mask_set(&phys[i], is_src, | 1677 | if (d40_alloc_mask_set(&phys[i], is_src, |
1529 | 0, is_log)) | 1678 | 0, is_log, |
1679 | first_phy_user)) | ||
1530 | goto found_phy; | 1680 | goto found_phy; |
1531 | } | 1681 | } |
1532 | } else | 1682 | } else |
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
1536 | if (d40_alloc_mask_set(&phys[i], | 1686 | if (d40_alloc_mask_set(&phys[i], |
1537 | is_src, | 1687 | is_src, |
1538 | 0, | 1688 | 0, |
1539 | is_log)) | 1689 | is_log, |
1690 | first_phy_user)) | ||
1540 | goto found_phy; | 1691 | goto found_phy; |
1541 | } | 1692 | } |
1542 | } | 1693 | } |
@@ -1552,6 +1703,25 @@ found_phy: | |||
1552 | /* Find logical channel */ | 1703 | /* Find logical channel */ |
1553 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1704 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1554 | int phy_num = j + event_group * 2; | 1705 | int phy_num = j + event_group * 2; |
1706 | |||
1707 | if (d40c->dma_cfg.use_fixed_channel) { | ||
1708 | i = d40c->dma_cfg.phy_channel; | ||
1709 | |||
1710 | if ((i != phy_num) && (i != phy_num + 1)) { | ||
1711 | dev_err(chan2dev(d40c), | ||
1712 | "invalid fixed phy channel %d\n", i); | ||
1713 | return -EINVAL; | ||
1714 | } | ||
1715 | |||
1716 | if (d40_alloc_mask_set(&phys[i], is_src, event_line, | ||
1717 | is_log, first_phy_user)) | ||
1718 | goto found_log; | ||
1719 | |||
1720 | dev_err(chan2dev(d40c), | ||
1721 | "could not allocate fixed phy channel %d\n", i); | ||
1722 | return -EINVAL; | ||
1723 | } | ||
1724 | |||
1555 | /* | 1725 | /* |
1556 | * Spread logical channels across all available physical rather | 1726 | * Spread logical channels across all available physical rather |
1557 | * than pack every logical channel at the first available phy | 1727 | * than pack every logical channel at the first available phy |
@@ -1560,13 +1730,15 @@ found_phy: | |||
1560 | if (is_src) { | 1730 | if (is_src) { |
1561 | for (i = phy_num; i < phy_num + 2; i++) { | 1731 | for (i = phy_num; i < phy_num + 2; i++) { |
1562 | if (d40_alloc_mask_set(&phys[i], is_src, | 1732 | if (d40_alloc_mask_set(&phys[i], is_src, |
1563 | event_line, is_log)) | 1733 | event_line, is_log, |
1734 | first_phy_user)) | ||
1564 | goto found_log; | 1735 | goto found_log; |
1565 | } | 1736 | } |
1566 | } else { | 1737 | } else { |
1567 | for (i = phy_num + 1; i >= phy_num; i--) { | 1738 | for (i = phy_num + 1; i >= phy_num; i--) { |
1568 | if (d40_alloc_mask_set(&phys[i], is_src, | 1739 | if (d40_alloc_mask_set(&phys[i], is_src, |
1569 | event_line, is_log)) | 1740 | event_line, is_log, |
1741 | first_phy_user)) | ||
1570 | goto found_log; | 1742 | goto found_log; |
1571 | } | 1743 | } |
1572 | } | 1744 | } |
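The fixed-channel branch above is driven by two new stedma40_chan_cfg fields; a hypothetical platform configuration pinning a client to physical channel 2 could look like this (all field values are illustrative only):

	static struct stedma40_chan_cfg example_cfg = {
		.dir			= STEDMA40_PERIPH_TO_MEM,
		.use_fixed_channel	= true,
		.phy_channel		= 2,	/* must match the client's event group */
	};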
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1643 | return -EINVAL; | 1815 | return -EINVAL; |
1644 | } | 1816 | } |
1645 | 1817 | ||
1818 | pm_runtime_get_sync(d40c->base->dev); | ||
1646 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1819 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1647 | if (res) { | 1820 | if (res) { |
1648 | chan_err(d40c, "suspend failed\n"); | 1821 | chan_err(d40c, "suspend failed\n"); |
1649 | return res; | 1822 | goto out; |
1650 | } | 1823 | } |
1651 | 1824 | ||
1652 | if (chan_is_logical(d40c)) { | 1825 | if (chan_is_logical(d40c)) { |
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1664 | if (d40_chan_has_events(d40c)) { | 1837 | if (d40_chan_has_events(d40c)) { |
1665 | res = d40_channel_execute_command(d40c, | 1838 | res = d40_channel_execute_command(d40c, |
1666 | D40_DMA_RUN); | 1839 | D40_DMA_RUN); |
1667 | if (res) { | 1840 | if (res) |
1668 | chan_err(d40c, | 1841 | chan_err(d40c, |
1669 | "Executing RUN command\n"); | 1842 | "Executing RUN command\n"); |
1670 | return res; | ||
1671 | } | ||
1672 | } | 1843 | } |
1673 | return 0; | 1844 | goto out; |
1674 | } | 1845 | } |
1675 | } else { | 1846 | } else { |
1676 | (void) d40_alloc_mask_free(phy, is_src, 0); | 1847 | (void) d40_alloc_mask_free(phy, is_src, 0); |
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
1680 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1851 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
1681 | if (res) { | 1852 | if (res) { |
1682 | chan_err(d40c, "Failed to stop channel\n"); | 1853 | chan_err(d40c, "Failed to stop channel\n"); |
1683 | return res; | 1854 | goto out; |
1684 | } | 1855 | } |
1856 | |||
1857 | if (d40c->busy) { | ||
1858 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
1859 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1860 | } | ||
1861 | |||
1862 | d40c->busy = false; | ||
1685 | d40c->phy_chan = NULL; | 1863 | d40c->phy_chan = NULL; |
1686 | d40c->configured = false; | 1864 | d40c->configured = false; |
1687 | d40c->base->lookup_phy_chans[phy->num] = NULL; | 1865 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
1866 | out: | ||
1688 | 1867 | ||
1689 | return 0; | 1868 | pm_runtime_mark_last_busy(d40c->base->dev); |
1869 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
1870 | return res; | ||
1690 | } | 1871 | } |
1691 | 1872 | ||
1692 | static bool d40_is_paused(struct d40_chan *d40c) | 1873 | static bool d40_is_paused(struct d40_chan *d40c) |
@@ -1855,7 +2036,7 @@ err: | |||
1855 | } | 2036 | } |
1856 | 2037 | ||
1857 | static dma_addr_t | 2038 | static dma_addr_t |
1858 | d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | 2039 | d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) |
1859 | { | 2040 | { |
1860 | struct stedma40_platform_data *plat = chan->base->plat_data; | 2041 | struct stedma40_platform_data *plat = chan->base->plat_data; |
1861 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; | 2042 | struct stedma40_chan_cfg *cfg = &chan->dma_cfg; |
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | |||
1864 | if (chan->runtime_addr) | 2045 | if (chan->runtime_addr) |
1865 | return chan->runtime_addr; | 2046 | return chan->runtime_addr; |
1866 | 2047 | ||
1867 | if (direction == DMA_FROM_DEVICE) | 2048 | if (direction == DMA_DEV_TO_MEM) |
1868 | addr = plat->dev_rx[cfg->src_dev_type]; | 2049 | addr = plat->dev_rx[cfg->src_dev_type]; |
1869 | else if (direction == DMA_TO_DEVICE) | 2050 | else if (direction == DMA_MEM_TO_DEV) |
1870 | addr = plat->dev_tx[cfg->dst_dev_type]; | 2051 | addr = plat->dev_tx[cfg->dst_dev_type]; |
1871 | 2052 | ||
1872 | return addr; | 2053 | return addr; |
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) | |||
1875 | static struct dma_async_tx_descriptor * | 2056 | static struct dma_async_tx_descriptor * |
1876 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | 2057 | d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, |
1877 | struct scatterlist *sg_dst, unsigned int sg_len, | 2058 | struct scatterlist *sg_dst, unsigned int sg_len, |
1878 | enum dma_data_direction direction, unsigned long dma_flags) | 2059 | enum dma_transfer_direction direction, unsigned long dma_flags) |
1879 | { | 2060 | { |
1880 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); | 2061 | struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); |
1881 | dma_addr_t src_dev_addr = 0; | 2062 | dma_addr_t src_dev_addr = 0; |
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, | |||
1902 | if (direction != DMA_NONE) { | 2083 | if (direction != DMA_NONE) { |
1903 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); | 2084 | dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); |
1904 | 2085 | ||
1905 | if (direction == DMA_FROM_DEVICE) | 2086 | if (direction == DMA_DEV_TO_MEM) |
1906 | src_dev_addr = dev_addr; | 2087 | src_dev_addr = dev_addr; |
1907 | else if (direction == DMA_TO_DEVICE) | 2088 | else if (direction == DMA_MEM_TO_DEV) |
1908 | dst_dev_addr = dev_addr; | 2089 | dst_dev_addr = dev_addr; |
1909 | } | 2090 | } |
1910 | 2091 | ||
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2011 | goto fail; | 2192 | goto fail; |
2012 | } | 2193 | } |
2013 | } | 2194 | } |
2014 | is_free_phy = (d40c->phy_chan == NULL); | ||
2015 | 2195 | ||
2016 | err = d40_allocate_channel(d40c); | 2196 | err = d40_allocate_channel(d40c, &is_free_phy); |
2017 | if (err) { | 2197 | if (err) { |
2018 | chan_err(d40c, "Failed to allocate channel\n"); | 2198 | chan_err(d40c, "Failed to allocate channel\n"); |
2199 | d40c->configured = false; | ||
2019 | goto fail; | 2200 | goto fail; |
2020 | } | 2201 | } |
2021 | 2202 | ||
2203 | pm_runtime_get_sync(d40c->base->dev); | ||
2022 | /* Fill in basic CFG register values */ | 2204 | /* Fill in basic CFG register values */ |
2023 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 2205 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
2024 | &d40c->dst_def_cfg, chan_is_logical(d40c)); | 2206 | &d40c->dst_def_cfg, chan_is_logical(d40c)); |
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2038 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | 2220 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; |
2039 | } | 2221 | } |
2040 | 2222 | ||
2223 | dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n", | ||
2224 | chan_is_logical(d40c) ? "logical" : "physical", | ||
2225 | d40c->phy_chan->num, | ||
2226 | d40c->dma_cfg.use_fixed_channel ? ", fixed" : ""); | ||
2227 | |||
2228 | |||
2041 | /* | 2229 | /* |
2042 | * Only write channel configuration to the DMA if the physical | 2230 | * Only write channel configuration to the DMA if the physical |
2043 | * resource is free. In case of multiple logical channels | 2231 | * resource is free. In case of multiple logical channels |
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
2046 | if (is_free_phy) | 2234 | if (is_free_phy) |
2047 | d40_config_write(d40c); | 2235 | d40_config_write(d40c); |
2048 | fail: | 2236 | fail: |
2237 | pm_runtime_mark_last_busy(d40c->base->dev); | ||
2238 | pm_runtime_put_autosuspend(d40c->base->dev); | ||
2049 | spin_unlock_irqrestore(&d40c->lock, flags); | 2239 | spin_unlock_irqrestore(&d40c->lock, flags); |
2050 | return err; | 2240 | return err; |
2051 | } | 2241 | } |
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan, | |||
2108 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 2298 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
2109 | struct scatterlist *sgl, | 2299 | struct scatterlist *sgl, |
2110 | unsigned int sg_len, | 2300 | unsigned int sg_len, |
2111 | enum dma_data_direction direction, | 2301 | enum dma_transfer_direction direction, |
2112 | unsigned long dma_flags) | 2302 | unsigned long dma_flags) |
2113 | { | 2303 | { |
2114 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) | 2304 | if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) |
2115 | return NULL; | 2305 | return NULL; |
2116 | 2306 | ||
2117 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); | 2307 | return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); |
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
2120 | static struct dma_async_tx_descriptor * | 2310 | static struct dma_async_tx_descriptor * |
2121 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, | 2311 | dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, |
2122 | size_t buf_len, size_t period_len, | 2312 | size_t buf_len, size_t period_len, |
2123 | enum dma_data_direction direction) | 2313 | enum dma_transfer_direction direction) |
2124 | { | 2314 | { |
2125 | unsigned int periods = buf_len / period_len; | 2315 | unsigned int periods = buf_len / period_len; |
2126 | struct dma_async_tx_descriptor *txd; | 2316 | struct dma_async_tx_descriptor *txd; |
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2269 | dst_addr_width = config->dst_addr_width; | 2459 | dst_addr_width = config->dst_addr_width; |
2270 | dst_maxburst = config->dst_maxburst; | 2460 | dst_maxburst = config->dst_maxburst; |
2271 | 2461 | ||
2272 | if (config->direction == DMA_FROM_DEVICE) { | 2462 | if (config->direction == DMA_DEV_TO_MEM) { |
2273 | dma_addr_t dev_addr_rx = | 2463 | dma_addr_t dev_addr_rx = |
2274 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; | 2464 | d40c->base->plat_data->dev_rx[cfg->src_dev_type]; |
2275 | 2465 | ||
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2292 | if (dst_maxburst == 0) | 2482 | if (dst_maxburst == 0) |
2293 | dst_maxburst = src_maxburst; | 2483 | dst_maxburst = src_maxburst; |
2294 | 2484 | ||
2295 | } else if (config->direction == DMA_TO_DEVICE) { | 2485 | } else if (config->direction == DMA_MEM_TO_DEV) { |
2296 | dma_addr_t dev_addr_tx = | 2486 | dma_addr_t dev_addr_tx = |
2297 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; | 2487 | d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; |
2298 | 2488 | ||
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, | |||
2357 | "configured channel %s for %s, data width %d/%d, " | 2547 | "configured channel %s for %s, data width %d/%d, " |
2358 | "maxburst %d/%d elements, LE, no flow control\n", | 2548 | "maxburst %d/%d elements, LE, no flow control\n", |
2359 | dma_chan_name(chan), | 2549 | dma_chan_name(chan), |
2360 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | 2550 | (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", |
2361 | src_addr_width, dst_addr_width, | 2551 | src_addr_width, dst_addr_width, |
2362 | src_maxburst, dst_maxburst); | 2552 | src_maxburst, dst_maxburst); |
2363 | 2553 | ||
@@ -2519,6 +2709,72 @@ failure1: | |||
2519 | return err; | 2709 | return err; |
2520 | } | 2710 | } |
2521 | 2711 | ||
2712 | /* Suspend/resume functionality */ | ||
2713 | #ifdef CONFIG_PM | ||
2714 | static int dma40_pm_suspend(struct device *dev) | ||
2715 | { | ||
2716 | struct platform_device *pdev = to_platform_device(dev); | ||
2717 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2718 | int ret = 0; | ||
2719 | if (!pm_runtime_suspended(dev)) | ||
2720 | return -EBUSY; | ||
2721 | |||
2722 | if (base->lcpa_regulator) | ||
2723 | ret = regulator_disable(base->lcpa_regulator); | ||
2724 | return ret; | ||
2725 | } | ||
2726 | |||
2727 | static int dma40_runtime_suspend(struct device *dev) | ||
2728 | { | ||
2729 | struct platform_device *pdev = to_platform_device(dev); | ||
2730 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2731 | |||
2732 | d40_save_restore_registers(base, true); | ||
2733 | |||
2734 | /* Don't disable/enable clocks for v1 due to HW bugs */ | ||
2735 | if (base->rev != 1) | ||
2736 | writel_relaxed(base->gcc_pwr_off_mask, | ||
2737 | base->virtbase + D40_DREG_GCC); | ||
2738 | |||
2739 | return 0; | ||
2740 | } | ||
2741 | |||
2742 | static int dma40_runtime_resume(struct device *dev) | ||
2743 | { | ||
2744 | struct platform_device *pdev = to_platform_device(dev); | ||
2745 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2746 | |||
2747 | if (base->initialized) | ||
2748 | d40_save_restore_registers(base, false); | ||
2749 | |||
2750 | writel_relaxed(D40_DREG_GCC_ENABLE_ALL, | ||
2751 | base->virtbase + D40_DREG_GCC); | ||
2752 | return 0; | ||
2753 | } | ||
2754 | |||
2755 | static int dma40_resume(struct device *dev) | ||
2756 | { | ||
2757 | struct platform_device *pdev = to_platform_device(dev); | ||
2758 | struct d40_base *base = platform_get_drvdata(pdev); | ||
2759 | int ret = 0; | ||
2760 | |||
2761 | if (base->lcpa_regulator) | ||
2762 | ret = regulator_enable(base->lcpa_regulator); | ||
2763 | |||
2764 | return ret; | ||
2765 | } | ||
2766 | |||
2767 | static const struct dev_pm_ops dma40_pm_ops = { | ||
2768 | .suspend = dma40_pm_suspend, | ||
2769 | .runtime_suspend = dma40_runtime_suspend, | ||
2770 | .runtime_resume = dma40_runtime_resume, | ||
2771 | .resume = dma40_resume, | ||
2772 | }; | ||
2773 | #define DMA40_PM_OPS (&dma40_pm_ops) | ||
2774 | #else | ||
2775 | #define DMA40_PM_OPS NULL | ||
2776 | #endif | ||
2777 | |||
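The pm_runtime calls sprinkled through the channel operations above all follow the same autosuspend idiom; schematically (a sketch of the pattern, not code from this patch):

	static void d40_hw_access(struct device *dev)
	{
		/* power up; runtime_resume restores the backed-up registers */
		pm_runtime_get_sync(dev);

		/* ... touch DMA registers ... */

		/* arm the DMA40_AUTOSUSPEND_DELAY timer instead of suspending now */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}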
2522 | /* Initialization functions. */ | 2778 | /* Initialization functions. */ |
2523 | 2779 | ||
2524 | static int __init d40_phy_res_init(struct d40_base *base) | 2780 | static int __init d40_phy_res_init(struct d40_base *base) |
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2527 | int num_phy_chans_avail = 0; | 2783 | int num_phy_chans_avail = 0; |
2528 | u32 val[2]; | 2784 | u32 val[2]; |
2529 | int odd_even_bit = -2; | 2785 | int odd_even_bit = -2; |
2786 | int gcc = D40_DREG_GCC_ENA; | ||
2530 | 2787 | ||
2531 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | 2788 | val[0] = readl(base->virtbase + D40_DREG_PRSME); |
2532 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | 2789 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); |
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2538 | /* Mark security only channels as occupied */ | 2795 | /* Mark security only channels as occupied */ |
2539 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 2796 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; |
2540 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 2797 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; |
2798 | base->phy_res[i].reserved = true; | ||
2799 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | ||
2800 | D40_DREG_GCC_SRC); | ||
2801 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i), | ||
2802 | D40_DREG_GCC_DST); | ||
2803 | |||
2804 | |||
2541 | } else { | 2805 | } else { |
2542 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | 2806 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; |
2543 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | 2807 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; |
2808 | base->phy_res[i].reserved = false; | ||
2544 | num_phy_chans_avail++; | 2809 | num_phy_chans_avail++; |
2545 | } | 2810 | } |
2546 | spin_lock_init(&base->phy_res[i].lock); | 2811 | spin_lock_init(&base->phy_res[i].lock); |
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2552 | 2817 | ||
2553 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; | 2818 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
2554 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | 2819 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; |
2820 | base->phy_res[chan].reserved = true; | ||
2821 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | ||
2822 | D40_DREG_GCC_SRC); | ||
2823 | gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan), | ||
2824 | D40_DREG_GCC_DST); | ||
2555 | num_phy_chans_avail--; | 2825 | num_phy_chans_avail--; |
2556 | } | 2826 | } |
2557 | 2827 | ||
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
2572 | val[0] = val[0] >> 2; | 2842 | val[0] = val[0] >> 2; |
2573 | } | 2843 | } |
2574 | 2844 | ||
2845 | /* | ||
2846 | * To keep things simple, enable all clocks initially. | ||
2847 | * The clocks will be managed later, after channel allocation. | ||
2848 | * The clocks for the event lines on which reserved channels exist | ||
2849 | * are not managed here. | ||
2850 | */ | ||
2851 | writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC); | ||
2852 | base->gcc_pwr_off_mask = gcc; | ||
2853 | |||
2575 | return num_phy_chans_avail; | 2854 | return num_phy_chans_avail; |
2576 | } | 2855 | } |
2577 | 2856 | ||
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
2699 | goto failure; | 2978 | goto failure; |
2700 | } | 2979 | } |
2701 | 2980 | ||
2702 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * | 2981 | base->reg_val_backup_chan = kmalloc(base->num_phy_chans * |
2703 | sizeof(struct d40_desc *) * | 2982 | sizeof(d40_backup_regs_chan), |
2704 | D40_LCLA_LINK_PER_EVENT_GRP, | ||
2705 | GFP_KERNEL); | 2983 | GFP_KERNEL); |
2984 | if (!base->reg_val_backup_chan) | ||
2985 | goto failure; | ||
2986 | |||
2987 | base->lcla_pool.alloc_map = | ||
2988 | kzalloc(num_phy_chans * sizeof(struct d40_desc *) | ||
2989 | * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL); | ||
2706 | if (!base->lcla_pool.alloc_map) | 2990 | if (!base->lcla_pool.alloc_map) |
2707 | goto failure; | 2991 | goto failure; |
2708 | 2992 | ||
@@ -2741,9 +3025,9 @@ failure: | |||
2741 | static void __init d40_hw_init(struct d40_base *base) | 3025 | static void __init d40_hw_init(struct d40_base *base) |
2742 | { | 3026 | { |
2743 | 3027 | ||
2744 | static const struct d40_reg_val dma_init_reg[] = { | 3028 | static struct d40_reg_val dma_init_reg[] = { |
2745 | /* Clock every part of the DMA block from start */ | 3029 | /* Clock every part of the DMA block from start */ |
2746 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, | 3030 | { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL}, |
2747 | 3031 | ||
2748 | /* Interrupts on all logical channels */ | 3032 | /* Interrupts on all logical channels */ |
2749 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 3033 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, |
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2943 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); | 3227 | d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); |
2944 | goto failure; | 3228 | goto failure; |
2945 | } | 3229 | } |
3230 | /* If lcla has to be located in ESRAM we don't need to allocate */ | ||
3231 | if (base->plat_data->use_esram_lcla) { | ||
3232 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
3233 | "lcla_esram"); | ||
3234 | if (!res) { | ||
3235 | ret = -ENOENT; | ||
3236 | d40_err(&pdev->dev, | ||
3237 | "No \"lcla_esram\" memory resource\n"); | ||
3238 | goto failure; | ||
3239 | } | ||
3240 | base->lcla_pool.base = ioremap(res->start, | ||
3241 | resource_size(res)); | ||
3242 | if (!base->lcla_pool.base) { | ||
3243 | ret = -ENOMEM; | ||
3244 | d40_err(&pdev->dev, "Failed to ioremap LCLA region\n"); | ||
3245 | goto failure; | ||
3246 | } | ||
3247 | writel(res->start, base->virtbase + D40_DREG_LCLA); | ||
2946 | 3248 | ||
2947 | ret = d40_lcla_allocate(base); | 3249 | } else { |
2948 | if (ret) { | 3250 | ret = d40_lcla_allocate(base); |
2949 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); | 3251 | if (ret) { |
2950 | goto failure; | 3252 | d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); |
3253 | goto failure; | ||
3254 | } | ||
2951 | } | 3255 | } |
2952 | 3256 | ||
2953 | spin_lock_init(&base->lcla_pool.lock); | 3257 | spin_lock_init(&base->lcla_pool.lock); |
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev) | |||
2960 | goto failure; | 3264 | goto failure; |
2961 | } | 3265 | } |
2962 | 3266 | ||
3267 | pm_runtime_irq_safe(base->dev); | ||
3268 | pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY); | ||
3269 | pm_runtime_use_autosuspend(base->dev); | ||
3270 | pm_runtime_enable(base->dev); | ||
3271 | pm_runtime_resume(base->dev); | ||
3272 | |||
3273 | if (base->plat_data->use_esram_lcla) { | ||
3274 | |||
3275 | base->lcpa_regulator = regulator_get(base->dev, "lcla_esram"); | ||
3276 | if (IS_ERR(base->lcpa_regulator)) { | ||
3277 | d40_err(&pdev->dev, "Failed to get lcpa_regulator\n"); | ||
3278 | base->lcpa_regulator = NULL; | ||
3279 | goto failure; | ||
3280 | } | ||
3281 | |||
3282 | ret = regulator_enable(base->lcpa_regulator); | ||
3283 | if (ret) { | ||
3284 | d40_err(&pdev->dev, | ||
3285 | "Failed to enable lcpa_regulator\n"); | ||
3286 | regulator_put(base->lcpa_regulator); | ||
3287 | base->lcpa_regulator = NULL; | ||
3288 | goto failure; | ||
3289 | } | ||
3290 | } | ||
3291 | |||
3292 | base->initialized = true; | ||
2963 | err = d40_dmaengine_init(base, num_reserved_chans); | 3293 | err = d40_dmaengine_init(base, num_reserved_chans); |
2964 | if (err) | 3294 | if (err) |
2965 | goto failure; | 3295 | goto failure; |
@@ -2976,6 +3306,11 @@ failure: | |||
2976 | if (base->virtbase) | 3306 | if (base->virtbase) |
2977 | iounmap(base->virtbase); | 3307 | iounmap(base->virtbase); |
2978 | 3308 | ||
3309 | if (base->lcla_pool.base && base->plat_data->use_esram_lcla) { | ||
3310 | iounmap(base->lcla_pool.base); | ||
3311 | base->lcla_pool.base = NULL; | ||
3312 | } | ||
3313 | |||
2979 | if (base->lcla_pool.dma_addr) | 3314 | if (base->lcla_pool.dma_addr) |
2980 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, | 3315 | dma_unmap_single(base->dev, base->lcla_pool.dma_addr, |
2981 | SZ_1K * base->num_phy_chans, | 3316 | SZ_1K * base->num_phy_chans, |
@@ -2998,6 +3333,11 @@ failure: | |||
2998 | clk_put(base->clk); | 3333 | clk_put(base->clk); |
2999 | } | 3334 | } |
3000 | 3335 | ||
3336 | if (base->lcpa_regulator) { | ||
3337 | regulator_disable(base->lcpa_regulator); | ||
3338 | regulator_put(base->lcpa_regulator); | ||
3339 | } | ||
3340 | |||
3001 | kfree(base->lcla_pool.alloc_map); | 3341 | kfree(base->lcla_pool.alloc_map); |
3002 | kfree(base->lookup_log_chans); | 3342 | kfree(base->lookup_log_chans); |
3003 | kfree(base->lookup_phy_chans); | 3343 | kfree(base->lookup_phy_chans); |
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = { | |||
3013 | .driver = { | 3353 | .driver = { |
3014 | .owner = THIS_MODULE, | 3354 | .owner = THIS_MODULE, |
3015 | .name = D40_NAME, | 3355 | .name = D40_NAME, |
3356 | .pm = DMA40_PM_OPS, | ||
3016 | }, | 3357 | }, |
3017 | }; | 3358 | }; |
3018 | 3359 | ||
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index b44c455158de..8d3d490968a3 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
@@ -16,6 +16,8 @@ | |||
16 | 16 | ||
17 | #define D40_TYPE_TO_GROUP(type) (type / 16) | 17 | #define D40_TYPE_TO_GROUP(type) (type / 16) |
18 | #define D40_TYPE_TO_EVENT(type) (type % 16) | 18 | #define D40_TYPE_TO_EVENT(type) (type % 16) |
19 | #define D40_GROUP_SIZE 8 | ||
20 | #define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2) | ||
19 | 21 | ||
20 | /* Most bits of the CFG register are the same in log as in phy mode */ | 22 | /* Most bits of the CFG register are the same in log as in phy mode */ |
21 | #define D40_SREG_CFG_MST_POS 15 | 23 | #define D40_SREG_CFG_MST_POS 15 |
@@ -123,6 +125,15 @@ | |||
123 | 125 | ||
124 | /* DMA Register Offsets */ | 126 | /* DMA Register Offsets */ |
125 | #define D40_DREG_GCC 0x000 | 127 | #define D40_DREG_GCC 0x000 |
128 | #define D40_DREG_GCC_ENA 0x1 | ||
129 | /* This assumes that there are only 4 event groups */ | ||
130 | #define D40_DREG_GCC_ENABLE_ALL 0xff01 | ||
131 | #define D40_DREG_GCC_EVTGRP_POS 8 | ||
132 | #define D40_DREG_GCC_SRC 0 | ||
133 | #define D40_DREG_GCC_DST 1 | ||
134 | #define D40_DREG_GCC_EVTGRP_ENA(x, y) \ | ||
135 | (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y)) | ||
136 | |||
126 | #define D40_DREG_PRTYP 0x004 | 137 | #define D40_DREG_PRTYP 0x004 |
127 | #define D40_DREG_PRSME 0x008 | 138 | #define D40_DREG_PRSME 0x008 |
128 | #define D40_DREG_PRSMO 0x00C | 139 | #define D40_DREG_PRSMO 0x00C |
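A worked example of the new GCC helpers: physical channel 5 belongs to event group (5 & 7) / 2 = 2, so enabling its source and destination clocks sets bits 12 and 13 on top of the global enable bit:

	u32 gcc = D40_DREG_GCC_ENA;	/* bit 0 */

	gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(5), D40_DREG_GCC_SRC);
	gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(5), D40_DREG_GCC_DST);
	/* gcc == 0x3001; with all four groups enabled for both src and dst
	 * this reaches 0xff01, i.e. D40_DREG_GCC_ENABLE_ALL */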
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index a4a398f2ef61..a6f9c1684a0f 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -90,7 +90,7 @@ struct timb_dma_chan { | |||
90 | struct list_head queue; | 90 | struct list_head queue; |
91 | struct list_head free_list; | 91 | struct list_head free_list; |
92 | unsigned int bytes_per_line; | 92 | unsigned int bytes_per_line; |
93 | enum dma_data_direction direction; | 93 | enum dma_transfer_direction direction; |
94 | unsigned int descs; /* Descriptors to allocate */ | 94 | unsigned int descs; /* Descriptors to allocate */ |
95 | unsigned int desc_elems; /* number of elems per descriptor */ | 95 | unsigned int desc_elems; /* number of elems per descriptor */ |
96 | }; | 96 | }; |
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, | |||
166 | 166 | ||
167 | if (single) | 167 | if (single) |
168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, | 168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, |
169 | td_chan->direction); | 169 | DMA_TO_DEVICE); |
170 | else | 170 | else |
171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, | 171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, |
172 | td_chan->direction); | 172 | DMA_TO_DEVICE); |
173 | } | 173 | } |
174 | 174 | ||
175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) | 175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) |
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan) | |||
235 | "td_chan: %p, chan: %d, membase: %p\n", | 235 | "td_chan: %p, chan: %d, membase: %p\n", |
236 | td_chan, td_chan->chan.chan_id, td_chan->membase); | 236 | td_chan, td_chan->chan.chan_id, td_chan->membase); |
237 | 237 | ||
238 | if (td_chan->direction == DMA_FROM_DEVICE) { | 238 | if (td_chan->direction == DMA_DEV_TO_MEM) { |
239 | 239 | ||
240 | /* descriptor address */ | 240 | /* descriptor address */ |
241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); | 241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); |
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan) | |||
278 | txd->cookie); | 278 | txd->cookie); |
279 | 279 | ||
280 | /* make sure to stop the transfer */ | 280 | /* make sure to stop the transfer */ |
281 | if (td_chan->direction == DMA_FROM_DEVICE) | 281 | if (td_chan->direction == DMA_DEV_TO_MEM) |
282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); | 282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); |
283 | /* Currently no support for stopping DMA transfers | 283 | /* Currently no support for stopping DMA transfers |
284 | else | 284 | else |
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan) | |||
558 | 558 | ||
559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | 559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, |
560 | struct scatterlist *sgl, unsigned int sg_len, | 560 | struct scatterlist *sgl, unsigned int sg_len, |
561 | enum dma_data_direction direction, unsigned long flags) | 561 | enum dma_transfer_direction direction, unsigned long flags) |
562 | { | 562 | { |
563 | struct timb_dma_chan *td_chan = | 563 | struct timb_dma_chan *td_chan = |
564 | container_of(chan, struct timb_dma_chan, chan); | 564 | container_of(chan, struct timb_dma_chan, chan); |
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | |||
606 | } | 606 | } |
607 | 607 | ||
608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, | 608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, |
609 | td_desc->desc_list_len, DMA_TO_DEVICE); | 609 | td_desc->desc_list_len, DMA_MEM_TO_DEV); |
610 | 610 | ||
611 | return &td_desc->txd; | 611 | return &td_desc->txd; |
612 | } | 612 | } |
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev) | |||
775 | td_chan->descs = pchan->descriptors; | 775 | td_chan->descs = pchan->descriptors; |
776 | td_chan->desc_elems = pchan->descriptor_elements; | 776 | td_chan->desc_elems = pchan->descriptor_elements; |
777 | td_chan->bytes_per_line = pchan->bytes_per_line; | 777 | td_chan->bytes_per_line = pchan->bytes_per_line; |
778 | td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : | 778 | td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM : |
779 | DMA_TO_DEVICE; | 779 | DMA_MEM_TO_DEV; |
780 | 780 | ||
781 | td_chan->membase = td->membase + | 781 | td_chan->membase = td->membase + |
782 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + | 782 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + |
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = { | |||
841 | .remove = __exit_p(td_remove), | 841 | .remove = __exit_p(td_remove), |
842 | }; | 842 | }; |
843 | 843 | ||
844 | static int __init td_init(void) | 844 | module_platform_driver(td_driver); |
845 | { | ||
846 | return platform_driver_register(&td_driver); | ||
847 | } | ||
848 | module_init(td_init); | ||
849 | |||
850 | static void __exit td_exit(void) | ||
851 | { | ||
852 | platform_driver_unregister(&td_driver); | ||
853 | } | ||
854 | module_exit(td_exit); | ||
855 | 845 | ||
856 | MODULE_LICENSE("GPL v2"); | 846 | MODULE_LICENSE("GPL v2"); |
857 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); | 847 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); |
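The module_platform_driver() helper replacing the hand-rolled init/exit pair expands to roughly the code it removes:

	/* approximate expansion of module_platform_driver(td_driver) */
	static int __init td_driver_init(void)
	{
		return platform_driver_register(&td_driver);
	}
	module_init(td_driver_init);

	static void __exit td_driver_exit(void)
	{
		platform_driver_unregister(&td_driver);
	}
	module_exit(td_driver_exit);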
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index cbd83e362b5e..6122c364cf11 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c | |||
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
845 | 845 | ||
846 | static struct dma_async_tx_descriptor * | 846 | static struct dma_async_tx_descriptor * |
847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
848 | unsigned int sg_len, enum dma_data_direction direction, | 848 | unsigned int sg_len, enum dma_transfer_direction direction, |
849 | unsigned long flags) | 849 | unsigned long flags) |
850 | { | 850 | { |
851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
860 | 860 | ||
861 | BUG_ON(!ds || !ds->reg_width); | 861 | BUG_ON(!ds || !ds->reg_width); |
862 | if (ds->tx_reg) | 862 | if (ds->tx_reg) |
863 | BUG_ON(direction != DMA_TO_DEVICE); | 863 | BUG_ON(direction != DMA_MEM_TO_DEV); |
864 | else | 864 | else |
865 | BUG_ON(direction != DMA_FROM_DEVICE); | 865 | BUG_ON(direction != DMA_DEV_TO_MEM); |
866 | if (unlikely(!sg_len)) | 866 | if (unlikely(!sg_len)) |
867 | return NULL; | 867 | return NULL; |
868 | 868 | ||
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
882 | mem = sg_dma_address(sg); | 882 | mem = sg_dma_address(sg); |
883 | 883 | ||
884 | if (__is_dmac64(ddev)) { | 884 | if (__is_dmac64(ddev)) { |
885 | if (direction == DMA_TO_DEVICE) { | 885 | if (direction == DMA_MEM_TO_DEV) { |
886 | desc->hwdesc.SAR = mem; | 886 | desc->hwdesc.SAR = mem; |
887 | desc->hwdesc.DAR = ds->tx_reg; | 887 | desc->hwdesc.DAR = ds->tx_reg; |
888 | } else { | 888 | } else { |
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
891 | } | 891 | } |
892 | desc->hwdesc.CNTR = sg_dma_len(sg); | 892 | desc->hwdesc.CNTR = sg_dma_len(sg); |
893 | } else { | 893 | } else { |
894 | if (direction == DMA_TO_DEVICE) { | 894 | if (direction == DMA_MEM_TO_DEV) { |
895 | desc->hwdesc32.SAR = mem; | 895 | desc->hwdesc32.SAR = mem; |
896 | desc->hwdesc32.DAR = ds->tx_reg; | 896 | desc->hwdesc32.DAR = ds->tx_reg; |
897 | } else { | 897 | } else { |
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
900 | } | 900 | } |
901 | desc->hwdesc32.CNTR = sg_dma_len(sg); | 901 | desc->hwdesc32.CNTR = sg_dma_len(sg); |
902 | } | 902 | } |
903 | if (direction == DMA_TO_DEVICE) { | 903 | if (direction == DMA_MEM_TO_DEV) { |
904 | sai = ds->reg_width; | 904 | sai = ds->reg_width; |
905 | dai = 0; | 905 | dai = 0; |
906 | } else { | 906 | } else { |
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 0cb461dd396a..74522773e934 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c | |||
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) | |||
287 | sg_dma_len(sg) = new_size; | 287 | sg_dma_len(sg) = new_size; |
288 | 288 | ||
289 | txd = ichan->dma_chan.device->device_prep_slave_sg( | 289 | txd = ichan->dma_chan.device->device_prep_slave_sg( |
290 | &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE, | 290 | &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM, |
291 | DMA_PREP_INTERRUPT); | 291 | DMA_PREP_INTERRUPT); |
292 | if (!txd) | 292 | if (!txd) |
293 | goto error; | 293 | goto error; |
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c index 0a2d75f04066..4ed1c7c28ae7 100644 --- a/drivers/media/video/timblogiw.c +++ b/drivers/media/video/timblogiw.c | |||
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) | |||
565 | spin_unlock_irq(&fh->queue_lock); | 565 | spin_unlock_irq(&fh->queue_lock); |
566 | 566 | ||
567 | desc = fh->chan->device->device_prep_slave_sg(fh->chan, | 567 | desc = fh->chan->device->device_prep_slave_sg(fh->chan, |
568 | buf->sg, sg_elems, DMA_FROM_DEVICE, | 568 | buf->sg, sg_elems, DMA_DEV_TO_MEM, |
569 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 569 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
570 | if (!desc) { | 570 | if (!desc) { |
571 | spin_lock_irq(&fh->queue_lock); | 571 | spin_lock_irq(&fh->queue_lock); |
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c index eb5cd28bc6d8..a2d25e4857e3 100644 --- a/drivers/misc/carma/carma-fpga-program.c +++ b/drivers/misc/carma/carma-fpga-program.c | |||
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv) | |||
513 | * transaction, and then put it under external control | 513 | * transaction, and then put it under external control |
514 | */ | 514 | */ |
515 | memset(&config, 0, sizeof(config)); | 515 | memset(&config, 0, sizeof(config)); |
516 | config.direction = DMA_TO_DEVICE; | 516 | config.direction = DMA_MEM_TO_DEV; |
517 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 517 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
518 | config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; | 518 | config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4; |
519 | ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, | 519 | ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG, |
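This call site shows the shape all slave users take after the rename; end to end, a transmit setup under the new enum reads schematically as follows (the channel, scatterlist and FIFO address are assumed to exist, and the helper name is hypothetical):

	static int example_start_tx(struct dma_chan *chan, struct scatterlist *sgl,
				    unsigned int sg_len, dma_addr_t fifo_addr)
	{
		struct dma_slave_config config = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= fifo_addr,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst	= 8,
		};
		struct dma_async_tx_descriptor *desc;
		int ret;

		ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
						   (unsigned long)&config);
		if (ret)
			return ret;

		desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							  DMA_MEM_TO_DEV,
							  DMA_PREP_INTERRUPT);
		if (!desc)
			return -ENOMEM;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;
	}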
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index a7ee50271465..fcfe1eb5acc8 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c | |||
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
823 | struct scatterlist *sg; | 823 | struct scatterlist *sg; |
824 | unsigned int i; | 824 | unsigned int i; |
825 | enum dma_data_direction direction; | 825 | enum dma_data_direction direction; |
826 | enum dma_transfer_direction slave_dirn; | ||
826 | unsigned int sglen; | 827 | unsigned int sglen; |
827 | u32 iflags; | 828 | u32 iflags; |
828 | 829 | ||
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | |||
860 | if (host->caps.has_dma) | 861 | if (host->caps.has_dma) |
861 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN); | 862 | atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN); |
862 | 863 | ||
863 | if (data->flags & MMC_DATA_READ) | 864 | if (data->flags & MMC_DATA_READ) { |
864 | direction = DMA_FROM_DEVICE; | 865 | direction = DMA_FROM_DEVICE; |
865 | else | 866 | slave_dirn = DMA_DEV_TO_MEM; |
867 | } else { | ||
866 | direction = DMA_TO_DEVICE; | 868 | direction = DMA_TO_DEVICE; |
869 | slave_dirn = DMA_MEM_TO_DEV; | ||
870 | } | ||
867 | 871 | ||
868 | sglen = dma_map_sg(chan->device->dev, data->sg, | 872 | sglen = dma_map_sg(chan->device->dev, data->sg, |
869 | data->sg_len, direction); | 873 | data->sg_len, direction); |
870 | 874 | ||
871 | desc = chan->device->device_prep_slave_sg(chan, | 875 | desc = chan->device->device_prep_slave_sg(chan, |
872 | data->sg, sglen, direction, | 876 | data->sg, sglen, slave_dirn, |
873 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 877 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
874 | if (!desc) | 878 | if (!desc) |
875 | goto unmap_exit; | 879 | goto unmap_exit; |
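
Drivers that both map buffers and prepare slave descriptors, such as atmel-mci above (and mmci, mxcmmc and mxs-mmc below), now carry a pair of direction variables: a dma_data_direction for dma_map_sg()/dma_unmap_sg() and the matching dma_transfer_direction for device_prep_slave_sg(). The idiom, condensed (names hypothetical):

	enum dma_data_direction map_dir;	/* for dma_map_sg()/dma_unmap_sg() */
	enum dma_transfer_direction xfer_dir;	/* for device_prep_slave_sg() */

	if (is_read) {
		map_dir = DMA_FROM_DEVICE;
		xfer_dir = DMA_DEV_TO_MEM;
	} else {
		map_dir = DMA_TO_DEVICE;
		xfer_dir = DMA_MEM_TO_DEV;
	}

	nents = dma_map_sg(chan->device->dev, sg, sg_len, map_dir);
	desc = chan->device->device_prep_slave_sg(chan, sg, nents, xfer_dir,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
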
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index ece03b491c7d..0d955ffaf44e 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c | |||
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
374 | struct dma_chan *chan; | 374 | struct dma_chan *chan; |
375 | struct dma_device *device; | 375 | struct dma_device *device; |
376 | struct dma_async_tx_descriptor *desc; | 376 | struct dma_async_tx_descriptor *desc; |
377 | enum dma_data_direction buffer_dirn; | ||
377 | int nr_sg; | 378 | int nr_sg; |
378 | 379 | ||
379 | /* Check if next job is already prepared */ | 380 | /* Check if next job is already prepared */ |
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
387 | } | 388 | } |
388 | 389 | ||
389 | if (data->flags & MMC_DATA_READ) { | 390 | if (data->flags & MMC_DATA_READ) { |
390 | conf.direction = DMA_FROM_DEVICE; | 391 | conf.direction = DMA_DEV_TO_MEM; |
392 | buffer_dirn = DMA_FROM_DEVICE; | ||
391 | chan = host->dma_rx_channel; | 393 | chan = host->dma_rx_channel; |
392 | } else { | 394 | } else { |
393 | conf.direction = DMA_TO_DEVICE; | 395 | conf.direction = DMA_MEM_TO_DEV; |
396 | buffer_dirn = DMA_TO_DEVICE; | ||
394 | chan = host->dma_tx_channel; | 397 | chan = host->dma_tx_channel; |
395 | } | 398 | } |
396 | 399 | ||
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
403 | return -EINVAL; | 406 | return -EINVAL; |
404 | 407 | ||
405 | device = chan->device; | 408 | device = chan->device; |
406 | nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); | 409 | nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); |
407 | if (nr_sg == 0) | 410 | if (nr_sg == 0) |
408 | return -EINVAL; | 411 | return -EINVAL; |
409 | 412 | ||
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, | |||
426 | unmap_exit: | 429 | unmap_exit: |
427 | if (!next) | 430 | if (!next) |
428 | dmaengine_terminate_all(chan); | 431 | dmaengine_terminate_all(chan); |
429 | dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); | 432 | dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); |
430 | return -ENOMEM; | 433 | return -ENOMEM; |
431 | } | 434 | } |
432 | 435 | ||
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 7088b40f9579..4184b7946bbf 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c | |||
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
218 | unsigned int blksz = data->blksz; | 218 | unsigned int blksz = data->blksz; |
219 | unsigned int datasize = nob * blksz; | 219 | unsigned int datasize = nob * blksz; |
220 | struct scatterlist *sg; | 220 | struct scatterlist *sg; |
221 | enum dma_transfer_direction slave_dirn; | ||
221 | int i, nents; | 222 | int i, nents; |
222 | 223 | ||
223 | if (data->flags & MMC_DATA_STREAM) | 224 | if (data->flags & MMC_DATA_STREAM) |
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
240 | } | 241 | } |
241 | } | 242 | } |
242 | 243 | ||
243 | if (data->flags & MMC_DATA_READ) | 244 | if (data->flags & MMC_DATA_READ) { |
244 | host->dma_dir = DMA_FROM_DEVICE; | 245 | host->dma_dir = DMA_FROM_DEVICE; |
245 | else | 246 | slave_dirn = DMA_DEV_TO_MEM; |
247 | } else { | ||
246 | host->dma_dir = DMA_TO_DEVICE; | 248 | host->dma_dir = DMA_TO_DEVICE; |
249 | slave_dirn = DMA_MEM_TO_DEV; | ||
250 | } | ||
247 | 251 | ||
248 | nents = dma_map_sg(host->dma->device->dev, data->sg, | 252 | nents = dma_map_sg(host->dma->device->dev, data->sg, |
249 | data->sg_len, host->dma_dir); | 253 | data->sg_len, host->dma_dir); |
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) | |||
251 | return -EINVAL; | 255 | return -EINVAL; |
252 | 256 | ||
253 | host->desc = host->dma->device->device_prep_slave_sg(host->dma, | 257 | host->desc = host->dma->device->device_prep_slave_sg(host->dma, |
254 | data->sg, data->sg_len, host->dma_dir, | 258 | data->sg, data->sg_len, slave_dirn, |
255 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 259 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
256 | 260 | ||
257 | if (!host->desc) { | 261 | if (!host->desc) { |
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4e2e019dd5c9..382c835d217c 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c | |||
@@ -154,6 +154,7 @@ struct mxs_mmc_host { | |||
154 | struct dma_chan *dmach; | 154 | struct dma_chan *dmach; |
155 | struct mxs_dma_data dma_data; | 155 | struct mxs_dma_data dma_data; |
156 | unsigned int dma_dir; | 156 | unsigned int dma_dir; |
157 | enum dma_transfer_direction slave_dirn; | ||
157 | u32 ssp_pio_words[SSP_PIO_NUM]; | 158 | u32 ssp_pio_words[SSP_PIO_NUM]; |
158 | 159 | ||
159 | unsigned int version; | 160 | unsigned int version; |
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | |||
324 | } | 325 | } |
325 | 326 | ||
326 | desc = host->dmach->device->device_prep_slave_sg(host->dmach, | 327 | desc = host->dmach->device->device_prep_slave_sg(host->dmach, |
327 | sgl, sg_len, host->dma_dir, append); | 328 | sgl, sg_len, host->slave_dirn, append); |
328 | if (desc) { | 329 | if (desc) { |
329 | desc->callback = mxs_mmc_dma_irq_callback; | 330 | desc->callback = mxs_mmc_dma_irq_callback; |
330 | desc->callback_param = host; | 331 | desc->callback_param = host; |
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) | |||
356 | host->ssp_pio_words[1] = cmd0; | 357 | host->ssp_pio_words[1] = cmd0; |
357 | host->ssp_pio_words[2] = cmd1; | 358 | host->ssp_pio_words[2] = cmd1; |
358 | host->dma_dir = DMA_NONE; | 359 | host->dma_dir = DMA_NONE; |
360 | host->slave_dirn = DMA_TRANS_NONE; | ||
359 | desc = mxs_mmc_prep_dma(host, 0); | 361 | desc = mxs_mmc_prep_dma(host, 0); |
360 | if (!desc) | 362 | if (!desc) |
361 | goto out; | 363 | goto out; |
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) | |||
395 | host->ssp_pio_words[1] = cmd0; | 397 | host->ssp_pio_words[1] = cmd0; |
396 | host->ssp_pio_words[2] = cmd1; | 398 | host->ssp_pio_words[2] = cmd1; |
397 | host->dma_dir = DMA_NONE; | 399 | host->dma_dir = DMA_NONE; |
400 | host->slave_dirn = DMA_TRANS_NONE; | ||
398 | desc = mxs_mmc_prep_dma(host, 0); | 401 | desc = mxs_mmc_prep_dma(host, 0); |
399 | if (!desc) | 402 | if (!desc) |
400 | goto out; | 403 | goto out; |
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
433 | int i; | 436 | int i; |
434 | 437 | ||
435 | unsigned short dma_data_dir, timeout; | 438 | unsigned short dma_data_dir, timeout; |
439 | enum dma_transfer_direction slave_dirn; | ||
436 | unsigned int data_size = 0, log2_blksz; | 440 | unsigned int data_size = 0, log2_blksz; |
437 | unsigned int blocks = data->blocks; | 441 | unsigned int blocks = data->blocks; |
438 | 442 | ||
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
448 | 452 | ||
449 | if (data->flags & MMC_DATA_WRITE) { | 453 | if (data->flags & MMC_DATA_WRITE) { |
450 | dma_data_dir = DMA_TO_DEVICE; | 454 | dma_data_dir = DMA_TO_DEVICE; |
455 | slave_dirn = DMA_MEM_TO_DEV; | ||
451 | read = 0; | 456 | read = 0; |
452 | } else { | 457 | } else { |
453 | dma_data_dir = DMA_FROM_DEVICE; | 458 | dma_data_dir = DMA_FROM_DEVICE; |
459 | slave_dirn = DMA_DEV_TO_MEM; | ||
454 | read = BM_SSP_CTRL0_READ; | 460 | read = BM_SSP_CTRL0_READ; |
455 | } | 461 | } |
456 | 462 | ||
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
510 | host->ssp_pio_words[1] = cmd0; | 516 | host->ssp_pio_words[1] = cmd0; |
511 | host->ssp_pio_words[2] = cmd1; | 517 | host->ssp_pio_words[2] = cmd1; |
512 | host->dma_dir = DMA_NONE; | 518 | host->dma_dir = DMA_NONE; |
519 | host->slave_dirn = DMA_TRANS_NONE; | ||
513 | desc = mxs_mmc_prep_dma(host, 0); | 520 | desc = mxs_mmc_prep_dma(host, 0); |
514 | if (!desc) | 521 | if (!desc) |
515 | goto out; | 522 | goto out; |
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
518 | WARN_ON(host->data != NULL); | 525 | WARN_ON(host->data != NULL); |
519 | host->data = data; | 526 | host->data = data; |
520 | host->dma_dir = dma_data_dir; | 527 | host->dma_dir = dma_data_dir; |
528 | host->slave_dirn = slave_dirn; | ||
521 | desc = mxs_mmc_prep_dma(host, 1); | 529 | desc = mxs_mmc_prep_dma(host, 1); |
522 | if (!desc) | 530 | if (!desc) |
523 | goto out; | 531 | goto out; |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 4a2c5b2355f2..f5d8b53be333 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) | |||
286 | if (ret > 0) { | 286 | if (ret > 0) { |
287 | host->dma_active = true; | 287 | host->dma_active = true; |
288 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 288 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
289 | DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 289 | DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
290 | } | 290 | } |
291 | 291 | ||
292 | if (desc) { | 292 | if (desc) { |
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) | |||
335 | if (ret > 0) { | 335 | if (ret > 0) { |
336 | host->dma_active = true; | 336 | host->dma_active = true; |
337 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 337 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
338 | DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 338 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
339 | } | 339 | } |
340 | 340 | ||
341 | if (desc) { | 341 | if (desc) { |
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 86f259cdfcbc..7a6e6cc8f8b8 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c | |||
@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) | |||
77 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); | 77 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); |
78 | if (ret > 0) | 78 | if (ret > 0) |
79 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 79 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
80 | DMA_FROM_DEVICE, DMA_CTRL_ACK); | 80 | DMA_DEV_TO_MEM, DMA_CTRL_ACK); |
81 | 81 | ||
82 | if (desc) { | 82 | if (desc) { |
83 | cookie = dmaengine_submit(desc); | 83 | cookie = dmaengine_submit(desc); |
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) | |||
158 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); | 158 | ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); |
159 | if (ret > 0) | 159 | if (ret > 0) |
160 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, | 160 | desc = chan->device->device_prep_slave_sg(chan, sg, ret, |
161 | DMA_TO_DEVICE, DMA_CTRL_ACK); | 161 | DMA_MEM_TO_DEV, DMA_CTRL_ACK); |
162 | 162 | ||
163 | if (desc) { | 163 | if (desc) { |
164 | cookie = dmaengine_submit(desc); | 164 | cookie = dmaengine_submit(desc); |
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 2a56fc6f399a..7f680420bfab 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c | |||
@@ -827,7 +827,7 @@ int gpmi_send_command(struct gpmi_nand_data *this) | |||
827 | pio[1] = pio[2] = 0; | 827 | pio[1] = pio[2] = 0; |
828 | desc = channel->device->device_prep_slave_sg(channel, | 828 | desc = channel->device->device_prep_slave_sg(channel, |
829 | (struct scatterlist *)pio, | 829 | (struct scatterlist *)pio, |
830 | ARRAY_SIZE(pio), DMA_NONE, 0); | 830 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
831 | if (!desc) { | 831 | if (!desc) { |
832 | pr_err("step 1 error\n"); | 832 | pr_err("step 1 error\n"); |
833 | return -1; | 833 | return -1; |
@@ -839,7 +839,7 @@ int gpmi_send_command(struct gpmi_nand_data *this) | |||
839 | sg_init_one(sgl, this->cmd_buffer, this->command_length); | 839 | sg_init_one(sgl, this->cmd_buffer, this->command_length); |
840 | dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); | 840 | dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); |
841 | desc = channel->device->device_prep_slave_sg(channel, | 841 | desc = channel->device->device_prep_slave_sg(channel, |
842 | sgl, 1, DMA_TO_DEVICE, 1); | 842 | sgl, 1, DMA_MEM_TO_DEV, 1); |
843 | if (!desc) { | 843 | if (!desc) { |
844 | pr_err("step 2 error\n"); | 844 | pr_err("step 2 error\n"); |
845 | return -1; | 845 | return -1; |
@@ -872,7 +872,7 @@ int gpmi_send_data(struct gpmi_nand_data *this) | |||
872 | pio[1] = 0; | 872 | pio[1] = 0; |
873 | desc = channel->device->device_prep_slave_sg(channel, | 873 | desc = channel->device->device_prep_slave_sg(channel, |
874 | (struct scatterlist *)pio, | 874 | (struct scatterlist *)pio, |
875 | ARRAY_SIZE(pio), DMA_NONE, 0); | 875 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
876 | if (!desc) { | 876 | if (!desc) { |
877 | pr_err("step 1 error\n"); | 877 | pr_err("step 1 error\n"); |
878 | return -1; | 878 | return -1; |
@@ -881,7 +881,7 @@ int gpmi_send_data(struct gpmi_nand_data *this) | |||
881 | /* [2] send DMA request */ | 881 | /* [2] send DMA request */ |
882 | prepare_data_dma(this, DMA_TO_DEVICE); | 882 | prepare_data_dma(this, DMA_TO_DEVICE); |
883 | desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, | 883 | desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, |
884 | 1, DMA_TO_DEVICE, 1); | 884 | 1, DMA_MEM_TO_DEV, 1); |
885 | if (!desc) { | 885 | if (!desc) { |
886 | pr_err("step 2 error\n"); | 886 | pr_err("step 2 error\n"); |
887 | return -1; | 887 | return -1; |
@@ -908,7 +908,7 @@ int gpmi_read_data(struct gpmi_nand_data *this) | |||
908 | pio[1] = 0; | 908 | pio[1] = 0; |
909 | desc = channel->device->device_prep_slave_sg(channel, | 909 | desc = channel->device->device_prep_slave_sg(channel, |
910 | (struct scatterlist *)pio, | 910 | (struct scatterlist *)pio, |
911 | ARRAY_SIZE(pio), DMA_NONE, 0); | 911 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
912 | if (!desc) { | 912 | if (!desc) { |
913 | pr_err("step 1 error\n"); | 913 | pr_err("step 1 error\n"); |
914 | return -1; | 914 | return -1; |
@@ -917,7 +917,7 @@ int gpmi_read_data(struct gpmi_nand_data *this) | |||
917 | /* [2] : send DMA request */ | 917 | /* [2] : send DMA request */ |
918 | prepare_data_dma(this, DMA_FROM_DEVICE); | 918 | prepare_data_dma(this, DMA_FROM_DEVICE); |
919 | desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, | 919 | desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl, |
920 | 1, DMA_FROM_DEVICE, 1); | 920 | 1, DMA_DEV_TO_MEM, 1); |
921 | if (!desc) { | 921 | if (!desc) { |
922 | pr_err("step 2 error\n"); | 922 | pr_err("step 2 error\n"); |
923 | return -1; | 923 | return -1; |
@@ -964,7 +964,7 @@ int gpmi_send_page(struct gpmi_nand_data *this, | |||
964 | 964 | ||
965 | desc = channel->device->device_prep_slave_sg(channel, | 965 | desc = channel->device->device_prep_slave_sg(channel, |
966 | (struct scatterlist *)pio, | 966 | (struct scatterlist *)pio, |
967 | ARRAY_SIZE(pio), DMA_NONE, 0); | 967 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); |
968 | if (!desc) { | 968 | if (!desc) { |
969 | pr_err("step 2 error\n"); | 969 | pr_err("step 2 error\n"); |
970 | return -1; | 970 | return -1; |
@@ -998,7 +998,8 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
998 | | BF_GPMI_CTRL0_XFER_COUNT(0); | 998 | | BF_GPMI_CTRL0_XFER_COUNT(0); |
999 | pio[1] = 0; | 999 | pio[1] = 0; |
1000 | desc = channel->device->device_prep_slave_sg(channel, | 1000 | desc = channel->device->device_prep_slave_sg(channel, |
1001 | (struct scatterlist *)pio, 2, DMA_NONE, 0); | 1001 | (struct scatterlist *)pio, 2, |
1002 | DMA_TRANS_NONE, 0); | ||
1002 | if (!desc) { | 1003 | if (!desc) { |
1003 | pr_err("step 1 error\n"); | 1004 | pr_err("step 1 error\n"); |
1004 | return -1; | 1005 | return -1; |
@@ -1027,7 +1028,7 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1027 | pio[5] = auxiliary; | 1028 | pio[5] = auxiliary; |
1028 | desc = channel->device->device_prep_slave_sg(channel, | 1029 | desc = channel->device->device_prep_slave_sg(channel, |
1029 | (struct scatterlist *)pio, | 1030 | (struct scatterlist *)pio, |
1030 | ARRAY_SIZE(pio), DMA_NONE, 1); | 1031 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); |
1031 | if (!desc) { | 1032 | if (!desc) { |
1032 | pr_err("step 2 error\n"); | 1033 | pr_err("step 2 error\n"); |
1033 | return -1; | 1034 | return -1; |
@@ -1045,7 +1046,8 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1045 | | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); | 1046 | | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); |
1046 | pio[1] = 0; | 1047 | pio[1] = 0; |
1047 | desc = channel->device->device_prep_slave_sg(channel, | 1048 | desc = channel->device->device_prep_slave_sg(channel, |
1048 | (struct scatterlist *)pio, 2, DMA_NONE, 1); | 1049 | (struct scatterlist *)pio, 2, |
1050 | DMA_TRANS_NONE, 1); | ||
1049 | if (!desc) { | 1051 | if (!desc) { |
1050 | pr_err("step 3 error\n"); | 1052 | pr_err("step 3 error\n"); |
1051 | return -1; | 1053 | return -1; |
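
gpmi-lib is the one caller in this series with no real data direction: its PIO descriptors reuse the scatterlist argument to carry raw register words, so the old DMA_NONE placeholder becomes the new DMA_TRANS_NONE sentinel. Stripped to its essentials (ctrl0 is a stand-in for the register value built in the driver):

	u32 pio[2] = { ctrl0, 0 };	/* register words, not a real sgl */

	desc = channel->device->device_prep_slave_sg(channel,
			(struct scatterlist *)pio, ARRAY_SIZE(pio),
			DMA_TRANS_NONE, 0);	/* no data moves for this step */
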
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 75ec87a822b8..0a85690a1321 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c | |||
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev) | |||
459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; | 459 | sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4; |
460 | 460 | ||
461 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | 461 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, |
462 | &ctl->sg, 1, DMA_TO_DEVICE, | 462 | &ctl->sg, 1, DMA_MEM_TO_DEV, |
463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 463 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
464 | if (!ctl->adesc) | 464 | if (!ctl->adesc) |
465 | return NETDEV_TX_BUSY; | 465 | return NETDEV_TX_BUSY; |
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) | |||
571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; | 571 | sg_dma_len(sg) = DMA_BUFFER_SIZE; |
572 | 572 | ||
573 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, | 573 | ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan, |
574 | sg, 1, DMA_FROM_DEVICE, | 574 | sg, 1, DMA_DEV_TO_MEM, |
575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); | 575 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP); |
576 | 576 | ||
577 | if (!ctl->adesc) | 577 | if (!ctl->adesc) |
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index e743a45ee92c..8418eb036651 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c | |||
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
131 | rxchan = dws->rxchan; | 131 | rxchan = dws->rxchan; |
132 | 132 | ||
133 | /* 2. Prepare the TX dma transfer */ | 133 | /* 2. Prepare the TX dma transfer */ |
134 | txconf.direction = DMA_TO_DEVICE; | 134 | txconf.direction = DMA_MEM_TO_DEV; |
135 | txconf.dst_addr = dws->dma_addr; | 135 | txconf.dst_addr = dws->dma_addr; |
136 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; | 136 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; |
137 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 137 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
147 | txdesc = txchan->device->device_prep_slave_sg(txchan, | 147 | txdesc = txchan->device->device_prep_slave_sg(txchan, |
148 | &dws->tx_sgl, | 148 | &dws->tx_sgl, |
149 | 1, | 149 | 1, |
150 | DMA_TO_DEVICE, | 150 | DMA_MEM_TO_DEV, |
151 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | 151 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); |
152 | txdesc->callback = dw_spi_dma_done; | 152 | txdesc->callback = dw_spi_dma_done; |
153 | txdesc->callback_param = dws; | 153 | txdesc->callback_param = dws; |
154 | 154 | ||
155 | /* 3. Prepare the RX dma transfer */ | 155 | /* 3. Prepare the RX dma transfer */ |
156 | rxconf.direction = DMA_FROM_DEVICE; | 156 | rxconf.direction = DMA_DEV_TO_MEM; |
157 | rxconf.src_addr = dws->dma_addr; | 157 | rxconf.src_addr = dws->dma_addr; |
158 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; | 158 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; |
159 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | 159 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; |
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | |||
169 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | 169 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, |
170 | &dws->rx_sgl, | 170 | &dws->rx_sgl, |
171 | 1, | 171 | 1, |
172 | DMA_FROM_DEVICE, | 172 | DMA_DEV_TO_MEM, |
173 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | 173 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); |
174 | rxdesc->callback = dw_spi_dma_done; | 174 | rxdesc->callback = dw_spi_dma_done; |
175 | rxdesc->callback_param = dws; | 175 | rxdesc->callback_param = dws; |
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index 0a282e5fcc9c..d46e55c720b7 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c | |||
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
551 | struct dma_async_tx_descriptor *txd; | 551 | struct dma_async_tx_descriptor *txd; |
552 | enum dma_slave_buswidth buswidth; | 552 | enum dma_slave_buswidth buswidth; |
553 | struct dma_slave_config conf; | 553 | struct dma_slave_config conf; |
554 | enum dma_transfer_direction slave_dirn; | ||
554 | struct scatterlist *sg; | 555 | struct scatterlist *sg; |
555 | struct sg_table *sgt; | 556 | struct sg_table *sgt; |
556 | struct dma_chan *chan; | 557 | struct dma_chan *chan; |
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
573 | 574 | ||
574 | conf.src_addr = espi->sspdr_phys; | 575 | conf.src_addr = espi->sspdr_phys; |
575 | conf.src_addr_width = buswidth; | 576 | conf.src_addr_width = buswidth; |
577 | slave_dirn = DMA_DEV_TO_MEM; | ||
576 | } else { | 578 | } else { |
577 | chan = espi->dma_tx; | 579 | chan = espi->dma_tx; |
578 | buf = t->tx_buf; | 580 | buf = t->tx_buf; |
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
580 | 582 | ||
581 | conf.dst_addr = espi->sspdr_phys; | 583 | conf.dst_addr = espi->sspdr_phys; |
582 | conf.dst_addr_width = buswidth; | 584 | conf.dst_addr_width = buswidth; |
585 | slave_dirn = DMA_MEM_TO_DEV; | ||
583 | } | 586 | } |
584 | 587 | ||
585 | ret = dmaengine_slave_config(chan, &conf); | 588 | ret = dmaengine_slave_config(chan, &conf); |
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) | |||
631 | return ERR_PTR(-ENOMEM); | 634 | return ERR_PTR(-ENOMEM); |
632 | 635 | ||
633 | txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, | 636 | txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents, |
634 | dir, DMA_CTRL_ACK); | 637 | slave_dirn, DMA_CTRL_ACK); |
635 | if (!txd) { | 638 | if (!txd) { |
636 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); | 639 | dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); |
637 | return ERR_PTR(-ENOMEM); | 640 | return ERR_PTR(-ENOMEM); |
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) | |||
979 | dma_cap_set(DMA_SLAVE, mask); | 982 | dma_cap_set(DMA_SLAVE, mask); |
980 | 983 | ||
981 | espi->dma_rx_data.port = EP93XX_DMA_SSP; | 984 | espi->dma_rx_data.port = EP93XX_DMA_SSP; |
982 | espi->dma_rx_data.direction = DMA_FROM_DEVICE; | 985 | espi->dma_rx_data.direction = DMA_DEV_TO_MEM; |
983 | espi->dma_rx_data.name = "ep93xx-spi-rx"; | 986 | espi->dma_rx_data.name = "ep93xx-spi-rx"; |
984 | 987 | ||
985 | espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, | 988 | espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter, |
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi) | |||
990 | } | 993 | } |
991 | 994 | ||
992 | espi->dma_tx_data.port = EP93XX_DMA_SSP; | 995 | espi->dma_tx_data.port = EP93XX_DMA_SSP; |
993 | espi->dma_tx_data.direction = DMA_TO_DEVICE; | 996 | espi->dma_tx_data.direction = DMA_MEM_TO_DEV; |
994 | espi->dma_tx_data.name = "ep93xx-spi-tx"; | 997 | espi->dma_tx_data.name = "ep93xx-spi-tx"; |
995 | 998 | ||
996 | espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, | 999 | espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter, |
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index f1f5efbc3404..2f9cb43a2398 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022) | |||
900 | { | 900 | { |
901 | struct dma_slave_config rx_conf = { | 901 | struct dma_slave_config rx_conf = { |
902 | .src_addr = SSP_DR(pl022->phybase), | 902 | .src_addr = SSP_DR(pl022->phybase), |
903 | .direction = DMA_FROM_DEVICE, | 903 | .direction = DMA_DEV_TO_MEM, |
904 | }; | 904 | }; |
905 | struct dma_slave_config tx_conf = { | 905 | struct dma_slave_config tx_conf = { |
906 | .dst_addr = SSP_DR(pl022->phybase), | 906 | .dst_addr = SSP_DR(pl022->phybase), |
907 | .direction = DMA_TO_DEVICE, | 907 | .direction = DMA_MEM_TO_DEV, |
908 | }; | 908 | }; |
909 | unsigned int pages; | 909 | unsigned int pages; |
910 | int ret; | 910 | int ret; |
@@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022) | |||
1041 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | 1041 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, |
1042 | pl022->sgt_rx.sgl, | 1042 | pl022->sgt_rx.sgl, |
1043 | rx_sglen, | 1043 | rx_sglen, |
1044 | DMA_FROM_DEVICE, | 1044 | DMA_DEV_TO_MEM, |
1045 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1045 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1046 | if (!rxdesc) | 1046 | if (!rxdesc) |
1047 | goto err_rxdesc; | 1047 | goto err_rxdesc; |
@@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022) | |||
1049 | txdesc = txchan->device->device_prep_slave_sg(txchan, | 1049 | txdesc = txchan->device->device_prep_slave_sg(txchan, |
1050 | pl022->sgt_tx.sgl, | 1050 | pl022->sgt_tx.sgl, |
1051 | tx_sglen, | 1051 | tx_sglen, |
1052 | DMA_TO_DEVICE, | 1052 | DMA_MEM_TO_DEV, |
1053 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1053 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1054 | if (!txdesc) | 1054 | if (!txdesc) |
1055 | goto err_txdesc; | 1055 | goto err_txdesc; |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 7086583b9107..2a6429d8c363 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1079 | } | 1079 | } |
1080 | sg = dma->sg_rx_p; | 1080 | sg = dma->sg_rx_p; |
1081 | desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, | 1081 | desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg, |
1082 | num, DMA_FROM_DEVICE, | 1082 | num, DMA_DEV_TO_MEM, |
1083 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1083 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1084 | if (!desc_rx) { | 1084 | if (!desc_rx) { |
1085 | dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", | 1085 | dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", |
@@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1124 | } | 1124 | } |
1125 | sg = dma->sg_tx_p; | 1125 | sg = dma->sg_tx_p; |
1126 | desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, | 1126 | desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx, |
1127 | sg, num, DMA_TO_DEVICE, | 1127 | sg, num, DMA_MEM_TO_DEV, |
1128 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1128 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1129 | if (!desc_tx) { | 1129 | if (!desc_tx) { |
1130 | dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", | 1130 | dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n", |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 6958594f2fc0..9ae024025ff3 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
@@ -268,7 +268,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) | |||
268 | struct dma_slave_config tx_conf = { | 268 | struct dma_slave_config tx_conf = { |
269 | .dst_addr = uap->port.mapbase + UART01x_DR, | 269 | .dst_addr = uap->port.mapbase + UART01x_DR, |
270 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, | 270 | .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
271 | .direction = DMA_TO_DEVICE, | 271 | .direction = DMA_MEM_TO_DEV, |
272 | .dst_maxburst = uap->fifosize >> 1, | 272 | .dst_maxburst = uap->fifosize >> 1, |
273 | }; | 273 | }; |
274 | struct dma_chan *chan; | 274 | struct dma_chan *chan; |
@@ -301,7 +301,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) | |||
301 | struct dma_slave_config rx_conf = { | 301 | struct dma_slave_config rx_conf = { |
302 | .src_addr = uap->port.mapbase + UART01x_DR, | 302 | .src_addr = uap->port.mapbase + UART01x_DR, |
303 | .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, | 303 | .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, |
304 | .direction = DMA_FROM_DEVICE, | 304 | .direction = DMA_DEV_TO_MEM, |
305 | .src_maxburst = uap->fifosize >> 1, | 305 | .src_maxburst = uap->fifosize >> 1, |
306 | }; | 306 | }; |
307 | 307 | ||
@@ -480,7 +480,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) | |||
480 | return -EBUSY; | 480 | return -EBUSY; |
481 | } | 481 | } |
482 | 482 | ||
483 | desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE, | 483 | desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, |
484 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 484 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
485 | if (!desc) { | 485 | if (!desc) { |
486 | dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); | 486 | dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); |
@@ -676,7 +676,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) | |||
676 | &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; | 676 | &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; |
677 | dma_dev = rxchan->device; | 677 | dma_dev = rxchan->device; |
678 | desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, | 678 | desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, |
679 | DMA_FROM_DEVICE, | 679 | DMA_DEV_TO_MEM, |
680 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 680 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
681 | /* | 681 | /* |
682 | * If the DMA engine is busy and cannot prepare a | 682 | * If the DMA engine is busy and cannot prepare a |
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index de0f613ed6f5..17ae65762d1a 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c | |||
@@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv) | |||
764 | sg_dma_address(sg) = priv->rx_buf_dma; | 764 | sg_dma_address(sg) = priv->rx_buf_dma; |
765 | 765 | ||
766 | desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx, | 766 | desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx, |
767 | sg, 1, DMA_FROM_DEVICE, | 767 | sg, 1, DMA_DEV_TO_MEM, |
768 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 768 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
769 | 769 | ||
770 | if (!desc) | 770 | if (!desc) |
@@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) | |||
923 | } | 923 | } |
924 | 924 | ||
925 | desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx, | 925 | desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx, |
926 | priv->sg_tx_p, nent, DMA_TO_DEVICE, | 926 | priv->sg_tx_p, nent, DMA_MEM_TO_DEV, |
927 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 927 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
928 | if (!desc) { | 928 | if (!desc) { |
929 | dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n", | 929 | dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n", |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 9e62349b3d9f..75085795528e 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s) | |||
1339 | struct dma_async_tx_descriptor *desc; | 1339 | struct dma_async_tx_descriptor *desc; |
1340 | 1340 | ||
1341 | desc = chan->device->device_prep_slave_sg(chan, | 1341 | desc = chan->device->device_prep_slave_sg(chan, |
1342 | sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); | 1342 | sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); |
1343 | 1343 | ||
1344 | if (desc) { | 1344 | if (desc) { |
1345 | s->desc_rx[i] = desc; | 1345 | s->desc_rx[i] = desc; |
@@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work) | |||
1454 | BUG_ON(!sg_dma_len(sg)); | 1454 | BUG_ON(!sg_dma_len(sg)); |
1455 | 1455 | ||
1456 | desc = chan->device->device_prep_slave_sg(chan, | 1456 | desc = chan->device->device_prep_slave_sg(chan, |
1457 | sg, s->sg_len_tx, DMA_TO_DEVICE, | 1457 | sg, s->sg_len_tx, DMA_MEM_TO_DEV, |
1458 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1458 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1459 | if (!desc) { | 1459 | if (!desc) { |
1460 | /* switch to PIO */ | 1460 | /* switch to PIO */ |
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c index a163632877af..97cb45916c43 100644 --- a/drivers/usb/musb/ux500_dma.c +++ b/drivers/usb/musb/ux500_dma.c | |||
@@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel, | |||
84 | struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; | 84 | struct musb_hw_ep *hw_ep = ux500_channel->hw_ep; |
85 | struct dma_chan *dma_chan = ux500_channel->dma_chan; | 85 | struct dma_chan *dma_chan = ux500_channel->dma_chan; |
86 | struct dma_async_tx_descriptor *dma_desc; | 86 | struct dma_async_tx_descriptor *dma_desc; |
87 | enum dma_data_direction direction; | 87 | enum dma_transfer_direction direction; |
88 | struct scatterlist sg; | 88 | struct scatterlist sg; |
89 | struct dma_slave_config slave_conf; | 89 | struct dma_slave_config slave_conf; |
90 | enum dma_slave_buswidth addr_width; | 90 | enum dma_slave_buswidth addr_width; |
@@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel, | |||
104 | sg_dma_address(&sg) = dma_addr; | 104 | sg_dma_address(&sg) = dma_addr; |
105 | sg_dma_len(&sg) = len; | 105 | sg_dma_len(&sg) = len; |
106 | 106 | ||
107 | direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE; | 107 | direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; |
108 | addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE : | 108 | addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE : |
109 | DMA_SLAVE_BUSWIDTH_4_BYTES; | 109 | DMA_SLAVE_BUSWIDTH_4_BYTES; |
110 | 110 | ||
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index b51fcd80d244..72339bd6fcab 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data) | |||
772 | struct dma_async_tx_descriptor *desc; | 772 | struct dma_async_tx_descriptor *desc; |
773 | struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); | 773 | struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); |
774 | struct device *dev = usbhs_priv_to_dev(priv); | 774 | struct device *dev = usbhs_priv_to_dev(priv); |
775 | enum dma_data_direction dir; | 775 | enum dma_transfer_direction dir; |
776 | dma_cookie_t cookie; | 776 | dma_cookie_t cookie; |
777 | 777 | ||
778 | dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 778 | dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; |
779 | 779 | ||
780 | sg_init_table(&sg, 1); | 780 | sg_init_table(&sg, 1); |
781 | sg_set_page(&sg, virt_to_page(pkt->dma), | 781 | sg_set_page(&sg, virt_to_page(pkt->dma), |
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index e3406ab31305..727a5149d818 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c | |||
@@ -245,6 +245,7 @@ struct mx3fb_data { | |||
245 | 245 | ||
246 | uint32_t h_start_width; | 246 | uint32_t h_start_width; |
247 | uint32_t v_start_width; | 247 | uint32_t v_start_width; |
248 | enum disp_data_mapping disp_data_fmt; | ||
248 | }; | 249 | }; |
249 | 250 | ||
250 | struct dma_chan_request { | 251 | struct dma_chan_request { |
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r | |||
287 | __raw_writel(value, mx3fb->reg_base + reg); | 288 | __raw_writel(value, mx3fb->reg_base + reg); |
288 | } | 289 | } |
289 | 290 | ||
290 | static const uint32_t di_mappings[] = { | 291 | struct di_mapping { |
291 | 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */ | 292 | uint32_t b0, b1, b2; |
292 | 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */ | 293 | }; |
293 | 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */ | 294 | |
294 | 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */ | 295 | static const struct di_mapping di_mappings[] = { |
296 | [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f }, | ||
297 | [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f }, | ||
298 | [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 }, | ||
295 | }; | 299 | }; |
296 | 300 | ||
297 | static void sdc_fb_init(struct mx3fb_info *fbi) | 301 | static void sdc_fb_init(struct mx3fb_info *fbi) |
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) | |||
334 | /* This enables the channel */ | 338 | /* This enables the channel */ |
335 | if (mx3_fbi->cookie < 0) { | 339 | if (mx3_fbi->cookie < 0) { |
336 | mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, | 340 | mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, |
337 | &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); | 341 | &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
338 | if (!mx3_fbi->txd) { | 342 | if (!mx3_fbi->txd) { |
339 | dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", | 343 | dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", |
340 | dma_chan->chan_id); | 344 | dma_chan->chan_id); |
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel | |||
425 | * @pixel_clk: desired pixel clock frequency in Hz. | 429 | * @pixel_clk: desired pixel clock frequency in Hz. |
426 | * @width: width of panel in pixels. | 430 | * @width: width of panel in pixels. |
427 | * @height: height of panel in pixels. | 431 | * @height: height of panel in pixels. |
428 | * @pixel_fmt: pixel format of buffer as FOURCC ASCII code. | ||
429 | * @h_start_width: number of pixel clocks between the HSYNC signal pulse | 432 | * @h_start_width: number of pixel clocks between the HSYNC signal pulse |
430 | * and the start of valid data. | 433 | * and the start of valid data. |
431 | * @h_sync_width: width of the HSYNC signal in units of pixel clocks. | 434 | * @h_sync_width: width of the HSYNC signal in units of pixel clocks. |
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel | |||
442 | static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, | 445 | static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, |
443 | uint32_t pixel_clk, | 446 | uint32_t pixel_clk, |
444 | uint16_t width, uint16_t height, | 447 | uint16_t width, uint16_t height, |
445 | enum pixel_fmt pixel_fmt, | ||
446 | uint16_t h_start_width, uint16_t h_sync_width, | 448 | uint16_t h_start_width, uint16_t h_sync_width, |
447 | uint16_t h_end_width, uint16_t v_start_width, | 449 | uint16_t h_end_width, uint16_t v_start_width, |
448 | uint16_t v_sync_width, uint16_t v_end_width, | 450 | uint16_t v_sync_width, uint16_t v_end_width, |
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, | |||
453 | uint32_t old_conf; | 455 | uint32_t old_conf; |
454 | uint32_t div; | 456 | uint32_t div; |
455 | struct clk *ipu_clk; | 457 | struct clk *ipu_clk; |
458 | const struct di_mapping *map; | ||
456 | 459 | ||
457 | dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height); | 460 | dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height); |
458 | 461 | ||
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, | |||
540 | sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT; | 543 | sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT; |
541 | mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL); | 544 | mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL); |
542 | 545 | ||
543 | switch (pixel_fmt) { | 546 | map = &di_mappings[mx3fb->disp_data_fmt]; |
544 | case IPU_PIX_FMT_RGB24: | 547 | mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP); |
545 | mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP); | 548 | mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP); |
546 | mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP); | 549 | mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP); |
547 | mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP); | ||
548 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | ||
549 | ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC); | ||
550 | break; | ||
551 | case IPU_PIX_FMT_RGB666: | ||
552 | mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP); | ||
553 | mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP); | ||
554 | mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP); | ||
555 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | ||
556 | ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC); | ||
557 | break; | ||
558 | case IPU_PIX_FMT_BGR666: | ||
559 | mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP); | ||
560 | mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP); | ||
561 | mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP); | ||
562 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | ||
563 | ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC); | ||
564 | break; | ||
565 | default: | ||
566 | mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP); | ||
567 | mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP); | ||
568 | mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP); | ||
569 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | ||
570 | ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC); | ||
571 | break; | ||
572 | } | ||
573 | 550 | ||
574 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); | 551 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); |
575 | 552 | ||
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock) | |||
780 | if (sdc_init_panel(mx3fb, mode, | 757 | if (sdc_init_panel(mx3fb, mode, |
781 | (PICOS2KHZ(fbi->var.pixclock)) * 1000UL, | 758 | (PICOS2KHZ(fbi->var.pixclock)) * 1000UL, |
782 | fbi->var.xres, fbi->var.yres, | 759 | fbi->var.xres, fbi->var.yres, |
783 | (fbi->var.sync & FB_SYNC_SWAP_RGB) ? | ||
784 | IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666, | ||
785 | fbi->var.left_margin, | 760 | fbi->var.left_margin, |
786 | fbi->var.hsync_len, | 761 | fbi->var.hsync_len, |
787 | fbi->var.right_margin + | 762 | fbi->var.right_margin + |
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, | |||
1117 | async_tx_ack(mx3_fbi->txd); | 1092 | async_tx_ack(mx3_fbi->txd); |
1118 | 1093 | ||
1119 | txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + | 1094 | txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + |
1120 | mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); | 1095 | mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
1121 | if (!txd) { | 1096 | if (!txd) { |
1122 | dev_err(fbi->device, | 1097 | dev_err(fbi->device, |
1123 | "Error preparing a DMA transaction descriptor.\n"); | 1098 | "Error preparing a DMA transaction descriptor.\n"); |
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) | |||
1349 | const struct fb_videomode *mode; | 1324 | const struct fb_videomode *mode; |
1350 | int ret, num_modes; | 1325 | int ret, num_modes; |
1351 | 1326 | ||
1327 | if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) { | ||
1328 | dev_err(dev, "Illegal display data format %d\n", | ||
1329 | mx3fb_pdata->disp_data_fmt); | ||
1330 | return -EINVAL; | ||
1331 | } | ||
1332 | |||
1352 | ichan->client = mx3fb; | 1333 | ichan->client = mx3fb; |
1353 | irq = ichan->eof_irq; | 1334 | irq = ichan->eof_irq; |
1354 | 1335 | ||
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) | |||
1402 | mx3fbi->mx3fb = mx3fb; | 1383 | mx3fbi->mx3fb = mx3fb; |
1403 | mx3fbi->blank = FB_BLANK_NORMAL; | 1384 | mx3fbi->blank = FB_BLANK_NORMAL; |
1404 | 1385 | ||
1386 | mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt; | ||
1387 | |||
1405 | init_completion(&mx3fbi->flip_cmpl); | 1388 | init_completion(&mx3fbi->flip_cmpl); |
1406 | disable_irq(ichan->eof_irq); | 1389 | disable_irq(ichan->eof_irq); |
1407 | dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); | 1390 | dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); |
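
Beyond the direction rename, the mx3fb hunk replaces the flat di_mappings word array and its per-pixel-format switch with a three-register table indexed by a platform-supplied enum disp_data_mapping, validated in init_fb_chan(). Board code would then select the display wiring roughly like this (a sketch; the field name comes from the diff, the surrounding platform data is assumed):

	static struct mx3fb_platform_data mx3fb_pdata = {
		/* ... name, video modes, etc. ... */
		.disp_data_fmt = IPU_DISP_DATA_MAPPING_RGB666,
	};
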
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h index 9eabffbc4e50..033f6aa670de 100644 --- a/include/linux/amba/pl08x.h +++ b/include/linux/amba/pl08x.h | |||
@@ -134,7 +134,7 @@ struct pl08x_txd { | |||
134 | struct dma_async_tx_descriptor tx; | 134 | struct dma_async_tx_descriptor tx; |
135 | struct list_head node; | 135 | struct list_head node; |
136 | struct list_head dsg_list; | 136 | struct list_head dsg_list; |
137 | enum dma_data_direction direction; | 137 | enum dma_transfer_direction direction; |
138 | dma_addr_t llis_bus; | 138 | dma_addr_t llis_bus; |
139 | struct pl08x_lli *llis_va; | 139 | struct pl08x_lli *llis_va; |
140 | /* Default cctl value for LLIs */ | 140 | /* Default cctl value for LLIs */ |
@@ -197,7 +197,7 @@ struct pl08x_dma_chan { | |||
197 | dma_addr_t dst_addr; | 197 | dma_addr_t dst_addr; |
198 | u32 src_cctl; | 198 | u32 src_cctl; |
199 | u32 dst_cctl; | 199 | u32 dst_cctl; |
200 | enum dma_data_direction runtime_direction; | 200 | enum dma_transfer_direction runtime_direction; |
201 | dma_cookie_t lc; | 201 | dma_cookie_t lc; |
202 | struct list_head pend_list; | 202 | struct list_head pend_list; |
203 | struct pl08x_txd *at; | 203 | struct pl08x_txd *at; |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 75f53f874b24..679b349d9b66 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -23,7 +23,6 @@ | |||
23 | 23 | ||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/uio.h> | 25 | #include <linux/uio.h> |
26 | #include <linux/dma-direction.h> | ||
27 | #include <linux/scatterlist.h> | 26 | #include <linux/scatterlist.h> |
28 | #include <linux/bitmap.h> | 27 | #include <linux/bitmap.h> |
29 | #include <asm/page.h> | 28 | #include <asm/page.h> |
@@ -72,11 +71,93 @@ enum dma_transaction_type { | |||
72 | DMA_ASYNC_TX, | 71 | DMA_ASYNC_TX, |
73 | DMA_SLAVE, | 72 | DMA_SLAVE, |
74 | DMA_CYCLIC, | 73 | DMA_CYCLIC, |
74 | DMA_INTERLEAVE, | ||
75 | /* last transaction type for creation of the capabilities mask */ | ||
76 | DMA_TX_TYPE_END, | ||
75 | }; | 77 | }; |
76 | 78 | ||
77 | /* last transaction type for creation of the capabilities mask */ | 79 | /** |
78 | #define DMA_TX_TYPE_END (DMA_CYCLIC + 1) | 80 | * enum dma_transfer_direction - dma transfer mode and direction indicator |
81 | * @DMA_MEM_TO_MEM: Async/Memcpy mode | ||
82 | * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device | ||
83 | * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory | ||
84 | * @DMA_DEV_TO_DEV: Slave mode & From Device to Device | ||
85 | */ | ||
86 | enum dma_transfer_direction { | ||
87 | DMA_MEM_TO_MEM, | ||
88 | DMA_MEM_TO_DEV, | ||
89 | DMA_DEV_TO_MEM, | ||
90 | DMA_DEV_TO_DEV, | ||
91 | DMA_TRANS_NONE, | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * Interleaved Transfer Request | ||
96 | * ---------------------------- | ||
97 | * A chunk is a collection of contiguous bytes to be transferred. | ||
98 | * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG). | ||
99 | * ICGs may or may not change between chunks. | ||
100 | * A FRAME is the smallest series of contiguous {chunk,icg} pairs | ||
101 | * that, when repeated an integral number of times, specifies the transfer. | ||
102 | * A transfer template is a specification of a Frame, the number of times | ||
103 | * it is to be repeated and other per-transfer attributes. | ||
104 | * | ||
105 | * In practice, a client driver would have a template ready for each | ||
106 | * type of transfer it is going to need during its lifetime and | ||
107 | * set only 'src_start' and 'dst_start' before submitting the requests. | ||
108 | * | ||
109 | * | ||
110 | * | Frame-1 | Frame-2 | ~ | Frame-'numf' | | ||
111 | * |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...| | ||
112 | * | ||
113 | * == Chunk size | ||
114 | * ... ICG | ||
115 | */ | ||
116 | |||
117 | /** | ||
118 | * struct data_chunk - Element of scatter-gather list that makes a frame. | ||
119 | * @size: Number of bytes to read from source. | ||
120 | * size_dst := fn(op, size_src), so it doesn't mean much for the destination. | ||
121 | * @icg: Number of bytes to jump after last src/dst address of this | ||
122 | * chunk and before first src/dst address for next chunk. | ||
123 | * Ignored for dst (assumed 0) if dst_inc is true and dst_sgl is false. | ||
124 | * Ignored for src (assumed 0) if src_inc is true and src_sgl is false. | ||
125 | */ | ||
126 | struct data_chunk { | ||
127 | size_t size; | ||
128 | size_t icg; | ||
129 | }; | ||
79 | 130 | ||
131 | /** | ||
132 | * struct dma_interleaved_template - Template to convey to the DMAC the transfer pattern | ||
133 | * and attributes. | ||
134 | * @src_start: Bus address of source for the first chunk. | ||
135 | * @dst_start: Bus address of destination for the first chunk. | ||
136 | * @dir: Specifies the type of Source and Destination. | ||
137 | * @src_inc: If the source address increments after reading from it. | ||
138 | * @dst_inc: If the destination address increments after writing to it. | ||
139 | * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read). | ||
140 | * Otherwise, source is read contiguously (icg ignored). | ||
141 | * Ignored if src_inc is false. | ||
142 | * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write). | ||
143 | * Otherwise, destination is filled contiguously (icg ignored). | ||
144 | * Ignored if dst_inc is false. | ||
145 | * @numf: Number of frames in this template. | ||
146 | * @frame_size: Number of chunks in a frame i.e, size of sgl[]. | ||
147 | * @sgl: Array of {chunk,icg} pairs that make up a frame. | ||
148 | */ | ||
149 | struct dma_interleaved_template { | ||
150 | dma_addr_t src_start; | ||
151 | dma_addr_t dst_start; | ||
152 | enum dma_transfer_direction dir; | ||
153 | bool src_inc; | ||
154 | bool dst_inc; | ||
155 | bool src_sgl; | ||
156 | bool dst_sgl; | ||
157 | size_t numf; | ||
158 | size_t frame_size; | ||
159 | struct data_chunk sgl[0]; | ||
160 | }; | ||
80 | 161 | ||
81 | /** | 162 | /** |
82 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, | 163 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
@@ -269,7 +350,7 @@ enum dma_slave_buswidth { | |||
269 | * struct, if applicable. | 350 | * struct, if applicable. |
270 | */ | 351 | */ |
271 | struct dma_slave_config { | 352 | struct dma_slave_config { |
272 | enum dma_data_direction direction; | 353 | enum dma_transfer_direction direction; |
273 | dma_addr_t src_addr; | 354 | dma_addr_t src_addr; |
274 | dma_addr_t dst_addr; | 355 | dma_addr_t dst_addr; |
275 | enum dma_slave_buswidth src_addr_width; | 356 | enum dma_slave_buswidth src_addr_width; |
@@ -433,6 +514,7 @@ struct dma_tx_state { | |||
433 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. | 514 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. |
434 | * The function takes a buffer of size buf_len. The callback function will | 515 | * The function takes a buffer of size buf_len. The callback function will |
435 | * be called after period_len bytes have been transferred. | 516 | * be called after period_len bytes have been transferred. |
517 | * @device_prep_interleaved_dma: Prepare a transfer expressed in the generic interleaved form. | ||
436 | * @device_control: manipulate all pending operations on a channel, returns | 518 | * @device_control: manipulate all pending operations on a channel, returns |
437 | * zero or error code | 519 | * zero or error code |
438 | * @device_tx_status: poll for transaction completion, the optional | 520 | * @device_tx_status: poll for transaction completion, the optional |
@@ -492,11 +574,14 @@ struct dma_device { | |||
492 | 574 | ||
493 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | 575 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( |
494 | struct dma_chan *chan, struct scatterlist *sgl, | 576 | struct dma_chan *chan, struct scatterlist *sgl, |
495 | unsigned int sg_len, enum dma_data_direction direction, | 577 | unsigned int sg_len, enum dma_transfer_direction direction, |
496 | unsigned long flags); | 578 | unsigned long flags); |
497 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( | 579 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( |
498 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 580 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
499 | size_t period_len, enum dma_data_direction direction); | 581 | size_t period_len, enum dma_transfer_direction direction); |
582 | struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( | ||
583 | struct dma_chan *chan, struct dma_interleaved_template *xt, | ||
584 | unsigned long flags); | ||
500 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 585 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
501 | unsigned long arg); | 586 | unsigned long arg); |
502 | 587 | ||
@@ -522,7 +607,7 @@ static inline int dmaengine_slave_config(struct dma_chan *chan, | |||
522 | 607 | ||
523 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( | 608 | static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( |
524 | struct dma_chan *chan, void *buf, size_t len, | 609 | struct dma_chan *chan, void *buf, size_t len, |
525 | enum dma_data_direction dir, unsigned long flags) | 610 | enum dma_transfer_direction dir, unsigned long flags) |
526 | { | 611 | { |
527 | struct scatterlist sg; | 612 | struct scatterlist sg; |
528 | sg_init_one(&sg, buf, len); | 613 | sg_init_one(&sg, buf, len); |
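
The new interleaved API is easiest to see with a concrete case. To push numf lines of width bytes each from a frame buffer whose lines sit stride bytes apart into a device FIFO, every frame is a single {chunk, icg} pair. A sketch of filling and submitting such a template (all names are local to the example; error handling trimmed):

	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *desc;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = buf_dma;		/* bus address of the first line */
	xt->dir = DMA_MEM_TO_DEV;		/* FIFO address known to the DMAC driver */
	xt->src_inc = true;			/* walk through memory... */
	xt->src_sgl = true;			/* ...honouring the ICG */
	xt->dst_inc = false;			/* fixed FIFO address */
	xt->dst_sgl = false;
	xt->numf = numf;			/* one frame per line */
	xt->frame_size = 1;			/* one chunk per frame */
	xt->sgl[0].size = width;		/* bytes to transfer per line */
	xt->sgl[0].icg = stride - width;	/* gap before the next line */

	desc = chan->device->device_prep_interleaved_dma(chan, xt,
			DMA_PREP_INTERRUPT);
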
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index 4bfe0a2f7d50..f2c64f92c4a0 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -127,7 +127,7 @@ struct dw_cyclic_desc { | |||
127 | 127 | ||
128 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | 128 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, |
129 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | 129 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, |
130 | enum dma_data_direction direction); | 130 | enum dma_transfer_direction direction); |
131 | void dw_dma_cyclic_free(struct dma_chan *chan); | 131 | void dw_dma_cyclic_free(struct dma_chan *chan); |
132 | int dw_dma_cyclic_start(struct dma_chan *chan); | 132 | int dw_dma_cyclic_start(struct dma_chan *chan); |
133 | void dw_dma_cyclic_stop(struct dma_chan *chan); | 133 | void dw_dma_cyclic_stop(struct dma_chan *chan); |
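
The dw_dmac cyclic API pairs prep/free with start/stop; with the new enum, a playback-style caller looks roughly like this (buffer geometry is a placeholder):

	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf_dma_addr, buf_len,
				   period_len, DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);
	dw_dma_cyclic_start(chan);
	/* ... stream runs until shutdown ... */
	dw_dma_cyclic_stop(chan);
	dw_dma_cyclic_free(chan);

The abdac.c hunk further below is exactly this pattern with the direction constant updated.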
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h new file mode 100644 index 000000000000..69b6dbf46b5e --- /dev/null +++ b/include/linux/mtd/gpmi-nand.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef __MACH_MXS_GPMI_NAND_H__ | ||
20 | #define __MACH_MXS_GPMI_NAND_H__ | ||
21 | |||
22 | /* The size of the resources is fixed. */ | ||
23 | #define GPMI_NAND_RES_SIZE 6 | ||
24 | |||
25 | /* Resource names for the GPMI NAND driver. */ | ||
26 | #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "GPMI NAND GPMI Registers" | ||
27 | #define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt" | ||
28 | #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "GPMI NAND BCH Registers" | ||
29 | #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "GPMI NAND BCH Interrupt" | ||
30 | #define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels" | ||
31 | #define GPMI_NAND_DMA_INTERRUPT_RES_NAME "GPMI NAND DMA Interrupt" | ||
32 | |||
33 | /** | ||
34 | * struct gpmi_nand_platform_data - GPMI NAND driver platform data. | ||
35 | * | ||
36 | * This structure communicates platform-specific information to the GPMI NAND | ||
37 | * driver that can't be expressed as resources. | ||
38 | * | ||
39 | * @platform_init: A pointer to a function the driver will call to | ||
40 | * initialize the platform (e.g., set up the pin mux). | ||
41 | * @min_prop_delay_in_ns: Minimum propagation delay of GPMI signals to and | ||
42 | * from the NAND Flash device, in nanoseconds. | ||
43 | * @max_prop_delay_in_ns: Maximum propagation delay of GPMI signals to and | ||
44 | * from the NAND Flash device, in nanoseconds. | ||
45 | * @max_chip_count: The maximum number of chips for which the driver | ||
46 | * should configure the hardware. This value most | ||
47 | * likely reflects the number of pins that are | ||
48 | * connected to a NAND Flash device. If this is | ||
49 | * greater than the SoC hardware can support, the | ||
50 | * driver will print a message and fail to initialize. | ||
51 | * @partitions: An optional pointer to an array of partition | ||
52 | * descriptions. | ||
53 | * @partition_count: The number of elements in the partitions array. | ||
54 | */ | ||
55 | struct gpmi_nand_platform_data { | ||
56 | /* SoC hardware information. */ | ||
57 | int (*platform_init)(void); | ||
58 | |||
59 | /* NAND Flash information. */ | ||
60 | unsigned int min_prop_delay_in_ns; | ||
61 | unsigned int max_prop_delay_in_ns; | ||
62 | unsigned int max_chip_count; | ||
63 | |||
64 | /* Medium information. */ | ||
65 | struct mtd_partition *partitions; | ||
66 | unsigned partition_count; | ||
67 | }; | ||
68 | #endif | ||
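
A hypothetical board file would fill this structure along the following lines; the init hook, delay bounds and partition table are illustrative values, not taken from any real board:

static int my_board_gpmi_init(void)
{
	/* hypothetical pin-mux setup for the GPMI pads */
	return 0;
}

static struct mtd_partition my_board_nand_parts[] = {
	{
		.name	= "boot",
		.offset	= 0,
		.size	= SZ_4M,
	}, {
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct gpmi_nand_platform_data my_board_gpmi_pdata = {
	.platform_init		= my_board_gpmi_init,
	.min_prop_delay_in_ns	= 5,	/* illustrative timing bounds */
	.max_prop_delay_in_ns	= 9,
	.max_chip_count		= 1,
	.partitions		= my_board_nand_parts,
	.partition_count	= ARRAY_SIZE(my_board_nand_parts),
};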
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index cb2dd118cc0f..8cd7fe59cf1a 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h | |||
@@ -30,7 +30,7 @@ struct sh_desc { | |||
30 | struct sh_dmae_regs hw; | 30 | struct sh_dmae_regs hw; |
31 | struct list_head node; | 31 | struct list_head node; |
32 | struct dma_async_tx_descriptor async_tx; | 32 | struct dma_async_tx_descriptor async_tx; |
33 | enum dma_data_direction direction; | 33 | enum dma_transfer_direction direction; |
34 | dma_cookie_t cookie; | 34 | dma_cookie_t cookie; |
35 | size_t partial; | 35 | size_t partial; |
36 | int chunks; | 36 | int chunks; |
@@ -48,6 +48,7 @@ struct sh_dmae_channel { | |||
48 | unsigned int offset; | 48 | unsigned int offset; |
49 | unsigned int dmars; | 49 | unsigned int dmars; |
50 | unsigned int dmars_bit; | 50 | unsigned int dmars_bit; |
51 | unsigned int chclr_offset; | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | struct sh_dmae_pdata { | 54 | struct sh_dmae_pdata { |
@@ -68,6 +69,7 @@ struct sh_dmae_pdata { | |||
68 | unsigned int dmaor_is_32bit:1; | 69 | unsigned int dmaor_is_32bit:1; |
69 | unsigned int needs_tend_set:1; | 70 | unsigned int needs_tend_set:1; |
70 | unsigned int no_dmars:1; | 71 | unsigned int no_dmars:1; |
72 | unsigned int chclr_present:1; | ||
71 | }; | 73 | }; |
72 | 74 | ||
73 | /* DMA register */ | 75 | /* DMA register */ |
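
The two additions travel together: chclr_present in the pdata advertises that the controller has per-channel clear registers, and chclr_offset in each channel description says where that register sits. A hypothetical fragment (field values are placeholders):

static struct sh_dmae_channel my_dmae_channels[] = {
	{
		.offset		= 0x00,
		.dmars		= 0x00,
		.dmars_bit	= 0,
		.chclr_offset	= 0x220,	/* placeholder offset */
	},
};

static struct sh_dmae_pdata my_dmae_pdata = {
	/* ... existing fields elided ... */
	.chclr_present	= 1,
};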
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h new file mode 100644 index 000000000000..29d959333d81 --- /dev/null +++ b/include/linux/sirfsoc_dma.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _SIRFSOC_DMA_H_ | ||
2 | #define _SIRFSOC_DMA_H_ | ||
3 | |||
4 | bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id); | ||
5 | |||
6 | #endif | ||
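
The exported filter plugs into the generic channel-request path; a client asking for a specific SiRFSoC channel would do roughly this (the id value is a placeholder, and how ids map to hardware channels is up to the dma driver):

	dma_cap_mask_t mask;
	struct dma_chan *chan;
	unsigned long ch_id = 12;	/* placeholder channel id */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
				   (void *)ch_id);
	if (!chan)
		return -ENODEV;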
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c index 6fd9391b3a6c..4fa1dbd8ee83 100644 --- a/sound/atmel/abdac.c +++ b/sound/atmel/abdac.c | |||
@@ -133,7 +133,7 @@ static int atmel_abdac_prepare_dma(struct atmel_abdac *dac, | |||
133 | period_len = frames_to_bytes(runtime, runtime->period_size); | 133 | period_len = frames_to_bytes(runtime, runtime->period_size); |
134 | 134 | ||
135 | cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len, | 135 | cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len, |
136 | period_len, DMA_TO_DEVICE); | 136 | period_len, DMA_MEM_TO_DEV); |
137 | if (IS_ERR(cdesc)) { | 137 | if (IS_ERR(cdesc)) { |
138 | dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n"); | 138 | dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n"); |
139 | return PTR_ERR(cdesc); | 139 | return PTR_ERR(cdesc); |
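
The remaining sound hunks repeat one mechanical substitution: DMA_TO_DEVICE becomes DMA_MEM_TO_DEV and DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM wherever a slave transfer direction is meant. The PCM drivers below all derive the direction from the stream type, a pattern that could be captured by a helper like this sketch (the function is hypothetical, not part of the patch):

static inline enum dma_transfer_direction
snd_pcm_substream_to_dma_dir(const struct snd_pcm_substream *s)
{
	/* playback writes memory out to the device; capture reads it in */
	return s->stream == SNDRV_PCM_STREAM_PLAYBACK ?
			DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}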
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c index 73516f69ac7c..61dade698358 100644 --- a/sound/atmel/ac97c.c +++ b/sound/atmel/ac97c.c | |||
@@ -102,7 +102,7 @@ static void atmel_ac97c_dma_capture_period_done(void *arg) | |||
102 | 102 | ||
103 | static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip, | 103 | static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip, |
104 | struct snd_pcm_substream *substream, | 104 | struct snd_pcm_substream *substream, |
105 | enum dma_data_direction direction) | 105 | enum dma_transfer_direction direction) |
106 | { | 106 | { |
107 | struct dma_chan *chan; | 107 | struct dma_chan *chan; |
108 | struct dw_cyclic_desc *cdesc; | 108 | struct dw_cyclic_desc *cdesc; |
@@ -118,7 +118,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip, | |||
118 | return -EINVAL; | 118 | return -EINVAL; |
119 | } | 119 | } |
120 | 120 | ||
121 | if (direction == DMA_TO_DEVICE) | 121 | if (direction == DMA_MEM_TO_DEV) |
122 | chan = chip->dma.tx_chan; | 122 | chan = chip->dma.tx_chan; |
123 | else | 123 | else |
124 | chan = chip->dma.rx_chan; | 124 | chan = chip->dma.rx_chan; |
@@ -133,7 +133,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip, | |||
133 | return PTR_ERR(cdesc); | 133 | return PTR_ERR(cdesc); |
134 | } | 134 | } |
135 | 135 | ||
136 | if (direction == DMA_TO_DEVICE) { | 136 | if (direction == DMA_MEM_TO_DEV) { |
137 | cdesc->period_callback = atmel_ac97c_dma_playback_period_done; | 137 | cdesc->period_callback = atmel_ac97c_dma_playback_period_done; |
138 | set_bit(DMA_TX_READY, &chip->flags); | 138 | set_bit(DMA_TX_READY, &chip->flags); |
139 | } else { | 139 | } else { |
@@ -393,7 +393,7 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream) | |||
393 | if (cpu_is_at32ap7000()) { | 393 | if (cpu_is_at32ap7000()) { |
394 | if (!test_bit(DMA_TX_READY, &chip->flags)) | 394 | if (!test_bit(DMA_TX_READY, &chip->flags)) |
395 | retval = atmel_ac97c_prepare_dma(chip, substream, | 395 | retval = atmel_ac97c_prepare_dma(chip, substream, |
396 | DMA_TO_DEVICE); | 396 | DMA_MEM_TO_DEV); |
397 | } else { | 397 | } else { |
398 | /* Initialize and start the PDC */ | 398 | /* Initialize and start the PDC */ |
399 | writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR); | 399 | writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR); |
@@ -484,7 +484,7 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream) | |||
484 | if (cpu_is_at32ap7000()) { | 484 | if (cpu_is_at32ap7000()) { |
485 | if (!test_bit(DMA_RX_READY, &chip->flags)) | 485 | if (!test_bit(DMA_RX_READY, &chip->flags)) |
486 | retval = atmel_ac97c_prepare_dma(chip, substream, | 486 | retval = atmel_ac97c_prepare_dma(chip, substream, |
487 | DMA_FROM_DEVICE); | 487 | DMA_DEV_TO_MEM); |
488 | } else { | 488 | } else { |
489 | /* Initialize and start the PDC */ | 489 | /* Initialize and start the PDC */ |
490 | writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR); | 490 | writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR); |
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c index 3fc96130d1a6..de8390449873 100644 --- a/sound/soc/ep93xx/ep93xx-pcm.c +++ b/sound/soc/ep93xx/ep93xx-pcm.c | |||
@@ -113,9 +113,9 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream) | |||
113 | rtd->dma_data.name = dma_params->name; | 113 | rtd->dma_data.name = dma_params->name; |
114 | 114 | ||
115 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) | 115 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
116 | rtd->dma_data.direction = DMA_TO_DEVICE; | 116 | rtd->dma_data.direction = DMA_MEM_TO_DEV; |
117 | else | 117 | else |
118 | rtd->dma_data.direction = DMA_FROM_DEVICE; | 118 | rtd->dma_data.direction = DMA_DEV_TO_MEM; |
119 | 119 | ||
120 | rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter, | 120 | rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter, |
121 | &rtd->dma_data); | 121 | &rtd->dma_data); |
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c index 1cf2fe889f6a..aecdba9f65a1 100644 --- a/sound/soc/imx/imx-pcm-dma-mx2.c +++ b/sound/soc/imx/imx-pcm-dma-mx2.c | |||
@@ -107,12 +107,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream, | |||
107 | } | 107 | } |
108 | 108 | ||
109 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 109 | if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
110 | slave_config.direction = DMA_TO_DEVICE; | 110 | slave_config.direction = DMA_MEM_TO_DEV; |
111 | slave_config.dst_addr = dma_params->dma_addr; | 111 | slave_config.dst_addr = dma_params->dma_addr; |
112 | slave_config.dst_addr_width = buswidth; | 112 | slave_config.dst_addr_width = buswidth; |
113 | slave_config.dst_maxburst = dma_params->burstsize; | 113 | slave_config.dst_maxburst = dma_params->burstsize; |
114 | } else { | 114 | } else { |
115 | slave_config.direction = DMA_FROM_DEVICE; | 115 | slave_config.direction = DMA_DEV_TO_MEM; |
116 | slave_config.src_addr = dma_params->dma_addr; | 116 | slave_config.src_addr = dma_params->dma_addr; |
117 | slave_config.src_addr_width = buswidth; | 117 | slave_config.src_addr_width = buswidth; |
118 | slave_config.src_maxburst = dma_params->burstsize; | 118 | slave_config.src_maxburst = dma_params->burstsize; |
@@ -159,7 +159,7 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
159 | iprtd->period_bytes * iprtd->periods, | 159 | iprtd->period_bytes * iprtd->periods, |
160 | iprtd->period_bytes, | 160 | iprtd->period_bytes, |
161 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 161 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
162 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | 162 | DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); |
163 | if (!iprtd->desc) { | 163 | if (!iprtd->desc) { |
164 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); | 164 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); |
165 | return -EINVAL; | 165 | return -EINVAL; |
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c index 0e12f4e0a76d..105f42a394df 100644 --- a/sound/soc/mxs/mxs-pcm.c +++ b/sound/soc/mxs/mxs-pcm.c | |||
@@ -136,7 +136,7 @@ static int snd_mxs_pcm_hw_params(struct snd_pcm_substream *substream, | |||
136 | iprtd->period_bytes * iprtd->periods, | 136 | iprtd->period_bytes * iprtd->periods, |
137 | iprtd->period_bytes, | 137 | iprtd->period_bytes, |
138 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 138 | substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
139 | DMA_TO_DEVICE : DMA_FROM_DEVICE); | 139 | DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); |
140 | if (!iprtd->desc) { | 140 | if (!iprtd->desc) { |
141 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); | 141 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); |
142 | return -EINVAL; | 142 | return -EINVAL; |
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c index 427ae0d9817b..e4ba17ce6b32 100644 --- a/sound/soc/samsung/dma.c +++ b/sound/soc/samsung/dma.c | |||
@@ -86,7 +86,7 @@ static void dma_enqueue(struct snd_pcm_substream *substream) | |||
86 | dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE); | 86 | dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE); |
87 | dma_info.direction = | 87 | dma_info.direction = |
88 | (substream->stream == SNDRV_PCM_STREAM_PLAYBACK | 88 | (substream->stream == SNDRV_PCM_STREAM_PLAYBACK |
89 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 89 | ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); |
90 | dma_info.fp = audio_buffdone; | 90 | dma_info.fp = audio_buffdone; |
91 | dma_info.fp_param = substream; | 91 | dma_info.fp_param = substream; |
92 | dma_info.period = prtd->dma_period; | 92 | dma_info.period = prtd->dma_period; |
@@ -171,7 +171,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream, | |||
171 | dma_info.client = prtd->params->client; | 171 | dma_info.client = prtd->params->client; |
172 | dma_info.direction = | 172 | dma_info.direction = |
173 | (substream->stream == SNDRV_PCM_STREAM_PLAYBACK | 173 | (substream->stream == SNDRV_PCM_STREAM_PLAYBACK |
174 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 174 | ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); |
175 | dma_info.width = prtd->params->dma_size; | 175 | dma_info.width = prtd->params->dma_size; |
176 | dma_info.fifo = prtd->params->dma_addr; | 176 | dma_info.fifo = prtd->params->dma_addr; |
177 | prtd->params->ch = prtd->params->ops->request( | 177 | prtd->params->ch = prtd->params->ops->request( |
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c index f8f681690a71..0193e595d415 100644 --- a/sound/soc/sh/siu_pcm.c +++ b/sound/soc/sh/siu_pcm.c | |||
@@ -131,7 +131,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info, | |||
131 | sg_dma_address(&sg) = buff; | 131 | sg_dma_address(&sg) = buff; |
132 | 132 | ||
133 | desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, | 133 | desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, |
134 | &sg, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 134 | &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
135 | if (!desc) { | 135 | if (!desc) { |
136 | dev_err(dev, "Failed to allocate a dma descriptor\n"); | 136 | dev_err(dev, "Failed to allocate a dma descriptor\n"); |
137 | return -ENOMEM; | 137 | return -ENOMEM; |
@@ -181,7 +181,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info, | |||
181 | sg_dma_address(&sg) = buff; | 181 | sg_dma_address(&sg) = buff; |
182 | 182 | ||
183 | desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, | 183 | desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan, |
184 | &sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 184 | &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
185 | if (!desc) { | 185 | if (!desc) { |
186 | dev_err(dev, "Failed to allocate dma descriptor\n"); | 186 | dev_err(dev, "Failed to allocate dma descriptor\n"); |
187 | return -ENOMEM; | 187 | return -ENOMEM; |
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c index 93931def0dce..21554611557c 100644 --- a/sound/soc/txx9/txx9aclc.c +++ b/sound/soc/txx9/txx9aclc.c | |||
@@ -134,7 +134,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr) | |||
134 | sg_dma_address(&sg) = buf_dma_addr; | 134 | sg_dma_address(&sg) = buf_dma_addr; |
135 | desc = chan->device->device_prep_slave_sg(chan, &sg, 1, | 135 | desc = chan->device->device_prep_slave_sg(chan, &sg, 1, |
136 | dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 136 | dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
137 | DMA_TO_DEVICE : DMA_FROM_DEVICE, | 137 | DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, |
138 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 138 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
139 | if (!desc) { | 139 | if (!desc) { |
140 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); | 140 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); |