 25 files changed, 5716 insertions(+), 1159 deletions(-)
diff --git a/arch/arm/mach-imx/include/mach/dma-v1.h b/arch/arm/mach-imx/include/mach/dma-v1.h
index 287431cc13e5..ac6fd713828a 100644
--- a/arch/arm/mach-imx/include/mach/dma-v1.h
+++ b/arch/arm/mach-imx/include/mach/dma-v1.h
@@ -27,6 +27,8 @@
 
 #define imx_has_dma_v1()	(cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
 
+#include <mach/dma.h>
+
 #define IMX_DMA_CHANNELS  16
 
 #define DMA_MODE_READ		0
@@ -96,12 +98,6 @@ int imx_dma_request(int channel, const char *name);
 
 void imx_dma_free(int channel);
 
-enum imx_dma_prio {
-	DMA_PRIO_HIGH = 0,
-	DMA_PRIO_MEDIUM = 1,
-	DMA_PRIO_LOW = 2
-};
-
 int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
 
 #endif	/* __MACH_DMA_V1_H__ */
diff --git a/arch/arm/mach-ux500/devices-db8500.c b/arch/arm/mach-ux500/devices-db8500.c
index cbbe69a76a7c..4a94be3304b9 100644
--- a/arch/arm/mach-ux500/devices-db8500.c
+++ b/arch/arm/mach-ux500/devices-db8500.c
@@ -208,35 +208,25 @@ static struct resource dma40_resources[] = {
 
 /* Default configuration for physcial memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
-	.channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
-			 STEDMA40_LOW_PRIORITY_CHANNEL |
-			 STEDMA40_PCHAN_BASIC_MODE),
+	.mode = STEDMA40_MODE_PHYSICAL,
 	.dir = STEDMA40_MEM_TO_MEM,
 
-	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_PHY_1,
 	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 
-	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
 	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };
 /* Default configuration for logical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_log = {
-	.channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
-			 STEDMA40_LOW_PRIORITY_CHANNEL |
-			 STEDMA40_LCHAN_SRC_LOG_DST_LOG |
-			 STEDMA40_NO_TIM_FOR_LINK),
 	.dir = STEDMA40_MEM_TO_MEM,
 
-	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_LOG_1,
 	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 
-	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
 	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
@@ -269,7 +259,6 @@ static struct stedma40_platform_data dma40_plat_data = {
 	.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
 	.memcpy_conf_phy = &dma40_memcpy_conf_phy,
 	.memcpy_conf_log = &dma40_memcpy_conf_log,
-	.llis_per_log = 8,
 	.disabled_channels = {-1},
 };
 
diff --git a/arch/arm/plat-mxc/include/mach/dma.h b/arch/arm/plat-mxc/include/mach/dma.h
new file mode 100644
index 000000000000..ef7751546f5f
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/dma.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+	IMX_DMATYPE_SSI,	/* MCU domain SSI */
+	IMX_DMATYPE_SSI_SP,	/* Shared SSI */
+	IMX_DMATYPE_MMC,	/* MMC */
+	IMX_DMATYPE_SDHC,	/* SDHC */
+	IMX_DMATYPE_UART,	/* MCU domain UART */
+	IMX_DMATYPE_UART_SP,	/* Shared UART */
+	IMX_DMATYPE_FIRI,	/* FIRI */
+	IMX_DMATYPE_CSPI,	/* MCU domain CSPI */
+	IMX_DMATYPE_CSPI_SP,	/* Shared CSPI */
+	IMX_DMATYPE_SIM,	/* SIM */
+	IMX_DMATYPE_ATA,	/* ATA */
+	IMX_DMATYPE_CCM,	/* CCM */
+	IMX_DMATYPE_EXT,	/* External peripheral */
+	IMX_DMATYPE_MSHC,	/* Memory Stick Host Controller */
+	IMX_DMATYPE_MSHC_SP,	/* Shared Memory Stick Host Controller */
+	IMX_DMATYPE_DSP,	/* DSP */
+	IMX_DMATYPE_MEMORY,	/* Memory */
+	IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+	IMX_DMATYPE_SPDIF,	/* SPDIF */
+	IMX_DMATYPE_IPU_MEMORY,	/* IPU Memory */
+	IMX_DMATYPE_ASRC,	/* ASRC */
+	IMX_DMATYPE_ESAI,	/* ESAI */
+};
+
+enum imx_dma_prio {
+	DMA_PRIO_HIGH = 0,
+	DMA_PRIO_MEDIUM = 1,
+	DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+	int dma_request; /* DMA request line */
+	enum sdma_peripheral_type peripheral_type;
+	int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
+		!strcmp(dev_name(chan->device->dev), "imx-dma");
+}
+
+#endif
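
Note: the two inline helpers at the bottom of this new header are what client drivers use to tell IPU channels apart from general-purpose SDMA/i.MX DMA channels when filtering dmaengine channels. A minimal, hypothetical client sketch follows; the request line, peripheral type and priority are illustrative placeholders, not values from this series:

/* Sketch of a dmaengine client: grab a general-purpose i.MX channel
 * and hand the driver an imx_dma_data through chan->private. */
#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool imx_client_filter(struct dma_chan *chan, void *param)
{
	if (!imx_dma_is_general_purpose(chan))
		return false;
	chan->private = param;	/* read back by the DMA driver */
	return true;
}

static struct dma_chan *imx_client_request_channel(void)
{
	static struct imx_dma_data data = {
		.dma_request = 1,			/* placeholder request line */
		.peripheral_type = IMX_DMATYPE_SSI,	/* placeholder type */
		.priority = DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, imx_client_filter, &data);
}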
diff --git a/arch/arm/plat-mxc/include/mach/sdma.h b/arch/arm/plat-mxc/include/mach/sdma.h
new file mode 100644
index 000000000000..9be112227ac4
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/sdma.h
@@ -0,0 +1,17 @@
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @sdma_version	The version of this SDMA engine
+ * @cpu_name		used to generate the firmware name
+ * @to_version		CPU Tape out version
+ */
+struct sdma_platform_data {
+	int sdma_version;
+	char *cpu_name;
+	int to_version;
+};
+
+#endif /* __MACH_MXC_SDMA_H__ */
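
Note: a board file would hand this structure to the SDMA driver as ordinary platform data. A hypothetical sketch (all values are placeholders; the kernel-doc above implies the driver derives its firmware file name from cpu_name and to_version):

static struct sdma_platform_data imx51_sdma_pdata = {
	.sdma_version	= 2,		/* placeholder engine version */
	.cpu_name	= "imx51",	/* feeds the firmware name */
	.to_version	= 3,		/* placeholder tape-out revision */
};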
diff --git a/arch/arm/plat-nomadik/include/plat/ste_dma40.h b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
index 93a812672d9a..74b62f10d07f 100644
--- a/arch/arm/plat-nomadik/include/plat/ste_dma40.h
+++ b/arch/arm/plat-nomadik/include/plat/ste_dma40.h
@@ -1,10 +1,8 @@
 /*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
 
 
@@ -19,37 +17,20 @@
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define	STEDMA40_DEV_SRC_MEMORY (-1)
 
-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
+enum stedma40_mode {
+	STEDMA40_MODE_LOGICAL = 0,
+	STEDMA40_MODE_PHYSICAL,
+	STEDMA40_MODE_OPERATION,
+};
 
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode  */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode_opt {
+	STEDMA40_PCHAN_BASIC_MODE = 0,
+	STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+	STEDMA40_PCHAN_MODULO_MODE,
+	STEDMA40_PCHAN_DOUBLE_DST_MODE,
+	STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+	STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
 
 #define STEDMA40_ESIZE_8_BIT  0x0
 #define STEDMA40_ESIZE_16_BIT 0x1
@@ -72,16 +53,14 @@
 #define STEDMA40_PSIZE_LOG_8  STEDMA40_PSIZE_PHY_8
 #define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
 
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
 enum stedma40_flow_ctrl {
 	STEDMA40_NO_FLOW_CTRL,
 	STEDMA40_FLOW_CTRL,
 };
 
-enum stedma40_endianess {
-	STEDMA40_LITTLE_ENDIAN,
-	STEDMA40_BIG_ENDIAN
-};
-
 enum stedma40_periph_data_width {
 	STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
 	STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
@@ -89,15 +68,8 @@ enum stedma40_periph_data_width {
 	STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
 };
 
-struct stedma40_half_channel_info {
-	enum stedma40_endianess endianess;
-	enum stedma40_periph_data_width data_width;
-	int psize;
-	enum stedma40_flow_ctrl flow_ctrl;
-};
-
 enum stedma40_xfer_dir {
-	STEDMA40_MEM_TO_MEM,
+	STEDMA40_MEM_TO_MEM = 1,
 	STEDMA40_MEM_TO_PERIPH,
 	STEDMA40_PERIPH_TO_MEM,
 	STEDMA40_PERIPH_TO_PERIPH
@@ -105,18 +77,31 @@ enum stedma40_xfer_dir {
 
 
 /**
+ * struct stedma40_chan_cfg - dst/src channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @p_size: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+	bool big_endian;
+	enum stedma40_periph_data_width data_width;
+	int psize;
+	enum stedma40_flow_ctrl flow_ctrl;
+};
+
+/**
  * struct stedma40_chan_cfg - Structure to be filled by client drivers.
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
  * @dst_dev_type: Dst device type
  * @src_info: Parameters for dst half channel
  * @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- * Only called if device is set. size of bytes to transfer
- * (in case of multiple element transfer size is size of the first element).
  *
  *
  * This structure has to be filled by the client drivers.
@@ -125,15 +110,13 @@ enum stedma40_xfer_dir {
  */
 struct stedma40_chan_cfg {
 	enum stedma40_xfer_dir dir;
-	unsigned int channel_type;
+	bool high_priority;
+	enum stedma40_mode mode;
+	enum stedma40_mode_opt mode_opt;
 	int src_dev_type;
 	int dst_dev_type;
 	struct stedma40_half_channel_info src_info;
 	struct stedma40_half_channel_info dst_info;
-	void *pre_transfer_data;
-	int (*pre_transfer) (struct dma_chan *chan,
-			     void *data,
-			     int size);
 };
 
 /**
@@ -146,7 +129,6 @@ struct stedma40_chan_cfg {
  * @memcpy_len: length of memcpy
  * @memcpy_conf_phy: default configuration of physical channel memcpy
  * @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
  * @disabled_channels: A vector, ending with -1, that marks physical channels
  *	that are for different reasons not available for the driver.
  */
@@ -158,23 +140,10 @@ struct stedma40_platform_data {
 	u32 memcpy_len;
 	struct stedma40_chan_cfg *memcpy_conf_phy;
 	struct stedma40_chan_cfg *memcpy_conf_log;
-	unsigned int llis_per_log;
-	int disabled_channels[8];
+	int disabled_channels[STEDMA40_MAX_PHYS];
 };
 
-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
-		       int src_psize,
-		       int dst_psize);
+#ifdef CONFIG_STE_DMA40
 
 /**
  * stedma40_filter() - Provides stedma40_chan_cfg to the
@@ -237,4 +206,21 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
 						    direction, flags);
 }
 
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+	return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+					    dma_addr_t addr,
+					    unsigned int size,
+					    enum dma_data_direction direction,
+					    unsigned long flags)
+{
+	return NULL;
+}
+#endif
+
 #endif
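
Note: with the channel_type bitmask gone, a stedma40 client now states priority, mode and mode option in separate typed fields. A hypothetical peripheral-to-memory configuration under the new layout (the source event line is a placeholder, not a value from this series):

static struct stedma40_chan_cfg example_rx_cfg = {
	.dir = STEDMA40_PERIPH_TO_MEM,
	.high_priority = false,
	.mode = STEDMA40_MODE_LOGICAL,
	.mode_opt = STEDMA40_LCHAN_SRC_LOG_DST_LOG,
	.src_dev_type = 13,	/* placeholder DMA event line */
	.dst_dev_type = STEDMA40_DEV_DST_MEMORY,
	.src_info = {
		.big_endian = false,
		.data_width = STEDMA40_BYTE_WIDTH,
		.psize = STEDMA40_PSIZE_LOG_1,
		.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
	},
	.dst_info = {
		.big_endian = false,
		.data_width = STEDMA40_BYTE_WIDTH,
		.psize = STEDMA40_PSIZE_LOG_1,
		.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
	},
};

A pointer to such a config would be passed as the filter parameter to dma_request_channel() together with stedma40_filter(), per the kernel-doc retained above.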
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h
deleted file mode 100644
index debc5ed96d6e..000000000000
--- a/arch/powerpc/include/asm/fsldma.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Freescale MPC83XX / MPC85XX DMA Controller
- *
- * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
-#define __ARCH_POWERPC_ASM_FSLDMA_H__
-
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-
-/*
- * Definitions for the Freescale DMA controller's DMA_SLAVE implemention
- *
- * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
- * transfers. An example usage would be an accelerated copy between two
- * scatterlists. Another example use would be an accelerated copy from
- * multiple non-contiguous device buffers into a single scatterlist.
- *
- * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
- * structure contains a list of hardware addresses that should be copied
- * to/from the scatterlist passed into device_prep_slave_sg(). The structure
- * also has some fields to enable hardware-specific features.
- */
-
-/**
- * struct fsl_dma_hw_addr
- * @entry: linked list entry
- * @address: the hardware address
- * @length: length to transfer
- *
- * Holds a single physical hardware address / length pair for use
- * with the DMAEngine DMA_SLAVE API.
- */
-struct fsl_dma_hw_addr {
-	struct list_head entry;
-
-	dma_addr_t address;
-	size_t length;
-};
-
-/**
- * struct fsl_dma_slave
- * @addresses: a linked list of struct fsl_dma_hw_addr structures
- * @request_count: value for DMA request count
- * @src_loop_size: setup and enable constant source-address DMA transfers
- * @dst_loop_size: setup and enable constant destination address DMA transfers
- * @external_start: enable externally started DMA transfers
- * @external_pause: enable externally paused DMA transfers
- *
- * Holds a list of address / length pairs for use with the DMAEngine
- * DMA_SLAVE API implementation for the Freescale DMA controller.
- */
-struct fsl_dma_slave {
-
-	/* List of hardware address/length pairs */
-	struct list_head addresses;
-
-	/* Support for extra controller features */
-	unsigned int request_count;
-	unsigned int src_loop_size;
-	unsigned int dst_loop_size;
-	bool external_start;
-	bool external_pause;
-};
-
-/**
- * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
- * @slave: the &struct fsl_dma_slave to add to
- * @address: the hardware address to add
- * @length: the length of bytes to transfer from @address
- *
- * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
- * success, -ERRNO otherwise.
- */
-static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
-				       dma_addr_t address, size_t length)
-{
-	struct fsl_dma_hw_addr *addr;
-
-	addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
-	if (!addr)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&addr->entry);
-	addr->address = address;
-	addr->length = length;
-
-	list_add_tail(&addr->entry, &slave->addresses);
-	return 0;
-}
-
-/**
- * fsl_dma_slave_free - free a struct fsl_dma_slave
- * @slave: the struct fsl_dma_slave to free
- *
- * Free a struct fsl_dma_slave and all associated address/length pairs
- */
-static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
-{
-	struct fsl_dma_hw_addr *addr, *tmp;
-
-	if (slave) {
-		list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
-			list_del(&addr->entry);
-			kfree(addr);
-		}
-
-		kfree(slave);
-	}
-}
-
-/**
- * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
- * @gfp: the flags to pass to kmalloc when allocating this structure
- *
- * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
- * struct fsl_dma_slave on success, or NULL on failure.
- */
-static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
-{
-	struct fsl_dma_slave *slave;
-
-	slave = kzalloc(sizeof(*slave), gfp);
-	if (!slave)
-		return NULL;
-
-	INIT_LIST_HEAD(&slave->addresses);
-	return slave;
-}
-
-#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig
index 5de2ed13b35d..1b11abbb5c91 100644
--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -24,19 +24,6 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_PQ
 	select ASYNC_XOR
 
-config ASYNC_RAID6_TEST
-	tristate "Self test for hardware accelerated raid6 recovery"
-	depends on ASYNC_RAID6_RECOV
-	select ASYNC_MEMCPY
-	---help---
-	  This is a one-shot self test that permutes through the
-	  recovery of all the possible two disk failure scenarios for a
-	  N-disk array. Recovery is performed with the asynchronous
-	  raid6 recovery routines, and will optionally use an offload
-	  engine if one is available.
-
-	  If unsure, say N.
-
 config ASYNC_TX_DISABLE_PQ_VAL_DMA
 	bool
 
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9520cf02edc8..79d1542f31c0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -46,15 +46,22 @@ config INTEL_MID_DMAC
 
 	  If unsure, say N.
 
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
+config AMBA_PL08X
+	bool "ARM PrimeCell PL080 or PL081 support"
+	depends on ARM_AMBA && EXPERIMENTAL
+	select DMA_ENGINE
+	help
+	  Platform has a PL08x DMAC device
+	  which can provide DMA engine support
+
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
-	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	select ASYNC_TX_DISABLE_PQ_VAL_DMA
 	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
@@ -69,6 +76,7 @@ config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
@@ -93,6 +101,7 @@ config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -109,6 +118,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
 
@@ -166,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the AMCC PPC440SPe RAID engines.
 
@@ -195,6 +206,22 @@ config PCH_DMA
 	help
 	  Enable support for the Topcliff PCH DMA engine.
 
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
+
 config DMA_ENGINE
 	bool
 
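
Note: for reference, a sketch of the .config fragment a board wanting both new i.MX engines built as modules would end up with (assuming the usual CONFIG_DMADEVICES menu gating in this file):

CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=m
CONFIG_IMX_DMA=m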
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 72bd70384d8a..a8a84f4587f2 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,7 +21,10 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c new file mode 100644 index 000000000000..b605cc9ac3a2 --- /dev/null +++ b/drivers/dma/amba-pl08x.c | |||
| @@ -0,0 +1,2167 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006 ARM Ltd. | ||
| 3 | * Copyright (c) 2010 ST-Ericsson SA | ||
| 4 | * | ||
| 5 | * Author: Peter Pearse <peter.pearse@arm.com> | ||
| 6 | * Author: Linus Walleij <linus.walleij@stericsson.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify it | ||
| 9 | * under the terms of the GNU General Public License as published by the Free | ||
| 10 | * Software Foundation; either version 2 of the License, or (at your option) | ||
| 11 | * any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
| 14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 16 | * more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License along with | ||
| 19 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
| 20 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
| 21 | * | ||
| 22 | * The full GNU General Public License is iin this distribution in the | ||
| 23 | * file called COPYING. | ||
| 24 | * | ||
| 25 | * Documentation: ARM DDI 0196G == PL080 | ||
| 26 | * Documentation: ARM DDI 0218E == PL081 | ||
| 27 | * | ||
| 28 | * PL080 & PL081 both have 16 sets of DMA signals that can be routed to | ||
| 29 | * any channel. | ||
| 30 | * | ||
| 31 | * The PL080 has 8 channels available for simultaneous use, and the PL081 | ||
| 32 | * has only two channels. So on these DMA controllers the number of channels | ||
| 33 | * and the number of incoming DMA signals are two totally different things. | ||
| 34 | * It is usually not possible to theoretically handle all physical signals, | ||
| 35 | * so a multiplexing scheme with possible denial of use is necessary. | ||
| 36 | * | ||
| 37 | * The PL080 has a dual bus master, PL081 has a single master. | ||
| 38 | * | ||
| 39 | * Memory to peripheral transfer may be visualized as | ||
| 40 | * Get data from memory to DMAC | ||
| 41 | * Until no data left | ||
| 42 | * On burst request from peripheral | ||
| 43 | * Destination burst from DMAC to peripheral | ||
| 44 | * Clear burst request | ||
| 45 | * Raise terminal count interrupt | ||
| 46 | * | ||
| 47 | * For peripherals with a FIFO: | ||
| 48 | * Source burst size == half the depth of the peripheral FIFO | ||
| 49 | * Destination burst size == the depth of the peripheral FIFO | ||
| 50 | * | ||
| 51 | * (Bursts are irrelevant for mem to mem transfers - there are no burst | ||
| 52 | * signals, the DMA controller will simply facilitate its AHB master.) | ||
| 53 | * | ||
| 54 | * ASSUMES default (little) endianness for DMA transfers | ||
| 55 | * | ||
| 56 | * Only DMAC flow control is implemented | ||
| 57 | * | ||
| 58 | * Global TODO: | ||
| 59 | * - Break out common code from arch/arm/mach-s3c64xx and share | ||
| 60 | */ | ||
| 61 | #include <linux/device.h> | ||
| 62 | #include <linux/init.h> | ||
| 63 | #include <linux/module.h> | ||
| 64 | #include <linux/pci.h> | ||
| 65 | #include <linux/interrupt.h> | ||
| 66 | #include <linux/slab.h> | ||
| 67 | #include <linux/dmapool.h> | ||
| 68 | #include <linux/amba/bus.h> | ||
| 69 | #include <linux/dmaengine.h> | ||
| 70 | #include <linux/amba/pl08x.h> | ||
| 71 | #include <linux/debugfs.h> | ||
| 72 | #include <linux/seq_file.h> | ||
| 73 | |||
| 74 | #include <asm/hardware/pl080.h> | ||
| 75 | #include <asm/dma.h> | ||
| 76 | #include <asm/mach/dma.h> | ||
| 77 | #include <asm/atomic.h> | ||
| 78 | #include <asm/processor.h> | ||
| 79 | #include <asm/cacheflush.h> | ||
| 80 | |||
| 81 | #define DRIVER_NAME "pl08xdmac" | ||
| 82 | |||
| 83 | /** | ||
| 84 | * struct vendor_data - vendor-specific config parameters | ||
| 85 | * for PL08x derivates | ||
| 86 | * @name: the name of this specific variant | ||
| 87 | * @channels: the number of channels available in this variant | ||
| 88 | * @dualmaster: whether this version supports dual AHB masters | ||
| 89 | * or not. | ||
| 90 | */ | ||
| 91 | struct vendor_data { | ||
| 92 | char *name; | ||
| 93 | u8 channels; | ||
| 94 | bool dualmaster; | ||
| 95 | }; | ||
| 96 | |||
| 97 | /* | ||
| 98 | * PL08X private data structures | ||
| 99 | * An LLI struct - see pl08x TRM | ||
| 100 | * Note that next uses bit[0] as a bus bit, | ||
| 101 | * start & end do not - their bus bit info | ||
| 102 | * is in cctl | ||
| 103 | */ | ||
| 104 | struct lli { | ||
| 105 | dma_addr_t src; | ||
| 106 | dma_addr_t dst; | ||
| 107 | dma_addr_t next; | ||
| 108 | u32 cctl; | ||
| 109 | }; | ||
| 110 | |||
| 111 | /** | ||
| 112 | * struct pl08x_driver_data - the local state holder for the PL08x | ||
| 113 | * @slave: slave engine for this instance | ||
| 114 | * @memcpy: memcpy engine for this instance | ||
| 115 | * @base: virtual memory base (remapped) for the PL08x | ||
| 116 | * @adev: the corresponding AMBA (PrimeCell) bus entry | ||
| 117 | * @vd: vendor data for this PL08x variant | ||
| 118 | * @pd: platform data passed in from the platform/machine | ||
| 119 | * @phy_chans: array of data for the physical channels | ||
| 120 | * @pool: a pool for the LLI descriptors | ||
| 121 | * @pool_ctr: counter of LLIs in the pool | ||
| 122 | * @lock: a spinlock for this struct | ||
| 123 | */ | ||
| 124 | struct pl08x_driver_data { | ||
| 125 | struct dma_device slave; | ||
| 126 | struct dma_device memcpy; | ||
| 127 | void __iomem *base; | ||
| 128 | struct amba_device *adev; | ||
| 129 | struct vendor_data *vd; | ||
| 130 | struct pl08x_platform_data *pd; | ||
| 131 | struct pl08x_phy_chan *phy_chans; | ||
| 132 | struct dma_pool *pool; | ||
| 133 | int pool_ctr; | ||
| 134 | spinlock_t lock; | ||
| 135 | }; | ||
| 136 | |||
| 137 | /* | ||
| 138 | * PL08X specific defines | ||
| 139 | */ | ||
| 140 | |||
| 141 | /* | ||
| 142 | * Memory boundaries: the manual for PL08x says that the controller | ||
| 143 | * cannot read past a 1KiB boundary, so these defines are used to | ||
| 144 | * create transfer LLIs that do not cross such boundaries. | ||
| 145 | */ | ||
| 146 | #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ | ||
| 147 | #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) | ||
| 148 | |||
| 149 | /* Minimum period between work queue runs */ | ||
| 150 | #define PL08X_WQ_PERIODMIN 20 | ||
| 151 | |||
| 152 | /* Size (bytes) of each LLI buffer allocated for one transfer */ | ||
| 153 | # define PL08X_LLI_TSFR_SIZE 0x2000 | ||
| 154 | |||
| 155 | /* Maximimum times we call dma_pool_alloc on this pool without freeing */ | ||
| 156 | #define PL08X_MAX_ALLOCS 0x40 | ||
| 157 | #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli)) | ||
| 158 | #define PL08X_ALIGN 8 | ||
| 159 | |||
| 160 | static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) | ||
| 161 | { | ||
| 162 | return container_of(chan, struct pl08x_dma_chan, chan); | ||
| 163 | } | ||
| 164 | |||
| 165 | /* | ||
| 166 | * Physical channel handling | ||
| 167 | */ | ||
| 168 | |||
| 169 | /* Whether a certain channel is busy or not */ | ||
| 170 | static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) | ||
| 171 | { | ||
| 172 | unsigned int val; | ||
| 173 | |||
| 174 | val = readl(ch->base + PL080_CH_CONFIG); | ||
| 175 | return val & PL080_CONFIG_ACTIVE; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* | ||
| 179 | * Set the initial DMA register values i.e. those for the first LLI | ||
| 180 | * The next lli pointer and the configuration interrupt bit have | ||
| 181 | * been set when the LLIs were constructed | ||
| 182 | */ | ||
| 183 | static void pl08x_set_cregs(struct pl08x_driver_data *pl08x, | ||
| 184 | struct pl08x_phy_chan *ch) | ||
| 185 | { | ||
| 186 | /* Wait for channel inactive */ | ||
| 187 | while (pl08x_phy_channel_busy(ch)) | ||
| 188 | ; | ||
| 189 | |||
| 190 | dev_vdbg(&pl08x->adev->dev, | ||
| 191 | "WRITE channel %d: csrc=%08x, cdst=%08x, " | ||
| 192 | "cctl=%08x, clli=%08x, ccfg=%08x\n", | ||
| 193 | ch->id, | ||
| 194 | ch->csrc, | ||
| 195 | ch->cdst, | ||
| 196 | ch->cctl, | ||
| 197 | ch->clli, | ||
| 198 | ch->ccfg); | ||
| 199 | |||
| 200 | writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR); | ||
| 201 | writel(ch->cdst, ch->base + PL080_CH_DST_ADDR); | ||
| 202 | writel(ch->clli, ch->base + PL080_CH_LLI); | ||
| 203 | writel(ch->cctl, ch->base + PL080_CH_CONTROL); | ||
| 204 | writel(ch->ccfg, ch->base + PL080_CH_CONFIG); | ||
| 205 | } | ||
| 206 | |||
| 207 | static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan) | ||
| 208 | { | ||
| 209 | struct pl08x_channel_data *cd = plchan->cd; | ||
| 210 | struct pl08x_phy_chan *phychan = plchan->phychan; | ||
| 211 | struct pl08x_txd *txd = plchan->at; | ||
| 212 | |||
| 213 | /* Copy the basic control register calculated at transfer config */ | ||
| 214 | phychan->csrc = txd->csrc; | ||
| 215 | phychan->cdst = txd->cdst; | ||
| 216 | phychan->clli = txd->clli; | ||
| 217 | phychan->cctl = txd->cctl; | ||
| 218 | |||
| 219 | /* Assign the signal to the proper control registers */ | ||
| 220 | phychan->ccfg = cd->ccfg; | ||
| 221 | phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK; | ||
| 222 | phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK; | ||
| 223 | /* If it wasn't set from AMBA, ignore it */ | ||
| 224 | if (txd->direction == DMA_TO_DEVICE) | ||
| 225 | /* Select signal as destination */ | ||
| 226 | phychan->ccfg |= | ||
| 227 | (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT); | ||
| 228 | else if (txd->direction == DMA_FROM_DEVICE) | ||
| 229 | /* Select signal as source */ | ||
| 230 | phychan->ccfg |= | ||
| 231 | (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT); | ||
| 232 | /* Always enable error interrupts */ | ||
| 233 | phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK; | ||
| 234 | /* Always enable terminal interrupts */ | ||
| 235 | phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK; | ||
| 236 | } | ||
| 237 | |||
| 238 | /* | ||
| 239 | * Enable the DMA channel | ||
| 240 | * Assumes all other configuration bits have been set | ||
| 241 | * as desired before this code is called | ||
| 242 | */ | ||
| 243 | static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x, | ||
| 244 | struct pl08x_phy_chan *ch) | ||
| 245 | { | ||
| 246 | u32 val; | ||
| 247 | |||
| 248 | /* | ||
| 249 | * Do not access config register until channel shows as disabled | ||
| 250 | */ | ||
| 251 | while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id)) | ||
| 252 | ; | ||
| 253 | |||
| 254 | /* | ||
| 255 | * Do not access config register until channel shows as inactive | ||
| 256 | */ | ||
| 257 | val = readl(ch->base + PL080_CH_CONFIG); | ||
| 258 | while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) | ||
| 259 | val = readl(ch->base + PL080_CH_CONFIG); | ||
| 260 | |||
| 261 | writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG); | ||
| 262 | } | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Overall DMAC remains enabled always. | ||
| 266 | * | ||
| 267 | * Disabling individual channels could lose data. | ||
| 268 | * | ||
| 269 | * Disable the peripheral DMA after disabling the DMAC | ||
| 270 | * in order to allow the DMAC FIFO to drain, and | ||
| 271 | * hence allow the channel to show inactive | ||
| 272 | * | ||
| 273 | */ | ||
| 274 | static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) | ||
| 275 | { | ||
| 276 | u32 val; | ||
| 277 | |||
| 278 | /* Set the HALT bit and wait for the FIFO to drain */ | ||
| 279 | val = readl(ch->base + PL080_CH_CONFIG); | ||
| 280 | val |= PL080_CONFIG_HALT; | ||
| 281 | writel(val, ch->base + PL080_CH_CONFIG); | ||
| 282 | |||
| 283 | /* Wait for channel inactive */ | ||
| 284 | while (pl08x_phy_channel_busy(ch)) | ||
| 285 | ; | ||
| 286 | } | ||
| 287 | |||
| 288 | static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) | ||
| 289 | { | ||
| 290 | u32 val; | ||
| 291 | |||
| 292 | /* Clear the HALT bit */ | ||
| 293 | val = readl(ch->base + PL080_CH_CONFIG); | ||
| 294 | val &= ~PL080_CONFIG_HALT; | ||
| 295 | writel(val, ch->base + PL080_CH_CONFIG); | ||
| 296 | } | ||
| 297 | |||
| 298 | |||
| 299 | /* Stops the channel */ | ||
| 300 | static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) | ||
| 301 | { | ||
| 302 | u32 val; | ||
| 303 | |||
| 304 | pl08x_pause_phy_chan(ch); | ||
| 305 | |||
| 306 | /* Disable channel */ | ||
| 307 | val = readl(ch->base + PL080_CH_CONFIG); | ||
| 308 | val &= ~PL080_CONFIG_ENABLE; | ||
| 309 | val &= ~PL080_CONFIG_ERR_IRQ_MASK; | ||
| 310 | val &= ~PL080_CONFIG_TC_IRQ_MASK; | ||
| 311 | writel(val, ch->base + PL080_CH_CONFIG); | ||
| 312 | } | ||
| 313 | |||
| 314 | static inline u32 get_bytes_in_cctl(u32 cctl) | ||
| 315 | { | ||
| 316 | /* The source width defines the number of bytes */ | ||
| 317 | u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
| 318 | |||
| 319 | switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { | ||
| 320 | case PL080_WIDTH_8BIT: | ||
| 321 | break; | ||
| 322 | case PL080_WIDTH_16BIT: | ||
| 323 | bytes *= 2; | ||
| 324 | break; | ||
| 325 | case PL080_WIDTH_32BIT: | ||
| 326 | bytes *= 4; | ||
| 327 | break; | ||
| 328 | } | ||
| 329 | return bytes; | ||
| 330 | } | ||
| 331 | |||
| 332 | /* The channel should be paused when calling this */ | ||
| 333 | static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) | ||
| 334 | { | ||
| 335 | struct pl08x_phy_chan *ch; | ||
| 336 | struct pl08x_txd *txdi = NULL; | ||
| 337 | struct pl08x_txd *txd; | ||
| 338 | unsigned long flags; | ||
| 339 | u32 bytes = 0; | ||
| 340 | |||
| 341 | spin_lock_irqsave(&plchan->lock, flags); | ||
| 342 | |||
| 343 | ch = plchan->phychan; | ||
| 344 | txd = plchan->at; | ||
| 345 | |||
| 346 | /* | ||
| 347 | * Next follow the LLIs to get the number of pending bytes in the | ||
| 348 | * currently active transaction. | ||
| 349 | */ | ||
| 350 | if (ch && txd) { | ||
| 351 | struct lli *llis_va = txd->llis_va; | ||
| 352 | struct lli *llis_bus = (struct lli *) txd->llis_bus; | ||
| 353 | u32 clli = readl(ch->base + PL080_CH_LLI); | ||
| 354 | |||
| 355 | /* First get the bytes in the current active LLI */ | ||
| 356 | bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); | ||
| 357 | |||
| 358 | if (clli) { | ||
| 359 | int i = 0; | ||
| 360 | |||
| 361 | /* Forward to the LLI pointed to by clli */ | ||
| 362 | while ((clli != (u32) &(llis_bus[i])) && | ||
| 363 | (i < MAX_NUM_TSFR_LLIS)) | ||
| 364 | i++; | ||
| 365 | |||
| 366 | while (clli) { | ||
| 367 | bytes += get_bytes_in_cctl(llis_va[i].cctl); | ||
| 368 | /* | ||
| 369 | * A clli of 0x00000000 will terminate the | ||
| 370 | * LLI list | ||
| 371 | */ | ||
| 372 | clli = llis_va[i].next; | ||
| 373 | i++; | ||
| 374 | } | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | /* Sum up all queued transactions */ | ||
| 379 | if (!list_empty(&plchan->desc_list)) { | ||
| 380 | list_for_each_entry(txdi, &plchan->desc_list, node) { | ||
| 381 | bytes += txdi->len; | ||
| 382 | } | ||
| 383 | |||
| 384 | } | ||
| 385 | |||
| 386 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
| 387 | |||
| 388 | return bytes; | ||
| 389 | } | ||
| 390 | |||
| 391 | /* | ||
| 392 | * Allocate a physical channel for a virtual channel | ||
| 393 | */ | ||
| 394 | static struct pl08x_phy_chan * | ||
| 395 | pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, | ||
| 396 | struct pl08x_dma_chan *virt_chan) | ||
| 397 | { | ||
| 398 | struct pl08x_phy_chan *ch = NULL; | ||
| 399 | unsigned long flags; | ||
| 400 | int i; | ||
| 401 | |||
| 402 | /* | ||
| 403 | * Try to locate a physical channel to be used for | ||
| 404 | * this transfer. If all are taken return NULL and | ||
| 405 | * the requester will have to cope by using some fallback | ||
| 406 | * PIO mode or retrying later. | ||
| 407 | */ | ||
| 408 | for (i = 0; i < pl08x->vd->channels; i++) { | ||
| 409 | ch = &pl08x->phy_chans[i]; | ||
| 410 | |||
| 411 | spin_lock_irqsave(&ch->lock, flags); | ||
| 412 | |||
| 413 | if (!ch->serving) { | ||
| 414 | ch->serving = virt_chan; | ||
| 415 | ch->signal = -1; | ||
| 416 | spin_unlock_irqrestore(&ch->lock, flags); | ||
| 417 | break; | ||
| 418 | } | ||
| 419 | |||
| 420 | spin_unlock_irqrestore(&ch->lock, flags); | ||
| 421 | } | ||
| 422 | |||
| 423 | if (i == pl08x->vd->channels) { | ||
| 424 | /* No physical channel available, cope with it */ | ||
| 425 | return NULL; | ||
| 426 | } | ||
| 427 | |||
| 428 | return ch; | ||
| 429 | } | ||
| 430 | |||
| 431 | static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, | ||
| 432 | struct pl08x_phy_chan *ch) | ||
| 433 | { | ||
| 434 | unsigned long flags; | ||
| 435 | |||
| 436 | /* Stop the channel and clear its interrupts */ | ||
| 437 | pl08x_stop_phy_chan(ch); | ||
| 438 | writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); | ||
| 439 | writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); | ||
| 440 | |||
| 441 | /* Mark it as free */ | ||
| 442 | spin_lock_irqsave(&ch->lock, flags); | ||
| 443 | ch->serving = NULL; | ||
| 444 | spin_unlock_irqrestore(&ch->lock, flags); | ||
| 445 | } | ||
| 446 | |||
| 447 | /* | ||
| 448 | * LLI handling | ||
| 449 | */ | ||
| 450 | |||
| 451 | static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) | ||
| 452 | { | ||
| 453 | switch (coded) { | ||
| 454 | case PL080_WIDTH_8BIT: | ||
| 455 | return 1; | ||
| 456 | case PL080_WIDTH_16BIT: | ||
| 457 | return 2; | ||
| 458 | case PL080_WIDTH_32BIT: | ||
| 459 | return 4; | ||
| 460 | default: | ||
| 461 | break; | ||
| 462 | } | ||
| 463 | BUG(); | ||
| 464 | return 0; | ||
| 465 | } | ||
| 466 | |||
| 467 | static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, | ||
| 468 | u32 tsize) | ||
| 469 | { | ||
| 470 | u32 retbits = cctl; | ||
| 471 | |||
| 472 | /* Remove all src, dst and transfersize bits */ | ||
| 473 | retbits &= ~PL080_CONTROL_DWIDTH_MASK; | ||
| 474 | retbits &= ~PL080_CONTROL_SWIDTH_MASK; | ||
| 475 | retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
| 476 | |||
| 477 | /* Then set the bits according to the parameters */ | ||
| 478 | switch (srcwidth) { | ||
| 479 | case 1: | ||
| 480 | retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; | ||
| 481 | break; | ||
| 482 | case 2: | ||
| 483 | retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT; | ||
| 484 | break; | ||
| 485 | case 4: | ||
| 486 | retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; | ||
| 487 | break; | ||
| 488 | default: | ||
| 489 | BUG(); | ||
| 490 | break; | ||
| 491 | } | ||
| 492 | |||
| 493 | switch (dstwidth) { | ||
| 494 | case 1: | ||
| 495 | retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; | ||
| 496 | break; | ||
| 497 | case 2: | ||
| 498 | retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; | ||
| 499 | break; | ||
| 500 | case 4: | ||
| 501 | retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; | ||
| 502 | break; | ||
| 503 | default: | ||
| 504 | BUG(); | ||
| 505 | break; | ||
| 506 | } | ||
| 507 | |||
| 508 | retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; | ||
| 509 | return retbits; | ||
| 510 | } | ||
| 511 | |||
| 512 | /* | ||
| 513 | * Autoselect a master bus to use for the transfer | ||
| 514 | * this prefers the destination bus if both available | ||
| 515 | * if fixed address on one bus the other will be chosen | ||
| 516 | */ | ||
| 517 | void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus, | ||
| 518 | struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus, | ||
| 519 | struct pl08x_bus_data **sbus, u32 cctl) | ||
| 520 | { | ||
| 521 | if (!(cctl & PL080_CONTROL_DST_INCR)) { | ||
| 522 | *mbus = src_bus; | ||
| 523 | *sbus = dst_bus; | ||
| 524 | } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { | ||
| 525 | *mbus = dst_bus; | ||
| 526 | *sbus = src_bus; | ||
| 527 | } else { | ||
| 528 | if (dst_bus->buswidth == 4) { | ||
| 529 | *mbus = dst_bus; | ||
| 530 | *sbus = src_bus; | ||
| 531 | } else if (src_bus->buswidth == 4) { | ||
| 532 | *mbus = src_bus; | ||
| 533 | *sbus = dst_bus; | ||
| 534 | } else if (dst_bus->buswidth == 2) { | ||
| 535 | *mbus = dst_bus; | ||
| 536 | *sbus = src_bus; | ||
| 537 | } else if (src_bus->buswidth == 2) { | ||
| 538 | *mbus = src_bus; | ||
| 539 | *sbus = dst_bus; | ||
| 540 | } else { | ||
| 541 | /* src_bus->buswidth == 1 */ | ||
| 542 | *mbus = dst_bus; | ||
| 543 | *sbus = src_bus; | ||
| 544 | } | ||
| 545 | } | ||
| 546 | } | ||
| 547 | |||
| 548 | /* | ||
| 549 | * Fills in one LLI for a certain transfer descriptor | ||
| 550 | * and advance the counter | ||
| 551 | */ | ||
| 552 | int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, | ||
| 553 | struct pl08x_txd *txd, int num_llis, int len, | ||
| 554 | u32 cctl, u32 *remainder) | ||
| 555 | { | ||
| 556 | struct lli *llis_va = txd->llis_va; | ||
| 557 | struct lli *llis_bus = (struct lli *) txd->llis_bus; | ||
| 558 | |||
| 559 | BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); | ||
| 560 | |||
| 561 | llis_va[num_llis].cctl = cctl; | ||
| 562 | llis_va[num_llis].src = txd->srcbus.addr; | ||
| 563 | llis_va[num_llis].dst = txd->dstbus.addr; | ||
| 564 | |||
| 565 | /* | ||
| 566 | * On versions with dual masters, you can optionally AND on | ||
| 567 | * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read | ||
| 568 | * in new LLIs with that controller, but we always try to | ||
| 569 | * choose AHB1 to point into memory. The idea is to have AHB2 | ||
| 570 | * fixed on the peripheral and AHB1 messing around in the | ||
| 571 | * memory. So we don't manipulate this bit currently. | ||
| 572 | */ | ||
| 573 | |||
| 574 | llis_va[num_llis].next = | ||
| 575 | (dma_addr_t)((u32) &(llis_bus[num_llis + 1])); | ||
| 576 | |||
| 577 | if (cctl & PL080_CONTROL_SRC_INCR) | ||
| 578 | txd->srcbus.addr += len; | ||
| 579 | if (cctl & PL080_CONTROL_DST_INCR) | ||
| 580 | txd->dstbus.addr += len; | ||
| 581 | |||
| 582 | *remainder -= len; | ||
| 583 | |||
| 584 | return num_llis + 1; | ||
| 585 | } | ||
| 586 | |||
| 587 | /* | ||
| 588 | * Return number of bytes to fill to boundary, or len | ||
| 589 | */ | ||
| 590 | static inline u32 pl08x_pre_boundary(u32 addr, u32 len) | ||
| 591 | { | ||
| 592 | u32 boundary; | ||
| 593 | |||
| 594 | boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1) | ||
| 595 | << PL08X_BOUNDARY_SHIFT; | ||
| 596 | |||
| 597 | if (boundary < addr + len) | ||
| 598 | return boundary - addr; | ||
| 599 | else | ||
| 600 | return len; | ||
| 601 | } | ||
| 602 | |||
| 603 | /* | ||
| 604 | * This fills in the table of LLIs for the transfer descriptor | ||
| 605 | * Note that we assume we never have to change the burst sizes | ||
| 606 | * Return 0 for error | ||
| 607 | */ | ||
| 608 | static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, | ||
| 609 | struct pl08x_txd *txd) | ||
| 610 | { | ||
| 611 | struct pl08x_channel_data *cd = txd->cd; | ||
| 612 | struct pl08x_bus_data *mbus, *sbus; | ||
| 613 | u32 remainder; | ||
| 614 | int num_llis = 0; | ||
| 615 | u32 cctl; | ||
| 616 | int max_bytes_per_lli; | ||
| 617 | int total_bytes = 0; | ||
| 618 | struct lli *llis_va; | ||
| 619 | struct lli *llis_bus; | ||
| 620 | |||
| 621 | if (!txd) { | ||
| 622 | dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__); | ||
| 623 | return 0; | ||
| 624 | } | ||
| 625 | |||
| 626 | txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, | ||
| 627 | &txd->llis_bus); | ||
| 628 | if (!txd->llis_va) { | ||
| 629 | dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); | ||
| 630 | return 0; | ||
| 631 | } | ||
| 632 | |||
| 633 | pl08x->pool_ctr++; | ||
| 634 | |||
| 635 | /* | ||
| 636 | * Initialize bus values for this transfer | ||
| 637 | * from the passed optimal values | ||
| 638 | */ | ||
| 639 | if (!cd) { | ||
| 640 | dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__); | ||
| 641 | return 0; | ||
| 642 | } | ||
| 643 | |||
| 644 | /* Get the default CCTL from the platform data */ | ||
| 645 | cctl = cd->cctl; | ||
| 646 | |||
| 647 | /* | ||
| 648 | * On the PL080 we have two bus masters and we | ||
| 649 | * should select one for source and one for | ||
| 650 | * destination. We try to use AHB2 for the | ||
| 651 | * bus which does not increment (typically the | ||
| 652 | * peripheral) else we just choose something. | ||
| 653 | */ | ||
| 654 | cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); | ||
| 655 | if (pl08x->vd->dualmaster) { | ||
| 656 | if (cctl & PL080_CONTROL_SRC_INCR) | ||
| 657 | /* Source increments, use AHB2 for destination */ | ||
| 658 | cctl |= PL080_CONTROL_DST_AHB2; | ||
| 659 | else if (cctl & PL080_CONTROL_DST_INCR) | ||
| 660 | /* Destination increments, use AHB2 for source */ | ||
| 661 | cctl |= PL080_CONTROL_SRC_AHB2; | ||
| 662 | else | ||
| 663 | /* Just pick something, source AHB1 dest AHB2 */ | ||
| 664 | cctl |= PL080_CONTROL_DST_AHB2; | ||
| 665 | } | ||
| 666 | |||
| 667 | /* Find maximum width of the source bus */ | ||
| 668 | txd->srcbus.maxwidth = | ||
| 669 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> | ||
| 670 | PL080_CONTROL_SWIDTH_SHIFT); | ||
| 671 | |||
| 672 | /* Find maximum width of the destination bus */ | ||
| 673 | txd->dstbus.maxwidth = | ||
| 674 | pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> | ||
| 675 | PL080_CONTROL_DWIDTH_SHIFT); | ||
| 676 | |||
| 677 | /* Set up the bus widths to the maximum */ | ||
| 678 | txd->srcbus.buswidth = txd->srcbus.maxwidth; | ||
| 679 | txd->dstbus.buswidth = txd->dstbus.maxwidth; | ||
| 680 | dev_vdbg(&pl08x->adev->dev, | ||
| 681 | "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", | ||
| 682 | __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); | ||
| 683 | |||
| 684 | |||
| 685 | /* | ||
| 686 | * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) | ||
| 687 | */ | ||
| 688 | max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * | ||
| 689 | PL080_CONTROL_TRANSFER_SIZE_MASK; | ||
| 690 | dev_vdbg(&pl08x->adev->dev, | ||
| 691 | "%s max bytes per lli = %d\n", | ||
| 692 | __func__, max_bytes_per_lli); | ||
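/*
 * Illustration (widths assumed for the example): with a 4-byte source
 * bus and a 2-byte destination bus, tsize counts 2-byte units, so if
 * the transfer-size field is 12 bits (mask 0xfff) a single LLI can
 * move at most 2 * 0xfff = 8190 bytes.
 */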
| 693 | |||
| 694 | /* We need to count this down to zero */ | ||
| 695 | remainder = txd->len; | ||
| 696 | dev_vdbg(&pl08x->adev->dev, | ||
| 697 | "%s remainder = %d\n", | ||
| 698 | __func__, remainder); | ||
| 699 | |||
| 700 | /* | ||
| 701 | * Choose bus to align to | ||
| 702 | * - prefers destination bus if both available | ||
| 703 | * - if fixed address on one bus chooses other | ||
| 704 | * - modifies cctl to choose an appropriate master | ||
| 705 | */ | ||
| 706 | pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus, | ||
| 707 | &mbus, &sbus, cctl); | ||
| 708 | |||
| 709 | |||
| 710 | /* | ||
| 711 | * The lowest bit of the LLI register | ||
| 712 | * is also used to indicate which master to | ||
| 713 | * use for reading the LLIs. | ||
| 714 | */ | ||
| 715 | |||
| 716 | if (txd->len < mbus->buswidth) { | ||
| 717 | /* | ||
| 718 | * Less than a bus width available | ||
| 719 | * - send as single bytes | ||
| 720 | */ | ||
| 721 | while (remainder) { | ||
| 722 | dev_vdbg(&pl08x->adev->dev, | ||
| 723 | "%s single byte LLIs for a transfer of " | ||
| 724 | "less than a bus width (remain %08x)\n", | ||
| 725 | __func__, remainder); | ||
| 726 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
| 727 | num_llis = | ||
| 728 | pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1, | ||
| 729 | cctl, &remainder); | ||
| 730 | total_bytes++; | ||
| 731 | } | ||
| 732 | } else { | ||
| 733 | /* | ||
| 734 | * Make one byte LLIs until master bus is aligned | ||
| 735 | * - slave will then be aligned also | ||
| 736 | */ | ||
| 737 | while ((mbus->addr) % (mbus->buswidth)) { | ||
| 738 | dev_vdbg(&pl08x->adev->dev, | ||
| 739 | "%s adjustment lli for less than bus width " | ||
| 740 | "(remain %08x)\n", | ||
| 741 | __func__, remainder); | ||
| 742 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
| 743 | num_llis = pl08x_fill_lli_for_desc | ||
| 744 | (pl08x, txd, num_llis, 1, cctl, &remainder); | ||
| 745 | total_bytes++; | ||
| 746 | } | ||
| 747 | |||
| 748 | /* | ||
| 749 | * Master now aligned | ||
| 750 | * - if slave is not then we must set its width down | ||
| 751 | */ | ||
| 752 | if (sbus->addr % sbus->buswidth) { | ||
| 753 | dev_dbg(&pl08x->adev->dev, | ||
| 754 | "%s set down bus width to one byte\n", | ||
| 755 | __func__); | ||
| 756 | |||
| 757 | sbus->buswidth = 1; | ||
| 758 | } | ||
| 759 | |||
| 760 | /* | ||
| 761 | * Make largest possible LLIs until less than one bus | ||
| 762 | * width left | ||
| 763 | */ | ||
| 764 | while (remainder > (mbus->buswidth - 1)) { | ||
| 765 | int lli_len, target_len; | ||
| 766 | int tsize; | ||
| 767 | int odd_bytes; | ||
| 768 | |||
| 769 | /* | ||
| 770 | * If enough left try to send max possible, | ||
| 771 | * otherwise try to send the remainder | ||
| 772 | */ | ||
| 773 | target_len = remainder; | ||
| 774 | if (remainder > max_bytes_per_lli) | ||
| 775 | target_len = max_bytes_per_lli; | ||
| 776 | |||
| 777 | /* | ||
| 778 | * For incrementing buses, set the lengths to the | ||
| 779 | * number of bytes that fill up to the next memory | ||
| 780 | * boundary | ||
| 781 | */ | ||
| 782 | if (cctl & PL080_CONTROL_SRC_INCR) | ||
| 783 | txd->srcbus.fill_bytes = | ||
| 784 | pl08x_pre_boundary( | ||
| 785 | txd->srcbus.addr, | ||
| 786 | remainder); | ||
| 787 | else | ||
| 788 | txd->srcbus.fill_bytes = | ||
| 789 | max_bytes_per_lli; | ||
| 790 | |||
| 791 | if (cctl & PL080_CONTROL_DST_INCR) | ||
| 792 | txd->dstbus.fill_bytes = | ||
| 793 | pl08x_pre_boundary( | ||
| 794 | txd->dstbus.addr, | ||
| 795 | remainder); | ||
| 796 | else | ||
| 797 | txd->dstbus.fill_bytes = | ||
| 798 | max_bytes_per_lli; | ||
| 799 | |||
| 800 | /* | ||
| 801 | * Find the nearest boundary: the smaller of the two fill lengths | ||
| 802 | */ | ||
| 803 | lli_len = min(txd->srcbus.fill_bytes, | ||
| 804 | txd->dstbus.fill_bytes); | ||
| 805 | |||
| 806 | BUG_ON(lli_len > remainder); | ||
| 807 | |||
| 808 | if (lli_len <= 0) { | ||
| 809 | dev_err(&pl08x->adev->dev, | ||
| 810 | "%s lli_len is %d, <= 0\n", | ||
| 811 | __func__, lli_len); | ||
| 812 | return 0; | ||
| 813 | } | ||
| 814 | |||
| 815 | if (lli_len == target_len) { | ||
| 816 | /* | ||
| 817 | * Can send what we wanted: round the | ||
| 818 | * length down to a multiple of the | ||
| 819 | * master bus width so that alignment | ||
| 820 | * is maintained | ||
| 821 | */ | ||
| 822 | lli_len = (lli_len/mbus->buswidth) * | ||
| 823 | mbus->buswidth; | ||
| 824 | odd_bytes = 0; | ||
| 825 | } else { | ||
| 826 | /* | ||
| 827 | * So now we know how many bytes to transfer | ||
| 828 | * to get to the nearest boundary. The next | ||
| 829 | * LLI will pass the boundary - however, we | ||
| 830 | * may be working to a boundary on the slave | ||
| 831 | * bus, so we need to ensure the master stays | ||
| 832 | * aligned | ||
| 833 | */ | ||
| 834 | odd_bytes = lli_len % mbus->buswidth; | ||
| 835 | /* | ||
| 836 | * - and that we are working in multiples | ||
| 837 | * of the bus widths | ||
| 838 | */ | ||
| 839 | lli_len -= odd_bytes; | ||
| 840 | |||
| 841 | } | ||
| 842 | |||
| 843 | if (lli_len) { | ||
| 844 | /* | ||
| 845 | * Check against minimum bus alignment: | ||
| 846 | * Calculate actual transfer size in relation | ||
| 847 | * to bus width and get a maximum remainder of | ||
| 848 | * the smallest bus width - 1 | ||
| 849 | */ | ||
| 850 | /* FIXME: use round_down()? */ | ||
| 851 | tsize = lli_len / min(mbus->buswidth, | ||
| 852 | sbus->buswidth); | ||
| 853 | lli_len = tsize * min(mbus->buswidth, | ||
| 854 | sbus->buswidth); | ||
| 855 | |||
| 856 | if (target_len != lli_len) { | ||
| 857 | dev_vdbg(&pl08x->adev->dev, | ||
| 858 | "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n", | ||
| 859 | __func__, target_len, lli_len, txd->len); | ||
| 860 | } | ||
| 861 | |||
| 862 | cctl = pl08x_cctl_bits(cctl, | ||
| 863 | txd->srcbus.buswidth, | ||
| 864 | txd->dstbus.buswidth, | ||
| 865 | tsize); | ||
| 866 | |||
| 867 | dev_vdbg(&pl08x->adev->dev, | ||
| 868 | "%s fill lli with single lli chunk of size %08x (remainder %08x)\n", | ||
| 869 | __func__, lli_len, remainder); | ||
| 870 | num_llis = pl08x_fill_lli_for_desc(pl08x, txd, | ||
| 871 | num_llis, lli_len, cctl, | ||
| 872 | &remainder); | ||
| 873 | total_bytes += lli_len; | ||
| 874 | } | ||
| 875 | |||
| 876 | |||
| 877 | if (odd_bytes) { | ||
| 878 | /* | ||
| 879 | * Creep past the boundary, | ||
| 880 | * maintaining master alignment | ||
| 881 | */ | ||
| 882 | int j; | ||
| 883 | for (j = 0; (j < mbus->buswidth) | ||
| 884 | && (remainder); j++) { | ||
| 885 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
| 886 | dev_vdbg(&pl08x->adev->dev, | ||
| 887 | "%s align with boundardy, single byte (remain %08x)\n", | ||
| 888 | __func__, remainder); | ||
| 889 | num_llis = | ||
| 890 | pl08x_fill_lli_for_desc(pl08x, | ||
| 891 | txd, num_llis, 1, | ||
| 892 | cctl, &remainder); | ||
| 893 | total_bytes++; | ||
| 894 | } | ||
| 895 | } | ||
| 896 | } | ||
| 897 | |||
| 898 | /* | ||
| 899 | * Send any odd bytes | ||
| 900 | */ | ||
| 901 | if (remainder < 0) { | ||
| 902 | dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n", | ||
| 903 | __func__, remainder); | ||
| 904 | return 0; | ||
| 905 | } | ||
| 906 | |||
| 907 | while (remainder) { | ||
| 908 | cctl = pl08x_cctl_bits(cctl, 1, 1, 1); | ||
| 909 | dev_vdbg(&pl08x->adev->dev, | ||
| 910 | "%s align with boundardy, single odd byte (remain %d)\n", | ||
| 911 | __func__, remainder); | ||
| 912 | num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis, | ||
| 913 | 1, cctl, &remainder); | ||
| 914 | total_bytes++; | ||
| 915 | } | ||
| 916 | } | ||
| 917 | if (total_bytes != txd->len) { | ||
| 918 | dev_err(&pl08x->adev->dev, | ||
| 919 | "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n", | ||
| 920 | __func__, total_bytes, txd->len); | ||
| 921 | return 0; | ||
| 922 | } | ||
| 923 | |||
| 924 | if (num_llis >= MAX_NUM_TSFR_LLIS) { | ||
| 925 | dev_err(&pl08x->adev->dev, | ||
| 926 | "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", | ||
| 927 | __func__, (u32) MAX_NUM_TSFR_LLIS); | ||
| 928 | return 0; | ||
| 929 | } | ||
| 930 | /* | ||
| 931 | * Decide whether this is a loop or a terminated transfer | ||
| 932 | */ | ||
| 933 | llis_va = txd->llis_va; | ||
| 934 | llis_bus = (struct lli *) txd->llis_bus; | ||
| 935 | |||
| 936 | if (cd->circular_buffer) { | ||
| 937 | /* | ||
| 938 | * Loop the circular buffer so that the next element | ||
| 939 | * points back to the beginning of the LLI. | ||
| 940 | */ | ||
| 941 | llis_va[num_llis - 1].next = | ||
| 942 | (dma_addr_t)((unsigned int)&(llis_bus[0])); | ||
| 943 | } else { | ||
| 944 | /* | ||
| 945 | * On non-circular buffers, the final LLI terminates | ||
| 946 | * the LLI chain. | ||
| 947 | */ | ||
| 948 | llis_va[num_llis - 1].next = 0; | ||
| 949 | /* | ||
| 950 | * The final LLI element shall also fire an interrupt | ||
| 951 | */ | ||
| 952 | llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; | ||
| 953 | } | ||
| 954 | |||
| 955 | /* Now store the channel register values */ | ||
| 956 | txd->csrc = llis_va[0].src; | ||
| 957 | txd->cdst = llis_va[0].dst; | ||
| 958 | if (num_llis > 1) | ||
| 959 | txd->clli = llis_va[0].next; | ||
| 960 | else | ||
| 961 | txd->clli = 0; | ||
| 962 | |||
| 963 | txd->cctl = llis_va[0].cctl; | ||
| 964 | /* ccfg will be set at physical channel allocation time */ | ||
| 965 | |||
| 966 | #ifdef VERBOSE_DEBUG | ||
| 967 | { | ||
| 968 | int i; | ||
| 969 | |||
| 970 | for (i = 0; i < num_llis; i++) { | ||
| 971 | dev_vdbg(&pl08x->adev->dev, | ||
| 972 | "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", | ||
| 973 | i, | ||
| 974 | &llis_va[i], | ||
| 975 | llis_va[i].src, | ||
| 976 | llis_va[i].dst, | ||
| 977 | llis_va[i].cctl, | ||
| 978 | llis_va[i].next | ||
| 979 | ); | ||
| 980 | } | ||
| 981 | } | ||
| 982 | #endif | ||
| 983 | |||
| 984 | return num_llis; | ||
| 985 | } | ||
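As an end-to-end illustration of the splitting above (all numbers invented, both buses assumed 4 bytes wide and no 1 KiB boundary crossed):

    /*
     * Invented walk-through: src = 0x1003, dst = 0x2003, len = 0x66.
     *  1) one single-byte LLI aligns the master bus (offset 3 -> 0)
     *  2) bulk LLIs move 0x64 bytes as 4-byte units (tsize = 0x19)
     *  3) one single-byte LLI carries the final odd byte
     * Total: 0x1 + 0x64 + 0x1 = 0x66 bytes, matching txd->len.
     */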
| 986 | |||
| 987 | /* You should call this with the struct pl08x lock held */ | ||
| 988 | static void pl08x_free_txd(struct pl08x_driver_data *pl08x, | ||
| 989 | struct pl08x_txd *txd) | ||
| 990 | { | ||
| 991 | if (!txd) { | ||
| 992 | dev_err(&pl08x->adev->dev, | ||
| 993 | "%s no descriptor to free\n", __func__); | ||
| 994 | return; | ||
| 995 | } | ||
| 996 | /* Free the LLI */ | ||
| 997 | dma_pool_free(pl08x->pool, txd->llis_va, | ||
| 998 | txd->llis_bus); | ||
| 999 | |||
| 1000 | pl08x->pool_ctr--; | ||
| 1001 | |||
| 1002 | kfree(txd); | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, | ||
| 1006 | struct pl08x_dma_chan *plchan) | ||
| 1007 | { | ||
| 1008 | struct pl08x_txd *txdi = NULL; | ||
| 1009 | struct pl08x_txd *next; | ||
| 1010 | |||
| 1011 | if (!list_empty(&plchan->desc_list)) { | ||
| 1012 | list_for_each_entry_safe(txdi, | ||
| 1013 | next, &plchan->desc_list, node) { | ||
| 1014 | list_del(&txdi->node); | ||
| 1015 | pl08x_free_txd(pl08x, txdi); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | } | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | /* | ||
| 1022 | * The DMA ENGINE API | ||
| 1023 | */ | ||
| 1024 | static int pl08x_alloc_chan_resources(struct dma_chan *chan) | ||
| 1025 | { | ||
| 1026 | return 0; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | static void pl08x_free_chan_resources(struct dma_chan *chan) | ||
| 1030 | { | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | /* | ||
| 1034 | * This should be called with the channel plchan->lock held | ||
| 1035 | */ | ||
| 1036 | static int prep_phy_channel(struct pl08x_dma_chan *plchan, | ||
| 1037 | struct pl08x_txd *txd) | ||
| 1038 | { | ||
| 1039 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1040 | struct pl08x_phy_chan *ch; | ||
| 1041 | int ret; | ||
| 1042 | |||
| 1043 | /* Check if we already have a channel */ | ||
| 1044 | if (plchan->phychan) | ||
| 1045 | return 0; | ||
| 1046 | |||
| 1047 | ch = pl08x_get_phy_channel(pl08x, plchan); | ||
| 1048 | if (!ch) { | ||
| 1049 | /* No physical channel available, cope with it */ | ||
| 1050 | dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); | ||
| 1051 | return -EBUSY; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | /* | ||
| 1055 | * OK we have a physical channel: for memcpy() this is all we | ||
| 1056 | * need, but for slaves the physical signals may be muxed! | ||
| 1057 | * Can the platform allow us to use this channel? | ||
| 1058 | */ | ||
| 1059 | if (plchan->slave && | ||
| 1060 | ch->signal < 0 && | ||
| 1061 | pl08x->pd->get_signal) { | ||
| 1062 | ret = pl08x->pd->get_signal(plchan); | ||
| 1063 | if (ret < 0) { | ||
| 1064 | dev_dbg(&pl08x->adev->dev, | ||
| 1065 | "unable to use physical channel %d for transfer on %s due to platform restrictions\n", | ||
| 1066 | ch->id, plchan->name); | ||
| 1067 | /* Release physical channel & return */ | ||
| 1068 | pl08x_put_phy_channel(pl08x, ch); | ||
| 1069 | return -EBUSY; | ||
| 1070 | } | ||
| 1071 | ch->signal = ret; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", | ||
| 1075 | ch->id, | ||
| 1076 | ch->signal, | ||
| 1077 | plchan->name); | ||
| 1078 | |||
| 1079 | plchan->phychan = ch; | ||
| 1080 | |||
| 1081 | return 0; | ||
| 1082 | } | ||
| 1083 | |||
| 1084 | static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 1085 | { | ||
| 1086 | struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); | ||
| 1087 | |||
| 1088 | atomic_inc(&plchan->last_issued); | ||
| 1089 | tx->cookie = atomic_read(&plchan->last_issued); | ||
| 1090 | /* This unlock follows the lock in the prep() function */ | ||
| 1091 | spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); | ||
| 1092 | |||
| 1093 | return tx->cookie; | ||
| 1094 | } | ||
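The increment-then-read pair above is not one atomic step; a minimal sketch of a race-free alternative (not the author's code) folds the two together:

    /* Sketch: assign the cookie in a single atomic operation */
    tx->cookie = atomic_inc_return(&plchan->last_issued);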
| 1095 | |||
| 1096 | static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( | ||
| 1097 | struct dma_chan *chan, unsigned long flags) | ||
| 1098 | { | ||
| 1099 | struct dma_async_tx_descriptor *retval = NULL; | ||
| 1100 | |||
| 1101 | return retval; | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | /* | ||
| 1105 | * Code polling dma_async_is_complete() in a tight loop may | ||
| 1106 | * cause problems - a schedule() could be inserted where | ||
| 1107 | * indicated below. If slaves rely on interrupts to signal | ||
| 1108 | * completion, this must not be called with interrupts disabled | ||
| 1109 | */ | ||
| 1110 | static enum dma_status | ||
| 1111 | pl08x_dma_tx_status(struct dma_chan *chan, | ||
| 1112 | dma_cookie_t cookie, | ||
| 1113 | struct dma_tx_state *txstate) | ||
| 1114 | { | ||
| 1115 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1116 | dma_cookie_t last_used; | ||
| 1117 | dma_cookie_t last_complete; | ||
| 1118 | enum dma_status ret; | ||
| 1119 | u32 bytesleft = 0; | ||
| 1120 | |||
| 1121 | last_used = atomic_read(&plchan->last_issued); | ||
| 1122 | last_complete = plchan->lc; | ||
| 1123 | |||
| 1124 | ret = dma_async_is_complete(cookie, last_complete, last_used); | ||
| 1125 | if (ret == DMA_SUCCESS) { | ||
| 1126 | dma_set_tx_state(txstate, last_complete, last_used, 0); | ||
| 1127 | return ret; | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | /* | ||
| 1131 | * schedule(); could be inserted here | ||
| 1132 | */ | ||
| 1133 | |||
| 1134 | /* | ||
| 1135 | * This cookie is not complete yet | ||
| 1136 | */ | ||
| 1137 | last_used = atomic_read(&plchan->last_issued); | ||
| 1138 | last_complete = plchan->lc; | ||
| 1139 | |||
| 1140 | /* Get number of bytes left in the active transactions and queue */ | ||
| 1141 | bytesleft = pl08x_getbytes_chan(plchan); | ||
| 1142 | |||
| 1143 | dma_set_tx_state(txstate, last_complete, last_used, | ||
| 1144 | bytesleft); | ||
| 1145 | |||
| 1146 | if (plchan->state == PL08X_CHAN_PAUSED) | ||
| 1147 | return DMA_PAUSED; | ||
| 1148 | |||
| 1149 | /* Whether waiting or running, we're in progress */ | ||
| 1150 | return DMA_IN_PROGRESS; | ||
| 1151 | } | ||
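A hypothetical polling caller for this status hook, using the generic dmaengine wrapper; per the comment above, it must not spin like this with interrupts disabled:

    /* Hypothetical caller: poll until the cookie completes */
    enum dma_status status;

    do {
        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
    } while (status == DMA_IN_PROGRESS);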
| 1152 | |||
| 1153 | /* PrimeCell DMA extension */ | ||
| 1154 | struct burst_table { | ||
| 1155 | int burstwords; | ||
| 1156 | u32 reg; | ||
| 1157 | }; | ||
| 1158 | |||
| 1159 | static const struct burst_table burst_sizes[] = { | ||
| 1160 | { | ||
| 1161 | .burstwords = 256, | ||
| 1162 | .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1163 | (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1164 | }, | ||
| 1165 | { | ||
| 1166 | .burstwords = 128, | ||
| 1167 | .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1168 | (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1169 | }, | ||
| 1170 | { | ||
| 1171 | .burstwords = 64, | ||
| 1172 | .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1173 | (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1174 | }, | ||
| 1175 | { | ||
| 1176 | .burstwords = 32, | ||
| 1177 | .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1178 | (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1179 | }, | ||
| 1180 | { | ||
| 1181 | .burstwords = 16, | ||
| 1182 | .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1183 | (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1184 | }, | ||
| 1185 | { | ||
| 1186 | .burstwords = 8, | ||
| 1187 | .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1188 | (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1189 | }, | ||
| 1190 | { | ||
| 1191 | .burstwords = 4, | ||
| 1192 | .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1193 | (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1194 | }, | ||
| 1195 | { | ||
| 1196 | .burstwords = 1, | ||
| 1197 | .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1198 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), | ||
| 1199 | }, | ||
| 1200 | }; | ||
| 1201 | |||
| 1202 | static void dma_set_runtime_config(struct dma_chan *chan, | ||
| 1203 | struct dma_slave_config *config) | ||
| 1204 | { | ||
| 1205 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1206 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1207 | struct pl08x_channel_data *cd = plchan->cd; | ||
| 1208 | enum dma_slave_buswidth addr_width; | ||
| 1209 | u32 maxburst; | ||
| 1210 | u32 cctl = 0; | ||
| 1211 | /* Mask out all except src and dst channel */ | ||
| 1212 | u32 ccfg = cd->ccfg & 0x000003DEU; | ||
| 1213 | int i = 0; | ||
| 1214 | |||
| 1215 | /* Transfer direction */ | ||
| 1216 | plchan->runtime_direction = config->direction; | ||
| 1217 | if (config->direction == DMA_TO_DEVICE) { | ||
| 1218 | plchan->runtime_addr = config->dst_addr; | ||
| 1219 | cctl |= PL080_CONTROL_SRC_INCR; | ||
| 1220 | ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
| 1221 | addr_width = config->dst_addr_width; | ||
| 1222 | maxburst = config->dst_maxburst; | ||
| 1223 | } else if (config->direction == DMA_FROM_DEVICE) { | ||
| 1224 | plchan->runtime_addr = config->src_addr; | ||
| 1225 | cctl |= PL080_CONTROL_DST_INCR; | ||
| 1226 | ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; | ||
| 1227 | addr_width = config->src_addr_width; | ||
| 1228 | maxburst = config->src_maxburst; | ||
| 1229 | } else { | ||
| 1230 | dev_err(&pl08x->adev->dev, | ||
| 1231 | "bad runtime_config: alien transfer direction\n"); | ||
| 1232 | return; | ||
| 1233 | } | ||
| 1234 | |||
| 1235 | switch (addr_width) { | ||
| 1236 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 1237 | cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
| 1238 | (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
| 1239 | break; | ||
| 1240 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 1241 | cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
| 1242 | (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
| 1243 | break; | ||
| 1244 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 1245 | cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | | ||
| 1246 | (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); | ||
| 1247 | break; | ||
| 1248 | default: | ||
| 1249 | dev_err(&pl08x->adev->dev, | ||
| 1250 | "bad runtime_config: alien address width\n"); | ||
| 1251 | return; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | /* | ||
| 1255 | * Now decide on a maxburst: | ||
| 1256 | * If this channel will only request single transfers, set | ||
| 1257 | * this down to ONE element. | ||
| 1258 | */ | ||
| 1259 | if (plchan->cd->single) { | ||
| 1260 | cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | | ||
| 1261 | (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); | ||
| 1262 | } else { | ||
| 1263 | while (i < ARRAY_SIZE(burst_sizes) - 1) { | ||
| 1264 | if (burst_sizes[i].burstwords <= maxburst) | ||
| 1265 | break; | ||
| 1266 | i++; | ||
| 1267 | } | ||
| 1268 | cctl |= burst_sizes[i].reg; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | /* Access the cell in privileged mode, non-bufferable, non-cacheable */ | ||
| 1272 | cctl &= ~PL080_CONTROL_PROT_MASK; | ||
| 1273 | cctl |= PL080_CONTROL_PROT_SYS; | ||
| 1274 | |||
| 1275 | /* Modify the default channel data to fit PrimeCell request */ | ||
| 1276 | cd->cctl = cctl; | ||
| 1277 | cd->ccfg = ccfg; | ||
| 1278 | |||
| 1279 | dev_dbg(&pl08x->adev->dev, | ||
| 1280 | "configured channel %s (%s) for %s, data width %d, " | ||
| 1281 | "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", | ||
| 1282 | dma_chan_name(chan), plchan->name, | ||
| 1283 | (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", | ||
| 1284 | addr_width, | ||
| 1285 | maxburst, | ||
| 1286 | cctl, ccfg); | ||
| 1287 | } | ||
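A minimal sketch of the burst lookup in isolation; it mirrors the table walk above, clamped to the final single-word entry so that an out-of-range maxburst (e.g. 0) cannot index past the table:

    /* Sketch: pick the largest burst not exceeding maxburst */
    static u32 pick_burst_bits(u32 maxburst)
    {
        int i = 0;

        /* e.g. maxburst = 48 stops at the 32-word entry */
        while (i < ARRAY_SIZE(burst_sizes) - 1 &&
               burst_sizes[i].burstwords > maxburst)
            i++;
        return burst_sizes[i].reg;
    }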
| 1288 | |||
| 1289 | /* | ||
| 1290 | * Slave transactions call back to the slave device to allow | ||
| 1291 | * synchronization of slave DMA signals with the DMAC enable | ||
| 1292 | */ | ||
| 1293 | static void pl08x_issue_pending(struct dma_chan *chan) | ||
| 1294 | { | ||
| 1295 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1296 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1297 | unsigned long flags; | ||
| 1298 | |||
| 1299 | spin_lock_irqsave(&plchan->lock, flags); | ||
| 1300 | /* Something is already active */ | ||
| 1301 | if (plchan->at) { | ||
| 1302 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
| 1303 | return; | ||
| 1304 | } | ||
| 1305 | |||
| 1306 | /* Didn't get a physical channel so waiting for it ... */ | ||
| 1307 | if (plchan->state == PL08X_CHAN_WAITING) { | ||
| 1308 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
| 1309 | return; | ||
| 1310 | } /* otherwise take the first queued element and execute it */ | ||
| 1311 | if (!list_empty(&plchan->desc_list)) { | ||
| 1312 | struct pl08x_txd *next; | ||
| 1313 | |||
| 1314 | next = list_first_entry(&plchan->desc_list, | ||
| 1315 | struct pl08x_txd, | ||
| 1316 | node); | ||
| 1317 | list_del(&next->node); | ||
| 1318 | plchan->at = next; | ||
| 1319 | plchan->state = PL08X_CHAN_RUNNING; | ||
| 1320 | |||
| 1321 | /* Configure the physical channel for the active txd */ | ||
| 1322 | pl08x_config_phychan_for_txd(plchan); | ||
| 1323 | pl08x_set_cregs(pl08x, plchan->phychan); | ||
| 1324 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, | ||
| 1331 | struct pl08x_txd *txd) | ||
| 1332 | { | ||
| 1333 | int num_llis; | ||
| 1334 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1335 | int ret; | ||
| 1336 | |||
| 1337 | num_llis = pl08x_fill_llis_for_desc(pl08x, txd); | ||
| 1338 | |||
| 1339 | if (!num_llis) | ||
| 1340 | return -EINVAL; | ||
| 1341 | |||
| 1342 | spin_lock_irqsave(&plchan->lock, plchan->lockflags); | ||
| 1343 | |||
| 1344 | /* | ||
| 1345 | * If this device is not using a circular buffer then | ||
| 1346 | * queue this new descriptor for transfer. | ||
| 1347 | * The descriptor for a circular buffer continues | ||
| 1348 | * to be used until the channel is freed. | ||
| 1349 | */ | ||
| 1350 | if (txd->cd->circular_buffer) | ||
| 1351 | dev_err(&pl08x->adev->dev, | ||
| 1352 | "%s attempting to queue a circular buffer\n", | ||
| 1353 | __func__); | ||
| 1354 | else | ||
| 1355 | list_add_tail(&txd->node, | ||
| 1356 | &plchan->desc_list); | ||
| 1357 | |||
| 1358 | /* | ||
| 1359 | * See if we already have a physical channel allocated, | ||
| 1360 | * else this is the time to try to get one. | ||
| 1361 | */ | ||
| 1362 | ret = prep_phy_channel(plchan, txd); | ||
| 1363 | if (ret) { | ||
| 1364 | /* | ||
| 1365 | * No physical channel available: we will | ||
| 1366 | * stack up the memcpy channels until there is a channel | ||
| 1367 | * available to handle it, whereas slave transfers may | ||
| 1368 | * have been denied due to platform channel muxing restrictions. | ||
| 1369 | * Since there is no guarantee that this will ever be | ||
| 1370 | * resolved, and since the signal must be acquired AFTER | ||
| 1371 | * acquiring the physical channel, we let slaves be NACKed | ||
| 1372 | * with -EBUSY here. The drivers can always retry the prep() | ||
| 1373 | * call if they are eager to do this using DMA. | ||
| 1374 | */ | ||
| 1375 | if (plchan->slave) { | ||
| 1376 | pl08x_free_txd_list(pl08x, plchan); | ||
| 1377 | spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); | ||
| 1378 | return -EBUSY; | ||
| 1379 | } | ||
| 1380 | /* Do this memcpy whenever there is a channel ready */ | ||
| 1381 | plchan->state = PL08X_CHAN_WAITING; | ||
| 1382 | plchan->waiting = txd; | ||
| 1383 | } else | ||
| 1384 | /* | ||
| 1385 | * Else we're all set, paused and ready to roll, | ||
| 1386 | * status will switch to PL08X_CHAN_RUNNING when | ||
| 1387 | * we call issue_pending(). If there is something | ||
| 1388 | * running on the channel already we don't change | ||
| 1389 | * its state. | ||
| 1390 | */ | ||
| 1391 | if (plchan->state == PL08X_CHAN_IDLE) | ||
| 1392 | plchan->state = PL08X_CHAN_PAUSED; | ||
| 1393 | |||
| 1394 | /* | ||
| 1395 | * Notice that we leave plchan->lock locked on purpose: | ||
| 1396 | * it will be unlocked in the subsequent tx_submit() | ||
| 1397 | * call. This is a consequence of the current API. | ||
| 1398 | */ | ||
| 1399 | |||
| 1400 | return 0; | ||
| 1401 | } | ||
| 1402 | |||
| 1403 | /* | ||
| 1404 | * Initialize a descriptor to be used by memcpy submit | ||
| 1405 | */ | ||
| 1406 | static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( | ||
| 1407 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
| 1408 | size_t len, unsigned long flags) | ||
| 1409 | { | ||
| 1410 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1411 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1412 | struct pl08x_txd *txd; | ||
| 1413 | int ret; | ||
| 1414 | |||
| 1415 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | ||
| 1416 | if (!txd) { | ||
| 1417 | dev_err(&pl08x->adev->dev, | ||
| 1418 | "%s no memory for descriptor\n", __func__); | ||
| 1419 | return NULL; | ||
| 1420 | } | ||
| 1421 | |||
| 1422 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
| 1423 | txd->direction = DMA_NONE; | ||
| 1424 | txd->srcbus.addr = src; | ||
| 1425 | txd->dstbus.addr = dest; | ||
| 1426 | |||
| 1427 | /* Set platform data for m2m */ | ||
| 1428 | txd->cd = &pl08x->pd->memcpy_channel; | ||
| 1429 | /* Both to be incremented or the code will break */ | ||
| 1430 | txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR; | ||
| 1431 | txd->tx.tx_submit = pl08x_tx_submit; | ||
| 1432 | txd->tx.callback = NULL; | ||
| 1433 | txd->tx.callback_param = NULL; | ||
| 1434 | txd->len = len; | ||
| 1435 | |||
| 1436 | INIT_LIST_HEAD(&txd->node); | ||
| 1437 | ret = pl08x_prep_channel_resources(plchan, txd); | ||
| 1438 | if (ret) | ||
| 1439 | return NULL; | ||
| 1440 | /* | ||
| 1441 | * NB: the channel lock is held at this point so tx_submit() | ||
| 1442 | * must be called in direct succession. | ||
| 1443 | */ | ||
| 1444 | |||
| 1445 | return &txd->tx; | ||
| 1446 | } | ||
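A hypothetical client-side sketch of this prep hook (chan, dst, src and len are assumed to exist); prep and submit must follow each other directly because prep() leaves the channel lock held:

    /* Hypothetical memcpy client: prep, submit, kick the queue */
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
    if (tx) {
        cookie = tx->tx_submit(tx);    /* drops the channel lock */
        dma_async_issue_pending(chan);
    }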
| 1447 | |||
| 1448 | struct dma_async_tx_descriptor *pl08x_prep_slave_sg( | ||
| 1449 | struct dma_chan *chan, struct scatterlist *sgl, | ||
| 1450 | unsigned int sg_len, enum dma_data_direction direction, | ||
| 1451 | unsigned long flags) | ||
| 1452 | { | ||
| 1453 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1454 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1455 | struct pl08x_txd *txd; | ||
| 1456 | int ret; | ||
| 1457 | |||
| 1458 | /* | ||
| 1459 | * Current implementation ASSUMES only one sg | ||
| 1460 | */ | ||
| 1461 | if (sg_len != 1) { | ||
| 1462 | dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n", | ||
| 1463 | __func__); | ||
| 1464 | BUG(); | ||
| 1465 | } | ||
| 1466 | |||
| 1467 | dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", | ||
| 1468 | __func__, sgl->length, plchan->name); | ||
| 1469 | |||
| 1470 | txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); | ||
| 1471 | if (!txd) { | ||
| 1472 | dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); | ||
| 1473 | return NULL; | ||
| 1474 | } | ||
| 1475 | |||
| 1476 | dma_async_tx_descriptor_init(&txd->tx, chan); | ||
| 1477 | |||
| 1478 | if (direction != plchan->runtime_direction) | ||
| 1479 | dev_err(&pl08x->adev->dev, "%s DMA setup does not match " | ||
| 1480 | "the direction configured for the PrimeCell\n", | ||
| 1481 | __func__); | ||
| 1482 | |||
| 1483 | /* | ||
| 1484 | * Set up addresses; the PrimeCell-configured address | ||
| 1485 | * will take precedence since this may configure the | ||
| 1486 | * channel target address dynamically at runtime. | ||
| 1487 | */ | ||
| 1488 | txd->direction = direction; | ||
| 1489 | if (direction == DMA_TO_DEVICE) { | ||
| 1490 | txd->srcbus.addr = sgl->dma_address; | ||
| 1491 | if (plchan->runtime_addr) | ||
| 1492 | txd->dstbus.addr = plchan->runtime_addr; | ||
| 1493 | else | ||
| 1494 | txd->dstbus.addr = plchan->cd->addr; | ||
| 1495 | } else if (direction == DMA_FROM_DEVICE) { | ||
| 1496 | if (plchan->runtime_addr) | ||
| 1497 | txd->srcbus.addr = plchan->runtime_addr; | ||
| 1498 | else | ||
| 1499 | txd->srcbus.addr = plchan->cd->addr; | ||
| 1500 | txd->dstbus.addr = sgl->dma_address; | ||
| 1501 | } else { | ||
| 1502 | dev_err(&pl08x->adev->dev, | ||
| 1503 | "%s direction unsupported\n", __func__); | ||
| 1504 | return NULL; | ||
| 1505 | } | ||
| 1506 | txd->cd = plchan->cd; | ||
| 1507 | txd->tx.tx_submit = pl08x_tx_submit; | ||
| 1508 | txd->tx.callback = NULL; | ||
| 1509 | txd->tx.callback_param = NULL; | ||
| 1510 | txd->len = sgl->length; | ||
| 1511 | INIT_LIST_HEAD(&txd->node); | ||
| 1512 | |||
| 1513 | ret = pl08x_prep_channel_resources(plchan, txd); | ||
| 1514 | if (ret) | ||
| 1515 | return NULL; | ||
| 1516 | /* | ||
| 1517 | * NB: the channel lock is held at this point so tx_submit() | ||
| 1518 | * must be called in direct succession. | ||
| 1519 | */ | ||
| 1520 | |||
| 1521 | return &txd->tx; | ||
| 1522 | } | ||
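A hypothetical slave client for the same API generation: the channel is configured through device_control() with DMA_SLAVE_CONFIG and handed a single-entry scatterlist, matching the one-sg assumption above; the FIFO address, width and burst are invented, and a mapped struct scatterlist sg is assumed:

    /* Hypothetical slave client: configure, then prep one sg entry */
    struct dma_slave_config cfg = {
        .direction = DMA_TO_DEVICE,
        .dst_addr = 0x80009000,            /* invented peripheral FIFO */
        .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dst_maxburst = 4,
    };
    struct dma_async_tx_descriptor *tx;

    chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                 (unsigned long) &cfg);
    tx = chan->device->device_prep_slave_sg(chan, &sg, 1,
                                            DMA_TO_DEVICE, 0);
    if (tx) {
        tx->tx_submit(tx);
        dma_async_issue_pending(chan);
    }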
| 1523 | |||
| 1524 | static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
| 1525 | unsigned long arg) | ||
| 1526 | { | ||
| 1527 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1528 | struct pl08x_driver_data *pl08x = plchan->host; | ||
| 1529 | unsigned long flags; | ||
| 1530 | int ret = 0; | ||
| 1531 | |||
| 1532 | /* Controls applicable to inactive channels */ | ||
| 1533 | if (cmd == DMA_SLAVE_CONFIG) { | ||
| 1534 | dma_set_runtime_config(chan, | ||
| 1535 | (struct dma_slave_config *) | ||
| 1536 | arg); | ||
| 1537 | return 0; | ||
| 1538 | } | ||
| 1539 | |||
| 1540 | /* | ||
| 1541 | * Anything succeeds on channels with no physical allocation and | ||
| 1542 | * no queued transfers. | ||
| 1543 | */ | ||
| 1544 | spin_lock_irqsave(&plchan->lock, flags); | ||
| 1545 | if (!plchan->phychan && !plchan->at) { | ||
| 1546 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
| 1547 | return 0; | ||
| 1548 | } | ||
| 1549 | |||
| 1550 | switch (cmd) { | ||
| 1551 | case DMA_TERMINATE_ALL: | ||
| 1552 | plchan->state = PL08X_CHAN_IDLE; | ||
| 1553 | |||
| 1554 | if (plchan->phychan) { | ||
| 1555 | pl08x_stop_phy_chan(plchan->phychan); | ||
| 1556 | |||
| 1557 | /* | ||
| 1558 | * Mark physical channel as free and free any slave | ||
| 1559 | * signal | ||
| 1560 | */ | ||
| 1561 | if ((plchan->phychan->signal >= 0) && | ||
| 1562 | pl08x->pd->put_signal) { | ||
| 1563 | pl08x->pd->put_signal(plchan); | ||
| 1564 | plchan->phychan->signal = -1; | ||
| 1565 | } | ||
| 1566 | pl08x_put_phy_channel(pl08x, plchan->phychan); | ||
| 1567 | plchan->phychan = NULL; | ||
| 1568 | } | ||
| 1569 | /* Stop any pending tasklet */ | ||
| 1570 | tasklet_disable(&plchan->tasklet); | ||
| 1571 | /* Dequeue jobs and free LLIs */ | ||
| 1572 | if (plchan->at) { | ||
| 1573 | pl08x_free_txd(pl08x, plchan->at); | ||
| 1574 | plchan->at = NULL; | ||
| 1575 | } | ||
| 1576 | /* Dequeue jobs not yet fired as well */ | ||
| 1577 | pl08x_free_txd_list(pl08x, plchan); | ||
| 1578 | break; | ||
| 1579 | case DMA_PAUSE: | ||
| 1580 | pl08x_pause_phy_chan(plchan->phychan); | ||
| 1581 | plchan->state = PL08X_CHAN_PAUSED; | ||
| 1582 | break; | ||
| 1583 | case DMA_RESUME: | ||
| 1584 | pl08x_resume_phy_chan(plchan->phychan); | ||
| 1585 | plchan->state = PL08X_CHAN_RUNNING; | ||
| 1586 | break; | ||
| 1587 | default: | ||
| 1588 | /* Unknown command */ | ||
| 1589 | ret = -ENXIO; | ||
| 1590 | break; | ||
| 1591 | } | ||
| 1592 | |||
| 1593 | spin_unlock_irqrestore(&plchan->lock, flags); | ||
| 1594 | |||
| 1595 | return ret; | ||
| 1596 | } | ||
| 1597 | |||
| 1598 | bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | ||
| 1599 | { | ||
| 1600 | struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); | ||
| 1601 | char *name = chan_id; | ||
| 1602 | |||
| 1603 | /* Match the channel by the name the platform gave it */ | ||
| 1604 | if (!strcmp(plchan->name, name)) | ||
| 1605 | return true; | ||
| 1606 | |||
| 1607 | return false; | ||
| 1608 | } | ||
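A hypothetical use of the filter with the generic channel allocator; "spi0_tx" stands in for a bus_id from the board's slave channel table:

    /* Hypothetical request of a named channel via the filter */
    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, pl08x_filter_id, "spi0_tx");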
| 1609 | |||
| 1610 | /* | ||
| 1611 | * Just check that the device is there and active | ||
| 1612 | * TODO: turn this bit on/off depending on the number of | ||
| 1613 | * physical channels actually in use: if it drops to zero, | ||
| 1614 | * shut the block off and cut its clock at the same time | ||
| 1615 | * to save some power. | ||
| 1616 | */ | ||
| 1617 | static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) | ||
| 1618 | { | ||
| 1619 | u32 val; | ||
| 1620 | |||
| 1621 | val = readl(pl08x->base + PL080_CONFIG); | ||
| 1622 | val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); | ||
| 1623 | /* We implicitly clear bit 1 and that means little-endian mode */ | ||
| 1624 | val |= PL080_CONFIG_ENABLE; | ||
| 1625 | writel(val, pl08x->base + PL080_CONFIG); | ||
| 1626 | } | ||
| 1627 | |||
| 1628 | static void pl08x_tasklet(unsigned long data) | ||
| 1629 | { | ||
| 1630 | struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; | ||
| 1631 | struct pl08x_phy_chan *phychan; | ||
| 1632 | struct pl08x_driver_data *pl08x; | ||
| 1633 | |||
| 1634 | BUG_ON(!plchan); | ||
| 1635 | phychan = plchan->phychan; | ||
| 1636 | pl08x = plchan->host; | ||
| 1637 | spin_lock(&plchan->lock); | ||
| 1638 | |||
| 1639 | if (plchan->at) { | ||
| 1640 | dma_async_tx_callback callback = | ||
| 1641 | plchan->at->tx.callback; | ||
| 1642 | void *callback_param = | ||
| 1643 | plchan->at->tx.callback_param; | ||
| 1644 | |||
| 1645 | /* | ||
| 1646 | * Update last completed | ||
| 1647 | */ | ||
| 1648 | plchan->lc = | ||
| 1649 | (plchan->at->tx.cookie); | ||
| 1650 | |||
| 1651 | /* | ||
| 1652 | * Callback to signal completion | ||
| 1653 | */ | ||
| 1654 | if (callback) | ||
| 1655 | callback(callback_param); | ||
| 1656 | |||
| 1657 | /* | ||
| 1658 | * Device callbacks should NOT clear | ||
| 1659 | * the current transaction on the channel | ||
| 1660 | * Linus: sometimes they should? | ||
| 1661 | */ | ||
| 1662 | BUG_ON(!plchan->at); | ||
| 1663 | |||
| 1664 | |||
| 1665 | /* | ||
| 1666 | * Free the descriptor if it's not for a device | ||
| 1667 | * using a circular buffer | ||
| 1668 | */ | ||
| 1669 | if (!plchan->at->cd->circular_buffer) { | ||
| 1670 | pl08x_free_txd(pl08x, plchan->at); | ||
| 1671 | plchan->at = NULL; | ||
| 1672 | } | ||
| 1673 | /* | ||
| 1674 | * else descriptor for circular | ||
| 1675 | * buffers only freed when | ||
| 1676 | * client has disabled dma | ||
| 1677 | */ | ||
| 1678 | } | ||
| 1679 | /* | ||
| 1680 | * If a new descriptor is queued, set it up | ||
| 1681 | * plchan->at is NULL here | ||
| 1682 | */ | ||
| 1683 | if (!list_empty(&plchan->desc_list)) { | ||
| 1684 | struct pl08x_txd *next; | ||
| 1685 | |||
| 1686 | next = list_first_entry(&plchan->desc_list, | ||
| 1687 | struct pl08x_txd, | ||
| 1688 | node); | ||
| 1689 | list_del(&next->node); | ||
| 1690 | plchan->at = next; | ||
| 1691 | /* Configure the physical channel for the next txd */ | ||
| 1692 | pl08x_config_phychan_for_txd(plchan); | ||
| 1693 | pl08x_set_cregs(pl08x, plchan->phychan); | ||
| 1694 | pl08x_enable_phy_chan(pl08x, plchan->phychan); | ||
| 1695 | } else { | ||
| 1696 | struct pl08x_dma_chan *waiting = NULL; | ||
| 1697 | |||
| 1698 | /* | ||
| 1699 | * No more jobs, so free up the physical channel | ||
| 1700 | * Free any allocated signal on slave transfers too | ||
| 1701 | */ | ||
| 1702 | if ((phychan->signal >= 0) && pl08x->pd->put_signal) { | ||
| 1703 | pl08x->pd->put_signal(plchan); | ||
| 1704 | phychan->signal = -1; | ||
| 1705 | } | ||
| 1706 | pl08x_put_phy_channel(pl08x, phychan); | ||
| 1707 | plchan->phychan = NULL; | ||
| 1708 | plchan->state = PL08X_CHAN_IDLE; | ||
| 1709 | |||
| 1710 | /* | ||
| 1711 | * And NOW before anyone else can grab that freed- | ||
| 1712 | * up physical channel, see if there is some memcpy | ||
| 1713 | * pending that seriously needs to start because of | ||
| 1714 | * being stacked up while we were choking the | ||
| 1715 | * physical channels with data. | ||
| 1716 | */ | ||
| 1717 | list_for_each_entry(waiting, &pl08x->memcpy.channels, | ||
| 1718 | chan.device_node) { | ||
| 1719 | if (waiting->state == PL08X_CHAN_WAITING && | ||
| 1720 | waiting->waiting != NULL) { | ||
| 1721 | int ret; | ||
| 1722 | |||
| 1723 | /* This should REALLY not fail now */ | ||
| 1724 | ret = prep_phy_channel(waiting, | ||
| 1725 | waiting->waiting); | ||
| 1726 | BUG_ON(ret); | ||
| 1727 | waiting->state = PL08X_CHAN_RUNNING; | ||
| 1728 | waiting->waiting = NULL; | ||
| 1729 | pl08x_issue_pending(&waiting->chan); | ||
| 1730 | break; | ||
| 1731 | } | ||
| 1732 | } | ||
| 1733 | } | ||
| 1734 | |||
| 1735 | spin_unlock(&plchan->lock); | ||
| 1736 | } | ||
| 1737 | |||
| 1738 | static irqreturn_t pl08x_irq(int irq, void *dev) | ||
| 1739 | { | ||
| 1740 | struct pl08x_driver_data *pl08x = dev; | ||
| 1741 | u32 mask = 0; | ||
| 1742 | u32 val; | ||
| 1743 | int i; | ||
| 1744 | |||
| 1745 | val = readl(pl08x->base + PL080_ERR_STATUS); | ||
| 1746 | if (val) { | ||
| 1747 | /* | ||
| 1748 | * An error interrupt (on one or more channels) | ||
| 1749 | */ | ||
| 1750 | dev_err(&pl08x->adev->dev, | ||
| 1751 | "%s error interrupt, register value 0x%08x\n", | ||
| 1752 | __func__, val); | ||
| 1753 | /* | ||
| 1754 | * Simply clear ALL PL08X error interrupts, | ||
| 1755 | * regardless of channel and cause | ||
| 1756 | * FIXME: should be 0x00000003 on PL081 really. | ||
| 1757 | */ | ||
| 1758 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | ||
| 1759 | } | ||
| 1760 | val = readl(pl08x->base + PL080_INT_STATUS); | ||
| 1761 | for (i = 0; i < pl08x->vd->channels; i++) { | ||
| 1762 | if ((1 << i) & val) { | ||
| 1763 | /* Locate physical channel */ | ||
| 1764 | struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; | ||
| 1765 | struct pl08x_dma_chan *plchan = phychan->serving; | ||
| 1766 | |||
| 1767 | /* Schedule tasklet on this channel */ | ||
| 1768 | tasklet_schedule(&plchan->tasklet); | ||
| 1769 | |||
| 1770 | mask |= (1 << i); | ||
| 1771 | } | ||
| 1772 | } | ||
| 1773 | /* | ||
| 1774 | * Clear only the terminal interrupts on channels we processed | ||
| 1775 | */ | ||
| 1776 | writel(mask, pl08x->base + PL080_TC_CLEAR); | ||
| 1777 | |||
| 1778 | return mask ? IRQ_HANDLED : IRQ_NONE; | ||
| 1779 | } | ||
| 1780 | |||
| 1781 | /* | ||
| 1782 | * Initialise the DMAC memcpy/slave channels. | ||
| 1783 | * Make a local wrapper to hold required data | ||
| 1784 | */ | ||
| 1785 | static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, | ||
| 1786 | struct dma_device *dmadev, | ||
| 1787 | unsigned int channels, | ||
| 1788 | bool slave) | ||
| 1789 | { | ||
| 1790 | struct pl08x_dma_chan *chan; | ||
| 1791 | int i; | ||
| 1792 | |||
| 1793 | INIT_LIST_HEAD(&dmadev->channels); | ||
| 1794 | /* | ||
| 1795 | * Register as many memcpy channels as we have physical | ||
| 1796 | * channels; we won't always be able to use them all but | ||
| 1797 | * the code will have to cope with that situation. | ||
| 1798 | */ | ||
| 1799 | for (i = 0; i < channels; i++) { | ||
| 1800 | chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); | ||
| 1801 | if (!chan) { | ||
| 1802 | dev_err(&pl08x->adev->dev, | ||
| 1803 | "%s no memory for channel\n", __func__); | ||
| 1804 | return -ENOMEM; | ||
| 1805 | } | ||
| 1806 | |||
| 1807 | chan->host = pl08x; | ||
| 1808 | chan->state = PL08X_CHAN_IDLE; | ||
| 1809 | |||
| 1810 | if (slave) { | ||
| 1811 | chan->slave = true; | ||
| 1812 | chan->name = pl08x->pd->slave_channels[i].bus_id; | ||
| 1813 | chan->cd = &pl08x->pd->slave_channels[i]; | ||
| 1814 | } else { | ||
| 1815 | chan->cd = &pl08x->pd->memcpy_channel; | ||
| 1816 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | ||
| 1817 | if (!chan->name) { | ||
| 1818 | kfree(chan); | ||
| 1819 | return -ENOMEM; | ||
| 1820 | } | ||
| 1821 | } | ||
| 1822 | dev_info(&pl08x->adev->dev, | ||
| 1823 | "initialize virtual channel \"%s\"\n", | ||
| 1824 | chan->name); | ||
| 1825 | |||
| 1826 | chan->chan.device = dmadev; | ||
| 1827 | atomic_set(&chan->last_issued, 0); | ||
| 1828 | chan->lc = atomic_read(&chan->last_issued); | ||
| 1829 | |||
| 1830 | spin_lock_init(&chan->lock); | ||
| 1831 | INIT_LIST_HEAD(&chan->desc_list); | ||
| 1832 | tasklet_init(&chan->tasklet, pl08x_tasklet, | ||
| 1833 | (unsigned long) chan); | ||
| 1834 | |||
| 1835 | list_add_tail(&chan->chan.device_node, &dmadev->channels); | ||
| 1836 | } | ||
| 1837 | dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", | ||
| 1838 | i, slave ? "slave" : "memcpy"); | ||
| 1839 | return i; | ||
| 1840 | } | ||
| 1841 | |||
| 1842 | static void pl08x_free_virtual_channels(struct dma_device *dmadev) | ||
| 1843 | { | ||
| 1844 | struct pl08x_dma_chan *chan = NULL; | ||
| 1845 | struct pl08x_dma_chan *next; | ||
| 1846 | |||
| 1847 | list_for_each_entry_safe(chan, | ||
| 1848 | next, &dmadev->channels, chan.device_node) { | ||
| 1849 | list_del(&chan->chan.device_node); | ||
| 1850 | kfree(chan); | ||
| 1851 | } | ||
| 1852 | } | ||
| 1853 | |||
| 1854 | #ifdef CONFIG_DEBUG_FS | ||
| 1855 | static const char *pl08x_state_str(enum pl08x_dma_chan_state state) | ||
| 1856 | { | ||
| 1857 | switch (state) { | ||
| 1858 | case PL08X_CHAN_IDLE: | ||
| 1859 | return "idle"; | ||
| 1860 | case PL08X_CHAN_RUNNING: | ||
| 1861 | return "running"; | ||
| 1862 | case PL08X_CHAN_PAUSED: | ||
| 1863 | return "paused"; | ||
| 1864 | case PL08X_CHAN_WAITING: | ||
| 1865 | return "waiting"; | ||
| 1866 | default: | ||
| 1867 | break; | ||
| 1868 | } | ||
| 1869 | return "UNKNOWN STATE"; | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | static int pl08x_debugfs_show(struct seq_file *s, void *data) | ||
| 1873 | { | ||
| 1874 | struct pl08x_driver_data *pl08x = s->private; | ||
| 1875 | struct pl08x_dma_chan *chan; | ||
| 1876 | struct pl08x_phy_chan *ch; | ||
| 1877 | unsigned long flags; | ||
| 1878 | int i; | ||
| 1879 | |||
| 1880 | seq_printf(s, "PL08x physical channels:\n"); | ||
| 1881 | seq_printf(s, "CHANNEL:\tUSER:\n"); | ||
| 1882 | seq_printf(s, "--------\t-----\n"); | ||
| 1883 | for (i = 0; i < pl08x->vd->channels; i++) { | ||
| 1884 | struct pl08x_dma_chan *virt_chan; | ||
| 1885 | |||
| 1886 | ch = &pl08x->phy_chans[i]; | ||
| 1887 | |||
| 1888 | spin_lock_irqsave(&ch->lock, flags); | ||
| 1889 | virt_chan = ch->serving; | ||
| 1890 | |||
| 1891 | seq_printf(s, "%d\t\t%s\n", | ||
| 1892 | ch->id, virt_chan ? virt_chan->name : "(none)"); | ||
| 1893 | |||
| 1894 | spin_unlock_irqrestore(&ch->lock, flags); | ||
| 1895 | } | ||
| 1896 | |||
| 1897 | seq_printf(s, "\nPL08x virtual memcpy channels:\n"); | ||
| 1898 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | ||
| 1899 | seq_printf(s, "--------\t------\n"); | ||
| 1900 | list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { | ||
| 1901 | seq_printf(s, "%s\t\t\%s\n", chan->name, | ||
| 1902 | pl08x_state_str(chan->state)); | ||
| 1903 | } | ||
| 1904 | |||
| 1905 | seq_printf(s, "\nPL08x virtual slave channels:\n"); | ||
| 1906 | seq_printf(s, "CHANNEL:\tSTATE:\n"); | ||
| 1907 | seq_printf(s, "--------\t------\n"); | ||
| 1908 | list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { | ||
| 1909 | seq_printf(s, "%s\t\t\%s\n", chan->name, | ||
| 1910 | pl08x_state_str(chan->state)); | ||
| 1911 | } | ||
| 1912 | |||
| 1913 | return 0; | ||
| 1914 | } | ||
| 1915 | |||
| 1916 | static int pl08x_debugfs_open(struct inode *inode, struct file *file) | ||
| 1917 | { | ||
| 1918 | return single_open(file, pl08x_debugfs_show, inode->i_private); | ||
| 1919 | } | ||
| 1920 | |||
| 1921 | static const struct file_operations pl08x_debugfs_operations = { | ||
| 1922 | .open = pl08x_debugfs_open, | ||
| 1923 | .read = seq_read, | ||
| 1924 | .llseek = seq_lseek, | ||
| 1925 | .release = single_release, | ||
| 1926 | }; | ||
| 1927 | |||
| 1928 | static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | ||
| 1929 | { | ||
| 1930 | /* Expose a simple debugfs interface to view all channels */ | ||
| 1931 | (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, | ||
| 1932 | NULL, pl08x, | ||
| 1933 | &pl08x_debugfs_operations); | ||
| 1934 | } | ||
| 1935 | |||
| 1936 | #else | ||
| 1937 | static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) | ||
| 1938 | { | ||
| 1939 | } | ||
| 1940 | #endif | ||
| 1941 | |||
| 1942 | static int pl08x_probe(struct amba_device *adev, struct amba_id *id) | ||
| 1943 | { | ||
| 1944 | struct pl08x_driver_data *pl08x; | ||
| 1945 | struct vendor_data *vd = id->data; | ||
| 1946 | int ret = 0; | ||
| 1947 | int i; | ||
| 1948 | |||
| 1949 | ret = amba_request_regions(adev, NULL); | ||
| 1950 | if (ret) | ||
| 1951 | return ret; | ||
| 1952 | |||
| 1953 | /* Create the driver state holder */ | ||
| 1954 | pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); | ||
| 1955 | if (!pl08x) { | ||
| 1956 | ret = -ENOMEM; | ||
| 1957 | goto out_no_pl08x; | ||
| 1958 | } | ||
| 1959 | |||
| 1960 | /* Initialize memcpy engine */ | ||
| 1961 | dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); | ||
| 1962 | pl08x->memcpy.dev = &adev->dev; | ||
| 1963 | pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; | ||
| 1964 | pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; | ||
| 1965 | pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; | ||
| 1966 | pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | ||
| 1967 | pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; | ||
| 1968 | pl08x->memcpy.device_issue_pending = pl08x_issue_pending; | ||
| 1969 | pl08x->memcpy.device_control = pl08x_control; | ||
| 1970 | |||
| 1971 | /* Initialize slave engine */ | ||
| 1972 | dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); | ||
| 1973 | pl08x->slave.dev = &adev->dev; | ||
| 1974 | pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; | ||
| 1975 | pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; | ||
| 1976 | pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; | ||
| 1977 | pl08x->slave.device_tx_status = pl08x_dma_tx_status; | ||
| 1978 | pl08x->slave.device_issue_pending = pl08x_issue_pending; | ||
| 1979 | pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; | ||
| 1980 | pl08x->slave.device_control = pl08x_control; | ||
| 1981 | |||
| 1982 | /* Get the platform data */ | ||
| 1983 | pl08x->pd = dev_get_platdata(&adev->dev); | ||
| 1984 | if (!pl08x->pd) { | ||
| 1985 | dev_err(&adev->dev, "no platform data supplied\n"); | ||
| 1986 | ret = -EINVAL; | ||
| 1987 | goto out_no_platdata; | ||
| 1988 | } | ||
| 1989 | /* Assign useful pointers to the driver state */ | ||
| 1990 | pl08x->adev = adev; | ||
| 1991 | pl08x->vd = vd; | ||
| 1992 | |||
| 1993 | /* A DMA memory pool for LLIs, align on 1-byte boundary */ | ||
| 1994 | pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, | ||
| 1995 | PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); | ||
| 1996 | if (!pl08x->pool) { | ||
| 1997 | ret = -ENOMEM; | ||
| 1998 | goto out_no_lli_pool; | ||
| 1999 | } | ||
| 2000 | |||
| 2001 | spin_lock_init(&pl08x->lock); | ||
| 2002 | |||
| 2003 | pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); | ||
| 2004 | if (!pl08x->base) { | ||
| 2005 | ret = -ENOMEM; | ||
| 2006 | goto out_no_ioremap; | ||
| 2007 | } | ||
| 2008 | |||
| 2009 | /* Turn on the PL08x */ | ||
| 2010 | pl08x_ensure_on(pl08x); | ||
| 2011 | |||
| 2012 | /* | ||
| 2013 | * Attach the interrupt handler | ||
| 2014 | */ | ||
| 2015 | writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); | ||
| 2016 | writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); | ||
| 2017 | |||
| 2018 | ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, | ||
| 2019 | vd->name, pl08x); | ||
| 2020 | if (ret) { | ||
| 2021 | dev_err(&adev->dev, "%s failed to request interrupt %d\n", | ||
| 2022 | __func__, adev->irq[0]); | ||
| 2023 | goto out_no_irq; | ||
| 2024 | } | ||
| 2025 | |||
| 2026 | /* Initialize physical channels */ | ||
| 2027 | pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), | ||
| 2028 | GFP_KERNEL); | ||
| 2029 | if (!pl08x->phy_chans) { | ||
| 2030 | dev_err(&adev->dev, "%s failed to allocate physical channel holders\n", | ||
| 2031 | __func__); | ||
| 2032 | ret = -ENOMEM; | ||
| 2033 | goto out_no_phychans; | ||
| 2034 | } | ||
| 2035 | |||
| 2036 | for (i = 0; i < vd->channels; i++) { | ||
| 2037 | struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; | ||
| 2038 | |||
| 2039 | ch->id = i; | ||
| 2040 | ch->base = pl08x->base + PL080_Cx_BASE(i); | ||
| 2041 | spin_lock_init(&ch->lock); | ||
| 2042 | ch->serving = NULL; | ||
| 2043 | ch->signal = -1; | ||
| 2044 | dev_info(&adev->dev, | ||
| 2045 | "physical channel %d is %s\n", i, | ||
| 2046 | pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); | ||
| 2047 | } | ||
| 2048 | |||
| 2049 | /* Register as many memcpy channels as there are physical channels */ | ||
| 2050 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, | ||
| 2051 | pl08x->vd->channels, false); | ||
| 2052 | if (ret <= 0) { | ||
| 2053 | dev_warn(&pl08x->adev->dev, | ||
| 2054 | "%s failed to enumerate memcpy channels - %d\n", | ||
| 2055 | __func__, ret); | ||
| 2056 | goto out_no_memcpy; | ||
| 2057 | } | ||
| 2058 | pl08x->memcpy.chancnt = ret; | ||
| 2059 | |||
| 2060 | /* Register slave channels */ | ||
| 2061 | ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, | ||
| 2062 | pl08x->pd->num_slave_channels, | ||
| 2063 | true); | ||
| 2064 | if (ret <= 0) { | ||
| 2065 | dev_warn(&pl08x->adev->dev, | ||
| 2066 | "%s failed to enumerate slave channels - %d\n", | ||
| 2067 | __func__, ret); | ||
| 2068 | goto out_no_slave; | ||
| 2069 | } | ||
| 2070 | pl08x->slave.chancnt = ret; | ||
| 2071 | |||
| 2072 | ret = dma_async_device_register(&pl08x->memcpy); | ||
| 2073 | if (ret) { | ||
| 2074 | dev_warn(&pl08x->adev->dev, | ||
| 2075 | "%s failed to register memcpy as an async device - %d\n", | ||
| 2076 | __func__, ret); | ||
| 2077 | goto out_no_memcpy_reg; | ||
| 2078 | } | ||
| 2079 | |||
| 2080 | ret = dma_async_device_register(&pl08x->slave); | ||
| 2081 | if (ret) { | ||
| 2082 | dev_warn(&pl08x->adev->dev, | ||
| 2083 | "%s failed to register slave as an async device - %d\n", | ||
| 2084 | __func__, ret); | ||
| 2085 | goto out_no_slave_reg; | ||
| 2086 | } | ||
| 2087 | |||
| 2088 | amba_set_drvdata(adev, pl08x); | ||
| 2089 | init_pl08x_debugfs(pl08x); | ||
| 2090 | dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", | ||
| 2091 | vd->name, adev->res.start); | ||
| 2092 | return 0; | ||
| 2093 | |||
| 2094 | out_no_slave_reg: | ||
| 2095 | dma_async_device_unregister(&pl08x->memcpy); | ||
| 2096 | out_no_memcpy_reg: | ||
| 2097 | pl08x_free_virtual_channels(&pl08x->slave); | ||
| 2098 | out_no_slave: | ||
| 2099 | pl08x_free_virtual_channels(&pl08x->memcpy); | ||
| 2100 | out_no_memcpy: | ||
| 2101 | kfree(pl08x->phy_chans); | ||
| 2102 | out_no_phychans: | ||
| 2103 | free_irq(adev->irq[0], pl08x); | ||
| 2104 | out_no_irq: | ||
| 2105 | iounmap(pl08x->base); | ||
| 2106 | out_no_ioremap: | ||
| 2107 | dma_pool_destroy(pl08x->pool); | ||
| 2108 | out_no_lli_pool: | ||
| 2109 | out_no_platdata: | ||
| 2110 | kfree(pl08x); | ||
| 2111 | out_no_pl08x: | ||
| 2112 | amba_release_regions(adev); | ||
| 2113 | return ret; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | /* The PL080 has 8 channels and the PL081 has just 2 */ | ||
| 2117 | static struct vendor_data vendor_pl080 = { | ||
| 2118 | .name = "PL080", | ||
| 2119 | .channels = 8, | ||
| 2120 | .dualmaster = true, | ||
| 2121 | }; | ||
| 2122 | |||
| 2123 | static struct vendor_data vendor_pl081 = { | ||
| 2124 | .name = "PL081", | ||
| 2125 | .channels = 2, | ||
| 2126 | .dualmaster = false, | ||
| 2127 | }; | ||
| 2128 | |||
| 2129 | static struct amba_id pl08x_ids[] = { | ||
| 2130 | /* PL080 */ | ||
| 2131 | { | ||
| 2132 | .id = 0x00041080, | ||
| 2133 | .mask = 0x000fffff, | ||
| 2134 | .data = &vendor_pl080, | ||
| 2135 | }, | ||
| 2136 | /* PL081 */ | ||
| 2137 | { | ||
| 2138 | .id = 0x00041081, | ||
| 2139 | .mask = 0x000fffff, | ||
| 2140 | .data = &vendor_pl081, | ||
| 2141 | }, | ||
| 2142 | /* Nomadik 8815 PL080 variant */ | ||
| 2143 | { | ||
| 2144 | .id = 0x00280880, | ||
| 2145 | .mask = 0x00ffffff, | ||
| 2146 | .data = &vendor_pl080, | ||
| 2147 | }, | ||
| 2148 | { 0, 0 }, | ||
| 2149 | }; | ||
| 2150 | |||
| 2151 | static struct amba_driver pl08x_amba_driver = { | ||
| 2152 | .drv.name = DRIVER_NAME, | ||
| 2153 | .id_table = pl08x_ids, | ||
| 2154 | .probe = pl08x_probe, | ||
| 2155 | }; | ||
| 2156 | |||
| 2157 | static int __init pl08x_init(void) | ||
| 2158 | { | ||
| 2159 | int retval; | ||
| 2160 | retval = amba_driver_register(&pl08x_amba_driver); | ||
| 2161 | if (retval) | ||
| 2162 | printk(KERN_WARNING DRIVER_NAME | ||
| 2163 | "failed to register as an amba device (%d)\n", | ||
| 2164 | retval); | ||
| 2165 | return retval; | ||
| 2166 | } | ||
| 2167 | subsys_initcall(pl08x_init); | ||
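To round off, a hedged board-file sketch of the platform data the probe consumes; the field names follow their use in this driver, but every concrete value below is invented:

    /* Invented board-file sketch of the pl08x platform data */
    static struct pl08x_channel_data pl08x_slave_channels[] = {
        {
            .bus_id = "spi0_tx",    /* matched by pl08x_filter_id() */
            .addr = 0x80009000,     /* invented peripheral FIFO */
            .cctl = 0,              /* default CCTL bits */
            .ccfg = 0,
        },
    };

    static struct pl08x_platform_data pl08x_pd = {
        .memcpy_channel = {
            .bus_id = "memcpy",
            .cctl = 0,
            .ccfg = 0,
        },
        .slave_channels = pl08x_slave_channels,
        .num_slave_channels = ARRAY_SIZE(pl08x_slave_channels),
        /* .get_signal/.put_signal: optional signal mux hooks */
    };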
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index ae2b8714d190..a6656834f0ff 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c | |||
| @@ -1610,7 +1610,7 @@ int __init coh901318_init(void) | |||
| 1610 | { | 1610 | { |
| 1611 | return platform_driver_probe(&coh901318_driver, coh901318_probe); | 1611 | return platform_driver_probe(&coh901318_driver, coh901318_probe); |
| 1612 | } | 1612 | } |
| 1613 | subsys_initcall(coh901318_init); | 1613 | arch_initcall(coh901318_init); |
| 1614 | 1614 | ||
| 1615 | void __exit coh901318_exit(void) | 1615 | void __exit coh901318_exit(void) |
| 1616 | { | 1616 | { |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 9d31d5eb95c1..8bcb15fb959d 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
| @@ -690,8 +690,12 @@ int dma_async_device_register(struct dma_device *device) | |||
| 690 | !device->device_prep_dma_memset); | 690 | !device->device_prep_dma_memset); |
| 691 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | 691 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && |
| 692 | !device->device_prep_dma_interrupt); | 692 | !device->device_prep_dma_interrupt); |
| 693 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && | ||
| 694 | !device->device_prep_dma_sg); | ||
| 693 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | 695 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && |
| 694 | !device->device_prep_slave_sg); | 696 | !device->device_prep_slave_sg); |
| 697 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && | ||
| 698 | !device->device_prep_dma_cyclic); | ||
| 695 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && | 699 | BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && |
| 696 | !device->device_control); | 700 | !device->device_control); |
| 697 | 701 | ||
| @@ -702,7 +706,7 @@ int dma_async_device_register(struct dma_device *device) | |||
| 702 | BUG_ON(!device->dev); | 706 | BUG_ON(!device->dev); |
| 703 | 707 | ||
| 704 | /* note: this only matters in the | 708 | /* note: this only matters in the |
| 705 | * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case | 709 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case |
| 706 | */ | 710 | */ |
| 707 | if (device_has_all_tx_types(device)) | 711 | if (device_has_all_tx_types(device)) |
| 708 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); | 712 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); |
| @@ -976,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |||
| 976 | struct dma_chan *chan) | 980 | struct dma_chan *chan) |
| 977 | { | 981 | { |
| 978 | tx->chan = chan; | 982 | tx->chan = chan; |
| 979 | #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | 983 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
| 980 | spin_lock_init(&tx->lock); | 984 | spin_lock_init(&tx->lock); |
| 981 | #endif | 985 | #endif |
| 982 | } | 986 | } |
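The new BUG_ON lines above tie every advertised capability bit to its prep callback at registration time. A hedged sketch of what this means for a driver (all foo_* names are hypothetical):

    /* Hypothetical probe-time setup: every capability announced in
     * cap_mask must be backed by the matching callback, or the new
     * BUG_ON checks in dma_async_device_register() fire at probe time
     * instead of a NULL dereference happening later at prep time. */
    static int foo_register(struct foo_engine *foo)
    {
            dma_cap_set(DMA_SG, foo->dma_device.cap_mask);
            foo->dma_device.device_prep_dma_sg = foo_prep_dma_sg;

            dma_cap_set(DMA_CYCLIC, foo->dma_device.cap_mask);
            foo->dma_device.device_prep_dma_cyclic = foo_prep_dma_cyclic;

            return dma_async_device_register(&foo->dma_device);
    }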
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index cea08bed9cf9..286c3ac6bdcc 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c | |||
| @@ -35,9 +35,10 @@ | |||
| 35 | #include <linux/dmapool.h> | 35 | #include <linux/dmapool.h> |
| 36 | #include <linux/of_platform.h> | 36 | #include <linux/of_platform.h> |
| 37 | 37 | ||
| 38 | #include <asm/fsldma.h> | ||
| 39 | #include "fsldma.h" | 38 | #include "fsldma.h" |
| 40 | 39 | ||
| 40 | static const char msg_ld_oom[] = "No free memory for link descriptor\n"; | ||
| 41 | |||
| 41 | static void dma_init(struct fsldma_chan *chan) | 42 | static void dma_init(struct fsldma_chan *chan) |
| 42 | { | 43 | { |
| 43 | /* Reset the channel */ | 44 | /* Reset the channel */ |
| @@ -499,7 +500,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | |||
| 499 | 500 | ||
| 500 | new = fsl_dma_alloc_descriptor(chan); | 501 | new = fsl_dma_alloc_descriptor(chan); |
| 501 | if (!new) { | 502 | if (!new) { |
| 502 | dev_err(chan->dev, "No free memory for link descriptor\n"); | 503 | dev_err(chan->dev, msg_ld_oom); |
| 503 | return NULL; | 504 | return NULL; |
| 504 | } | 505 | } |
| 505 | 506 | ||
| @@ -536,8 +537,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | |||
| 536 | /* Allocate the link descriptor from DMA pool */ | 537 | /* Allocate the link descriptor from DMA pool */ |
| 537 | new = fsl_dma_alloc_descriptor(chan); | 538 | new = fsl_dma_alloc_descriptor(chan); |
| 538 | if (!new) { | 539 | if (!new) { |
| 539 | dev_err(chan->dev, | 540 | dev_err(chan->dev, msg_ld_oom); |
| 540 | "No free memory for link descriptor\n"); | ||
| 541 | goto fail; | 541 | goto fail; |
| 542 | } | 542 | } |
| 543 | #ifdef FSL_DMA_LD_DEBUG | 543 | #ifdef FSL_DMA_LD_DEBUG |
| @@ -583,223 +583,205 @@ fail: | |||
| 583 | return NULL; | 583 | return NULL; |
| 584 | } | 584 | } |
| 585 | 585 | ||
| 586 | /** | 586 | static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, |
| 587 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | 587 | struct scatterlist *dst_sg, unsigned int dst_nents, |
| 588 | * @chan: DMA channel | 588 | struct scatterlist *src_sg, unsigned int src_nents, |
| 589 | * @sgl: scatterlist to transfer to/from | 589 | unsigned long flags) |
| 590 | * @sg_len: number of entries in @scatterlist | ||
| 591 | * @direction: DMA direction | ||
| 592 | * @flags: DMAEngine flags | ||
| 593 | * | ||
| 594 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
| 595 | * DMA_SLAVE API, this gets the device-specific information from the | ||
| 596 | * chan->private variable. | ||
| 597 | */ | ||
| 598 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
| 599 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
| 600 | enum dma_data_direction direction, unsigned long flags) | ||
| 601 | { | 590 | { |
| 602 | struct fsldma_chan *chan; | ||
| 603 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 591 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
| 604 | struct fsl_dma_slave *slave; | 592 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
| 605 | size_t copy; | 593 | size_t dst_avail, src_avail; |
| 606 | 594 | dma_addr_t dst, src; | |
| 607 | int i; | 595 | size_t len; |
| 608 | struct scatterlist *sg; | ||
| 609 | size_t sg_used; | ||
| 610 | size_t hw_used; | ||
| 611 | struct fsl_dma_hw_addr *hw; | ||
| 612 | dma_addr_t dma_dst, dma_src; | ||
| 613 | 596 | ||
| 614 | if (!dchan) | 597 | /* basic sanity checks */ |
| 598 | if (dst_nents == 0 || src_nents == 0) | ||
| 615 | return NULL; | 599 | return NULL; |
| 616 | 600 | ||
| 617 | if (!dchan->private) | 601 | if (dst_sg == NULL || src_sg == NULL) |
| 618 | return NULL; | 602 | return NULL; |
| 619 | 603 | ||
| 620 | chan = to_fsl_chan(dchan); | 604 | /* |
| 621 | slave = dchan->private; | 605 | * TODO: should we check that both scatterlists have the same |
| 606 | * TODO: number of bytes in total? Is that really an error? | ||
| 607 | */ | ||
| 622 | 608 | ||
| 623 | if (list_empty(&slave->addresses)) | 609 | /* get prepared for the loop */ |
| 624 | return NULL; | 610 | dst_avail = sg_dma_len(dst_sg); |
| 611 | src_avail = sg_dma_len(src_sg); | ||
| 625 | 612 | ||
| 626 | hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); | 613 | /* run until we are out of scatterlist entries */ |
| 627 | hw_used = 0; | 614 | while (true) { |
| 628 | 615 | ||
| 629 | /* | 616 | /* create the largest transaction possible */ |
| 630 | * Build the hardware transaction to copy from the scatterlist to | 617 | len = min_t(size_t, src_avail, dst_avail); |
| 631 | * the hardware, or from the hardware to the scatterlist | 618 | len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); |
| 632 | * | 619 | if (len == 0) |
| 633 | * If you are copying from the hardware to the scatterlist and it | 620 | goto fetch; |
| 634 | * takes two hardware entries to fill an entire page, then both | 621 | |
| 635 | * hardware entries will be coalesced into the same page | 622 | dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; |
| 636 | * | 623 | src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; |
| 637 | * If you are copying from the scatterlist to the hardware and a | 624 | |
| 638 | * single page can fill two hardware entries, then the data will | 625 | /* allocate and populate the descriptor */ |
| 639 | * be read out of the page into the first hardware entry, and so on | 626 | new = fsl_dma_alloc_descriptor(chan); |
| 640 | */ | 627 | if (!new) { |
| 641 | for_each_sg(sgl, sg, sg_len, i) { | 628 | dev_err(chan->dev, msg_ld_oom); |
| 642 | sg_used = 0; | 629 | goto fail; |
| 643 | 630 | } | |
| 644 | /* Loop until the entire scatterlist entry is used */ | ||
| 645 | while (sg_used < sg_dma_len(sg)) { | ||
| 646 | |||
| 647 | /* | ||
| 648 | * If we've used up the current hardware address/length | ||
| 649 | * pair, we need to load a new one | ||
| 650 | * | ||
| 651 | * This is done in a while loop so that descriptors with | ||
| 652 | * length == 0 will be skipped | ||
| 653 | */ | ||
| 654 | while (hw_used >= hw->length) { | ||
| 655 | |||
| 656 | /* | ||
| 657 | * If the current hardware entry is the last | ||
| 658 | * entry in the list, we're finished | ||
| 659 | */ | ||
| 660 | if (list_is_last(&hw->entry, &slave->addresses)) | ||
| 661 | goto finished; | ||
| 662 | |||
| 663 | /* Get the next hardware address/length pair */ | ||
| 664 | hw = list_entry(hw->entry.next, | ||
| 665 | struct fsl_dma_hw_addr, entry); | ||
| 666 | hw_used = 0; | ||
| 667 | } | ||
| 668 | |||
| 669 | /* Allocate the link descriptor from DMA pool */ | ||
| 670 | new = fsl_dma_alloc_descriptor(chan); | ||
| 671 | if (!new) { | ||
| 672 | dev_err(chan->dev, "No free memory for " | ||
| 673 | "link descriptor\n"); | ||
| 674 | goto fail; | ||
| 675 | } | ||
| 676 | #ifdef FSL_DMA_LD_DEBUG | 631 | #ifdef FSL_DMA_LD_DEBUG |
| 677 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | 632 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); |
| 678 | #endif | 633 | #endif |
| 679 | 634 | ||
| 680 | /* | 635 | set_desc_cnt(chan, &new->hw, len); |
| 681 | * Calculate the maximum number of bytes to transfer, | 636 | set_desc_src(chan, &new->hw, src); |
| 682 | * making sure it is less than the DMA controller limit | 637 | set_desc_dst(chan, &new->hw, dst); |
| 683 | */ | ||
| 684 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | ||
| 685 | hw->length - hw_used); | ||
| 686 | copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); | ||
| 687 | |||
| 688 | /* | ||
| 689 | * DMA_FROM_DEVICE | ||
| 690 | * from the hardware to the scatterlist | ||
| 691 | * | ||
| 692 | * DMA_TO_DEVICE | ||
| 693 | * from the scatterlist to the hardware | ||
| 694 | */ | ||
| 695 | if (direction == DMA_FROM_DEVICE) { | ||
| 696 | dma_src = hw->address + hw_used; | ||
| 697 | dma_dst = sg_dma_address(sg) + sg_used; | ||
| 698 | } else { | ||
| 699 | dma_src = sg_dma_address(sg) + sg_used; | ||
| 700 | dma_dst = hw->address + hw_used; | ||
| 701 | } | ||
| 702 | |||
| 703 | /* Fill in the descriptor */ | ||
| 704 | set_desc_cnt(chan, &new->hw, copy); | ||
| 705 | set_desc_src(chan, &new->hw, dma_src); | ||
| 706 | set_desc_dst(chan, &new->hw, dma_dst); | ||
| 707 | |||
| 708 | /* | ||
| 709 | * If this is not the first descriptor, chain the | ||
| 710 | * current descriptor after the previous descriptor | ||
| 711 | */ | ||
| 712 | if (!first) { | ||
| 713 | first = new; | ||
| 714 | } else { | ||
| 715 | set_desc_next(chan, &prev->hw, | ||
| 716 | new->async_tx.phys); | ||
| 717 | } | ||
| 718 | |||
| 719 | new->async_tx.cookie = 0; | ||
| 720 | async_tx_ack(&new->async_tx); | ||
| 721 | |||
| 722 | prev = new; | ||
| 723 | sg_used += copy; | ||
| 724 | hw_used += copy; | ||
| 725 | |||
| 726 | /* Insert the link descriptor into the LD ring */ | ||
| 727 | list_add_tail(&new->node, &first->tx_list); | ||
| 728 | } | ||
| 729 | } | ||
| 730 | 638 | ||
| 731 | finished: | 639 | if (!first) |
| 640 | first = new; | ||
| 641 | else | ||
| 642 | set_desc_next(chan, &prev->hw, new->async_tx.phys); | ||
| 732 | 643 | ||
| 733 | /* All of the hardware address/length pairs had length == 0 */ | 644 | new->async_tx.cookie = 0; |
| 734 | if (!first || !new) | 645 | async_tx_ack(&new->async_tx); |
| 735 | return NULL; | 646 | prev = new; |
| 736 | 647 | ||
| 737 | new->async_tx.flags = flags; | 648 | /* Insert the link descriptor to the LD ring */ |
| 738 | new->async_tx.cookie = -EBUSY; | 649 | list_add_tail(&new->node, &first->tx_list); |
| 739 | 650 | ||
| 740 | /* Set End-of-link to the last link descriptor of new list */ | 651 | /* update metadata */ |
| 741 | set_ld_eol(chan, new); | 652 | dst_avail -= len; |
| 653 | src_avail -= len; | ||
| 654 | |||
| 655 | fetch: | ||
| 656 | /* fetch the next dst scatterlist entry */ | ||
| 657 | if (dst_avail == 0) { | ||
| 658 | |||
| 659 | /* no more entries: we're done */ | ||
| 660 | if (dst_nents == 0) | ||
| 661 | break; | ||
| 662 | |||
| 663 | /* fetch the next entry: if there are no more: done */ | ||
| 664 | dst_sg = sg_next(dst_sg); | ||
| 665 | if (dst_sg == NULL) | ||
| 666 | break; | ||
| 667 | |||
| 668 | dst_nents--; | ||
| 669 | dst_avail = sg_dma_len(dst_sg); | ||
| 670 | } | ||
| 742 | 671 | ||
| 743 | /* Enable extra controller features */ | 672 | /* fetch the next src scatterlist entry */ |
| 744 | if (chan->set_src_loop_size) | 673 | if (src_avail == 0) { |
| 745 | chan->set_src_loop_size(chan, slave->src_loop_size); | ||
| 746 | 674 | ||
| 747 | if (chan->set_dst_loop_size) | 675 | /* no more entries: we're done */ |
| 748 | chan->set_dst_loop_size(chan, slave->dst_loop_size); | 676 | if (src_nents == 0) |
| 677 | break; | ||
| 749 | 678 | ||
| 750 | if (chan->toggle_ext_start) | 679 | /* fetch the next entry: if there are no more: done */ |
| 751 | chan->toggle_ext_start(chan, slave->external_start); | 680 | src_sg = sg_next(src_sg); |
| 681 | if (src_sg == NULL) | ||
| 682 | break; | ||
| 752 | 683 | ||
| 753 | if (chan->toggle_ext_pause) | 684 | src_nents--; |
| 754 | chan->toggle_ext_pause(chan, slave->external_pause); | 685 | src_avail = sg_dma_len(src_sg); |
| 686 | } | ||
| 687 | } | ||
| 755 | 688 | ||
| 756 | if (chan->set_request_count) | 689 | new->async_tx.flags = flags; /* client is in control of this ack */ |
| 757 | chan->set_request_count(chan, slave->request_count); | 690 | new->async_tx.cookie = -EBUSY; |
| 691 | |||
| 692 | /* Set End-of-link to the last link descriptor of new list */ | ||
| 693 | set_ld_eol(chan, new); | ||
| 758 | 694 | ||
| 759 | return &first->async_tx; | 695 | return &first->async_tx; |
| 760 | 696 | ||
| 761 | fail: | 697 | fail: |
| 762 | /* If first was not set, then we failed to allocate the very first | ||
| 763 | * descriptor, and we're done */ | ||
| 764 | if (!first) | 698 | if (!first) |
| 765 | return NULL; | 699 | return NULL; |
| 766 | 700 | ||
| 701 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
| 702 | return NULL; | ||
| 703 | } | ||
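A self-contained userspace model of the walk above: consume two segment lists in lock-step, always emitting the largest chunk that fits in the remainder of the current source entry, the current destination entry, and the controller's byte-count limit (BCR_MAX and struct seg are stand-ins for FSL_DMA_BCR_MAX_CNT and the kernel scatterlist):

    #include <stdio.h>
    #include <stddef.h>

    #define BCR_MAX 4096            /* stand-in for FSL_DMA_BCR_MAX_CNT */

    struct seg { size_t addr, len; };

    /* Emit min(src remainder, dst remainder, BCR_MAX) sized copies until
     * either list runs out, skipping zero-length segments just like the
     * "goto fetch" path in fsl_dma_prep_sg(). */
    static void walk(const struct seg *dst, int nd,
                     const struct seg *src, int ns)
    {
            size_t dst_avail = dst->len, src_avail = src->len;

            while (1) {
                    size_t len = src_avail < dst_avail ? src_avail : dst_avail;

                    if (len > BCR_MAX)
                            len = BCR_MAX;
                    if (len) {
                            printf("copy %zu bytes: src 0x%zx -> dst 0x%zx\n",
                                   len, src->addr + src->len - src_avail,
                                   dst->addr + dst->len - dst_avail);
                            dst_avail -= len;
                            src_avail -= len;
                    }
                    if (dst_avail == 0) {   /* next destination entry */
                            if (--nd == 0)
                                    break;
                            dst++;
                            dst_avail = dst->len;
                    }
                    if (src_avail == 0) {   /* next source entry */
                            if (--ns == 0)
                                    break;
                            src++;
                            src_avail = src->len;
                    }
            }
    }

    int main(void)
    {
            const struct seg d[] = { { 0x1000, 6000 }, { 0x8000, 2000 } };
            const struct seg s[] = { { 0x2000, 8000 } };

            walk(d, 2, s, 1);       /* three chunks: 4096, 1904, 2000 bytes */
            return 0;
    }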
| 704 | |||
| 705 | /** | ||
| 706 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | ||
| 707 | * @chan: DMA channel | ||
| 708 | * @sgl: scatterlist to transfer to/from | ||
| 709 | * @sg_len: number of entries in @scatterlist | ||
| 710 | * @direction: DMA direction | ||
| 711 | * @flags: DMAEngine flags | ||
| 712 | * | ||
| 713 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | ||
| 714 | * DMA_SLAVE API, this gets the device-specific information from the | ||
| 715 | * chan->private variable. | ||
| 716 | */ | ||
| 717 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | ||
| 718 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | ||
| 719 | enum dma_data_direction direction, unsigned long flags) | ||
| 720 | { | ||
| 767 | /* | 721 | /* |
| 768 | * First is set, so all of the descriptors we allocated have been added | 722 | * This operation is not supported on the Freescale DMA controller |
| 769 | * to first->tx_list, INCLUDING "first" itself. Therefore we | ||
| 770 | * must traverse the list backwards freeing each descriptor in turn | ||
| 771 | * | 723 | * |
| 772 | * We're re-using variables for the loop, oh well | 724 | * However, we need to provide the function pointer to allow the |
| 725 | * device_control() method to work. | ||
| 773 | */ | 726 | */ |
| 774 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | ||
| 775 | return NULL; | 727 | return NULL; |
| 776 | } | 728 | } |
| 777 | 729 | ||
| 778 | static int fsl_dma_device_control(struct dma_chan *dchan, | 730 | static int fsl_dma_device_control(struct dma_chan *dchan, |
| 779 | enum dma_ctrl_cmd cmd, unsigned long arg) | 731 | enum dma_ctrl_cmd cmd, unsigned long arg) |
| 780 | { | 732 | { |
| 733 | struct dma_slave_config *config; | ||
| 781 | struct fsldma_chan *chan; | 734 | struct fsldma_chan *chan; |
| 782 | unsigned long flags; | 735 | unsigned long flags; |
| 783 | 736 | int size; | |
| 784 | /* Only supports DMA_TERMINATE_ALL */ | ||
| 785 | if (cmd != DMA_TERMINATE_ALL) | ||
| 786 | return -ENXIO; | ||
| 787 | 737 | ||
| 788 | if (!dchan) | 738 | if (!dchan) |
| 789 | return -EINVAL; | 739 | return -EINVAL; |
| 790 | 740 | ||
| 791 | chan = to_fsl_chan(dchan); | 741 | chan = to_fsl_chan(dchan); |
| 792 | 742 | ||
| 793 | /* Halt the DMA engine */ | 743 | switch (cmd) { |
| 794 | dma_halt(chan); | 744 | case DMA_TERMINATE_ALL: |
| 745 | /* Halt the DMA engine */ | ||
| 746 | dma_halt(chan); | ||
| 795 | 747 | ||
| 796 | spin_lock_irqsave(&chan->desc_lock, flags); | 748 | spin_lock_irqsave(&chan->desc_lock, flags); |
| 797 | 749 | ||
| 798 | /* Remove and free all of the descriptors in the LD queue */ | 750 | /* Remove and free all of the descriptors in the LD queue */ |
| 799 | fsldma_free_desc_list(chan, &chan->ld_pending); | 751 | fsldma_free_desc_list(chan, &chan->ld_pending); |
| 800 | fsldma_free_desc_list(chan, &chan->ld_running); | 752 | fsldma_free_desc_list(chan, &chan->ld_running); |
| 801 | 753 | ||
| 802 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 754 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
| 755 | return 0; | ||
| 756 | |||
| 757 | case DMA_SLAVE_CONFIG: | ||
| 758 | config = (struct dma_slave_config *)arg; | ||
| 759 | |||
| 760 | /* make sure the channel supports setting burst size */ | ||
| 761 | if (!chan->set_request_count) | ||
| 762 | return -ENXIO; | ||
| 763 | |||
| 764 | /* we set the controller burst size depending on direction */ | ||
| 765 | if (config->direction == DMA_TO_DEVICE) | ||
| 766 | size = config->dst_addr_width * config->dst_maxburst; | ||
| 767 | else | ||
| 768 | size = config->src_addr_width * config->src_maxburst; | ||
| 769 | |||
| 770 | chan->set_request_count(chan, size); | ||
| 771 | return 0; | ||
| 772 | |||
| 773 | case FSLDMA_EXTERNAL_START: | ||
| 774 | |||
| 775 | /* make sure the channel supports external start */ | ||
| 776 | if (!chan->toggle_ext_start) | ||
| 777 | return -ENXIO; | ||
| 778 | |||
| 779 | chan->toggle_ext_start(chan, arg); | ||
| 780 | return 0; | ||
| 781 | |||
| 782 | default: | ||
| 783 | return -ENXIO; | ||
| 784 | } | ||
| 803 | 785 | ||
| 804 | return 0; | 786 | return 0; |
| 805 | } | 787 | } |
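With DMA_SLAVE_CONFIG wired up, a peripheral driver no longer needs a controller-specific interface to set the burst size. A hedged client-side sketch (the foo_* name, channel, FIFO address, and burst values are placeholders): the client describes its FIFO, and fsl_dma_device_control() above turns width times maxburst into the request count.

    /* Hedged sketch: program a 64-byte request size (4 bytes x 16) for a
     * memory-to-device channel through the generic slave-config command. */
    static int foo_setup_tx(struct dma_chan *chan, dma_addr_t fifo)
    {
            struct dma_slave_config cfg = {
                    .direction      = DMA_TO_DEVICE,
                    .dst_addr       = fifo,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 16,
            };

            /* fsl_dma_device_control() computes 4 * 16 = 64 bytes and
             * passes that to chan->set_request_count(). */
            return chan->device->device_control(chan, DMA_SLAVE_CONFIG,
                                                (unsigned long)&cfg);
    }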
| @@ -1327,11 +1309,13 @@ static int __devinit fsldma_of_probe(struct platform_device *op, | |||
| 1327 | 1309 | ||
| 1328 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1310 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
| 1329 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | 1311 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); |
| 1312 | dma_cap_set(DMA_SG, fdev->common.cap_mask); | ||
| 1330 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | 1313 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); |
| 1331 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1314 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
| 1332 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1315 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
| 1333 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | 1316 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; |
| 1334 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1317 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
| 1318 | fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; | ||
| 1335 | fdev->common.device_tx_status = fsl_tx_status; | 1319 | fdev->common.device_tx_status = fsl_tx_status; |
| 1336 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1320 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
| 1337 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | 1321 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c new file mode 100644 index 000000000000..f629e4961af5 --- /dev/null +++ b/drivers/dma/imx-dma.c | |||
| @@ -0,0 +1,424 @@ | |||
| 1 | /* | ||
| 2 | * drivers/dma/imx-dma.c | ||
| 3 | * | ||
| 4 | * This file contains a driver for the Freescale i.MX DMA engine | ||
| 5 | * found on i.MX1/21/27 | ||
| 6 | * | ||
| 7 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> | ||
| 8 | * | ||
| 9 | * The code contained herein is licensed under the GNU General Public | ||
| 10 | * License. You may obtain a copy of the GNU General Public License | ||
| 11 | * Version 2 or later at the following locations: | ||
| 12 | * | ||
| 13 | * http://www.opensource.org/licenses/gpl-license.html | ||
| 14 | * http://www.gnu.org/copyleft/gpl.html | ||
| 15 | */ | ||
| 16 | #include <linux/init.h> | ||
| 17 | #include <linux/types.h> | ||
| 18 | #include <linux/mm.h> | ||
| 19 | #include <linux/interrupt.h> | ||
| 20 | #include <linux/spinlock.h> | ||
| 21 | #include <linux/device.h> | ||
| 22 | #include <linux/dma-mapping.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/dmaengine.h> | ||
| 26 | |||
| 27 | #include <asm/irq.h> | ||
| 28 | #include <mach/dma-v1.h> | ||
| 29 | #include <mach/hardware.h> | ||
| 30 | |||
| 31 | struct imxdma_channel { | ||
| 32 | struct imxdma_engine *imxdma; | ||
| 33 | unsigned int channel; | ||
| 34 | unsigned int imxdma_channel; | ||
| 35 | |||
| 36 | enum dma_slave_buswidth word_size; | ||
| 37 | dma_addr_t per_address; | ||
| 38 | u32 watermark_level; | ||
| 39 | struct dma_chan chan; | ||
| 40 | spinlock_t lock; | ||
| 41 | struct dma_async_tx_descriptor desc; | ||
| 42 | dma_cookie_t last_completed; | ||
| 43 | enum dma_status status; | ||
| 44 | int dma_request; | ||
| 45 | struct scatterlist *sg_list; | ||
| 46 | }; | ||
| 47 | |||
| 48 | #define MAX_DMA_CHANNELS 8 | ||
| 49 | |||
| 50 | struct imxdma_engine { | ||
| 51 | struct device *dev; | ||
| 52 | struct dma_device dma_device; | ||
| 53 | struct imxdma_channel channel[MAX_DMA_CHANNELS]; | ||
| 54 | }; | ||
| 55 | |||
| 56 | static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) | ||
| 57 | { | ||
| 58 | return container_of(chan, struct imxdma_channel, chan); | ||
| 59 | } | ||
| 60 | |||
| 61 | static void imxdma_handle(struct imxdma_channel *imxdmac) | ||
| 62 | { | ||
| 63 | if (imxdmac->desc.callback) | ||
| 64 | imxdmac->desc.callback(imxdmac->desc.callback_param); | ||
| 65 | imxdmac->last_completed = imxdmac->desc.cookie; | ||
| 66 | } | ||
| 67 | |||
| 68 | static void imxdma_irq_handler(int channel, void *data) | ||
| 69 | { | ||
| 70 | struct imxdma_channel *imxdmac = data; | ||
| 71 | |||
| 72 | imxdmac->status = DMA_SUCCESS; | ||
| 73 | imxdma_handle(imxdmac); | ||
| 74 | } | ||
| 75 | |||
| 76 | static void imxdma_err_handler(int channel, void *data, int error) | ||
| 77 | { | ||
| 78 | struct imxdma_channel *imxdmac = data; | ||
| 79 | |||
| 80 | imxdmac->status = DMA_ERROR; | ||
| 81 | imxdma_handle(imxdmac); | ||
| 82 | } | ||
| 83 | |||
| 84 | static void imxdma_progression(int channel, void *data, | ||
| 85 | struct scatterlist *sg) | ||
| 86 | { | ||
| 87 | struct imxdma_channel *imxdmac = data; | ||
| 88 | |||
| 89 | imxdmac->status = DMA_SUCCESS; | ||
| 90 | imxdma_handle(imxdmac); | ||
| 91 | } | ||
| 92 | |||
| 93 | static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
| 94 | unsigned long arg) | ||
| 95 | { | ||
| 96 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
| 97 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
| 98 | int ret; | ||
| 99 | unsigned int mode = 0; | ||
| 100 | |||
| 101 | switch (cmd) { | ||
| 102 | case DMA_TERMINATE_ALL: | ||
| 103 | imxdmac->status = DMA_ERROR; | ||
| 104 | imx_dma_disable(imxdmac->imxdma_channel); | ||
| 105 | return 0; | ||
| 106 | case DMA_SLAVE_CONFIG: | ||
| 107 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | ||
| 108 | imxdmac->per_address = dmaengine_cfg->src_addr; | ||
| 109 | imxdmac->watermark_level = dmaengine_cfg->src_maxburst; | ||
| 110 | imxdmac->word_size = dmaengine_cfg->src_addr_width; | ||
| 111 | } else { | ||
| 112 | imxdmac->per_address = dmaengine_cfg->dst_addr; | ||
| 113 | imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; | ||
| 114 | imxdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
| 115 | } | ||
| 116 | |||
| 117 | switch (imxdmac->word_size) { | ||
| 118 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | ||
| 119 | mode = IMX_DMA_MEMSIZE_8; | ||
| 120 | break; | ||
| 121 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | ||
| 122 | mode = IMX_DMA_MEMSIZE_16; | ||
| 123 | break; | ||
| 124 | default: | ||
| 125 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | ||
| 126 | mode = IMX_DMA_MEMSIZE_32; | ||
| 127 | break; | ||
| 128 | } | ||
| 129 | ret = imx_dma_config_channel(imxdmac->imxdma_channel, | ||
| 130 | mode | IMX_DMA_TYPE_FIFO, | ||
| 131 | IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, | ||
| 132 | imxdmac->dma_request, 1); | ||
| 133 | |||
| 134 | if (ret) | ||
| 135 | return ret; | ||
| 136 | |||
| 137 | imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level); | ||
| 138 | |||
| 139 | return 0; | ||
| 140 | default: | ||
| 141 | return -ENOSYS; | ||
| 142 | } | ||
| 143 | |||
| 144 | return -EINVAL; | ||
| 145 | } | ||
| 146 | |||
| 147 | static enum dma_status imxdma_tx_status(struct dma_chan *chan, | ||
| 148 | dma_cookie_t cookie, | ||
| 149 | struct dma_tx_state *txstate) | ||
| 150 | { | ||
| 151 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
| 152 | dma_cookie_t last_used; | ||
| 153 | enum dma_status ret; | ||
| 154 | |||
| 155 | last_used = chan->cookie; | ||
| 156 | |||
| 157 | ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); | ||
| 158 | dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); | ||
| 159 | |||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | |||
| 163 | static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) | ||
| 164 | { | ||
| 165 | dma_cookie_t cookie = imxdma->chan.cookie; | ||
| 166 | |||
| 167 | if (++cookie < 0) | ||
| 168 | cookie = 1; | ||
| 169 | |||
| 170 | imxdma->chan.cookie = cookie; | ||
| 171 | imxdma->desc.cookie = cookie; | ||
| 172 | |||
| 173 | return cookie; | ||
| 174 | } | ||
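dma_cookie_t is a signed 32-bit type, and zero and negative cookies are reserved for the "not assigned" and error states, so the increment above wraps back to 1 instead of going negative. A standalone model of that rule:

    #include <stdio.h>

    typedef int dma_cookie_t;       /* s32 in the kernel */

    /* Model of the assignment rule above.  The kernel builds with
     * -fno-strict-overflow, so its signed "++cookie" may wrap; the
     * unsigned addition here makes that wrap explicit and portable. */
    static dma_cookie_t next_cookie(dma_cookie_t c)
    {
            c = (dma_cookie_t)((unsigned int)c + 1);
            if (c < 0)
                    c = 1;
            return c;
    }

    int main(void)
    {
            printf("%d\n", next_cookie(41));            /* 42 */
            printf("%d\n", next_cookie(0x7fffffff));    /* wraps to 1 */
            return 0;
    }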
| 175 | |||
| 176 | static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 177 | { | ||
| 178 | struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); | ||
| 179 | dma_cookie_t cookie; | ||
| 180 | |||
| 181 | spin_lock_irq(&imxdmac->lock); | ||
| 182 | |||
| 183 | cookie = imxdma_assign_cookie(imxdmac); | ||
| 184 | |||
| 185 | imx_dma_enable(imxdmac->imxdma_channel); | ||
| 186 | |||
| 187 | spin_unlock_irq(&imxdmac->lock); | ||
| 188 | |||
| 189 | return cookie; | ||
| 190 | } | ||
| 191 | |||
| 192 | static int imxdma_alloc_chan_resources(struct dma_chan *chan) | ||
| 193 | { | ||
| 194 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
| 195 | struct imx_dma_data *data = chan->private; | ||
| 196 | |||
| 197 | imxdmac->dma_request = data->dma_request; | ||
| 198 | |||
| 199 | dma_async_tx_descriptor_init(&imxdmac->desc, chan); | ||
| 200 | imxdmac->desc.tx_submit = imxdma_tx_submit; | ||
| 201 | /* txd.flags will be overwritten in prep funcs */ | ||
| 202 | imxdmac->desc.flags = DMA_CTRL_ACK; | ||
| 203 | |||
| 204 | imxdmac->status = DMA_SUCCESS; | ||
| 205 | |||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | |||
| 209 | static void imxdma_free_chan_resources(struct dma_chan *chan) | ||
| 210 | { | ||
| 211 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
| 212 | |||
| 213 | imx_dma_disable(imxdmac->imxdma_channel); | ||
| 214 | |||
| 215 | if (imxdmac->sg_list) { | ||
| 216 | kfree(imxdmac->sg_list); | ||
| 217 | imxdmac->sg_list = NULL; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | |||
| 221 | static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( | ||
| 222 | struct dma_chan *chan, struct scatterlist *sgl, | ||
| 223 | unsigned int sg_len, enum dma_data_direction direction, | ||
| 224 | unsigned long flags) | ||
| 225 | { | ||
| 226 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
| 227 | struct scatterlist *sg; | ||
| 228 | int i, ret, dma_length = 0; | ||
| 229 | unsigned int dmamode; | ||
| 230 | |||
| 231 | if (imxdmac->status == DMA_IN_PROGRESS) | ||
| 232 | return NULL; | ||
| 233 | |||
| 234 | imxdmac->status = DMA_IN_PROGRESS; | ||
| 235 | |||
| 236 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 237 | dma_length += sg->length; | ||
| 238 | } | ||
| 239 | |||
| 240 | if (direction == DMA_FROM_DEVICE) | ||
| 241 | dmamode = DMA_MODE_READ; | ||
| 242 | else | ||
| 243 | dmamode = DMA_MODE_WRITE; | ||
| 244 | |||
| 245 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, | ||
| 246 | dma_length, imxdmac->per_address, dmamode); | ||
| 247 | if (ret) | ||
| 248 | return NULL; | ||
| 249 | |||
| 250 | return &imxdmac->desc; | ||
| 251 | } | ||
| 252 | |||
| 253 | static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( | ||
| 254 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
| 255 | size_t period_len, enum dma_data_direction direction) | ||
| 256 | { | ||
| 257 | struct imxdma_channel *imxdmac = to_imxdma_chan(chan); | ||
| 258 | struct imxdma_engine *imxdma = imxdmac->imxdma; | ||
| 259 | int i, ret; | ||
| 260 | unsigned int periods = buf_len / period_len; | ||
| 261 | unsigned int dmamode; | ||
| 262 | |||
| 263 | dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n", | ||
| 264 | __func__, imxdmac->channel, buf_len, period_len); | ||
| 265 | |||
| 266 | if (imxdmac->status == DMA_IN_PROGRESS) | ||
| 267 | return NULL; | ||
| 268 | imxdmac->status = DMA_IN_PROGRESS; | ||
| 269 | |||
| 270 | ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, | ||
| 271 | imxdma_progression); | ||
| 272 | if (ret) { | ||
| 273 | dev_err(imxdma->dev, "Failed to setup the DMA handler\n"); | ||
| 274 | return NULL; | ||
| 275 | } | ||
| 276 | |||
| 277 | if (imxdmac->sg_list) | ||
| 278 | kfree(imxdmac->sg_list); | ||
| 279 | |||
| 280 | imxdmac->sg_list = kcalloc(periods + 1, | ||
| 281 | sizeof(struct scatterlist), GFP_KERNEL); | ||
| 282 | if (!imxdmac->sg_list) | ||
| 283 | return NULL; | ||
| 284 | |||
| 285 | sg_init_table(imxdmac->sg_list, periods); | ||
| 286 | |||
| 287 | for (i = 0; i < periods; i++) { | ||
| 288 | imxdmac->sg_list[i].page_link = 0; | ||
| 289 | imxdmac->sg_list[i].offset = 0; | ||
| 290 | imxdmac->sg_list[i].dma_address = dma_addr; | ||
| 291 | imxdmac->sg_list[i].length = period_len; | ||
| 292 | dma_addr += period_len; | ||
| 293 | } | ||
| 294 | |||
| 295 | /* close the loop */ | ||
| 296 | imxdmac->sg_list[periods].offset = 0; | ||
| 297 | imxdmac->sg_list[periods].length = 0; | ||
| 298 | imxdmac->sg_list[periods].page_link = | ||
| 299 | ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; | ||
| 300 | |||
| 301 | if (direction == DMA_FROM_DEVICE) | ||
| 302 | dmamode = DMA_MODE_READ; | ||
| 303 | else | ||
| 304 | dmamode = DMA_MODE_WRITE; | ||
| 305 | |||
| 306 | ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, | ||
| 307 | IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); | ||
| 308 | if (ret) | ||
| 309 | return NULL; | ||
| 310 | |||
| 311 | return &imxdmac->desc; | ||
| 312 | } | ||
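A cyclic transfer like this is typically driven by a streaming client such as a sound driver: one ring buffer of buf_len bytes split into period_len chunks, with the descriptor callback firing once per period while the looped sg list built above keeps the channel running. A hedged sketch of such a client (the foo_* names are hypothetical):

    static int foo_start_stream(struct dma_chan *chan, dma_addr_t buf,
                                size_t buf_len, size_t period_len)
    {
            struct dma_async_tx_descriptor *desc;

            desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                                                        period_len,
                                                        DMA_TO_DEVICE);
            if (!desc)
                    return -EINVAL;

            desc->callback = foo_period_elapsed;    /* hypothetical handler */
            desc->callback_param = chan;

            desc->tx_submit(desc);  /* imxdma_tx_submit() enables the channel */
            return 0;
    }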
| 313 | |||
| 314 | static void imxdma_issue_pending(struct dma_chan *chan) | ||
| 315 | { | ||
| 316 | /* | ||
| 317 | * Nothing to do. We only have a single descriptor | ||
| 318 | */ | ||
| 319 | } | ||
| 320 | |||
| 321 | static int __init imxdma_probe(struct platform_device *pdev) | ||
| 322 | { | ||
| 323 | struct imxdma_engine *imxdma; | ||
| 324 | int ret, i; | ||
| 325 | |||
| 326 | imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); | ||
| 327 | if (!imxdma) | ||
| 328 | return -ENOMEM; | ||
| 329 | |||
| 330 | INIT_LIST_HEAD(&imxdma->dma_device.channels); | ||
| 331 | |||
| 332 | /* Initialize channel parameters */ | ||
| 333 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
| 334 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | ||
| 335 | |||
| 336 | imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", | ||
| 337 | DMA_PRIO_MEDIUM); | ||
| 338 | if ((int)imxdmac->imxdma_channel < 0) { | ||
| 339 | ret = -ENODEV; | ||
| 340 | goto err_init; | ||
| 341 | } | ||
| 342 | |||
| 343 | imx_dma_setup_handlers(imxdmac->imxdma_channel, | ||
| 344 | imxdma_irq_handler, imxdma_err_handler, imxdmac); | ||
| 345 | |||
| 346 | imxdmac->imxdma = imxdma; | ||
| 347 | spin_lock_init(&imxdmac->lock); | ||
| 348 | |||
| 349 | dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); | ||
| 350 | dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); | ||
| 351 | |||
| 352 | imxdmac->chan.device = &imxdma->dma_device; | ||
| 353 | imxdmac->chan.chan_id = i; | ||
| 354 | imxdmac->channel = i; | ||
| 355 | |||
| 356 | /* Add the channel to the DMAC list */ | ||
| 357 | list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); | ||
| 358 | } | ||
| 359 | |||
| 360 | imxdma->dev = &pdev->dev; | ||
| 361 | imxdma->dma_device.dev = &pdev->dev; | ||
| 362 | |||
| 363 | imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; | ||
| 364 | imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources; | ||
| 365 | imxdma->dma_device.device_tx_status = imxdma_tx_status; | ||
| 366 | imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; | ||
| 367 | imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; | ||
| 368 | imxdma->dma_device.device_control = imxdma_control; | ||
| 369 | imxdma->dma_device.device_issue_pending = imxdma_issue_pending; | ||
| 370 | |||
| 371 | platform_set_drvdata(pdev, imxdma); | ||
| 372 | |||
| 373 | ret = dma_async_device_register(&imxdma->dma_device); | ||
| 374 | if (ret) { | ||
| 375 | dev_err(&pdev->dev, "unable to register\n"); | ||
| 376 | goto err_init; | ||
| 377 | } | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | |||
| 381 | err_init: | ||
| 382 | while (--i >= 0) { | ||
| 383 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | ||
| 384 | imx_dma_free(imxdmac->imxdma_channel); | ||
| 385 | } | ||
| 386 | |||
| 387 | kfree(imxdma); | ||
| 388 | return ret; | ||
| 389 | } | ||
| 390 | |||
| 391 | static int __exit imxdma_remove(struct platform_device *pdev) | ||
| 392 | { | ||
| 393 | struct imxdma_engine *imxdma = platform_get_drvdata(pdev); | ||
| 394 | int i; | ||
| 395 | |||
| 396 | dma_async_device_unregister(&imxdma->dma_device); | ||
| 397 | |||
| 398 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
| 399 | struct imxdma_channel *imxdmac = &imxdma->channel[i]; | ||
| 400 | |||
| 401 | imx_dma_free(imxdmac->imxdma_channel); | ||
| 402 | } | ||
| 403 | |||
| 404 | kfree(imxdma); | ||
| 405 | |||
| 406 | return 0; | ||
| 407 | } | ||
| 408 | |||
| 409 | static struct platform_driver imxdma_driver = { | ||
| 410 | .driver = { | ||
| 411 | .name = "imx-dma", | ||
| 412 | }, | ||
| 413 | .remove = __exit_p(imxdma_remove), | ||
| 414 | }; | ||
| 415 | |||
| 416 | static int __init imxdma_module_init(void) | ||
| 417 | { | ||
| 418 | return platform_driver_probe(&imxdma_driver, imxdma_probe); | ||
| 419 | } | ||
| 420 | subsys_initcall(imxdma_module_init); | ||
| 421 | |||
| 422 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | ||
| 423 | MODULE_DESCRIPTION("i.MX dma driver"); | ||
| 424 | MODULE_LICENSE("GPL"); | ||
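Because imxdma_alloc_chan_resources() above reads chan->private as a struct imx_dma_data, a peripheral driver hands its DMA request line to this engine through the generic dmaengine filter mechanism. A hedged sketch (the filter name and request number are illustrative):

    /* The filter claims any offered channel and attaches the i.MX-specific
     * data; request line 3 is purely an example. */
    static bool foo_dma_filter(struct dma_chan *chan, void *param)
    {
            chan->private = param;          /* struct imx_dma_data */
            return true;
    }

    static struct dma_chan *foo_get_chan(void)
    {
            static struct imx_dma_data data = { .dma_request = 3 };
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            return dma_request_channel(mask, foo_dma_filter, &data);
    }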
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c new file mode 100644 index 000000000000..0834323a0599 --- /dev/null +++ b/drivers/dma/imx-sdma.c | |||
| @@ -0,0 +1,1392 @@ | |||
| 1 | /* | ||
| 2 | * drivers/dma/imx-sdma.c | ||
| 3 | * | ||
| 4 | * This file contains a driver for the Freescale Smart DMA engine | ||
| 5 | * | ||
| 6 | * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> | ||
| 7 | * | ||
| 8 | * Based on code from Freescale: | ||
| 9 | * | ||
| 10 | * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. | ||
| 11 | * | ||
| 12 | * The code contained herein is licensed under the GNU General Public | ||
| 13 | * License. You may obtain a copy of the GNU General Public License | ||
| 14 | * Version 2 or later at the following locations: | ||
| 15 | * | ||
| 16 | * http://www.opensource.org/licenses/gpl-license.html | ||
| 17 | * http://www.gnu.org/copyleft/gpl.html | ||
| 18 | */ | ||
| 19 | |||
| 20 | #include <linux/init.h> | ||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/mm.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | #include <linux/clk.h> | ||
| 25 | #include <linux/wait.h> | ||
| 26 | #include <linux/sched.h> | ||
| 27 | #include <linux/semaphore.h> | ||
| 28 | #include <linux/spinlock.h> | ||
| 29 | #include <linux/device.h> | ||
| 30 | #include <linux/dma-mapping.h> | ||
| 31 | #include <linux/firmware.h> | ||
| 32 | #include <linux/slab.h> | ||
| 33 | #include <linux/platform_device.h> | ||
| 34 | #include <linux/dmaengine.h> | ||
| 35 | |||
| 36 | #include <asm/irq.h> | ||
| 37 | #include <mach/sdma.h> | ||
| 38 | #include <mach/dma.h> | ||
| 39 | #include <mach/hardware.h> | ||
| 40 | |||
| 41 | /* SDMA registers */ | ||
| 42 | #define SDMA_H_C0PTR 0x000 | ||
| 43 | #define SDMA_H_INTR 0x004 | ||
| 44 | #define SDMA_H_STATSTOP 0x008 | ||
| 45 | #define SDMA_H_START 0x00c | ||
| 46 | #define SDMA_H_EVTOVR 0x010 | ||
| 47 | #define SDMA_H_DSPOVR 0x014 | ||
| 48 | #define SDMA_H_HOSTOVR 0x018 | ||
| 49 | #define SDMA_H_EVTPEND 0x01c | ||
| 50 | #define SDMA_H_DSPENBL 0x020 | ||
| 51 | #define SDMA_H_RESET 0x024 | ||
| 52 | #define SDMA_H_EVTERR 0x028 | ||
| 53 | #define SDMA_H_INTRMSK 0x02c | ||
| 54 | #define SDMA_H_PSW 0x030 | ||
| 55 | #define SDMA_H_EVTERRDBG 0x034 | ||
| 56 | #define SDMA_H_CONFIG 0x038 | ||
| 57 | #define SDMA_ONCE_ENB 0x040 | ||
| 58 | #define SDMA_ONCE_DATA 0x044 | ||
| 59 | #define SDMA_ONCE_INSTR 0x048 | ||
| 60 | #define SDMA_ONCE_STAT 0x04c | ||
| 61 | #define SDMA_ONCE_CMD 0x050 | ||
| 62 | #define SDMA_EVT_MIRROR 0x054 | ||
| 63 | #define SDMA_ILLINSTADDR 0x058 | ||
| 64 | #define SDMA_CHN0ADDR 0x05c | ||
| 65 | #define SDMA_ONCE_RTB 0x060 | ||
| 66 | #define SDMA_XTRIG_CONF1 0x070 | ||
| 67 | #define SDMA_XTRIG_CONF2 0x074 | ||
| 68 | #define SDMA_CHNENBL0_V2 0x200 | ||
| 69 | #define SDMA_CHNENBL0_V1 0x080 | ||
| 70 | #define SDMA_CHNPRI_0 0x100 | ||
| 71 | |||
| 72 | /* | ||
| 73 | * Buffer descriptor status values. | ||
| 74 | */ | ||
| 75 | #define BD_DONE 0x01 | ||
| 76 | #define BD_WRAP 0x02 | ||
| 77 | #define BD_CONT 0x04 | ||
| 78 | #define BD_INTR 0x08 | ||
| 79 | #define BD_RROR 0x10 | ||
| 80 | #define BD_LAST 0x20 | ||
| 81 | #define BD_EXTD 0x80 | ||
| 82 | |||
| 83 | /* | ||
| 84 | * Data Node descriptor status values. | ||
| 85 | */ | ||
| 86 | #define DND_END_OF_FRAME 0x80 | ||
| 87 | #define DND_END_OF_XFER 0x40 | ||
| 88 | #define DND_DONE 0x20 | ||
| 89 | #define DND_UNUSED 0x01 | ||
| 90 | |||
| 91 | /* | ||
| 92 | * IPCV2 descriptor status values. | ||
| 93 | */ | ||
| 94 | #define BD_IPCV2_END_OF_FRAME 0x40 | ||
| 95 | |||
| 96 | #define IPCV2_MAX_NODES 50 | ||
| 97 | /* | ||
| 98 | * Error bit set in the CCB status field by the SDMA, | ||
| 99 | * in setbd routine, in case of a transfer error | ||
| 100 | */ | ||
| 101 | #define DATA_ERROR 0x10000000 | ||
| 102 | |||
| 103 | /* | ||
| 104 | * Buffer descriptor commands. | ||
| 105 | */ | ||
| 106 | #define C0_ADDR 0x01 | ||
| 107 | #define C0_LOAD 0x02 | ||
| 108 | #define C0_DUMP 0x03 | ||
| 109 | #define C0_SETCTX 0x07 | ||
| 110 | #define C0_GETCTX 0x03 | ||
| 111 | #define C0_SETDM 0x01 | ||
| 112 | #define C0_SETPM 0x04 | ||
| 113 | #define C0_GETDM 0x02 | ||
| 114 | #define C0_GETPM 0x08 | ||
| 115 | /* | ||
| 116 | * Change endianness indicator in the BD command field | ||
| 117 | */ | ||
| 118 | #define CHANGE_ENDIANNESS 0x80 | ||
| 119 | |||
| 120 | /* | ||
| 121 | * Mode/Count of data node descriptors - IPCv2 | ||
| 122 | */ | ||
| 123 | struct sdma_mode_count { | ||
| 124 | u32 count : 16; /* size of the buffer pointed by this BD */ | ||
| 125 | u32 status : 8; /* E,R,I,C,W,D status bits stored here */ | ||
| 126 | u32 command : 8; /* command mostly used for channel 0 */ | ||
| 127 | }; | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Buffer descriptor | ||
| 131 | */ | ||
| 132 | struct sdma_buffer_descriptor { | ||
| 133 | struct sdma_mode_count mode; | ||
| 134 | u32 buffer_addr; /* address of the buffer described */ | ||
| 135 | u32 ext_buffer_addr; /* extended buffer address */ | ||
| 136 | } __attribute__ ((packed)); | ||
| 137 | |||
| 138 | /** | ||
| 139 | * struct sdma_channel_control - Channel control Block | ||
| 140 | * | ||
| 141 | * @current_bd_ptr: current buffer descriptor processed | ||
| 142 | * @base_bd_ptr: first element of buffer descriptor array | ||
| 143 | * @unused: padding. The SDMA engine expects an array of 128 byte | ||
| 144 | * control blocks | ||
| 145 | */ | ||
| 146 | struct sdma_channel_control { | ||
| 147 | u32 current_bd_ptr; | ||
| 148 | u32 base_bd_ptr; | ||
| 149 | u32 unused[2]; | ||
| 150 | } __attribute__ ((packed)); | ||
| 151 | |||
| 152 | /** | ||
| 153 | * struct sdma_state_registers - SDMA context for a channel | ||
| 154 | * | ||
| 155 | * @pc: program counter | ||
| 156 | * @t: test bit: status of arithmetic & test instruction | ||
| 157 | * @rpc: return program counter | ||
| 158 | * @sf: source fault while loading data | ||
| 159 | * @spc: loop start program counter | ||
| 160 | * @df: destination fault while storing data | ||
| 161 | * @epc: loop end program counter | ||
| 162 | * @lm: loop mode | ||
| 163 | */ | ||
| 164 | struct sdma_state_registers { | ||
| 165 | u32 pc :14; | ||
| 166 | u32 unused1: 1; | ||
| 167 | u32 t : 1; | ||
| 168 | u32 rpc :14; | ||
| 169 | u32 unused0: 1; | ||
| 170 | u32 sf : 1; | ||
| 171 | u32 spc :14; | ||
| 172 | u32 unused2: 1; | ||
| 173 | u32 df : 1; | ||
| 174 | u32 epc :14; | ||
| 175 | u32 lm : 2; | ||
| 176 | } __attribute__ ((packed)); | ||
| 177 | |||
| 178 | /** | ||
| 179 | * struct sdma_context_data - sdma context specific to a channel | ||
| 180 | * | ||
| 181 | * @channel_state: channel state bits | ||
| 182 | * @gReg: general registers | ||
| 183 | * @mda: burst dma destination address register | ||
| 184 | * @msa: burst dma source address register | ||
| 185 | * @ms: burst dma status register | ||
| 186 | * @md: burst dma data register | ||
| 187 | * @pda: peripheral dma destination address register | ||
| 188 | * @psa: peripheral dma source address register | ||
| 189 | * @ps: peripheral dma status register | ||
| 190 | * @pd: peripheral dma data register | ||
| 191 | * @ca: CRC polynomial register | ||
| 192 | * @cs: CRC accumulator register | ||
| 193 | * @dda: dedicated core destination address register | ||
| 194 | * @dsa: dedicated core source address register | ||
| 195 | * @ds: dedicated core status register | ||
| 196 | * @dd: dedicated core data register | ||
| 197 | */ | ||
| 198 | struct sdma_context_data { | ||
| 199 | struct sdma_state_registers channel_state; | ||
| 200 | u32 gReg[8]; | ||
| 201 | u32 mda; | ||
| 202 | u32 msa; | ||
| 203 | u32 ms; | ||
| 204 | u32 md; | ||
| 205 | u32 pda; | ||
| 206 | u32 psa; | ||
| 207 | u32 ps; | ||
| 208 | u32 pd; | ||
| 209 | u32 ca; | ||
| 210 | u32 cs; | ||
| 211 | u32 dda; | ||
| 212 | u32 dsa; | ||
| 213 | u32 ds; | ||
| 214 | u32 dd; | ||
| 215 | u32 scratch0; | ||
| 216 | u32 scratch1; | ||
| 217 | u32 scratch2; | ||
| 218 | u32 scratch3; | ||
| 219 | u32 scratch4; | ||
| 220 | u32 scratch5; | ||
| 221 | u32 scratch6; | ||
| 222 | u32 scratch7; | ||
| 223 | } __attribute__ ((packed)); | ||
| 224 | |||
| 225 | #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) | ||
| 226 | |||
| 227 | struct sdma_engine; | ||
| 228 | |||
| 229 | /** | ||
| 230 | * struct sdma_channel - housekeeping for a SDMA channel | ||
| 231 | * | ||
| 232 | * @sdma: pointer to the SDMA engine for this channel | ||
| 233 | * @channel: the channel number, matches dmaengine chan_id | ||
| 234 | * @direction: transfer type. Needed for setting the SDMA script | ||
| 235 | * @peripheral_type: peripheral type. Needed for setting the SDMA script | ||
| 236 | * @event_id0: aka DMA request line | ||
| 237 | * @event_id1: for channels that use 2 events | ||
| 238 | * @word_size: peripheral access size | ||
| 239 | * @buf_tail: ID of the buffer that was processed | ||
| 240 | * @done: channel completion | ||
| 241 | * @num_bd: at most NUM_BD; number of descriptors currently in use | ||
| 242 | */ | ||
| 243 | struct sdma_channel { | ||
| 244 | struct sdma_engine *sdma; | ||
| 245 | unsigned int channel; | ||
| 246 | enum dma_data_direction direction; | ||
| 247 | enum sdma_peripheral_type peripheral_type; | ||
| 248 | unsigned int event_id0; | ||
| 249 | unsigned int event_id1; | ||
| 250 | enum dma_slave_buswidth word_size; | ||
| 251 | unsigned int buf_tail; | ||
| 252 | struct completion done; | ||
| 253 | unsigned int num_bd; | ||
| 254 | struct sdma_buffer_descriptor *bd; | ||
| 255 | dma_addr_t bd_phys; | ||
| 256 | unsigned int pc_from_device, pc_to_device; | ||
| 257 | unsigned long flags; | ||
| 258 | dma_addr_t per_address; | ||
| 259 | u32 event_mask0, event_mask1; | ||
| 260 | u32 watermark_level; | ||
| 261 | u32 shp_addr, per_addr; | ||
| 262 | struct dma_chan chan; | ||
| 263 | spinlock_t lock; | ||
| 264 | struct dma_async_tx_descriptor desc; | ||
| 265 | dma_cookie_t last_completed; | ||
| 266 | enum dma_status status; | ||
| 267 | }; | ||
| 268 | |||
| 269 | #define IMX_DMA_SG_LOOP (1 << 0) | ||
| 270 | |||
| 271 | #define MAX_DMA_CHANNELS 32 | ||
| 272 | #define MXC_SDMA_DEFAULT_PRIORITY 1 | ||
| 273 | #define MXC_SDMA_MIN_PRIORITY 1 | ||
| 274 | #define MXC_SDMA_MAX_PRIORITY 7 | ||
| 275 | |||
| 276 | /** | ||
| 277 | * struct sdma_script_start_addrs - SDMA script start pointers | ||
| 278 | * | ||
| 279 | * start addresses of the different functions in the physical | ||
| 280 | * address space of the SDMA engine. | ||
| 281 | */ | ||
| 282 | struct sdma_script_start_addrs { | ||
| 283 | u32 ap_2_ap_addr; | ||
| 284 | u32 ap_2_bp_addr; | ||
| 285 | u32 ap_2_ap_fixed_addr; | ||
| 286 | u32 bp_2_ap_addr; | ||
| 287 | u32 loopback_on_dsp_side_addr; | ||
| 288 | u32 mcu_interrupt_only_addr; | ||
| 289 | u32 firi_2_per_addr; | ||
| 290 | u32 firi_2_mcu_addr; | ||
| 291 | u32 per_2_firi_addr; | ||
| 292 | u32 mcu_2_firi_addr; | ||
| 293 | u32 uart_2_per_addr; | ||
| 294 | u32 uart_2_mcu_addr; | ||
| 295 | u32 per_2_app_addr; | ||
| 296 | u32 mcu_2_app_addr; | ||
| 297 | u32 per_2_per_addr; | ||
| 298 | u32 uartsh_2_per_addr; | ||
| 299 | u32 uartsh_2_mcu_addr; | ||
| 300 | u32 per_2_shp_addr; | ||
| 301 | u32 mcu_2_shp_addr; | ||
| 302 | u32 ata_2_mcu_addr; | ||
| 303 | u32 mcu_2_ata_addr; | ||
| 304 | u32 app_2_per_addr; | ||
| 305 | u32 app_2_mcu_addr; | ||
| 306 | u32 shp_2_per_addr; | ||
| 307 | u32 shp_2_mcu_addr; | ||
| 308 | u32 mshc_2_mcu_addr; | ||
| 309 | u32 mcu_2_mshc_addr; | ||
| 310 | u32 spdif_2_mcu_addr; | ||
| 311 | u32 mcu_2_spdif_addr; | ||
| 312 | u32 asrc_2_mcu_addr; | ||
| 313 | u32 ext_mem_2_ipu_addr; | ||
| 314 | u32 descrambler_addr; | ||
| 315 | u32 dptc_dvfs_addr; | ||
| 316 | u32 utra_addr; | ||
| 317 | u32 ram_code_start_addr; | ||
| 318 | }; | ||
| 319 | |||
| 320 | #define SDMA_FIRMWARE_MAGIC 0x414d4453 | ||
| 321 | |||
| 322 | /** | ||
| 323 | * struct sdma_firmware_header - Layout of the firmware image | ||
| 324 | * | ||
| 325 | * @magic: "SDMA" | ||
| 326 | * @version_major: increased whenever layout of struct sdma_script_start_addrs | ||
| 327 | * changes. | ||
| 328 | * @version_minor: firmware minor version (for binary compatible changes) | ||
| 329 | * @script_addrs_start: offset of struct sdma_script_start_addrs in this image | ||
| 330 | * @num_script_addrs: number of script addresses in this image | ||
| 331 | * @ram_code_start: offset of the SDMA RAM image in this firmware image | ||
| 332 | * @ram_code_size: size of the SDMA RAM image | ||
| 333 | * | ||
| 334 | * The script start addresses are given in SDMA memory space. | ||
| 335 | */ | ||
| 336 | struct sdma_firmware_header { | ||
| 337 | u32 magic; | ||
| 338 | u32 version_major; | ||
| 339 | u32 version_minor; | ||
| 340 | u32 script_addrs_start; | ||
| 341 | u32 num_script_addrs; | ||
| 342 | u32 ram_code_start; | ||
| 343 | u32 ram_code_size; | ||
| 344 | }; | ||
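A hedged sketch of validating and walking an image with this layout (the parsing function is illustrative; all field names come from the structs above):

    static int foo_parse_sdma_fw(const struct firmware *fw)
    {
            const struct sdma_firmware_header *header = (void *)fw->data;
            const struct sdma_script_start_addrs *addrs;
            const void *ram_code;

            if (fw->size < sizeof(*header) ||
                header->magic != SDMA_FIRMWARE_MAGIC)
                    return -EINVAL;         /* not an "SDMA" image */

            addrs = (void *)fw->data + header->script_addrs_start;
            ram_code = fw->data + header->ram_code_start;

            /* ram_code (header->ram_code_size bytes) would then be copied
             * into the SDMA at addrs->ram_code_start_addr, e.g. via
             * sdma_load_script() further down in this file. */
            (void)addrs;
            (void)ram_code;
            return 0;
    }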
| 345 | |||
| 346 | struct sdma_engine { | ||
| 347 | struct device *dev; | ||
| 348 | struct sdma_channel channel[MAX_DMA_CHANNELS]; | ||
| 349 | struct sdma_channel_control *channel_control; | ||
| 350 | void __iomem *regs; | ||
| 351 | unsigned int version; | ||
| 352 | unsigned int num_events; | ||
| 353 | struct sdma_context_data *context; | ||
| 354 | dma_addr_t context_phys; | ||
| 355 | struct dma_device dma_device; | ||
| 356 | struct clk *clk; | ||
| 357 | struct sdma_script_start_addrs *script_addrs; | ||
| 358 | }; | ||
| 359 | |||
| 360 | #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ | ||
| 361 | #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ | ||
| 362 | #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ | ||
| 363 | #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ | ||
| 364 | |||
| 365 | static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) | ||
| 366 | { | ||
| 367 | u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1); | ||
| 368 | |||
| 369 | return chnenbl0 + event * 4; | ||
| 370 | } | ||
| 371 | |||
| 372 | static int sdma_config_ownership(struct sdma_channel *sdmac, | ||
| 373 | bool event_override, bool mcu_override, bool dsp_override) | ||
| 374 | { | ||
| 375 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 376 | int channel = sdmac->channel; | ||
| 377 | u32 evt, mcu, dsp; | ||
| 378 | |||
| 379 | if (event_override && mcu_override && dsp_override) | ||
| 380 | return -EINVAL; | ||
| 381 | |||
| 382 | evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); | ||
| 383 | mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); | ||
| 384 | dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); | ||
| 385 | |||
| 386 | if (dsp_override) | ||
| 387 | dsp &= ~(1 << channel); | ||
| 388 | else | ||
| 389 | dsp |= (1 << channel); | ||
| 390 | |||
| 391 | if (event_override) | ||
| 392 | evt &= ~(1 << channel); | ||
| 393 | else | ||
| 394 | evt |= (1 << channel); | ||
| 395 | |||
| 396 | if (mcu_override) | ||
| 397 | mcu &= ~(1 << channel); | ||
| 398 | else | ||
| 399 | mcu |= (1 << channel); | ||
| 400 | |||
| 401 | __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); | ||
| 402 | __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); | ||
| 403 | __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); | ||
| 404 | |||
| 405 | return 0; | ||
| 406 | } | ||
| 407 | |||
| 408 | /* | ||
| 409 | * sdma_run_channel - run a channel and wait till it's done | ||
| 410 | */ | ||
| 411 | static int sdma_run_channel(struct sdma_channel *sdmac) | ||
| 412 | { | ||
| 413 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 414 | int channel = sdmac->channel; | ||
| 415 | int ret; | ||
| 416 | |||
| 417 | init_completion(&sdmac->done); | ||
| 418 | |||
| 419 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
| 420 | |||
| 421 | ret = wait_for_completion_timeout(&sdmac->done, HZ); | ||
| 422 | |||
| 423 | return ret ? 0 : -ETIMEDOUT; | ||
| 424 | } | ||
| 425 | |||
| 426 | static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, | ||
| 427 | u32 address) | ||
| 428 | { | ||
| 429 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | ||
| 430 | void *buf_virt; | ||
| 431 | dma_addr_t buf_phys; | ||
| 432 | int ret; | ||
| 433 | |||
| 434 | buf_virt = dma_alloc_coherent(NULL, | ||
| 435 | size, | ||
| 436 | &buf_phys, GFP_KERNEL); | ||
| 437 | if (!buf_virt) | ||
| 438 | return -ENOMEM; | ||
| 439 | |||
| 440 | bd0->mode.command = C0_SETPM; | ||
| 441 | bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; | ||
| 442 | bd0->mode.count = size / 2; | ||
| 443 | bd0->buffer_addr = buf_phys; | ||
| 444 | bd0->ext_buffer_addr = address; | ||
| 445 | |||
| 446 | memcpy(buf_virt, buf, size); | ||
| 447 | |||
| 448 | ret = sdma_run_channel(&sdma->channel[0]); | ||
| 449 | |||
| 450 | dma_free_coherent(NULL, size, buf_virt, buf_phys); | ||
| 451 | |||
| 452 | return ret; | ||
| 453 | } | ||
| 454 | |||
| 455 | static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) | ||
| 456 | { | ||
| 457 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 458 | int channel = sdmac->channel; | ||
| 459 | u32 val; | ||
| 460 | u32 chnenbl = chnenbl_ofs(sdma, event); | ||
| 461 | |||
| 462 | val = __raw_readl(sdma->regs + chnenbl); | ||
| 463 | val |= (1 << channel); | ||
| 464 | __raw_writel(val, sdma->regs + chnenbl); | ||
| 465 | } | ||
| 466 | |||
| 467 | static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) | ||
| 468 | { | ||
| 469 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 470 | int channel = sdmac->channel; | ||
| 471 | u32 chnenbl = chnenbl_ofs(sdma, event); | ||
| 472 | u32 val; | ||
| 473 | |||
| 474 | val = __raw_readl(sdma->regs + chnenbl); | ||
| 475 | val &= ~(1 << channel); | ||
| 476 | __raw_writel(val, sdma->regs + chnenbl); | ||
| 477 | } | ||
| 478 | |||
| 479 | static void sdma_handle_channel_loop(struct sdma_channel *sdmac) | ||
| 480 | { | ||
| 481 | struct sdma_buffer_descriptor *bd; | ||
| 482 | |||
| 483 | /* | ||
| 484 | * loop mode. Iterate over descriptors, re-setup them and | ||
| 485 | * call callback function. | ||
| 486 | */ | ||
| 487 | while (1) { | ||
| 488 | bd = &sdmac->bd[sdmac->buf_tail]; | ||
| 489 | |||
| 490 | if (bd->mode.status & BD_DONE) | ||
| 491 | break; | ||
| 492 | |||
| 493 | if (bd->mode.status & BD_RROR) | ||
| 494 | sdmac->status = DMA_ERROR; | ||
| 495 | else | ||
| 496 | sdmac->status = DMA_SUCCESS; | ||
| 497 | |||
| 498 | bd->mode.status |= BD_DONE; | ||
| 499 | sdmac->buf_tail++; | ||
| 500 | sdmac->buf_tail %= sdmac->num_bd; | ||
| 501 | |||
| 502 | if (sdmac->desc.callback) | ||
| 503 | sdmac->desc.callback(sdmac->desc.callback_param); | ||
| 504 | } | ||
| 505 | } | ||
| 506 | |||
| 507 | static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) | ||
| 508 | { | ||
| 509 | struct sdma_buffer_descriptor *bd; | ||
| 510 | int i, error = 0; | ||
| 511 | |||
| 512 | /* | ||
| 513 | * non loop mode. Iterate over all descriptors, collect | ||
| 514 | * errors and call callback function | ||
| 515 | */ | ||
| 516 | for (i = 0; i < sdmac->num_bd; i++) { | ||
| 517 | bd = &sdmac->bd[i]; | ||
| 518 | |||
| 519 | if (bd->mode.status & (BD_DONE | BD_RROR)) | ||
| 520 | error = -EIO; | ||
| 521 | } | ||
| 522 | |||
| 523 | if (error) | ||
| 524 | sdmac->status = DMA_ERROR; | ||
| 525 | else | ||
| 526 | sdmac->status = DMA_SUCCESS; | ||
| 527 | |||
| 528 | if (sdmac->desc.callback) | ||
| 529 | sdmac->desc.callback(sdmac->desc.callback_param); | ||
| 530 | sdmac->last_completed = sdmac->desc.cookie; | ||
| 531 | } | ||
| 532 | |||
| 533 | static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) | ||
| 534 | { | ||
| 535 | complete(&sdmac->done); | ||
| 536 | |||
| 537 | /* not interested in channel 0 interrupts */ | ||
| 538 | if (sdmac->channel == 0) | ||
| 539 | return; | ||
| 540 | |||
| 541 | if (sdmac->flags & IMX_DMA_SG_LOOP) | ||
| 542 | sdma_handle_channel_loop(sdmac); | ||
| 543 | else | ||
| 544 | mxc_sdma_handle_channel_normal(sdmac); | ||
| 545 | } | ||
| 546 | |||
| 547 | static irqreturn_t sdma_int_handler(int irq, void *dev_id) | ||
| 548 | { | ||
| 549 | struct sdma_engine *sdma = dev_id; | ||
| 550 | u32 stat; | ||
| 551 | |||
| 552 | stat = __raw_readl(sdma->regs + SDMA_H_INTR); | ||
| 553 | __raw_writel(stat, sdma->regs + SDMA_H_INTR); | ||
| 554 | |||
| 555 | while (stat) { | ||
| 556 | int channel = fls(stat) - 1; | ||
| 557 | struct sdma_channel *sdmac = &sdma->channel[channel]; | ||
| 558 | |||
| 559 | mxc_sdma_handle_channel(sdmac); | ||
| 560 | |||
| 561 | stat &= ~(1 << channel); | ||
| 562 | } | ||
| 563 | |||
| 564 | return IRQ_HANDLED; | ||
| 565 | } | ||
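The handler above services pending channels from the highest set bit of SDMA_H_INTR downwards; fls() returns the 1-based index of the most significant set bit, hence the "- 1". A standalone model of that dispatch loop:

    #include <stdio.h>

    /* fls() returns the 1-based position of the most significant set bit
     * (0 for no bits set), modelled here with a GCC builtin. */
    static int fls32(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            unsigned int stat = 0x00000812; /* channels 11, 4 and 1 pending */

            while (stat) {
                    int channel = fls32(stat) - 1;

                    printf("handle channel %d\n", channel);
                    stat &= ~(1u << channel);
            }
            return 0;
    }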
| 566 | |||
| 567 | /* | ||
| 568 | * sets the pc of SDMA script according to the peripheral type | ||
| 569 | */ | ||
| 570 | static void sdma_get_pc(struct sdma_channel *sdmac, | ||
| 571 | enum sdma_peripheral_type peripheral_type) | ||
| 572 | { | ||
| 573 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 574 | int per_2_emi = 0, emi_2_per = 0; | ||
| 575 | /* | ||
| 576 | * These are needed once we start to support transfers between | ||
| 577 | * two peripherals or memory-to-memory transfers | ||
| 578 | */ | ||
| 579 | int per_2_per = 0, emi_2_emi = 0; | ||
| 580 | |||
| 581 | sdmac->pc_from_device = 0; | ||
| 582 | sdmac->pc_to_device = 0; | ||
| 583 | |||
| 584 | switch (peripheral_type) { | ||
| 585 | case IMX_DMATYPE_MEMORY: | ||
| 586 | emi_2_emi = sdma->script_addrs->ap_2_ap_addr; | ||
| 587 | break; | ||
| 588 | case IMX_DMATYPE_DSP: | ||
| 589 | emi_2_per = sdma->script_addrs->bp_2_ap_addr; | ||
| 590 | per_2_emi = sdma->script_addrs->ap_2_bp_addr; | ||
| 591 | break; | ||
| 592 | case IMX_DMATYPE_FIRI: | ||
| 593 | per_2_emi = sdma->script_addrs->firi_2_mcu_addr; | ||
| 594 | emi_2_per = sdma->script_addrs->mcu_2_firi_addr; | ||
| 595 | break; | ||
| 596 | case IMX_DMATYPE_UART: | ||
| 597 | per_2_emi = sdma->script_addrs->uart_2_mcu_addr; | ||
| 598 | emi_2_per = sdma->script_addrs->mcu_2_app_addr; | ||
| 599 | break; | ||
| 600 | case IMX_DMATYPE_UART_SP: | ||
| 601 | per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr; | ||
| 602 | emi_2_per = sdma->script_addrs->mcu_2_shp_addr; | ||
| 603 | break; | ||
| 604 | case IMX_DMATYPE_ATA: | ||
| 605 | per_2_emi = sdma->script_addrs->ata_2_mcu_addr; | ||
| 606 | emi_2_per = sdma->script_addrs->mcu_2_ata_addr; | ||
| 607 | break; | ||
| 608 | case IMX_DMATYPE_CSPI: | ||
| 609 | case IMX_DMATYPE_EXT: | ||
| 610 | case IMX_DMATYPE_SSI: | ||
| 611 | per_2_emi = sdma->script_addrs->app_2_mcu_addr; | ||
| 612 | emi_2_per = sdma->script_addrs->mcu_2_app_addr; | ||
| 613 | break; | ||
| 614 | case IMX_DMATYPE_SSI_SP: | ||
| 615 | case IMX_DMATYPE_MMC: | ||
| 616 | case IMX_DMATYPE_SDHC: | ||
| 617 | case IMX_DMATYPE_CSPI_SP: | ||
| 618 | case IMX_DMATYPE_ESAI: | ||
| 619 | case IMX_DMATYPE_MSHC_SP: | ||
| 620 | per_2_emi = sdma->script_addrs->shp_2_mcu_addr; | ||
| 621 | emi_2_per = sdma->script_addrs->mcu_2_shp_addr; | ||
| 622 | break; | ||
| 623 | case IMX_DMATYPE_ASRC: | ||
| 624 | per_2_emi = sdma->script_addrs->asrc_2_mcu_addr; | ||
| 625 | emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; | ||
| 626 | per_2_per = sdma->script_addrs->per_2_per_addr; | ||
| 627 | break; | ||
| 628 | case IMX_DMATYPE_MSHC: | ||
| 629 | per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; | ||
| 630 | emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; | ||
| 631 | break; | ||
| 632 | case IMX_DMATYPE_CCM: | ||
| 633 | per_2_emi = sdma->script_addrs->dptc_dvfs_addr; | ||
| 634 | break; | ||
| 635 | case IMX_DMATYPE_SPDIF: | ||
| 636 | per_2_emi = sdma->script_addrs->spdif_2_mcu_addr; | ||
| 637 | emi_2_per = sdma->script_addrs->mcu_2_spdif_addr; | ||
| 638 | break; | ||
| 639 | case IMX_DMATYPE_IPU_MEMORY: | ||
| 640 | emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr; | ||
| 641 | break; | ||
| 642 | default: | ||
| 643 | break; | ||
| 644 | } | ||
| 645 | |||
| 646 | sdmac->pc_from_device = per_2_emi; | ||
| 647 | sdmac->pc_to_device = emi_2_per; | ||
| 648 | } | ||
| 649 | |||
| 650 | static int sdma_load_context(struct sdma_channel *sdmac) | ||
| 651 | { | ||
| 652 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 653 | int channel = sdmac->channel; | ||
| 654 | int load_address; | ||
| 655 | struct sdma_context_data *context = sdma->context; | ||
| 656 | struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; | ||
| 657 | int ret; | ||
| 658 | |||
| 659 | if (sdmac->direction == DMA_FROM_DEVICE) { | ||
| 660 | load_address = sdmac->pc_from_device; | ||
| 661 | } else { | ||
| 662 | load_address = sdmac->pc_to_device; | ||
| 663 | } | ||
| 664 | |||
| 665 | if (load_address < 0) | ||
| 666 | return load_address; | ||
| 667 | |||
| 668 | dev_dbg(sdma->dev, "load_address = %d\n", load_address); | ||
| 669 | dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); | ||
| 670 | dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); | ||
| 671 | dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); | ||
| 672 | dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); | ||
| 673 | dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); | ||
| 674 | |||
| 675 | memset(context, 0, sizeof(*context)); | ||
| 676 | context->channel_state.pc = load_address; | ||
| 677 | |||
| 678 | /* Pass the event masks, the peripheral base address and the | ||
| 679 | * watermark level to the script via the context | ||
| 680 | */ | ||
| 681 | context->gReg[0] = sdmac->event_mask1; | ||
| 682 | context->gReg[1] = sdmac->event_mask0; | ||
| 683 | context->gReg[2] = sdmac->per_addr; | ||
| 684 | context->gReg[6] = sdmac->shp_addr; | ||
| 685 | context->gReg[7] = sdmac->watermark_level; | ||
| 686 | |||
| 687 | bd0->mode.command = C0_SETDM; | ||
| 688 | bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; | ||
| 689 | bd0->mode.count = sizeof(*context) / 4; | ||
| 690 | bd0->buffer_addr = sdma->context_phys; | ||
| 691 | bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; | ||
| 692 | |||
| 693 | ret = sdma_run_channel(&sdma->channel[0]); | ||
| 694 | |||
| 695 | return ret; | ||
| 696 | } | ||
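sdma_load_context() does not poke context registers directly: it builds a channel-0 buffer descriptor carrying the C0_SETDM command so the engine itself copies the prepared context into its internal RAM, at word offset 2048 plus one context-sized slot per channel. A small runnable sketch of that address arithmetic, with a hypothetical context size:

#include <stdio.h>

#define CONTEXT_WORDS 32   /* hypothetical sizeof(struct sdma_context_data) / 4 */
#define CONTEXT_BASE  2048 /* word offset of the context area in SDMA data RAM */

int main(void)
{
        int channel;

        for (channel = 0; channel < 4; channel++)
                printf("channel %d context at word offset %d\n",
                       channel, CONTEXT_BASE + CONTEXT_WORDS * channel);
        return 0;
}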
| 697 | |||
| 698 | static void sdma_disable_channel(struct sdma_channel *sdmac) | ||
| 699 | { | ||
| 700 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 701 | int channel = sdmac->channel; | ||
| 702 | |||
| 703 | __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); | ||
| 704 | sdmac->status = DMA_ERROR; | ||
| 705 | } | ||
| 706 | |||
| 707 | static int sdma_config_channel(struct sdma_channel *sdmac) | ||
| 708 | { | ||
| 709 | int ret; | ||
| 710 | |||
| 711 | sdma_disable_channel(sdmac); | ||
| 712 | |||
| 713 | sdmac->event_mask0 = 0; | ||
| 714 | sdmac->event_mask1 = 0; | ||
| 715 | sdmac->shp_addr = 0; | ||
| 716 | sdmac->per_addr = 0; | ||
| 717 | |||
| 718 | if (sdmac->event_id0) { | ||
| 719 | if (sdmac->event_id0 > 32) | ||
| 720 | return -EINVAL; | ||
| 721 | sdma_event_enable(sdmac, sdmac->event_id0); | ||
| 722 | } | ||
| 723 | |||
| 724 | switch (sdmac->peripheral_type) { | ||
| 725 | case IMX_DMATYPE_DSP: | ||
| 726 | sdma_config_ownership(sdmac, false, true, true); | ||
| 727 | break; | ||
| 728 | case IMX_DMATYPE_MEMORY: | ||
| 729 | sdma_config_ownership(sdmac, false, true, false); | ||
| 730 | break; | ||
| 731 | default: | ||
| 732 | sdma_config_ownership(sdmac, true, true, false); | ||
| 733 | break; | ||
| 734 | } | ||
| 735 | |||
| 736 | sdma_get_pc(sdmac, sdmac->peripheral_type); | ||
| 737 | |||
| 738 | if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) && | ||
| 739 | (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { | ||
| 740 | /* Handle multiple event channels differently */ | ||
| 741 | if (sdmac->event_id1) { | ||
| 742 | sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); | ||
| 743 | if (sdmac->event_id1 > 31) | ||
| 744 | sdmac->watermark_level |= 1 << 31; | ||
| 745 | sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); | ||
| 746 | if (sdmac->event_id0 > 31) | ||
| 747 | sdmac->watermark_level |= 1 << 30; | ||
| 748 | } else { | ||
| 749 | sdmac->event_mask0 = 1 << sdmac->event_id0; | ||
| 750 | sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); | ||
| 751 | } | ||
| 752 | /* Watermark Level */ | ||
| 753 | sdmac->watermark_level |= sdmac->watermark_level; | ||
| 754 | /* Address */ | ||
| 755 | sdmac->shp_addr = sdmac->per_address; | ||
| 756 | } else { | ||
| 757 | sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ | ||
| 758 | } | ||
| 759 | |||
| 760 | ret = sdma_load_context(sdmac); | ||
| 761 | |||
| 762 | return ret; | ||
| 763 | } | ||
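For channels fed by two DMA events, sdma_config_channel() folds each event ID into one of two 32-bit mask registers (bit id % 32) and records which bank the event lives in via bits 30/31 of the watermark level. A runnable sketch of that folding for two example event lines:

#include <stdio.h>

int main(void)
{
        unsigned int event_id0 = 14, event_id1 = 40; /* example event lines */
        unsigned int mask0, mask1, wml = 0;

        mask1 = 1u << (event_id1 % 32);
        if (event_id1 > 31)
                wml |= 1u << 31;        /* event 1 sits in the upper bank */
        mask0 = 1u << (event_id0 % 32);
        if (event_id0 > 31)
                wml |= 1u << 30;        /* event 0 sits in the upper bank */

        printf("mask0=%08x mask1=%08x wml=%08x\n", mask0, mask1, wml);
        return 0;
}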
| 764 | |||
| 765 | static int sdma_set_channel_priority(struct sdma_channel *sdmac, | ||
| 766 | unsigned int priority) | ||
| 767 | { | ||
| 768 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 769 | int channel = sdmac->channel; | ||
| 770 | |||
| 771 | if (priority < MXC_SDMA_MIN_PRIORITY | ||
| 772 | || priority > MXC_SDMA_MAX_PRIORITY) { | ||
| 773 | return -EINVAL; | ||
| 774 | } | ||
| 775 | |||
| 776 | __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); | ||
| 777 | |||
| 778 | return 0; | ||
| 779 | } | ||
| 780 | |||
| 781 | static int sdma_request_channel(struct sdma_channel *sdmac) | ||
| 782 | { | ||
| 783 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 784 | int channel = sdmac->channel; | ||
| 785 | int ret = -EBUSY; | ||
| 786 | |||
| 787 | sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); | ||
| 788 | if (!sdmac->bd) { | ||
| 789 | ret = -ENOMEM; | ||
| 790 | goto out; | ||
| 791 | } | ||
| 792 | |||
| 793 | memset(sdmac->bd, 0, PAGE_SIZE); | ||
| 794 | |||
| 795 | sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; | ||
| 796 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | ||
| 797 | |||
| 798 | clk_enable(sdma->clk); | ||
| 799 | |||
| 800 | sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); | ||
| 801 | |||
| 802 | init_completion(&sdmac->done); | ||
| 803 | |||
| 804 | sdmac->buf_tail = 0; | ||
| 805 | |||
| 806 | return 0; | ||
| 807 | out: | ||
| 808 | |||
| 809 | return ret; | ||
| 810 | } | ||
| 811 | |||
| 812 | static void sdma_enable_channel(struct sdma_engine *sdma, int channel) | ||
| 813 | { | ||
| 814 | __raw_writel(1 << channel, sdma->regs + SDMA_H_START); | ||
| 815 | } | ||
| 816 | |||
| 817 | static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) | ||
| 818 | { | ||
| 819 | dma_cookie_t cookie = sdma->chan.cookie; | ||
| 820 | |||
| 821 | if (++cookie < 0) | ||
| 822 | cookie = 1; | ||
| 823 | |||
| 824 | sdma->chan.cookie = cookie; | ||
| 825 | sdma->desc.cookie = cookie; | ||
| 826 | |||
| 827 | return cookie; | ||
| 828 | } | ||
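Cookies are signed, monotonically increasing transaction IDs; on overflow they skip back to 1 so that zero and negative values stay reserved for error codes. A runnable sketch of the wrap (the increment is done in unsigned arithmetic here so it is well defined in standalone C; the kernel source simply increments the signed value):

#include <stdio.h>

typedef int dma_cookie_t;

static dma_cookie_t assign_cookie(dma_cookie_t *last)
{
        dma_cookie_t cookie = (dma_cookie_t)((unsigned int)*last + 1u);

        if (cookie < 0)
                cookie = 1;     /* 0 and negatives are reserved */

        *last = cookie;
        return cookie;
}

int main(void)
{
        dma_cookie_t last = 0x7fffffff; /* INT_MAX: the next assignment wraps */

        printf("next cookie after wrap: %d\n", assign_cookie(&last));
        printf("and the one after: %d\n", assign_cookie(&last));
        return 0;
}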
| 829 | |||
| 830 | static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) | ||
| 831 | { | ||
| 832 | return container_of(chan, struct sdma_channel, chan); | ||
| 833 | } | ||
| 834 | |||
| 835 | static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
| 836 | { | ||
| 837 | struct sdma_channel *sdmac = to_sdma_chan(tx->chan); | ||
| 838 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 839 | dma_cookie_t cookie; | ||
| 840 | |||
| 841 | spin_lock_irq(&sdmac->lock); | ||
| 842 | |||
| 843 | cookie = sdma_assign_cookie(sdmac); | ||
| 844 | |||
| 845 | sdma_enable_channel(sdma, tx->chan->chan_id); | ||
| 846 | |||
| 847 | spin_unlock_irq(&sdmac->lock); | ||
| 848 | |||
| 849 | return cookie; | ||
| 850 | } | ||
| 851 | |||
| 852 | static int sdma_alloc_chan_resources(struct dma_chan *chan) | ||
| 853 | { | ||
| 854 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 855 | struct imx_dma_data *data = chan->private; | ||
| 856 | int prio, ret; | ||
| 857 | |||
| 858 | /* No need to execute this for internal channel 0 */ | ||
| 859 | if (chan->chan_id == 0) | ||
| 860 | return 0; | ||
| 861 | |||
| 862 | if (!data) | ||
| 863 | return -EINVAL; | ||
| 864 | |||
| 865 | switch (data->priority) { | ||
| 866 | case DMA_PRIO_HIGH: | ||
| 867 | prio = 3; | ||
| 868 | break; | ||
| 869 | case DMA_PRIO_MEDIUM: | ||
| 870 | prio = 2; | ||
| 871 | break; | ||
| 872 | case DMA_PRIO_LOW: | ||
| 873 | default: | ||
| 874 | prio = 1; | ||
| 875 | break; | ||
| 876 | } | ||
| 877 | |||
| 878 | sdmac->peripheral_type = data->peripheral_type; | ||
| 879 | sdmac->event_id0 = data->dma_request; | ||
| 880 | ret = sdma_set_channel_priority(sdmac, prio); | ||
| 881 | if (ret) | ||
| 882 | return ret; | ||
| 883 | |||
| 884 | ret = sdma_request_channel(sdmac); | ||
| 885 | if (ret) | ||
| 886 | return ret; | ||
| 887 | |||
| 888 | dma_async_tx_descriptor_init(&sdmac->desc, chan); | ||
| 889 | sdmac->desc.tx_submit = sdma_tx_submit; | ||
| 890 | /* txd.flags will be overwritten in prep funcs */ | ||
| 891 | sdmac->desc.flags = DMA_CTRL_ACK; | ||
| 892 | |||
| 893 | return 0; | ||
| 894 | } | ||
| 895 | |||
| 896 | static void sdma_free_chan_resources(struct dma_chan *chan) | ||
| 897 | { | ||
| 898 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 899 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 900 | |||
| 901 | sdma_disable_channel(sdmac); | ||
| 902 | |||
| 903 | if (sdmac->event_id0) | ||
| 904 | sdma_event_disable(sdmac, sdmac->event_id0); | ||
| 905 | if (sdmac->event_id1) | ||
| 906 | sdma_event_disable(sdmac, sdmac->event_id1); | ||
| 907 | |||
| 908 | sdmac->event_id0 = 0; | ||
| 909 | sdmac->event_id1 = 0; | ||
| 910 | |||
| 911 | sdma_set_channel_priority(sdmac, 0); | ||
| 912 | |||
| 913 | dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); | ||
| 914 | |||
| 915 | clk_disable(sdma->clk); | ||
| 916 | } | ||
| 917 | |||
| 918 | static struct dma_async_tx_descriptor *sdma_prep_slave_sg( | ||
| 919 | struct dma_chan *chan, struct scatterlist *sgl, | ||
| 920 | unsigned int sg_len, enum dma_data_direction direction, | ||
| 921 | unsigned long flags) | ||
| 922 | { | ||
| 923 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 924 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 925 | int ret, i, count; | ||
| 926 | int channel = chan->chan_id; | ||
| 927 | struct scatterlist *sg; | ||
| 928 | |||
| 929 | if (sdmac->status == DMA_IN_PROGRESS) | ||
| 930 | return NULL; | ||
| 931 | sdmac->status = DMA_IN_PROGRESS; | ||
| 932 | |||
| 933 | sdmac->flags = 0; | ||
| 934 | |||
| 935 | dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", | ||
| 936 | sg_len, channel); | ||
| 937 | |||
| 938 | sdmac->direction = direction; | ||
| 939 | ret = sdma_load_context(sdmac); | ||
| 940 | if (ret) | ||
| 941 | goto err_out; | ||
| 942 | |||
| 943 | if (sg_len > NUM_BD) { | ||
| 944 | dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", | ||
| 945 | channel, sg_len, NUM_BD); | ||
| 946 | ret = -EINVAL; | ||
| 947 | goto err_out; | ||
| 948 | } | ||
| 949 | |||
| 950 | for_each_sg(sgl, sg, sg_len, i) { | ||
| 951 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | ||
| 952 | int param; | ||
| 953 | |||
| 954 | bd->buffer_addr = sg->dma_address; | ||
| 955 | |||
| 956 | count = sg->length; | ||
| 957 | |||
| 958 | if (count > 0xffff) { | ||
| 959 | dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", | ||
| 960 | channel, count, 0xffff); | ||
| 961 | ret = -EINVAL; | ||
| 962 | goto err_out; | ||
| 963 | } | ||
| 964 | |||
| 965 | bd->mode.count = count; | ||
| 966 | |||
| 967 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { | ||
| 968 | ret = -EINVAL; | ||
| 969 | goto err_out; | ||
| 970 | } | ||
| 971 | if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
| 972 | bd->mode.command = 0; | ||
| 973 | else | ||
| 974 | bd->mode.command = sdmac->word_size; | ||
| 975 | |||
| 976 | param = BD_DONE | BD_EXTD | BD_CONT; | ||
| 977 | |||
| 978 | if (sdmac->flags & IMX_DMA_SG_LOOP) { | ||
| 979 | param |= BD_INTR; | ||
| 980 | if (i + 1 == sg_len) | ||
| 981 | param |= BD_WRAP; | ||
| 982 | } | ||
| 983 | |||
| 984 | if (i + 1 == sg_len) | ||
| 985 | param |= BD_INTR; | ||
| 986 | |||
| 987 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | ||
| 988 | i, count, sg->dma_address, | ||
| 989 | param & BD_WRAP ? "wrap" : "", | ||
| 990 | param & BD_INTR ? " intr" : ""); | ||
| 991 | |||
| 992 | bd->mode.status = param; | ||
| 993 | } | ||
| 994 | |||
| 995 | sdmac->num_bd = sg_len; | ||
| 996 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | ||
| 997 | |||
| 998 | return &sdmac->desc; | ||
| 999 | err_out: | ||
| 1000 | return NULL; | ||
| 1001 | } | ||
| 1002 | |||
| 1003 | static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( | ||
| 1004 | struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, | ||
| 1005 | size_t period_len, enum dma_data_direction direction) | ||
| 1006 | { | ||
| 1007 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 1008 | struct sdma_engine *sdma = sdmac->sdma; | ||
| 1009 | int num_periods = buf_len / period_len; | ||
| 1010 | int channel = chan->chan_id; | ||
| 1011 | int ret, i = 0, buf = 0; | ||
| 1012 | |||
| 1013 | dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); | ||
| 1014 | |||
| 1015 | if (sdmac->status == DMA_IN_PROGRESS) | ||
| 1016 | return NULL; | ||
| 1017 | |||
| 1018 | sdmac->status = DMA_IN_PROGRESS; | ||
| 1019 | |||
| 1020 | sdmac->flags |= IMX_DMA_SG_LOOP; | ||
| 1021 | sdmac->direction = direction; | ||
| 1022 | ret = sdma_load_context(sdmac); | ||
| 1023 | if (ret) | ||
| 1024 | goto err_out; | ||
| 1025 | |||
| 1026 | if (num_periods > NUM_BD) { | ||
| 1027 | dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", | ||
| 1028 | channel, num_periods, NUM_BD); | ||
| 1029 | goto err_out; | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | if (period_len > 0xffff) { | ||
| 1033 | dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", | ||
| 1034 | channel, period_len, 0xffff); | ||
| 1035 | goto err_out; | ||
| 1036 | } | ||
| 1037 | |||
| 1038 | while (buf < buf_len) { | ||
| 1039 | struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; | ||
| 1040 | int param; | ||
| 1041 | |||
| 1042 | bd->buffer_addr = dma_addr; | ||
| 1043 | |||
| 1044 | bd->mode.count = period_len; | ||
| 1045 | |||
| 1046 | if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
| 1047 | goto err_out; | ||
| 1048 | if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
| 1049 | bd->mode.command = 0; | ||
| 1050 | else | ||
| 1051 | bd->mode.command = sdmac->word_size; | ||
| 1052 | |||
| 1053 | param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; | ||
| 1054 | if (i + 1 == num_periods) | ||
| 1055 | param |= BD_WRAP; | ||
| 1056 | |||
| 1057 | dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", | ||
| 1058 | i, period_len, dma_addr, | ||
| 1059 | param & BD_WRAP ? "wrap" : "", | ||
| 1060 | param & BD_INTR ? " intr" : ""); | ||
| 1061 | |||
| 1062 | bd->mode.status = param; | ||
| 1063 | |||
| 1064 | dma_addr += period_len; | ||
| 1065 | buf += period_len; | ||
| 1066 | |||
| 1067 | i++; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | sdmac->num_bd = num_periods; | ||
| 1071 | sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; | ||
| 1072 | |||
| 1073 | return &sdmac->desc; | ||
| 1074 | err_out: | ||
| 1075 | sdmac->status = DMA_ERROR; | ||
| 1076 | return NULL; | ||
| 1077 | } | ||
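sdma_prep_dma_cyclic() carves buf_len into buf_len / period_len descriptors; every one raises an interrupt (BD_INTR) and the last one carries BD_WRAP so the chain loops back to the first — exactly the ring an audio driver wants. A runnable sketch of that flag layout (the flag values are placeholders, not the hardware's):

#include <stdio.h>

#define BD_CONT 0x04    /* flag values hypothetical for the demo */
#define BD_INTR 0x08
#define BD_WRAP 0x02

int main(void)
{
        unsigned int buf_len = 4096, period_len = 1024;
        unsigned int num_periods = buf_len / period_len;
        unsigned int i;

        for (i = 0; i < num_periods; i++) {
                unsigned int param = BD_CONT | BD_INTR;

                if (i + 1 == num_periods)
                        param |= BD_WRAP;       /* close the ring */
                printf("bd %u: flags %#x\n", i, param);
        }
        return 0;
}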
| 1078 | |||
| 1079 | static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
| 1080 | unsigned long arg) | ||
| 1081 | { | ||
| 1082 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 1083 | struct dma_slave_config *dmaengine_cfg = (void *)arg; | ||
| 1084 | |||
| 1085 | switch (cmd) { | ||
| 1086 | case DMA_TERMINATE_ALL: | ||
| 1087 | sdma_disable_channel(sdmac); | ||
| 1088 | return 0; | ||
| 1089 | case DMA_SLAVE_CONFIG: | ||
| 1090 | if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { | ||
| 1091 | sdmac->per_address = dmaengine_cfg->src_addr; | ||
| 1092 | sdmac->watermark_level = dmaengine_cfg->src_maxburst; | ||
| 1093 | sdmac->word_size = dmaengine_cfg->src_addr_width; | ||
| 1094 | } else { | ||
| 1095 | sdmac->per_address = dmaengine_cfg->dst_addr; | ||
| 1096 | sdmac->watermark_level = dmaengine_cfg->dst_maxburst; | ||
| 1097 | sdmac->word_size = dmaengine_cfg->dst_addr_width; | ||
| 1098 | } | ||
| 1099 | return sdma_config_channel(sdmac); | ||
| 1100 | default: | ||
| 1101 | return -ENOSYS; | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | return -EINVAL; | ||
| 1105 | } | ||
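From the client side, the expected sequence against this interface is: configure the slave parameters with DMA_SLAVE_CONFIG, prepare a descriptor, then submit it, which in this driver also starts the channel. A hedged sketch of that call order, not buildable on its own; my_start_rx, my_chan, my_sg and my_fifo_addr are placeholders and error handling is trimmed:

/* sketch only: assumes kernel context and a channel already requested */
static int my_start_rx(struct dma_chan *my_chan, struct scatterlist *my_sg,
                       unsigned int sg_len, dma_addr_t my_fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_FROM_DEVICE,
                .src_addr       = my_fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 4,    /* becomes the SDMA watermark level */
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = my_chan->device->device_control(my_chan, DMA_SLAVE_CONFIG,
                                              (unsigned long)&cfg);
        if (ret)
                return ret;

        desc = my_chan->device->device_prep_slave_sg(my_chan, my_sg, sg_len,
                                                     DMA_FROM_DEVICE, 0);
        if (!desc)
                return -EBUSY;

        desc->tx_submit(desc);  /* sdma_tx_submit() also starts the channel */
        return 0;
}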
| 1106 | |||
| 1107 | static enum dma_status sdma_tx_status(struct dma_chan *chan, | ||
| 1108 | dma_cookie_t cookie, | ||
| 1109 | struct dma_tx_state *txstate) | ||
| 1110 | { | ||
| 1111 | struct sdma_channel *sdmac = to_sdma_chan(chan); | ||
| 1112 | dma_cookie_t last_used; | ||
| 1113 | enum dma_status ret; | ||
| 1114 | |||
| 1115 | last_used = chan->cookie; | ||
| 1116 | |||
| 1117 | ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); | ||
| 1118 | dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); | ||
| 1119 | |||
| 1120 | return ret; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | static void sdma_issue_pending(struct dma_chan *chan) | ||
| 1124 | { | ||
| 1125 | /* | ||
| 1126 | * Nothing to do. We only have a single descriptor | ||
| 1127 | */ | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | static int __init sdma_init(struct sdma_engine *sdma, | ||
| 1131 | void *ram_code, int ram_code_size) | ||
| 1132 | { | ||
| 1133 | int i, ret; | ||
| 1134 | dma_addr_t ccb_phys; | ||
| 1135 | |||
| 1136 | switch (sdma->version) { | ||
| 1137 | case 1: | ||
| 1138 | sdma->num_events = 32; | ||
| 1139 | break; | ||
| 1140 | case 2: | ||
| 1141 | sdma->num_events = 48; | ||
| 1142 | break; | ||
| 1143 | default: | ||
| 1144 | dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version); | ||
| 1145 | return -ENODEV; | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | clk_enable(sdma->clk); | ||
| 1149 | |||
| 1150 | /* Be sure SDMA has not started yet */ | ||
| 1151 | __raw_writel(0, sdma->regs + SDMA_H_C0PTR); | ||
| 1152 | |||
| 1153 | sdma->channel_control = dma_alloc_coherent(NULL, | ||
| 1154 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + | ||
| 1155 | sizeof(struct sdma_context_data), | ||
| 1156 | &ccb_phys, GFP_KERNEL); | ||
| 1157 | |||
| 1158 | if (!sdma->channel_control) { | ||
| 1159 | ret = -ENOMEM; | ||
| 1160 | goto err_dma_alloc; | ||
| 1161 | } | ||
| 1162 | |||
| 1163 | sdma->context = (void *)sdma->channel_control + | ||
| 1164 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); | ||
| 1165 | sdma->context_phys = ccb_phys + | ||
| 1166 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); | ||
| 1167 | |||
| 1168 | /* Zero-out the CCB structures array just allocated */ | ||
| 1169 | memset(sdma->channel_control, 0, | ||
| 1170 | MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); | ||
| 1171 | |||
| 1172 | /* disable all channels */ | ||
| 1173 | for (i = 0; i < sdma->num_events; i++) | ||
| 1174 | __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); | ||
| 1175 | |||
| 1176 | /* All channels have priority 0 */ | ||
| 1177 | for (i = 0; i < MAX_DMA_CHANNELS; i++) | ||
| 1178 | __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); | ||
| 1179 | |||
| 1180 | ret = sdma_request_channel(&sdma->channel[0]); | ||
| 1181 | if (ret) | ||
| 1182 | goto err_dma_alloc; | ||
| 1183 | |||
| 1184 | sdma_config_ownership(&sdma->channel[0], false, true, false); | ||
| 1185 | |||
| 1186 | /* Set Command Channel (Channel Zero) */ | ||
| 1187 | __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); | ||
| 1188 | |||
| 1189 | /* Set bits of CONFIG register but with static context switching */ | ||
| 1190 | /* FIXME: Check whether to set ACR bit depending on clock ratios */ | ||
| 1191 | __raw_writel(0, sdma->regs + SDMA_H_CONFIG); | ||
| 1192 | |||
| 1193 | __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); | ||
| 1194 | |||
| 1195 | /* download the RAM image for SDMA */ | ||
| 1196 | sdma_load_script(sdma, ram_code, | ||
| 1197 | ram_code_size, | ||
| 1198 | sdma->script_addrs->ram_code_start_addr); | ||
| 1199 | |||
| 1200 | /* Set bits of CONFIG register with given context switching mode */ | ||
| 1201 | __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); | ||
| 1202 | |||
| 1203 | /* Initialize channel 0 priority */ | ||
| 1204 | sdma_set_channel_priority(&sdma->channel[0], 7); | ||
| 1205 | |||
| 1206 | clk_disable(sdma->clk); | ||
| 1207 | |||
| 1208 | return 0; | ||
| 1209 | |||
| 1210 | err_dma_alloc: | ||
| 1211 | clk_disable(sdma->clk); | ||
| 1212 | dev_err(sdma->dev, "initialisation failed with %d\n", ret); | ||
| 1213 | return ret; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | static int __init sdma_probe(struct platform_device *pdev) | ||
| 1217 | { | ||
| 1218 | int ret; | ||
| 1219 | const struct firmware *fw; | ||
| 1220 | const struct sdma_firmware_header *header; | ||
| 1221 | const struct sdma_script_start_addrs *addr; | ||
| 1222 | int irq; | ||
| 1223 | unsigned short *ram_code; | ||
| 1224 | struct resource *iores; | ||
| 1225 | struct sdma_platform_data *pdata = pdev->dev.platform_data; | ||
| 1226 | char *fwname; | ||
| 1227 | int i; | ||
| 1228 | dma_cap_mask_t mask; | ||
| 1229 | struct sdma_engine *sdma; | ||
| 1230 | |||
| 1231 | sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); | ||
| 1232 | if (!sdma) | ||
| 1233 | return -ENOMEM; | ||
| 1234 | |||
| 1235 | sdma->dev = &pdev->dev; | ||
| 1236 | |||
| 1237 | iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 1238 | irq = platform_get_irq(pdev, 0); | ||
| 1239 | if (!iores || irq < 0 || !pdata) { | ||
| 1240 | ret = -EINVAL; | ||
| 1241 | goto err_irq; | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { | ||
| 1245 | ret = -EBUSY; | ||
| 1246 | goto err_request_region; | ||
| 1247 | } | ||
| 1248 | |||
| 1249 | sdma->clk = clk_get(&pdev->dev, NULL); | ||
| 1250 | if (IS_ERR(sdma->clk)) { | ||
| 1251 | ret = PTR_ERR(sdma->clk); | ||
| 1252 | goto err_clk; | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | sdma->regs = ioremap(iores->start, resource_size(iores)); | ||
| 1256 | if (!sdma->regs) { | ||
| 1257 | ret = -ENOMEM; | ||
| 1258 | goto err_ioremap; | ||
| 1259 | } | ||
| 1260 | |||
| 1261 | ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); | ||
| 1262 | if (ret) | ||
| 1263 | goto err_request_irq; | ||
| 1264 | |||
| 1265 | fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", | ||
| 1266 | pdata->cpu_name, pdata->to_version); | ||
| 1267 | if (!fwname) { | ||
| 1268 | ret = -ENOMEM; | ||
| 1269 | goto err_cputype; | ||
| 1270 | } | ||
| 1271 | |||
| 1272 | ret = request_firmware(&fw, fwname, &pdev->dev); | ||
| 1273 | if (ret) { | ||
| 1274 | dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n", | ||
| 1275 | fwname, ret); | ||
| 1276 | kfree(fwname); | ||
| 1277 | goto err_cputype; | ||
| 1278 | } | ||
| 1279 | kfree(fwname); | ||
| 1280 | |||
| 1281 | if (fw->size < sizeof(*header)) | ||
| 1282 | goto err_firmware; | ||
| 1283 | |||
| 1284 | header = (struct sdma_firmware_header *)fw->data; | ||
| 1285 | |||
| 1286 | if (header->magic != SDMA_FIRMWARE_MAGIC) | ||
| 1287 | goto err_firmware; | ||
| 1288 | if (header->ram_code_start + header->ram_code_size > fw->size) | ||
| 1289 | goto err_firmware; | ||
| 1290 | |||
| 1291 | addr = (void *)header + header->script_addrs_start; | ||
| 1292 | ram_code = (void *)header + header->ram_code_start; | ||
| 1293 | sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL); | ||
| 1294 | if (!sdma->script_addrs) | ||
| 1295 | goto err_firmware; | ||
| 1296 | memcpy(sdma->script_addrs, addr, sizeof(*addr)); | ||
| 1297 | |||
| 1298 | sdma->version = pdata->sdma_version; | ||
| 1299 | |||
| 1300 | INIT_LIST_HEAD(&sdma->dma_device.channels); | ||
| 1301 | /* Initialize channel parameters */ | ||
| 1302 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { | ||
| 1303 | struct sdma_channel *sdmac = &sdma->channel[i]; | ||
| 1304 | |||
| 1305 | sdmac->sdma = sdma; | ||
| 1306 | spin_lock_init(&sdmac->lock); | ||
| 1307 | |||
| 1308 | dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); | ||
| 1309 | dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); | ||
| 1310 | |||
| 1311 | sdmac->chan.device = &sdma->dma_device; | ||
| 1312 | sdmac->chan.chan_id = i; | ||
| 1313 | sdmac->channel = i; | ||
| 1314 | |||
| 1315 | /* Add the channel to the DMAC list */ | ||
| 1316 | list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); | ||
| 1317 | } | ||
| 1318 | |||
| 1319 | ret = sdma_init(sdma, ram_code, header->ram_code_size); | ||
| 1320 | if (ret) | ||
| 1321 | goto err_init; | ||
| 1322 | |||
| 1323 | sdma->dma_device.dev = &pdev->dev; | ||
| 1324 | |||
| 1325 | sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; | ||
| 1326 | sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; | ||
| 1327 | sdma->dma_device.device_tx_status = sdma_tx_status; | ||
| 1328 | sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; | ||
| 1329 | sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; | ||
| 1330 | sdma->dma_device.device_control = sdma_control; | ||
| 1331 | sdma->dma_device.device_issue_pending = sdma_issue_pending; | ||
| 1332 | |||
| 1333 | ret = dma_async_device_register(&sdma->dma_device); | ||
| 1334 | if (ret) { | ||
| 1335 | dev_err(&pdev->dev, "unable to register\n"); | ||
| 1336 | goto err_init; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | dev_info(&pdev->dev, "initialized (firmware %d.%d)\n", | ||
| 1340 | header->version_major, | ||
| 1341 | header->version_minor); | ||
| 1342 | |||
| 1343 | /* request channel 0. This is an internal control channel | ||
| 1344 | * to the SDMA engine and not available to clients. | ||
| 1345 | */ | ||
| 1346 | dma_cap_zero(mask); | ||
| 1347 | dma_cap_set(DMA_SLAVE, mask); | ||
| 1348 | dma_request_channel(mask, NULL, NULL); | ||
| 1349 | |||
| 1350 | release_firmware(fw); | ||
| 1351 | |||
| 1352 | return 0; | ||
| 1353 | |||
| 1354 | err_init: | ||
| 1355 | kfree(sdma->script_addrs); | ||
| 1356 | err_firmware: | ||
| 1357 | release_firmware(fw); | ||
| 1358 | err_cputype: | ||
| 1359 | free_irq(irq, sdma); | ||
| 1360 | err_request_irq: | ||
| 1361 | iounmap(sdma->regs); | ||
| 1362 | err_ioremap: | ||
| 1363 | clk_put(sdma->clk); | ||
| 1364 | err_clk: | ||
| 1365 | release_mem_region(iores->start, resource_size(iores)); | ||
| 1366 | err_request_region: | ||
| 1367 | err_irq: | ||
| 1368 | kfree(sdma); | ||
| 1369 | return ret; | ||
| 1370 | } | ||
| 1371 | |||
| 1372 | static int __exit sdma_remove(struct platform_device *pdev) | ||
| 1373 | { | ||
| 1374 | return -EBUSY; | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | static struct platform_driver sdma_driver = { | ||
| 1378 | .driver = { | ||
| 1379 | .name = "imx-sdma", | ||
| 1380 | }, | ||
| 1381 | .remove = __exit_p(sdma_remove), | ||
| 1382 | }; | ||
| 1383 | |||
| 1384 | static int __init sdma_module_init(void) | ||
| 1385 | { | ||
| 1386 | return platform_driver_probe(&sdma_driver, sdma_probe); | ||
| 1387 | } | ||
| 1388 | subsys_initcall(sdma_module_init); | ||
| 1389 | |||
| 1390 | MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); | ||
| 1391 | MODULE_DESCRIPTION("i.MX SDMA driver"); | ||
| 1392 | MODULE_LICENSE("GPL"); | ||
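Because sdma_alloc_chan_resources() reads chan->private, a client of this driver obtains its channel through dma_request_channel() with a filter callback that attaches a struct imx_dma_data. A hedged sketch of that pattern (field values are examples; a real filter would also verify the channel belongs to the SDMA device before claiming it):

/* sketch only: hypothetical client obtaining an SDMA channel */
static bool my_sdma_filter(struct dma_chan *chan, void *param)
{
        /* attach the slave data and claim the channel; a real filter
         * would first check chan->device against the SDMA engine */
        chan->private = param;
        return true;
}

static struct dma_chan *my_get_channel(void)
{
        static struct imx_dma_data data = {
                .dma_request     = 1,                   /* example event line */
                .peripheral_type = IMX_DMATYPE_SSI,
                .priority        = DMA_PRIO_HIGH,
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, my_sdma_filter, &data);
}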
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index c2591e8d9b6e..338bc4eed1f3 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | */ | 25 | */ |
| 26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
| 27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
| 28 | #include <linux/pm_runtime.h> | ||
| 28 | #include <linux/intel_mid_dma.h> | 29 | #include <linux/intel_mid_dma.h> |
| 29 | 30 | ||
| 30 | #define MAX_CHAN 4 /*max ch across controllers*/ | 31 | #define MAX_CHAN 4 /*max ch across controllers*/ |
| @@ -91,13 +92,13 @@ static int get_block_ts(int len, int tx_width, int block_size) | |||
| 91 | int byte_width = 0, block_ts = 0; | 92 | int byte_width = 0, block_ts = 0; |
| 92 | 93 | ||
| 93 | switch (tx_width) { | 94 | switch (tx_width) { |
| 94 | case LNW_DMA_WIDTH_8BIT: | 95 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
| 95 | byte_width = 1; | 96 | byte_width = 1; |
| 96 | break; | 97 | break; |
| 97 | case LNW_DMA_WIDTH_16BIT: | 98 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
| 98 | byte_width = 2; | 99 | byte_width = 2; |
| 99 | break; | 100 | break; |
| 100 | case LNW_DMA_WIDTH_32BIT: | 101 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
| 101 | default: | 102 | default: |
| 102 | byte_width = 4; | 103 | byte_width = 4; |
| 103 | break; | 104 | break; |
| @@ -247,16 +248,17 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
| 247 | struct middma_device *mid = to_middma_device(midc->chan.device); | 248 | struct middma_device *mid = to_middma_device(midc->chan.device); |
| 248 | 249 | ||
| 249 | /* channel is idle */ | 250 | /* channel is idle */ |
| 250 | if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { | 251 | if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { |
| 251 | /*error*/ | 252 | /*error*/ |
| 252 | pr_err("ERR_MDMA: channel is busy in start\n"); | 253 | pr_err("ERR_MDMA: channel is busy in start\n"); |
| 253 | /* The tasklet will hopefully advance the queue... */ | 254 | /* The tasklet will hopefully advance the queue... */ |
| 254 | return; | 255 | return; |
| 255 | } | 256 | } |
| 256 | 257 | midc->busy = true; | |
| 257 | /*write registers and en*/ | 258 | /*write registers and en*/ |
| 258 | iowrite32(first->sar, midc->ch_regs + SAR); | 259 | iowrite32(first->sar, midc->ch_regs + SAR); |
| 259 | iowrite32(first->dar, midc->ch_regs + DAR); | 260 | iowrite32(first->dar, midc->ch_regs + DAR); |
| 261 | iowrite32(first->lli_phys, midc->ch_regs + LLP); | ||
| 260 | iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); | 262 | iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); |
| 261 | iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); | 263 | iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); |
| 262 | iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); | 264 | iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); |
| @@ -264,9 +266,9 @@ static void midc_dostart(struct intel_mid_dma_chan *midc, | |||
| 264 | pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", | 266 | pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", |
| 265 | (int)first->sar, (int)first->dar, first->cfg_hi, | 267 | (int)first->sar, (int)first->dar, first->cfg_hi, |
| 266 | first->cfg_lo, first->ctl_hi, first->ctl_lo); | 268 | first->cfg_lo, first->ctl_hi, first->ctl_lo); |
| 269 | first->status = DMA_IN_PROGRESS; | ||
| 267 | 270 | ||
| 268 | iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | 271 | iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); |
| 269 | first->status = DMA_IN_PROGRESS; | ||
| 270 | } | 272 | } |
| 271 | 273 | ||
| 272 | /** | 274 | /** |
| @@ -283,20 +285,36 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, | |||
| 283 | { | 285 | { |
| 284 | struct dma_async_tx_descriptor *txd = &desc->txd; | 286 | struct dma_async_tx_descriptor *txd = &desc->txd; |
| 285 | dma_async_tx_callback callback_txd = NULL; | 287 | dma_async_tx_callback callback_txd = NULL; |
| 288 | struct intel_mid_dma_lli *llitem; | ||
| 286 | void *param_txd = NULL; | 289 | void *param_txd = NULL; |
| 287 | 290 | ||
| 288 | midc->completed = txd->cookie; | 291 | midc->completed = txd->cookie; |
| 289 | callback_txd = txd->callback; | 292 | callback_txd = txd->callback; |
| 290 | param_txd = txd->callback_param; | 293 | param_txd = txd->callback_param; |
| 291 | 294 | ||
| 292 | list_move(&desc->desc_node, &midc->free_list); | 295 | if (desc->lli != NULL) { |
| 293 | 296 | /*clear the DONE bit of completed LLI in memory*/ | |
| 297 | llitem = desc->lli + desc->current_lli; | ||
| 298 | llitem->ctl_hi &= CLEAR_DONE; | ||
| 299 | if (desc->current_lli < desc->lli_length-1) | ||
| 300 | (desc->current_lli)++; | ||
| 301 | else | ||
| 302 | desc->current_lli = 0; | ||
| 303 | } | ||
| 294 | spin_unlock_bh(&midc->lock); | 304 | spin_unlock_bh(&midc->lock); |
| 295 | if (callback_txd) { | 305 | if (callback_txd) { |
| 296 | pr_debug("MDMA: TXD callback set ... calling\n"); | 306 | pr_debug("MDMA: TXD callback set ... calling\n"); |
| 297 | callback_txd(param_txd); | 307 | callback_txd(param_txd); |
| 298 | spin_lock_bh(&midc->lock); | 308 | } |
| 299 | return; | 309 | if (midc->raw_tfr) { |
| 310 | desc->status = DMA_SUCCESS; | ||
| 311 | if (desc->lli != NULL) { | ||
| 312 | pci_pool_free(desc->lli_pool, desc->lli, | ||
| 313 | desc->lli_phys); | ||
| 314 | pci_pool_destroy(desc->lli_pool); | ||
| 315 | } | ||
| 316 | list_move(&desc->desc_node, &midc->free_list); | ||
| 317 | midc->busy = false; | ||
| 300 | } | 318 | } |
| 301 | spin_lock_bh(&midc->lock); | 319 | spin_lock_bh(&midc->lock); |
| 302 | 320 | ||
| @@ -317,14 +335,89 @@ static void midc_scan_descriptors(struct middma_device *mid, | |||
| 317 | 335 | ||
| 318 | /*tx is complete*/ | 336 | /*tx is complete*/ |
| 319 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 337 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
| 320 | if (desc->status == DMA_IN_PROGRESS) { | 338 | if (desc->status == DMA_IN_PROGRESS) |
| 321 | desc->status = DMA_SUCCESS; | ||
| 322 | midc_descriptor_complete(midc, desc); | 339 | midc_descriptor_complete(midc, desc); |
| 323 | } | ||
| 324 | } | 340 | } |
| 325 | return; | 341 | return; |
| 326 | } | 342 | } |
| 343 | /** | ||
| 344 | * midc_lli_fill_sg - Helper function to convert | ||
| 345 | * SG list to Linked List Items. | ||
| 346 | * @midc: Channel | ||
| 347 | * @desc: DMA descriptor | ||
| 348 | * @sglist: Pointer to SG list | ||
| 349 | * @sglen: SG list length | ||
| 350 | * @flags: DMA transaction flags | ||
| 351 | * | ||
| 352 | * Walk through the SG list and convert the SG list into Linked | ||
| 353 | * List Items (LLI). | ||
| 354 | */ | ||
| 355 | static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, | ||
| 356 | struct intel_mid_dma_desc *desc, | ||
| 357 | struct scatterlist *sglist, | ||
| 358 | unsigned int sglen, | ||
| 359 | unsigned int flags) | ||
| 360 | { | ||
| 361 | struct intel_mid_dma_slave *mids; | ||
| 362 | struct scatterlist *sg; | ||
| 363 | dma_addr_t lli_next, sg_phy_addr; | ||
| 364 | struct intel_mid_dma_lli *lli_bloc_desc; | ||
| 365 | union intel_mid_dma_ctl_lo ctl_lo; | ||
| 366 | union intel_mid_dma_ctl_hi ctl_hi; | ||
| 367 | int i; | ||
| 327 | 368 | ||
| 369 | pr_debug("MDMA: Entered midc_lli_fill_sg\n"); | ||
| 370 | mids = midc->mid_slave; | ||
| 371 | |||
| 372 | lli_bloc_desc = desc->lli; | ||
| 373 | lli_next = desc->lli_phys; | ||
| 374 | |||
| 375 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
| 376 | ctl_hi.ctl_hi = desc->ctl_hi; | ||
| 377 | for_each_sg(sglist, sg, sglen, i) { | ||
| 378 | /*Populate CTL_LOW and LLI values*/ | ||
| 379 | if (i != sglen - 1) { | ||
| 380 | lli_next = lli_next + | ||
| 381 | sizeof(struct intel_mid_dma_lli); | ||
| 382 | } else { | ||
| 383 | /*Check for circular list; otherwise terminate the LLI chain with zero*/ | ||
| 384 | if (flags & DMA_PREP_CIRCULAR_LIST) { | ||
| 385 | pr_debug("MDMA: LLI is configured in circular mode\n"); | ||
| 386 | lli_next = desc->lli_phys; | ||
| 387 | } else { | ||
| 388 | lli_next = 0; | ||
| 389 | ctl_lo.ctlx.llp_dst_en = 0; | ||
| 390 | ctl_lo.ctlx.llp_src_en = 0; | ||
| 391 | } | ||
| 392 | } | ||
| 393 | /*Populate CTL_HI values*/ | ||
| 394 | ctl_hi.ctlx.block_ts = get_block_ts(sg->length, | ||
| 395 | desc->width, | ||
| 396 | midc->dma->block_size); | ||
| 397 | /*Populate SAR and DAR values*/ | ||
| 398 | sg_phy_addr = sg_phys(sg); | ||
| 399 | if (desc->dirn == DMA_TO_DEVICE) { | ||
| 400 | lli_bloc_desc->sar = sg_phy_addr; | ||
| 401 | lli_bloc_desc->dar = mids->dma_slave.dst_addr; | ||
| 402 | } else if (desc->dirn == DMA_FROM_DEVICE) { | ||
| 403 | lli_bloc_desc->sar = mids->dma_slave.src_addr; | ||
| 404 | lli_bloc_desc->dar = sg_phy_addr; | ||
| 405 | } | ||
| 406 | /*Copy values into the block descriptor in system memory*/ | ||
| 407 | lli_bloc_desc->llp = lli_next; | ||
| 408 | lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; | ||
| 409 | lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; | ||
| 410 | |||
| 411 | lli_bloc_desc++; | ||
| 412 | } | ||
| 413 | /*Copy very first LLI values to descriptor*/ | ||
| 414 | desc->ctl_lo = desc->lli->ctl_lo; | ||
| 415 | desc->ctl_hi = desc->lli->ctl_hi; | ||
| 416 | desc->sar = desc->lli->sar; | ||
| 417 | desc->dar = desc->lli->dar; | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | } | ||
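Each linked-list item's llp field holds the physical address of the next item; the last item either points back to the first (DMA_PREP_CIRCULAR_LIST) or terminates the chain with zero, as midc_lli_fill_sg() does above. A runnable sketch of building such a chain, with a made-up base address standing in for the pci_pool's DMA address:

#include <stdio.h>

#define NITEMS 3

struct lli {
        unsigned int sar, dar;
        unsigned int llp;       /* "physical" address of the next item */
};

int main(void)
{
        struct lli chain[NITEMS] = { {0} };
        unsigned int base = 0x1000;     /* pretend DMA address of chain[0] */
        int circular = 1, i;

        for (i = 0; i < NITEMS; i++) {
                if (i != NITEMS - 1)
                        chain[i].llp = base + (i + 1) * sizeof(struct lli);
                else
                        chain[i].llp = circular ? base : 0; /* wrap or end */
        }
        for (i = 0; i < NITEMS; i++)
                printf("lli %d -> llp %#x\n", i, chain[i].llp);
        return 0;
}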
| 328 | /***************************************************************************** | 421 | /***************************************************************************** |
| 329 | DMA engine callback Functions*/ | 422 | DMA engine callback Functions*/ |
| 330 | /** | 423 | /** |
| @@ -349,12 +442,12 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 349 | desc->txd.cookie = cookie; | 442 | desc->txd.cookie = cookie; |
| 350 | 443 | ||
| 351 | 444 | ||
| 352 | if (list_empty(&midc->active_list)) { | 445 | if (list_empty(&midc->active_list)) |
| 353 | midc_dostart(midc, desc); | ||
| 354 | list_add_tail(&desc->desc_node, &midc->active_list); | 446 | list_add_tail(&desc->desc_node, &midc->active_list); |
| 355 | } else { | 447 | else |
| 356 | list_add_tail(&desc->desc_node, &midc->queue); | 448 | list_add_tail(&desc->desc_node, &midc->queue); |
| 357 | } | 449 | |
| 450 | midc_dostart(midc, desc); | ||
| 358 | spin_unlock_bh(&midc->lock); | 451 | spin_unlock_bh(&midc->lock); |
| 359 | 452 | ||
| 360 | return cookie; | 453 | return cookie; |
| @@ -414,6 +507,23 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, | |||
| 414 | return ret; | 507 | return ret; |
| 415 | } | 508 | } |
| 416 | 509 | ||
| 510 | static int dma_slave_control(struct dma_chan *chan, unsigned long arg) | ||
| 511 | { | ||
| 512 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | ||
| 513 | struct dma_slave_config *slave = (struct dma_slave_config *)arg; | ||
| 514 | struct intel_mid_dma_slave *mid_slave; | ||
| 515 | |||
| 516 | BUG_ON(!midc); | ||
| 517 | BUG_ON(!slave); | ||
| 518 | pr_debug("MDMA: slave control called\n"); | ||
| 519 | |||
| 520 | mid_slave = to_intel_mid_dma_slave(slave); | ||
| 521 | |||
| 522 | BUG_ON(!mid_slave); | ||
| 523 | |||
| 524 | midc->mid_slave = mid_slave; | ||
| 525 | return 0; | ||
| 526 | } | ||
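to_intel_mid_dma_slave() recovers the driver's wrapper structure from the embedded struct dma_slave_config — the usual container_of() pattern. A runnable user-space demonstration with stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_slave_config { int direction; };

/* stand-in for struct intel_mid_dma_slave */
struct mid_slave {
        int hs_mode;
        struct dma_slave_config dma_slave;
};

int main(void)
{
        struct mid_slave s = { .hs_mode = 1 };
        struct dma_slave_config *cfg = &s.dma_slave;
        struct mid_slave *back = container_of(cfg, struct mid_slave, dma_slave);

        printf("recovered hs_mode = %d\n", back->hs_mode);
        return 0;
}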
| 417 | /** | 527 | /** |
| 418 | * intel_mid_dma_device_control - DMA device control | 528 | * intel_mid_dma_device_control - DMA device control |
| 419 | * @chan: chan for DMA control | 529 | * @chan: chan for DMA control |
| @@ -428,49 +538,41 @@ static int intel_mid_dma_device_control(struct dma_chan *chan, | |||
| 428 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); | 538 | struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); |
| 429 | struct middma_device *mid = to_middma_device(chan->device); | 539 | struct middma_device *mid = to_middma_device(chan->device); |
| 430 | struct intel_mid_dma_desc *desc, *_desc; | 540 | struct intel_mid_dma_desc *desc, *_desc; |
| 431 | LIST_HEAD(list); | 541 | union intel_mid_dma_cfg_lo cfg_lo; |
| 542 | |||
| 543 | if (cmd == DMA_SLAVE_CONFIG) | ||
| 544 | return dma_slave_control(chan, arg); | ||
| 432 | 545 | ||
| 433 | if (cmd != DMA_TERMINATE_ALL) | 546 | if (cmd != DMA_TERMINATE_ALL) |
| 434 | return -ENXIO; | 547 | return -ENXIO; |
| 435 | 548 | ||
| 436 | spin_lock_bh(&midc->lock); | 549 | spin_lock_bh(&midc->lock); |
| 437 | if (midc->in_use == false) { | 550 | if (midc->busy == false) { |
| 438 | spin_unlock_bh(&midc->lock); | 551 | spin_unlock_bh(&midc->lock); |
| 439 | return 0; | 552 | return 0; |
| 440 | } | 553 | } |
| 441 | list_splice_init(&midc->free_list, &list); | 554 | /*Suspend and disable the channel*/ |
| 442 | midc->descs_allocated = 0; | 555 | cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); |
| 443 | midc->slave = NULL; | 556 | cfg_lo.cfgx.ch_susp = 1; |
| 444 | 557 | iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); | |
| 558 | iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); | ||
| 559 | midc->busy = false; | ||
| 445 | /* Disable interrupts */ | 560 | /* Disable interrupts */ |
| 446 | disable_dma_interrupt(midc); | 561 | disable_dma_interrupt(midc); |
| 562 | midc->descs_allocated = 0; | ||
| 447 | 563 | ||
| 448 | spin_unlock_bh(&midc->lock); | 564 | spin_unlock_bh(&midc->lock); |
| 449 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 565 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
| 450 | pr_debug("MDMA: freeing descriptor %p\n", desc); | 566 | if (desc->lli != NULL) { |
| 451 | pci_pool_free(mid->dma_pool, desc, desc->txd.phys); | 567 | pci_pool_free(desc->lli_pool, desc->lli, |
| 568 | desc->lli_phys); | ||
| 569 | pci_pool_destroy(desc->lli_pool); | ||
| 570 | } | ||
| 571 | list_move(&desc->desc_node, &midc->free_list); | ||
| 452 | } | 572 | } |
| 453 | return 0; | 573 | return 0; |
| 454 | } | 574 | } |
| 455 | 575 | ||
| 456 | /** | ||
| 457 | * intel_mid_dma_prep_slave_sg - Prep slave sg txn | ||
| 458 | * @chan: chan for DMA transfer | ||
| 459 | * @sgl: scatter gather list | ||
| 460 | * @sg_len: length of sg txn | ||
| 461 | * @direction: DMA transfer dirtn | ||
| 462 | * @flags: DMA flags | ||
| 463 | * | ||
| 464 | * Do DMA sg txn: NOT supported now | ||
| 465 | */ | ||
| 466 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | ||
| 467 | struct dma_chan *chan, struct scatterlist *sgl, | ||
| 468 | unsigned int sg_len, enum dma_data_direction direction, | ||
| 469 | unsigned long flags) | ||
| 470 | { | ||
| 471 | /*not supported now*/ | ||
| 472 | return NULL; | ||
| 473 | } | ||
| 474 | 576 | ||
| 475 | /** | 577 | /** |
| 476 | * intel_mid_dma_prep_memcpy - Prep memcpy txn | 578 | * intel_mid_dma_prep_memcpy - Prep memcpy txn |
| @@ -495,23 +597,24 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
| 495 | union intel_mid_dma_ctl_hi ctl_hi; | 597 | union intel_mid_dma_ctl_hi ctl_hi; |
| 496 | union intel_mid_dma_cfg_lo cfg_lo; | 598 | union intel_mid_dma_cfg_lo cfg_lo; |
| 497 | union intel_mid_dma_cfg_hi cfg_hi; | 599 | union intel_mid_dma_cfg_hi cfg_hi; |
| 498 | enum intel_mid_dma_width width = 0; | 600 | enum dma_slave_buswidth width; |
| 499 | 601 | ||
| 500 | pr_debug("MDMA: Prep for memcpy\n"); | 602 | pr_debug("MDMA: Prep for memcpy\n"); |
| 501 | WARN_ON(!chan); | 603 | BUG_ON(!chan); |
| 502 | if (!len) | 604 | if (!len) |
| 503 | return NULL; | 605 | return NULL; |
| 504 | 606 | ||
| 505 | mids = chan->private; | ||
| 506 | WARN_ON(!mids); | ||
| 507 | |||
| 508 | midc = to_intel_mid_dma_chan(chan); | 607 | midc = to_intel_mid_dma_chan(chan); |
| 509 | WARN_ON(!midc); | 608 | BUG_ON(!midc); |
| 609 | |||
| 610 | mids = midc->mid_slave; | ||
| 611 | BUG_ON(!mids); | ||
| 510 | 612 | ||
| 511 | pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", | 613 | pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", |
| 512 | midc->dma->pci_id, midc->ch_id, len); | 614 | midc->dma->pci_id, midc->ch_id, len); |
| 513 | pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", | 615 | pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", |
| 514 | mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); | 616 | mids->cfg_mode, mids->dma_slave.direction, |
| 617 | mids->hs_mode, mids->dma_slave.src_addr_width); | ||
| 515 | 618 | ||
| 516 | /*calculate CFG_LO*/ | 619 | /*calculate CFG_LO*/ |
| 517 | if (mids->hs_mode == LNW_DMA_SW_HS) { | 620 | if (mids->hs_mode == LNW_DMA_SW_HS) { |
| @@ -530,13 +633,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
| 530 | if (midc->dma->pimr_mask) { | 633 | if (midc->dma->pimr_mask) { |
| 531 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ | 634 | cfg_hi.cfgx.protctl = 0x0; /*default value*/ |
| 532 | cfg_hi.cfgx.fifo_mode = 1; | 635 | cfg_hi.cfgx.fifo_mode = 1; |
| 533 | if (mids->dirn == DMA_TO_DEVICE) { | 636 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { |
| 534 | cfg_hi.cfgx.src_per = 0; | 637 | cfg_hi.cfgx.src_per = 0; |
| 535 | if (mids->device_instance == 0) | 638 | if (mids->device_instance == 0) |
| 536 | cfg_hi.cfgx.dst_per = 3; | 639 | cfg_hi.cfgx.dst_per = 3; |
| 537 | if (mids->device_instance == 1) | 640 | if (mids->device_instance == 1) |
| 538 | cfg_hi.cfgx.dst_per = 1; | 641 | cfg_hi.cfgx.dst_per = 1; |
| 539 | } else if (mids->dirn == DMA_FROM_DEVICE) { | 642 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { |
| 540 | if (mids->device_instance == 0) | 643 | if (mids->device_instance == 0) |
| 541 | cfg_hi.cfgx.src_per = 2; | 644 | cfg_hi.cfgx.src_per = 2; |
| 542 | if (mids->device_instance == 1) | 645 | if (mids->device_instance == 1) |
| @@ -552,7 +655,8 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
| 552 | 655 | ||
| 553 | /*calculate CTL_HI*/ | 656 | /*calculate CTL_HI*/ |
| 554 | ctl_hi.ctlx.reser = 0; | 657 | ctl_hi.ctlx.reser = 0; |
| 555 | width = mids->src_width; | 658 | ctl_hi.ctlx.done = 0; |
| 659 | width = mids->dma_slave.src_addr_width; | ||
| 556 | 660 | ||
| 557 | ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); | 661 | ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); |
| 558 | pr_debug("MDMA:calc len %d for block size %d\n", | 662 | pr_debug("MDMA:calc len %d for block size %d\n", |
| @@ -560,21 +664,21 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
| 560 | /*calculate CTL_LO*/ | 664 | /*calculate CTL_LO*/ |
| 561 | ctl_lo.ctl_lo = 0; | 665 | ctl_lo.ctl_lo = 0; |
| 562 | ctl_lo.ctlx.int_en = 1; | 666 | ctl_lo.ctlx.int_en = 1; |
| 563 | ctl_lo.ctlx.dst_tr_width = mids->dst_width; | 667 | ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width; |
| 564 | ctl_lo.ctlx.src_tr_width = mids->src_width; | 668 | ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; |
| 565 | ctl_lo.ctlx.dst_msize = mids->src_msize; | 669 | ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; |
| 566 | ctl_lo.ctlx.src_msize = mids->dst_msize; | 670 | ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; |
| 567 | 671 | ||
| 568 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { | 672 | if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { |
| 569 | ctl_lo.ctlx.tt_fc = 0; | 673 | ctl_lo.ctlx.tt_fc = 0; |
| 570 | ctl_lo.ctlx.sinc = 0; | 674 | ctl_lo.ctlx.sinc = 0; |
| 571 | ctl_lo.ctlx.dinc = 0; | 675 | ctl_lo.ctlx.dinc = 0; |
| 572 | } else { | 676 | } else { |
| 573 | if (mids->dirn == DMA_TO_DEVICE) { | 677 | if (mids->dma_slave.direction == DMA_TO_DEVICE) { |
| 574 | ctl_lo.ctlx.sinc = 0; | 678 | ctl_lo.ctlx.sinc = 0; |
| 575 | ctl_lo.ctlx.dinc = 2; | 679 | ctl_lo.ctlx.dinc = 2; |
| 576 | ctl_lo.ctlx.tt_fc = 1; | 680 | ctl_lo.ctlx.tt_fc = 1; |
| 577 | } else if (mids->dirn == DMA_FROM_DEVICE) { | 681 | } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { |
| 578 | ctl_lo.ctlx.sinc = 2; | 682 | ctl_lo.ctlx.sinc = 2; |
| 579 | ctl_lo.ctlx.dinc = 0; | 683 | ctl_lo.ctlx.dinc = 0; |
| 580 | ctl_lo.ctlx.tt_fc = 2; | 684 | ctl_lo.ctlx.tt_fc = 2; |
| @@ -597,7 +701,10 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( | |||
| 597 | desc->ctl_lo = ctl_lo.ctl_lo; | 701 | desc->ctl_lo = ctl_lo.ctl_lo; |
| 598 | desc->ctl_hi = ctl_hi.ctl_hi; | 702 | desc->ctl_hi = ctl_hi.ctl_hi; |
| 599 | desc->width = width; | 703 | desc->width = width; |
| 600 | desc->dirn = mids->dirn; | 704 | desc->dirn = mids->dma_slave.direction; |
| 705 | desc->lli_phys = 0; | ||
| 706 | desc->lli = NULL; | ||
| 707 | desc->lli_pool = NULL; | ||
| 601 | return &desc->txd; | 708 | return &desc->txd; |
| 602 | 709 | ||
| 603 | err_desc_get: | 710 | err_desc_get: |
| @@ -605,6 +712,85 @@ err_desc_get: | |||
| 605 | midc_desc_put(midc, desc); | 712 | midc_desc_put(midc, desc); |
| 606 | return NULL; | 713 | return NULL; |
| 607 | } | 714 | } |
| 715 | /** | ||
| 716 | * intel_mid_dma_prep_slave_sg - Prep slave sg txn | ||
| 717 | * @chan: chan for DMA transfer | ||
| 718 | * @sgl: scatter gather list | ||
| 719 | * @sg_len: length of sg txn | ||
| 720 | * @direction: DMA transfer direction | ||
| 721 | * @flags: DMA flags | ||
| 722 | * | ||
| 723 | * Prepares an LLI-based peripheral transfer | ||
| 724 | */ | ||
| 725 | static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( | ||
| 726 | struct dma_chan *chan, struct scatterlist *sgl, | ||
| 727 | unsigned int sg_len, enum dma_data_direction direction, | ||
| 728 | unsigned long flags) | ||
| 729 | { | ||
| 730 | struct intel_mid_dma_chan *midc = NULL; | ||
| 731 | struct intel_mid_dma_slave *mids = NULL; | ||
| 732 | struct intel_mid_dma_desc *desc = NULL; | ||
| 733 | struct dma_async_tx_descriptor *txd = NULL; | ||
| 734 | union intel_mid_dma_ctl_lo ctl_lo; | ||
| 735 | |||
| 736 | pr_debug("MDMA: Prep for slave SG\n"); | ||
| 737 | |||
| 738 | if (!sg_len) { | ||
| 739 | pr_err("MDMA: Invalid SG length\n"); | ||
| 740 | return NULL; | ||
| 741 | } | ||
| 742 | midc = to_intel_mid_dma_chan(chan); | ||
| 743 | BUG_ON(!midc); | ||
| 744 | |||
| 745 | mids = midc->mid_slave; | ||
| 746 | BUG_ON(!mids); | ||
| 747 | |||
| 748 | if (!midc->dma->pimr_mask) { | ||
| 749 | pr_debug("MDMA: SG list is not supported by this controller\n"); | ||
| 750 | return NULL; | ||
| 751 | } | ||
| 752 | |||
| 753 | pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", | ||
| 754 | sg_len, direction, flags); | ||
| 755 | |||
| 756 | txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); | ||
| 757 | if (NULL == txd) { | ||
| 758 | pr_err("MDMA: Prep memcpy failed\n"); | ||
| 759 | return NULL; | ||
| 760 | } | ||
| 761 | desc = to_intel_mid_dma_desc(txd); | ||
| 762 | desc->dirn = direction; | ||
| 763 | ctl_lo.ctl_lo = desc->ctl_lo; | ||
| 764 | ctl_lo.ctlx.llp_dst_en = 1; | ||
| 765 | ctl_lo.ctlx.llp_src_en = 1; | ||
| 766 | desc->ctl_lo = ctl_lo.ctl_lo; | ||
| 767 | desc->lli_length = sg_len; | ||
| 768 | desc->current_lli = 0; | ||
| 769 | /* DMA coherent memory pool for LLI descriptors*/ | ||
| 770 | desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", | ||
| 771 | midc->dma->pdev, | ||
| 772 | (sizeof(struct intel_mid_dma_lli)*sg_len), | ||
| 773 | 32, 0); | ||
| 774 | if (NULL == desc->lli_pool) { | ||
| 775 | pr_err("MID_DMA:LLI pool create failed\n"); | ||
| 776 | return NULL; | ||
| 777 | } | ||
| 778 | |||
| 779 | desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); | ||
| 780 | if (!desc->lli) { | ||
| 781 | pr_err("MID_DMA: LLI alloc failed\n"); | ||
| 782 | pci_pool_destroy(desc->lli_pool); | ||
| 783 | return NULL; | ||
| 784 | } | ||
| 785 | |||
| 786 | midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); | ||
| 787 | if (flags & DMA_PREP_INTERRUPT) { | ||
| 788 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
| 789 | midc->dma_base + MASK_BLOCK); | ||
| 790 | pr_debug("MDMA:Enabled Block interrupt\n"); | ||
| 791 | } | ||
| 792 | return &desc->txd; | ||
| 793 | } | ||
| 608 | 794 | ||
| 609 | /** | 795 | /** |
| 610 | * intel_mid_dma_free_chan_resources - Frees dma resources | 796 | * intel_mid_dma_free_chan_resources - Frees dma resources |
| @@ -618,11 +804,11 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
| 618 | struct middma_device *mid = to_middma_device(chan->device); | 804 | struct middma_device *mid = to_middma_device(chan->device); |
| 619 | struct intel_mid_dma_desc *desc, *_desc; | 805 | struct intel_mid_dma_desc *desc, *_desc; |
| 620 | 806 | ||
| 621 | if (true == midc->in_use) { | 807 | if (true == midc->busy) { |
| 622 | /*trying to free ch in use!!!!!*/ | 808 | /*trying to free ch in use!!!!!*/ |
| 623 | pr_err("ERR_MDMA: trying to free ch in use\n"); | 809 | pr_err("ERR_MDMA: trying to free ch in use\n"); |
| 624 | } | 810 | } |
| 625 | 811 | pm_runtime_put(&mid->pdev->dev); | |
| 626 | spin_lock_bh(&midc->lock); | 812 | spin_lock_bh(&midc->lock); |
| 627 | midc->descs_allocated = 0; | 813 | midc->descs_allocated = 0; |
| 628 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { | 814 | list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { |
| @@ -639,6 +825,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) | |||
| 639 | } | 825 | } |
| 640 | spin_unlock_bh(&midc->lock); | 826 | spin_unlock_bh(&midc->lock); |
| 641 | midc->in_use = false; | 827 | midc->in_use = false; |
| 828 | midc->busy = false; | ||
| 642 | /* Disable CH interrupts */ | 829 | /* Disable CH interrupts */ |
| 643 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); | 830 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); |
| 644 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); | 831 | iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); |
| @@ -659,11 +846,20 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 659 | dma_addr_t phys; | 846 | dma_addr_t phys; |
| 660 | int i = 0; | 847 | int i = 0; |
| 661 | 848 | ||
| 849 | pm_runtime_get_sync(&mid->pdev->dev); | ||
| 850 | |||
| 851 | if (mid->state == SUSPENDED) { | ||
| 852 | if (dma_resume(mid->pdev)) { | ||
| 853 | pr_err("ERR_MDMA: resume failed"); | ||
| 854 | return -EFAULT; | ||
| 855 | } | ||
| 856 | } | ||
| 662 | 857 | ||
| 663 | /* ASSERT: channel is idle */ | 858 | /* ASSERT: channel is idle */ |
| 664 | if (test_ch_en(mid->dma_base, midc->ch_id)) { | 859 | if (test_ch_en(mid->dma_base, midc->ch_id)) { |
| 665 | /*ch is not idle*/ | 860 | /*ch is not idle*/ |
| 666 | pr_err("ERR_MDMA: ch not idle\n"); | 861 | pr_err("ERR_MDMA: ch not idle\n"); |
| 862 | pm_runtime_put(&mid->pdev->dev); | ||
| 667 | return -EIO; | 863 | return -EIO; |
| 668 | } | 864 | } |
| 669 | midc->completed = chan->cookie = 1; | 865 | midc->completed = chan->cookie = 1; |
| @@ -674,6 +870,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 674 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); | 870 | desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); |
| 675 | if (!desc) { | 871 | if (!desc) { |
| 676 | pr_err("ERR_MDMA: desc failed\n"); | 872 | pr_err("ERR_MDMA: desc failed\n"); |
| 873 | pm_runtime_put(&mid->pdev->dev); | ||
| 677 | return -ENOMEM; | 874 | return -ENOMEM; |
| 678 | /*check*/ | 875 | /*check*/ |
| 679 | } | 876 | } |
| @@ -686,7 +883,8 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) | |||
| 686 | list_add_tail(&desc->desc_node, &midc->free_list); | 883 | list_add_tail(&desc->desc_node, &midc->free_list); |
| 687 | } | 884 | } |
| 688 | spin_unlock_bh(&midc->lock); | 885 | spin_unlock_bh(&midc->lock); |
| 689 | midc->in_use = false; | 886 | midc->in_use = true; |
| 887 | midc->busy = false; | ||
| 690 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); | 888 | pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); |
| 691 | return i; | 889 | return i; |
| 692 | } | 890 | } |
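A minimal sketch of the runtime-PM reference pattern these alloc/free hunks introduce. The example_* names are invented for illustration; pm_runtime_get_sync() and pm_runtime_put() are the real kernel API, and the point is that every error path must drop the reference the alloc path took:

    #include <linux/errno.h>
    #include <linux/pm_runtime.h>
    #include <linux/types.h>

    static int example_alloc_chan(struct device *dev, bool ch_busy)
    {
            pm_runtime_get_sync(dev);       /* wake the controller */

            if (ch_busy) {
                    pm_runtime_put(dev);    /* every error path drops the ref */
                    return -EIO;
            }
            return 0;                       /* hold the ref while in use */
    }

    static void example_free_chan(struct device *dev)
    {
            pm_runtime_put(dev);            /* balances example_alloc_chan() */
    }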
| @@ -715,7 +913,7 @@ static void dma_tasklet(unsigned long data) | |||
| 715 | { | 913 | { |
| 716 | struct middma_device *mid = NULL; | 914 | struct middma_device *mid = NULL; |
| 717 | struct intel_mid_dma_chan *midc = NULL; | 915 | struct intel_mid_dma_chan *midc = NULL; |
| 718 | u32 status; | 916 | u32 status, raw_tfr, raw_block; |
| 719 | int i; | 917 | int i; |
| 720 | 918 | ||
| 721 | mid = (struct middma_device *)data; | 919 | mid = (struct middma_device *)data; |
| @@ -724,8 +922,9 @@ static void dma_tasklet(unsigned long data) | |||
| 724 | return; | 922 | return; |
| 725 | } | 923 | } |
| 726 | pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); | 924 | pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); |
| 727 | status = ioread32(mid->dma_base + RAW_TFR); | 925 | raw_tfr = ioread32(mid->dma_base + RAW_TFR); |
| 728 | pr_debug("MDMA:RAW_TFR %x\n", status); | 926 | raw_block = ioread32(mid->dma_base + RAW_BLOCK); |
| 927 | status = raw_tfr | raw_block; | ||
| 729 | status &= mid->intr_mask; | 928 | status &= mid->intr_mask; |
| 730 | while (status) { | 929 | while (status) { |
| 731 | /*txn interrupt*/ | 930 | /*txn interrupt*/ |
| @@ -741,15 +940,23 @@ static void dma_tasklet(unsigned long data) | |||
| 741 | } | 940 | } |
| 742 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", | 941 | pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", |
| 743 | status, midc->ch_id, i); | 942 | status, midc->ch_id, i); |
| 943 | midc->raw_tfr = raw_tfr; | ||
| 944 | midc->raw_block = raw_block; | ||
| 945 | spin_lock_bh(&midc->lock); | ||
| 744 | /*clearing these interrupts first*/ | 946 | /*clearing these interrupts first*/ |
| 745 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); | 947 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); |
| 746 | iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); | 948 | if (raw_block) { |
| 747 | 949 | iowrite32((1 << midc->ch_id), | |
| 748 | spin_lock_bh(&midc->lock); | 950 | mid->dma_base + CLEAR_BLOCK); |
| 951 | } | ||
| 749 | midc_scan_descriptors(mid, midc); | 952 | midc_scan_descriptors(mid, midc); |
| 750 | pr_debug("MDMA:Scan of desc... complete, unmasking\n"); | 953 | pr_debug("MDMA:Scan of desc... complete, unmasking\n"); |
| 751 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | 954 | iowrite32(UNMASK_INTR_REG(midc->ch_id), |
| 752 | mid->dma_base + MASK_TFR); | 955 | mid->dma_base + MASK_TFR); |
| 956 | if (raw_block) { | ||
| 957 | iowrite32(UNMASK_INTR_REG(midc->ch_id), | ||
| 958 | mid->dma_base + MASK_BLOCK); | ||
| 959 | } | ||
| 753 | spin_unlock_bh(&midc->lock); | 960 | spin_unlock_bh(&midc->lock); |
| 754 | } | 961 | } |
| 755 | 962 | ||
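The tasklet now folds both raw status registers into one word before walking the channels. A standalone userspace model of that walk, with register values invented for the demo:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t raw_tfr = 0x05, raw_block = 0x04, intr_mask = 0x0f;
            uint32_t status = (raw_tfr | raw_block) & intr_mask;
            int ch;

            /* peel channels off the combined status, lowest first */
            for (ch = 0; status; ch++, status >>= 1) {
                    if (!(status & 1))
                            continue;
                    printf("service ch %d%s\n", ch,
                           (raw_block >> ch) & 1 ? " (block irq too)" : "");
            }
            return 0;
    }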
| @@ -804,9 +1011,14 @@ static void dma_tasklet2(unsigned long data) | |||
| 804 | static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | 1011 | static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) |
| 805 | { | 1012 | { |
| 806 | struct middma_device *mid = data; | 1013 | struct middma_device *mid = data; |
| 807 | u32 status; | 1014 | u32 tfr_status, err_status; |
| 808 | int call_tasklet = 0; | 1015 | int call_tasklet = 0; |
| 809 | 1016 | ||
| 1017 | tfr_status = ioread32(mid->dma_base + RAW_TFR); | ||
| 1018 | err_status = ioread32(mid->dma_base + RAW_ERR); | ||
| 1019 | if (!tfr_status && !err_status) | ||
| 1020 | return IRQ_NONE; | ||
| 1021 | |||
| 810 | /*DMA Interrupt*/ | 1022 | /*DMA Interrupt*/ |
| 811 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); | 1023 | pr_debug("MDMA:Got an interrupt on irq %d\n", irq); |
| 812 | if (!mid) { | 1024 | if (!mid) { |
| @@ -814,19 +1026,18 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) | |||
| 814 | return -EINVAL; | 1026 | return -EINVAL; |
| 815 | } | 1027 | } |
| 816 | 1028 | ||
| 817 | status = ioread32(mid->dma_base + RAW_TFR); | 1029 | pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); |
| 818 | pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); | 1030 | tfr_status &= mid->intr_mask; |
| 819 | status &= mid->intr_mask; | 1031 | if (tfr_status) { |
| 820 | if (status) { | ||
| 821 | /*need to disable intr*/ | 1032 | /*need to disable intr*/ |
| 822 | iowrite32((status << 8), mid->dma_base + MASK_TFR); | 1033 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); |
| 823 | pr_debug("MDMA: Calling tasklet %x\n", status); | 1034 | iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); |
| 1035 | pr_debug("MDMA: Calling tasklet %x\n", tfr_status); | ||
| 824 | call_tasklet = 1; | 1036 | call_tasklet = 1; |
| 825 | } | 1037 | } |
| 826 | status = ioread32(mid->dma_base + RAW_ERR); | 1038 | err_status &= mid->intr_mask; |
| 827 | status &= mid->intr_mask; | 1039 | if (err_status) { |
| 828 | if (status) { | 1040 | iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); |
| 829 | iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR); | ||
| 830 | call_tasklet = 1; | 1041 | call_tasklet = 1; |
| 831 | } | 1042 | } |
| 832 | if (call_tasklet) | 1043 | if (call_tasklet) |
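The interrupt paths above depend on the controller's write-enable convention: the upper byte of each MASK register selects which low bits a write actually changes, which is why the handler shifts the status by INT_MASK_WE. A self-contained model of the arithmetic, reusing the header's macro definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define REG_BIT0    0x00000001u
    #define REG_BIT8    0x00000100u
    #define INT_MASK_WE 8                  /* write-enable byte offset */

    #define UNMASK_INTR_REG(ch) ((REG_BIT0 << (ch)) | (REG_BIT8 << (ch)))
    #define MASK_INTR_REG(ch)   (REG_BIT8 << (ch))

    int main(void)
    {
            uint32_t tfr_status = 0x3;     /* channels 0 and 1 pending */

            /* WE bits set, data bits zero: "write 0" == mask those chans */
            printf("mask word:  %#010x\n", tfr_status << INT_MASK_WE); /* 0x300 */
            printf("unmask ch1: %#010x\n", UNMASK_INTR_REG(1));        /* 0x202 */
            printf("mask ch1:   %#010x\n", MASK_INTR_REG(1));          /* 0x200 */
            return 0;
    }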
| @@ -856,7 +1067,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
| 856 | { | 1067 | { |
| 857 | struct middma_device *dma = pci_get_drvdata(pdev); | 1068 | struct middma_device *dma = pci_get_drvdata(pdev); |
| 858 | int err, i; | 1069 | int err, i; |
| 859 | unsigned int irq_level; | ||
| 860 | 1070 | ||
| 861 | /* DMA coherent memory pool for DMA descriptor allocations */ | 1071 | /* DMA coherent memory pool for DMA descriptor allocations */ |
| 862 | dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, | 1072 | dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, |
| @@ -884,6 +1094,7 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
| 884 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); | 1094 | pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); |
| 885 | /*init CH structures*/ | 1095 | /*init CH structures*/ |
| 886 | dma->intr_mask = 0; | 1096 | dma->intr_mask = 0; |
| 1097 | dma->state = RUNNING; | ||
| 887 | for (i = 0; i < dma->max_chan; i++) { | 1098 | for (i = 0; i < dma->max_chan; i++) { |
| 888 | struct intel_mid_dma_chan *midch = &dma->ch[i]; | 1099 | struct intel_mid_dma_chan *midch = &dma->ch[i]; |
| 889 | 1100 | ||
| @@ -943,7 +1154,6 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
| 943 | 1154 | ||
| 944 | /*register irq */ | 1155 | /*register irq */ |
| 945 | if (dma->pimr_mask) { | 1156 | if (dma->pimr_mask) { |
| 946 | irq_level = IRQF_SHARED; | ||
| 947 | pr_debug("MDMA:Requesting irq shared for DMAC1\n"); | 1157 | pr_debug("MDMA:Requesting irq shared for DMAC1\n"); |
| 948 | err = request_irq(pdev->irq, intel_mid_dma_interrupt1, | 1158 | err = request_irq(pdev->irq, intel_mid_dma_interrupt1, |
| 949 | IRQF_SHARED, "INTEL_MID_DMAC1", dma); | 1159 | IRQF_SHARED, "INTEL_MID_DMAC1", dma); |
| @@ -951,10 +1161,9 @@ static int mid_setup_dma(struct pci_dev *pdev) | |||
| 951 | goto err_irq; | 1161 | goto err_irq; |
| 952 | } else { | 1162 | } else { |
| 953 | dma->intr_mask = 0x03; | 1163 | dma->intr_mask = 0x03; |
| 954 | irq_level = 0; | ||
| 955 | pr_debug("MDMA:Requesting irq for DMAC2\n"); | 1164 | pr_debug("MDMA:Requesting irq for DMAC2\n"); |
| 956 | err = request_irq(pdev->irq, intel_mid_dma_interrupt2, | 1165 | err = request_irq(pdev->irq, intel_mid_dma_interrupt2, |
| 957 | 0, "INTEL_MID_DMAC2", dma); | 1166 | IRQF_SHARED, "INTEL_MID_DMAC2", dma); |
| 958 | if (0 != err) | 1167 | if (0 != err) |
| 959 | goto err_irq; | 1168 | goto err_irq; |
| 960 | } | 1169 | } |
| @@ -1070,6 +1279,9 @@ static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, | |||
| 1070 | if (err) | 1279 | if (err) |
| 1071 | goto err_dma; | 1280 | goto err_dma; |
| 1072 | 1281 | ||
| 1282 | pm_runtime_set_active(&pdev->dev); | ||
| 1283 | pm_runtime_enable(&pdev->dev); | ||
| 1284 | pm_runtime_allow(&pdev->dev); | ||
| 1073 | return 0; | 1285 | return 0; |
| 1074 | 1286 | ||
| 1075 | err_dma: | 1287 | err_dma: |
| @@ -1104,6 +1316,85 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) | |||
| 1104 | pci_disable_device(pdev); | 1316 | pci_disable_device(pdev); |
| 1105 | } | 1317 | } |
| 1106 | 1318 | ||
| 1319 | /* Power Management */ | ||
| 1320 | /** | ||
| 1321 | * dma_suspend - PCI suspend function | ||
| 1322 | * | ||
| 1323 | * @pci: PCI device structure | ||
| 1324 | * @state: PM message | ||
| 1325 | * | ||
| 1326 | * This function is called by the OS when a power event occurs | ||
| 1327 | */ | ||
| 1328 | int dma_suspend(struct pci_dev *pci, pm_message_t state) | ||
| 1329 | { | ||
| 1330 | int i; | ||
| 1331 | struct middma_device *device = pci_get_drvdata(pci); | ||
| 1332 | pr_debug("MDMA: dma_suspend called\n"); | ||
| 1333 | |||
| 1334 | for (i = 0; i < device->max_chan; i++) { | ||
| 1335 | if (device->ch[i].in_use) | ||
| 1336 | return -EAGAIN; | ||
| 1337 | } | ||
| 1338 | device->state = SUSPENDED; | ||
| 1339 | pci_set_drvdata(pci, device); | ||
| 1340 | pci_save_state(pci); | ||
| 1341 | pci_disable_device(pci); | ||
| 1342 | pci_set_power_state(pci, PCI_D3hot); | ||
| 1343 | return 0; | ||
| 1344 | } | ||
| 1345 | |||
| 1346 | /** | ||
| 1347 | * dma_resume - PCI resume function | ||
| 1348 | * | ||
| 1349 | * @pci: PCI device structure | ||
| 1350 | * | ||
| 1351 | * This function is called by the OS when a power event occurs | ||
| 1352 | */ | ||
| 1353 | int dma_resume(struct pci_dev *pci) | ||
| 1354 | { | ||
| 1355 | int ret; | ||
| 1356 | struct middma_device *device = pci_get_drvdata(pci); | ||
| 1357 | |||
| 1358 | pr_debug("MDMA: dma_resume called\n"); | ||
| 1359 | pci_set_power_state(pci, PCI_D0); | ||
| 1360 | pci_restore_state(pci); | ||
| 1361 | ret = pci_enable_device(pci); | ||
| 1362 | if (ret) { | ||
| 1363 | pr_err("MDMA: device can't be enabled for %x\n", pci->device); | ||
| 1364 | return ret; | ||
| 1365 | } | ||
| 1366 | device->state = RUNNING; | ||
| 1367 | iowrite32(REG_BIT0, device->dma_base + DMA_CFG); | ||
| 1368 | pci_set_drvdata(pci, device); | ||
| 1369 | return 0; | ||
| 1370 | } | ||
| 1371 | |||
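Stripped of the driver-specific state tracking, dma_suspend() and dma_resume() follow the usual legacy PCI power-management ordering: save config space before cutting power, restore it before re-enabling. A bare sketch using only standard PCI core calls:

    #include <linux/pci.h>

    static int example_pci_suspend(struct pci_dev *pci, pm_message_t state)
    {
            pci_save_state(pci);               /* stash config space */
            pci_disable_device(pci);
            pci_set_power_state(pci, PCI_D3hot);
            return 0;
    }

    static int example_pci_resume(struct pci_dev *pci)
    {
            pci_set_power_state(pci, PCI_D0);  /* power up first */
            pci_restore_state(pci);
            return pci_enable_device(pci);     /* then re-enable */
    }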
| 1372 | static int dma_runtime_suspend(struct device *dev) | ||
| 1373 | { | ||
| 1374 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 1375 | return dma_suspend(pci_dev, PMSG_SUSPEND); | ||
| 1376 | } | ||
| 1377 | |||
| 1378 | static int dma_runtime_resume(struct device *dev) | ||
| 1379 | { | ||
| 1380 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 1381 | return dma_resume(pci_dev); | ||
| 1382 | } | ||
| 1383 | |||
| 1384 | static int dma_runtime_idle(struct device *dev) | ||
| 1385 | { | ||
| 1386 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 1387 | struct middma_device *device = pci_get_drvdata(pdev); | ||
| 1388 | int i; | ||
| 1389 | |||
| 1390 | for (i = 0; i < device->max_chan; i++) { | ||
| 1391 | if (device->ch[i].in_use) | ||
| 1392 | return -EAGAIN; | ||
| 1393 | } | ||
| 1394 | |||
| 1395 | return pm_schedule_suspend(dev, 0); | ||
| 1396 | } | ||
| 1397 | |||
| 1107 | /****************************************************************************** | 1398 | /****************************************************************************** |
| 1108 | * PCI stuff | 1399 | * PCI stuff |
| 1109 | */ | 1400 | */ |
| @@ -1116,11 +1407,24 @@ static struct pci_device_id intel_mid_dma_ids[] = { | |||
| 1116 | }; | 1407 | }; |
| 1117 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); | 1408 | MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); |
| 1118 | 1409 | ||
| 1410 | static const struct dev_pm_ops intel_mid_dma_pm = { | ||
| 1411 | .runtime_suspend = dma_runtime_suspend, | ||
| 1412 | .runtime_resume = dma_runtime_resume, | ||
| 1413 | .runtime_idle = dma_runtime_idle, | ||
| 1414 | }; | ||
| 1415 | |||
| 1119 | static struct pci_driver intel_mid_dma_pci = { | 1416 | static struct pci_driver intel_mid_dma_pci = { |
| 1120 | .name = "Intel MID DMA", | 1417 | .name = "Intel MID DMA", |
| 1121 | .id_table = intel_mid_dma_ids, | 1418 | .id_table = intel_mid_dma_ids, |
| 1122 | .probe = intel_mid_dma_probe, | 1419 | .probe = intel_mid_dma_probe, |
| 1123 | .remove = __devexit_p(intel_mid_dma_remove), | 1420 | .remove = __devexit_p(intel_mid_dma_remove), |
| 1421 | #ifdef CONFIG_PM | ||
| 1422 | .suspend = dma_suspend, | ||
| 1423 | .resume = dma_resume, | ||
| 1424 | .driver = { | ||
| 1425 | .pm = &intel_mid_dma_pm, | ||
| 1426 | }, | ||
| 1427 | #endif | ||
| 1124 | }; | 1428 | }; |
| 1125 | 1429 | ||
| 1126 | static int __init intel_mid_dma_init(void) | 1430 | static int __init intel_mid_dma_init(void) |
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h index d81aa658ab09..709fecbdde79 100644 --- a/drivers/dma/intel_mid_dma_regs.h +++ b/drivers/dma/intel_mid_dma_regs.h | |||
| @@ -29,11 +29,12 @@ | |||
| 29 | #include <linux/dmapool.h> | 29 | #include <linux/dmapool.h> |
| 30 | #include <linux/pci_ids.h> | 30 | #include <linux/pci_ids.h> |
| 31 | 31 | ||
| 32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" | 32 | #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0" |
| 33 | 33 | ||
| 34 | #define REG_BIT0 0x00000001 | 34 | #define REG_BIT0 0x00000001 |
| 35 | #define REG_BIT8 0x00000100 | 35 | #define REG_BIT8 0x00000100 |
| 36 | 36 | #define INT_MASK_WE 0x8 | |
| 37 | #define CLEAR_DONE 0xFFFFEFFF | ||
| 37 | #define UNMASK_INTR_REG(chan_num) \ | 38 | #define UNMASK_INTR_REG(chan_num) \ |
| 38 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | 39 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) |
| 39 | #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) | 40 | #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) |
| @@ -41,6 +42,9 @@ | |||
| 41 | #define ENABLE_CHANNEL(chan_num) \ | 42 | #define ENABLE_CHANNEL(chan_num) \ |
| 42 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) | 43 | ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) |
| 43 | 44 | ||
| 45 | #define DISABLE_CHANNEL(chan_num) \ | ||
| 46 | (REG_BIT8 << chan_num) | ||
| 47 | |||
| 44 | #define DESCS_PER_CHANNEL 16 | 48 | #define DESCS_PER_CHANNEL 16 |
| 45 | /*DMA Registers*/ | 49 | /*DMA Registers*/ |
| 46 | /*registers associated with channel programming*/ | 50 | /*registers associated with channel programming*/ |
| @@ -50,6 +54,7 @@ | |||
| 50 | /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ | 54 | /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ |
| 51 | #define SAR 0x00 /* Source Address Register*/ | 55 | #define SAR 0x00 /* Source Address Register*/ |
| 52 | #define DAR 0x08 /* Destination Address Register*/ | 56 | #define DAR 0x08 /* Destination Address Register*/ |
| 57 | #define LLP 0x10 /* Linked List Pointer Register*/ | ||
| 53 | #define CTL_LOW 0x18 /* Control Register*/ | 58 | #define CTL_LOW 0x18 /* Control Register*/ |
| 54 | #define CTL_HIGH 0x1C /* Control Register*/ | 59 | #define CTL_HIGH 0x1C /* Control Register*/ |
| 55 | #define CFG_LOW 0x40 /* Configuration Register Low*/ | 60 | #define CFG_LOW 0x40 /* Configuration Register Low*/ |
| @@ -112,8 +117,8 @@ union intel_mid_dma_ctl_lo { | |||
| 112 | union intel_mid_dma_ctl_hi { | 117 | union intel_mid_dma_ctl_hi { |
| 113 | struct { | 118 | struct { |
| 114 | u32 block_ts:12; /*block transfer size*/ | 119 | u32 block_ts:12; /*block transfer size*/ |
| 115 | /*configured by DMAC*/ | 120 | u32 done:1; /*Done - updated by DMAC*/ |
| 116 | u32 reser:20; | 121 | u32 reser:19; /*configured by DMAC*/ |
| 117 | } ctlx; | 122 | } ctlx; |
| 118 | u32 ctl_hi; | 123 | u32 ctl_hi; |
| 119 | 124 | ||
| @@ -152,6 +157,7 @@ union intel_mid_dma_cfg_hi { | |||
| 152 | u32 cfg_hi; | 157 | u32 cfg_hi; |
| 153 | }; | 158 | }; |
| 154 | 159 | ||
| 160 | |||
| 155 | /** | 161 | /** |
| 156 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel | 162 | * struct intel_mid_dma_chan - internal mid representation of a DMA channel |
| 157 | * @chan: dma_chan structure representation for mid chan | 163 | * @chan: dma_chan structure representation for mid chan |
| @@ -166,7 +172,10 @@ union intel_mid_dma_cfg_hi { | |||
| 166 | * @slave: dma slave structure | 172 | * @slave: dma slave structure |
| 167 | * @descs_allocated: total number of descriptors allocated | 173 | * @descs_allocated: total number of descriptors allocated |
| 168 | * @dma: dma device structure pointer | 174 | * @dma: dma device structure pointer |
| 175 | * @busy: bool representing if ch is busy (active txn) or not | ||
| 169 | * @in_use: bool representing if ch is in use or not | 176 | * @in_use: bool representing if ch is in use or not |
| 177 | * @raw_tfr: raw tfr interrupt received | ||
| 178 | * @raw_block: raw block interrupt received | ||
| 170 | */ | 179 | */ |
| 171 | struct intel_mid_dma_chan { | 180 | struct intel_mid_dma_chan { |
| 172 | struct dma_chan chan; | 181 | struct dma_chan chan; |
| @@ -178,10 +187,13 @@ struct intel_mid_dma_chan { | |||
| 178 | struct list_head active_list; | 187 | struct list_head active_list; |
| 179 | struct list_head queue; | 188 | struct list_head queue; |
| 180 | struct list_head free_list; | 189 | struct list_head free_list; |
| 181 | struct intel_mid_dma_slave *slave; | ||
| 182 | unsigned int descs_allocated; | 190 | unsigned int descs_allocated; |
| 183 | struct middma_device *dma; | 191 | struct middma_device *dma; |
| 192 | bool busy; | ||
| 184 | bool in_use; | 193 | bool in_use; |
| 194 | u32 raw_tfr; | ||
| 195 | u32 raw_block; | ||
| 196 | struct intel_mid_dma_slave *mid_slave; | ||
| 185 | }; | 197 | }; |
| 186 | 198 | ||
| 187 | static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | 199 | static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( |
| @@ -190,6 +202,10 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | |||
| 190 | return container_of(chan, struct intel_mid_dma_chan, chan); | 202 | return container_of(chan, struct intel_mid_dma_chan, chan); |
| 191 | } | 203 | } |
| 192 | 204 | ||
| 205 | enum intel_mid_dma_state { | ||
| 206 | RUNNING = 0, | ||
| 207 | SUSPENDED, | ||
| 208 | }; | ||
| 193 | /** | 209 | /** |
| 194 | * struct middma_device - internal representation of a DMA device | 210 | * struct middma_device - internal representation of a DMA device |
| 195 | * @pdev: PCI device | 211 | * @pdev: PCI device |
| @@ -205,6 +221,7 @@ static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( | |||
| 205 | * @max_chan: max number of chs supported (from drv_data) | 221 | * @max_chan: max number of chs supported (from drv_data) |
| 206 | * @block_size: Block size of DMA transfer supported (from drv_data) | 222 | * @block_size: Block size of DMA transfer supported (from drv_data) |
| 207 | * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data) | 223 | * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data) |
| 224 | * @state: dma PM device state | ||
| 208 | */ | 225 | */ |
| 209 | struct middma_device { | 226 | struct middma_device { |
| 210 | struct pci_dev *pdev; | 227 | struct pci_dev *pdev; |
| @@ -220,6 +237,7 @@ struct middma_device { | |||
| 220 | int max_chan; | 237 | int max_chan; |
| 221 | int block_size; | 238 | int block_size; |
| 222 | unsigned int pimr_mask; | 239 | unsigned int pimr_mask; |
| 240 | enum intel_mid_dma_state state; | ||
| 223 | }; | 241 | }; |
| 224 | 242 | ||
| 225 | static inline struct middma_device *to_middma_device(struct dma_device *common) | 243 | static inline struct middma_device *to_middma_device(struct dma_device *common) |
| @@ -238,14 +256,27 @@ struct intel_mid_dma_desc { | |||
| 238 | u32 cfg_lo; | 256 | u32 cfg_lo; |
| 239 | u32 ctl_lo; | 257 | u32 ctl_lo; |
| 240 | u32 ctl_hi; | 258 | u32 ctl_hi; |
| 259 | struct pci_pool *lli_pool; | ||
| 260 | struct intel_mid_dma_lli *lli; | ||
| 261 | dma_addr_t lli_phys; | ||
| 262 | unsigned int lli_length; | ||
| 263 | unsigned int current_lli; | ||
| 241 | dma_addr_t next; | 264 | dma_addr_t next; |
| 242 | enum dma_data_direction dirn; | 265 | enum dma_data_direction dirn; |
| 243 | enum dma_status status; | 266 | enum dma_status status; |
| 244 | enum intel_mid_dma_width width; /*width of DMA txn*/ | 267 | enum dma_slave_buswidth width; /*width of DMA txn*/ |
| 245 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | 268 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ |
| 246 | 269 | ||
| 247 | }; | 270 | }; |
| 248 | 271 | ||
| 272 | struct intel_mid_dma_lli { | ||
| 273 | dma_addr_t sar; | ||
| 274 | dma_addr_t dar; | ||
| 275 | dma_addr_t llp; | ||
| 276 | u32 ctl_lo; | ||
| 277 | u32 ctl_hi; | ||
| 278 | } __attribute__ ((packed)); | ||
| 279 | |||
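The new intel_mid_dma_lli layout is the list the LLP register walks: each element names a source, a destination, and the next element, and an llp of 0 ends the chain. A userspace toy of that linkage, using plain integers instead of dma_addr_t and ignoring the packed layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy chain of three items copying three buffers into one FIFO.
     * Addresses are fake; real code stores bus addresses from a pci_pool. */
    struct lli {
            uint64_t sar, dar, llp;        /* source, destination, next */
            uint32_t ctl_lo, ctl_hi;
    };

    int main(void)
    {
            struct lli chain[3] = {0};
            int i;

            for (i = 0; i < 3; i++) {
                    chain[i].sar = 0x1000 + 0x100 * i;
                    chain[i].dar = 0x8000;
                    chain[i].llp = (i < 2) ? (uint64_t)(uintptr_t)&chain[i + 1]
                                           : 0;   /* 0 terminates the chain */
            }
            for (i = 0; i < 3; i++)
                    printf("lli %d: sar=%#llx llp=%#llx\n", i,
                           (unsigned long long)chain[i].sar,
                           (unsigned long long)chain[i].llp);
            return 0;
    }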
| 249 | static inline int test_ch_en(void __iomem *dma, u32 ch_no) | 280 | static inline int test_ch_en(void __iomem *dma, u32 ch_no) |
| 250 | { | 281 | { |
| 251 | u32 en_reg = ioread32(dma + DMA_CHAN_EN); | 282 | u32 en_reg = ioread32(dma + DMA_CHAN_EN); |
| @@ -257,4 +288,14 @@ static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc | |||
| 257 | { | 288 | { |
| 258 | return container_of(txd, struct intel_mid_dma_desc, txd); | 289 | return container_of(txd, struct intel_mid_dma_desc, txd); |
| 259 | } | 290 | } |
| 291 | |||
| 292 | static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave | ||
| 293 | (struct dma_slave_config *slave) | ||
| 294 | { | ||
| 295 | return container_of(slave, struct intel_mid_dma_slave, dma_slave); | ||
| 296 | } | ||
| 297 | |||
| 298 | |||
| 299 | int dma_resume(struct pci_dev *pci); | ||
| 300 | |||
| 260 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ | 301 | #endif /*__INTEL_MID_DMAC_REGS_H__*/ |
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 17e2600a00cf..fab68a553205 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c | |||
| @@ -1,11 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * driver/dma/ste_dma40.c | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
| 3 | * | 3 | * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson |
| 4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
| 5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
| 6 | * Author: Per Friden <per.friden@stericsson.com> | ||
| 7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
| 8 | * | ||
| 9 | */ | 6 | */ |
| 10 | 7 | ||
| 11 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
| @@ -14,6 +11,7 @@ | |||
| 14 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
| 15 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
| 16 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/err.h> | ||
| 17 | 15 | ||
| 18 | #include <plat/ste_dma40.h> | 16 | #include <plat/ste_dma40.h> |
| 19 | 17 | ||
| @@ -32,6 +30,11 @@ | |||
| 32 | 30 | ||
| 33 | /* Hardware requirement on LCLA alignment */ | 31 | /* Hardware requirement on LCLA alignment */ |
| 34 | #define LCLA_ALIGNMENT 0x40000 | 32 | #define LCLA_ALIGNMENT 0x40000 |
| 33 | |||
| 34 | /* Max number of links per event group */ | ||
| 35 | #define D40_LCLA_LINK_PER_EVENT_GRP 128 | ||
| 36 | #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP | ||
| 37 | |||
| 35 | /* Attempts before giving up to trying to get pages that are aligned */ | 38 | /* Attempts before giving up to trying to get pages that are aligned */ |
| 36 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | 39 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 |
| 37 | 40 | ||
| @@ -41,7 +44,7 @@ | |||
| 41 | #define D40_ALLOC_LOG_FREE 0 | 44 | #define D40_ALLOC_LOG_FREE 0 |
| 42 | 45 | ||
| 43 | /* Hardware designer of the block */ | 46 | /* Hardware designer of the block */ |
| 44 | #define D40_PERIPHID2_DESIGNER 0x8 | 47 | #define D40_HW_DESIGNER 0x8 |
| 45 | 48 | ||
| 46 | /** | 49 | /** |
| 47 | * enum d40_command - The different commands and/or statuses. | 50 | * enum d40_command - The different commands and/or statuses. |
| @@ -84,18 +87,17 @@ struct d40_lli_pool { | |||
| 84 | * @lli_log: Same as above but for logical channels. | 87 | * @lli_log: Same as above but for logical channels. |
| 85 | * @lli_pool: The pool with two entries pre-allocated. | 88 | * @lli_pool: The pool with two entries pre-allocated. |
| 86 | * @lli_len: Number of llis of current descriptor. | 89 | * @lli_len: Number of llis of current descriptor. |
| 87 | * @lli_count: Number of transfered llis. | 90 | * @lli_current: Number of transferred llis. |
| 88 | * @lli_tx_len: Max number of LLIs per transfer, there can be | 91 | * @lcla_alloc: Number of LCLA entries allocated. |
| 89 | * many transfer for one descriptor. | ||
| 90 | * @txd: DMA engine struct. Used for among other things for communication | 92 | * @txd: DMA engine struct. Used for among other things for communication |
| 91 | * during a transfer. | 93 | * during a transfer. |
| 92 | * @node: List entry. | 94 | * @node: List entry. |
| 93 | * @dir: The transfer direction of this job. | ||
| 94 | * @is_in_client_list: true if the client owns this descriptor. | 95 | * @is_in_client_list: true if the client owns this descriptor. |
| 96 | * @is_hw_linked: true if this job is hw-linked to the previous one and | ||
| 97 | * will be continued automatically by the hardware. | ||
| 95 | * | 98 | * |
| 96 | * This descriptor is used for both logical and physical transfers. | 99 | * This descriptor is used for both logical and physical transfers. |
| 97 | */ | 100 | */ |
| 98 | |||
| 99 | struct d40_desc { | 101 | struct d40_desc { |
| 100 | /* LLI physical */ | 102 | /* LLI physical */ |
| 101 | struct d40_phy_lli_bidir lli_phy; | 103 | struct d40_phy_lli_bidir lli_phy; |
| @@ -104,14 +106,14 @@ struct d40_desc { | |||
| 104 | 106 | ||
| 105 | struct d40_lli_pool lli_pool; | 107 | struct d40_lli_pool lli_pool; |
| 106 | int lli_len; | 108 | int lli_len; |
| 107 | int lli_count; | 109 | int lli_current; |
| 108 | u32 lli_tx_len; | 110 | int lcla_alloc; |
| 109 | 111 | ||
| 110 | struct dma_async_tx_descriptor txd; | 112 | struct dma_async_tx_descriptor txd; |
| 111 | struct list_head node; | 113 | struct list_head node; |
| 112 | 114 | ||
| 113 | enum dma_data_direction dir; | ||
| 114 | bool is_in_client_list; | 115 | bool is_in_client_list; |
| 116 | bool is_hw_linked; | ||
| 115 | }; | 117 | }; |
| 116 | 118 | ||
| 117 | /** | 119 | /** |
| @@ -123,17 +125,14 @@ struct d40_desc { | |||
| 123 | * @pages: The number of pages needed for all physical channels. | 125 | * @pages: The number of pages needed for all physical channels. |
| 124 | * Only used later for clean-up on error | 126 | * Only used later for clean-up on error |
| 125 | * @lock: Lock to protect the content in this struct. | 127 | * @lock: Lock to protect the content in this struct. |
| 126 | * @alloc_map: Bitmap mapping between physical channel and LCLA entries. | 128 | * @alloc_map: map of which LCLA entry is owned by which job. |
| 127 | * @num_blocks: The number of entries of alloc_map. Equals to the | ||
| 128 | * number of physical channels. | ||
| 129 | */ | 129 | */ |
| 130 | struct d40_lcla_pool { | 130 | struct d40_lcla_pool { |
| 131 | void *base; | 131 | void *base; |
| 132 | void *base_unaligned; | 132 | void *base_unaligned; |
| 133 | int pages; | 133 | int pages; |
| 134 | spinlock_t lock; | 134 | spinlock_t lock; |
| 135 | u32 *alloc_map; | 135 | struct d40_desc **alloc_map; |
| 136 | int num_blocks; | ||
| 137 | }; | 136 | }; |
| 138 | 137 | ||
| 139 | /** | 138 | /** |
| @@ -146,9 +145,7 @@ struct d40_lcla_pool { | |||
| 146 | * this physical channel. Can also be free or physically allocated. | 145 | * this physical channel. Can also be free or physically allocated. |
| 147 | * @allocated_dst: Same as for src but is dst. | 146 | * @allocated_dst: Same as for src but is dst. |
| 148 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as | 147 | * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as |
| 149 | * event line number. Both allocated_src and allocated_dst can not be | 148 | * event line number. |
| 150 | * allocated to a physical channel, since the interrupt handler has then | ||
| 151 | * no way of figure out which one the interrupt belongs to. | ||
| 152 | */ | 149 | */ |
| 153 | struct d40_phy_res { | 150 | struct d40_phy_res { |
| 154 | spinlock_t lock; | 151 | spinlock_t lock; |
| @@ -178,6 +175,7 @@ struct d40_base; | |||
| 178 | * @active: Active descriptor. | 175 | * @active: Active descriptor. |
| 179 | * @queue: Queued jobs. | 176 | * @queue: Queued jobs. |
| 180 | * @dma_cfg: The client configuration of this dma channel. | 177 | * @dma_cfg: The client configuration of this dma channel. |
| 178 | * @configured: whether the dma_cfg configuration is valid | ||
| 181 | * @base: Pointer to the device instance struct. | 179 | * @base: Pointer to the device instance struct. |
| 182 | * @src_def_cfg: Default cfg register setting for src. | 180 | * @src_def_cfg: Default cfg register setting for src. |
| 183 | * @dst_def_cfg: Default cfg register setting for dst. | 181 | * @dst_def_cfg: Default cfg register setting for dst. |
| @@ -201,12 +199,12 @@ struct d40_chan { | |||
| 201 | struct list_head active; | 199 | struct list_head active; |
| 202 | struct list_head queue; | 200 | struct list_head queue; |
| 203 | struct stedma40_chan_cfg dma_cfg; | 201 | struct stedma40_chan_cfg dma_cfg; |
| 202 | bool configured; | ||
| 204 | struct d40_base *base; | 203 | struct d40_base *base; |
| 205 | /* Default register configurations */ | 204 | /* Default register configurations */ |
| 206 | u32 src_def_cfg; | 205 | u32 src_def_cfg; |
| 207 | u32 dst_def_cfg; | 206 | u32 dst_def_cfg; |
| 208 | struct d40_def_lcsp log_def; | 207 | struct d40_def_lcsp log_def; |
| 209 | struct d40_lcla_elem lcla; | ||
| 210 | struct d40_log_lli_full *lcpa; | 208 | struct d40_log_lli_full *lcpa; |
| 211 | /* Runtime reconfiguration */ | 209 | /* Runtime reconfiguration */ |
| 212 | dma_addr_t runtime_addr; | 210 | dma_addr_t runtime_addr; |
| @@ -234,7 +232,6 @@ struct d40_chan { | |||
| 234 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 232 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
| 235 | * @dma_slave: dma_device channels that can only do slave transfers. | 233 | * @dma_slave: dma_device channels that can only do slave transfers. |
| 236 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. | 234 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. |
| 237 | * @phy_chans: Room for all possible physical channels in system. | ||
| 238 | * @log_chans: Room for all possible logical channels in system. | 235 | * @log_chans: Room for all possible logical channels in system. |
| 239 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 236 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
| 240 | * to log_chans entries. | 237 | * to log_chans entries. |
| @@ -340,9 +337,6 @@ static int d40_pool_lli_alloc(struct d40_desc *d40d, | |||
| 340 | align); | 337 | align); |
| 341 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, | 338 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, |
| 342 | align); | 339 | align); |
| 343 | |||
| 344 | d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); | ||
| 345 | d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); | ||
| 346 | } | 340 | } |
| 347 | 341 | ||
| 348 | return 0; | 342 | return 0; |
| @@ -357,22 +351,67 @@ static void d40_pool_lli_free(struct d40_desc *d40d) | |||
| 357 | d40d->lli_log.dst = NULL; | 351 | d40d->lli_log.dst = NULL; |
| 358 | d40d->lli_phy.src = NULL; | 352 | d40d->lli_phy.src = NULL; |
| 359 | d40d->lli_phy.dst = NULL; | 353 | d40d->lli_phy.dst = NULL; |
| 360 | d40d->lli_phy.src_addr = 0; | ||
| 361 | d40d->lli_phy.dst_addr = 0; | ||
| 362 | } | 354 | } |
| 363 | 355 | ||
| 364 | static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, | 356 | static int d40_lcla_alloc_one(struct d40_chan *d40c, |
| 365 | struct d40_desc *desc) | 357 | struct d40_desc *d40d) |
| 366 | { | 358 | { |
| 367 | dma_cookie_t cookie = d40c->chan.cookie; | 359 | unsigned long flags; |
| 360 | int i; | ||
| 361 | int ret = -EINVAL; | ||
| 362 | int p; | ||
| 368 | 363 | ||
| 369 | if (++cookie < 0) | 364 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
| 370 | cookie = 1; | 365 | |
| 366 | p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP; | ||
| 371 | 367 | ||
| 372 | d40c->chan.cookie = cookie; | 368 | /* |
| 373 | desc->txd.cookie = cookie; | 369 | * Allocate both src and dst at the same time; therefore each half |
| 370 | * starts at 1, since 0 is reserved as the end-of-chain marker. |
| 371 | */ | ||
| 372 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | ||
| 373 | if (!d40c->base->lcla_pool.alloc_map[p + i]) { | ||
| 374 | d40c->base->lcla_pool.alloc_map[p + i] = d40d; | ||
| 375 | d40d->lcla_alloc++; | ||
| 376 | ret = i; | ||
| 377 | break; | ||
| 378 | } | ||
| 379 | } | ||
| 380 | |||
| 381 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
| 382 | |||
| 383 | return ret; | ||
| 384 | } | ||
| 385 | |||
| 386 | static int d40_lcla_free_all(struct d40_chan *d40c, | ||
| 387 | struct d40_desc *d40d) | ||
| 388 | { | ||
| 389 | unsigned long flags; | ||
| 390 | int i; | ||
| 391 | int ret = -EINVAL; | ||
| 392 | |||
| 393 | if (d40c->log_num == D40_PHY_CHAN) | ||
| 394 | return 0; | ||
| 395 | |||
| 396 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | ||
| 397 | |||
| 398 | for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { | ||
| 399 | if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | ||
| 400 | D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { | ||
| 401 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * | ||
| 402 | D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; | ||
| 403 | d40d->lcla_alloc--; | ||
| 404 | if (d40d->lcla_alloc == 0) { | ||
| 405 | ret = 0; | ||
| 406 | break; | ||
| 407 | } | ||
| 408 | } | ||
| 409 | } | ||
| 410 | |||
| 411 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
| 412 | |||
| 413 | return ret; | ||
| 374 | 414 | ||
| 375 | return cookie; | ||
| 376 | } | 415 | } |
| 377 | 416 | ||
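d40_lcla_alloc_one() and d40_lcla_free_all() manage the per-physical-channel LCLA link slots. A compilable miniature of the same search, assuming four physical channels for the demo; slot 0 is skipped because the hardware treats 0 as the end-of-chain marker:

    #include <stdio.h>

    #define LINKS_PER_GRP 128              /* D40_LCLA_LINK_PER_EVENT_GRP */
    #define NUM_PHY_CHANS 4                /* assumption for the demo */

    static void *alloc_map[NUM_PHY_CHANS * LINKS_PER_GRP];

    /* Search this channel's half of the group; slot 0 is never handed
     * out because 0 doubles as the end-of-chain marker. */
    static int lcla_alloc_one(int phy_num, void *job)
    {
            int p = phy_num * LINKS_PER_GRP;
            int i;

            for (i = 1; i < LINKS_PER_GRP / 2; i++) {
                    if (!alloc_map[p + i]) {
                            alloc_map[p + i] = job;
                            return i;
                    }
            }
            return -1;                     /* exhausted; caller must cope */
    }

    int main(void)
    {
            int job1, job2;

            printf("slot for job1: %d\n", lcla_alloc_one(0, &job1)); /* 1 */
            printf("slot for job2: %d\n", lcla_alloc_one(0, &job2)); /* 2 */
            return 0;
    }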
| 378 | static void d40_desc_remove(struct d40_desc *d40d) | 417 | static void d40_desc_remove(struct d40_desc *d40d) |
| @@ -382,28 +421,35 @@ static void d40_desc_remove(struct d40_desc *d40d) | |||
| 382 | 421 | ||
| 383 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | 422 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) |
| 384 | { | 423 | { |
| 385 | struct d40_desc *d; | 424 | struct d40_desc *desc = NULL; |
| 386 | struct d40_desc *_d; | ||
| 387 | 425 | ||
| 388 | if (!list_empty(&d40c->client)) { | 426 | if (!list_empty(&d40c->client)) { |
| 427 | struct d40_desc *d; | ||
| 428 | struct d40_desc *_d; | ||
| 429 | |||
| 389 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 430 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
| 390 | if (async_tx_test_ack(&d->txd)) { | 431 | if (async_tx_test_ack(&d->txd)) { |
| 391 | d40_pool_lli_free(d); | 432 | d40_pool_lli_free(d); |
| 392 | d40_desc_remove(d); | 433 | d40_desc_remove(d); |
| 434 | desc = d; | ||
| 435 | memset(desc, 0, sizeof(*desc)); | ||
| 393 | break; | 436 | break; |
| 394 | } | 437 | } |
| 395 | } else { | ||
| 396 | d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); | ||
| 397 | if (d != NULL) { | ||
| 398 | memset(d, 0, sizeof(struct d40_desc)); | ||
| 399 | INIT_LIST_HEAD(&d->node); | ||
| 400 | } | ||
| 401 | } | 438 | } |
| 402 | return d; | 439 | |
| 440 | if (!desc) | ||
| 441 | desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); | ||
| 442 | |||
| 443 | if (desc) | ||
| 444 | INIT_LIST_HEAD(&desc->node); | ||
| 445 | |||
| 446 | return desc; | ||
| 403 | } | 447 | } |
| 404 | 448 | ||
| 405 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 449 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
| 406 | { | 450 | { |
| 451 | |||
| 452 | d40_lcla_free_all(d40c, d40d); | ||
| 407 | kmem_cache_free(d40c->base->desc_slab, d40d); | 453 | kmem_cache_free(d40c->base->desc_slab, d40d); |
| 408 | } | 454 | } |
| 409 | 455 | ||
| @@ -412,6 +458,59 @@ static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | |||
| 412 | list_add_tail(&desc->node, &d40c->active); | 458 | list_add_tail(&desc->node, &d40c->active); |
| 413 | } | 459 | } |
| 414 | 460 | ||
| 461 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | ||
| 462 | { | ||
| 463 | int curr_lcla = -EINVAL, next_lcla; | ||
| 464 | |||
| 465 | if (d40c->log_num == D40_PHY_CHAN) { | ||
| 466 | d40_phy_lli_write(d40c->base->virtbase, | ||
| 467 | d40c->phy_chan->num, | ||
| 468 | d40d->lli_phy.dst, | ||
| 469 | d40d->lli_phy.src); | ||
| 470 | d40d->lli_current = d40d->lli_len; | ||
| 471 | } else { | ||
| 472 | |||
| 473 | if ((d40d->lli_len - d40d->lli_current) > 1) | ||
| 474 | curr_lcla = d40_lcla_alloc_one(d40c, d40d); | ||
| 475 | |||
| 476 | d40_log_lli_lcpa_write(d40c->lcpa, | ||
| 477 | &d40d->lli_log.dst[d40d->lli_current], | ||
| 478 | &d40d->lli_log.src[d40d->lli_current], | ||
| 479 | curr_lcla); | ||
| 480 | |||
| 481 | d40d->lli_current++; | ||
| 482 | for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { | ||
| 483 | struct d40_log_lli *lcla; | ||
| 484 | |||
| 485 | if (d40d->lli_current + 1 < d40d->lli_len) | ||
| 486 | next_lcla = d40_lcla_alloc_one(d40c, d40d); | ||
| 487 | else | ||
| 488 | next_lcla = -EINVAL; | ||
| 489 | |||
| 490 | lcla = d40c->base->lcla_pool.base + | ||
| 491 | d40c->phy_chan->num * 1024 + | ||
| 492 | 8 * curr_lcla * 2; | ||
| 493 | |||
| 494 | d40_log_lli_lcla_write(lcla, | ||
| 495 | &d40d->lli_log.dst[d40d->lli_current], | ||
| 496 | &d40d->lli_log.src[d40d->lli_current], | ||
| 497 | next_lcla); | ||
| 498 | |||
| 499 | (void) dma_map_single(d40c->base->dev, lcla, | ||
| 500 | 2 * sizeof(struct d40_log_lli), | ||
| 501 | DMA_TO_DEVICE); | ||
| 502 | |||
| 503 | curr_lcla = next_lcla; | ||
| 504 | |||
| 505 | if (curr_lcla == -EINVAL) { | ||
| 506 | d40d->lli_current++; | ||
| 507 | break; | ||
| 508 | } | ||
| 509 | |||
| 510 | } | ||
| 511 | } | ||
| 512 | } | ||
| 513 | |||
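The lcla pointer computation inside d40_desc_load() assumes a 1 KiB region per physical channel, with each link slot holding a src/dst pair of 8-byte log entries, hence the 8 * slot * 2 term. The offset arithmetic in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned phy_num = 3, slot = 5;
            unsigned off = phy_num * 1024 + 8 * slot * 2;

            printf("lcla offset: %u bytes\n", off);   /* 3072 + 80 = 3152 */
            return 0;
    }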
| 415 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | 514 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) |
| 416 | { | 515 | { |
| 417 | struct d40_desc *d; | 516 | struct d40_desc *d; |
| @@ -443,68 +542,26 @@ static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |||
| 443 | return d; | 542 | return d; |
| 444 | } | 543 | } |
| 445 | 544 | ||
| 446 | /* Support functions for logical channels */ | 545 | static struct d40_desc *d40_last_queued(struct d40_chan *d40c) |
| 447 | |||
| 448 | static int d40_lcla_id_get(struct d40_chan *d40c) | ||
| 449 | { | 546 | { |
| 450 | int src_id = 0; | 547 | struct d40_desc *d; |
| 451 | int dst_id = 0; | ||
| 452 | struct d40_log_lli *lcla_lidx_base = | ||
| 453 | d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; | ||
| 454 | int i; | ||
| 455 | int lli_per_log = d40c->base->plat_data->llis_per_log; | ||
| 456 | unsigned long flags; | ||
| 457 | |||
| 458 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) | ||
| 459 | return 0; | ||
| 460 | |||
| 461 | if (d40c->base->lcla_pool.num_blocks > 32) | ||
| 462 | return -EINVAL; | ||
| 463 | |||
| 464 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | ||
| 465 | |||
| 466 | for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { | ||
| 467 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & | ||
| 468 | (0x1 << i))) { | ||
| 469 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | ||
| 470 | (0x1 << i); | ||
| 471 | break; | ||
| 472 | } | ||
| 473 | } | ||
| 474 | src_id = i; | ||
| 475 | if (src_id >= d40c->base->lcla_pool.num_blocks) | ||
| 476 | goto err; | ||
| 477 | 548 | ||
| 478 | for (; i < d40c->base->lcla_pool.num_blocks; i++) { | 549 | if (list_empty(&d40c->queue)) |
| 479 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & | 550 | return NULL; |
| 480 | (0x1 << i))) { | 551 | list_for_each_entry(d, &d40c->queue, node) |
| 481 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | 552 | if (list_is_last(&d->node, &d40c->queue)) |
| 482 | (0x1 << i); | ||
| 483 | break; | 553 | break; |
| 484 | } | 554 | return d; |
| 485 | } | ||
| 486 | |||
| 487 | dst_id = i; | ||
| 488 | if (dst_id == src_id) | ||
| 489 | goto err; | ||
| 490 | |||
| 491 | d40c->lcla.src_id = src_id; | ||
| 492 | d40c->lcla.dst_id = dst_id; | ||
| 493 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; | ||
| 494 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; | ||
| 495 | |||
| 496 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
| 497 | return 0; | ||
| 498 | err: | ||
| 499 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
| 500 | return -EINVAL; | ||
| 501 | } | 555 | } |
| 502 | 556 | ||
| 557 | /* Support functions for logical channels */ | ||
| 558 | |||
| 503 | 559 | ||
| 504 | static int d40_channel_execute_command(struct d40_chan *d40c, | 560 | static int d40_channel_execute_command(struct d40_chan *d40c, |
| 505 | enum d40_command command) | 561 | enum d40_command command) |
| 506 | { | 562 | { |
| 507 | int status, i; | 563 | u32 status; |
| 564 | int i; | ||
| 508 | void __iomem *active_reg; | 565 | void __iomem *active_reg; |
| 509 | int ret = 0; | 566 | int ret = 0; |
| 510 | unsigned long flags; | 567 | unsigned long flags; |
| @@ -567,35 +624,19 @@ done: | |||
| 567 | static void d40_term_all(struct d40_chan *d40c) | 624 | static void d40_term_all(struct d40_chan *d40c) |
| 568 | { | 625 | { |
| 569 | struct d40_desc *d40d; | 626 | struct d40_desc *d40d; |
| 570 | unsigned long flags; | ||
| 571 | 627 | ||
| 572 | /* Release active descriptors */ | 628 | /* Release active descriptors */ |
| 573 | while ((d40d = d40_first_active_get(d40c))) { | 629 | while ((d40d = d40_first_active_get(d40c))) { |
| 574 | d40_desc_remove(d40d); | 630 | d40_desc_remove(d40d); |
| 575 | |||
| 576 | /* Return desc to free-list */ | ||
| 577 | d40_desc_free(d40c, d40d); | 631 | d40_desc_free(d40c, d40d); |
| 578 | } | 632 | } |
| 579 | 633 | ||
| 580 | /* Release queued descriptors waiting for transfer */ | 634 | /* Release queued descriptors waiting for transfer */ |
| 581 | while ((d40d = d40_first_queued(d40c))) { | 635 | while ((d40d = d40_first_queued(d40c))) { |
| 582 | d40_desc_remove(d40d); | 636 | d40_desc_remove(d40d); |
| 583 | |||
| 584 | /* Return desc to free-list */ | ||
| 585 | d40_desc_free(d40c, d40d); | 637 | d40_desc_free(d40c, d40d); |
| 586 | } | 638 | } |
| 587 | 639 | ||
| 588 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); | ||
| 589 | |||
| 590 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | ||
| 591 | (~(0x1 << d40c->lcla.dst_id)); | ||
| 592 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | ||
| 593 | (~(0x1 << d40c->lcla.src_id)); | ||
| 594 | |||
| 595 | d40c->lcla.src_id = -1; | ||
| 596 | d40c->lcla.dst_id = -1; | ||
| 597 | |||
| 598 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | ||
| 599 | 640 | ||
| 600 | d40c->pending_tx = 0; | 641 | d40c->pending_tx = 0; |
| 601 | d40c->busy = false; | 642 | d40c->busy = false; |
| @@ -640,45 +681,47 @@ static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |||
| 640 | 681 | ||
| 641 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 682 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
| 642 | { | 683 | { |
| 643 | u32 val = 0; | 684 | u32 val; |
| 644 | 685 | ||
| 645 | /* If SSLNK or SDLNK is zero all events are disabled */ | 686 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + |
| 646 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 687 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
| 647 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | 688 | D40_CHAN_REG_SSLNK); |
| 648 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 689 | |
| 649 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 690 | val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + |
| 650 | D40_CHAN_REG_SSLNK); | 691 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
| 651 | 692 | D40_CHAN_REG_SDLNK); | |
| 652 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) | ||
| 653 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 654 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 655 | D40_CHAN_REG_SDLNK); | ||
| 656 | return val; | 693 | return val; |
| 657 | } | 694 | } |
| 658 | 695 | ||
| 659 | static void d40_config_enable_lidx(struct d40_chan *d40c) | 696 | static u32 d40_get_prmo(struct d40_chan *d40c) |
| 660 | { | 697 | { |
| 661 | /* Set LIDX for lcla */ | 698 | static const unsigned int phy_map[] = { |
| 662 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 699 | [STEDMA40_PCHAN_BASIC_MODE] |
| 663 | D40_SREG_ELEM_LOG_LIDX_MASK, | 700 | = D40_DREG_PRMO_PCHAN_BASIC, |
| 664 | d40c->base->virtbase + D40_DREG_PCBASE + | 701 | [STEDMA40_PCHAN_MODULO_MODE] |
| 665 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | 702 | = D40_DREG_PRMO_PCHAN_MODULO, |
| 666 | 703 | [STEDMA40_PCHAN_DOUBLE_DST_MODE] | |
| 667 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 704 | = D40_DREG_PRMO_PCHAN_DOUBLE_DST, |
| 668 | D40_SREG_ELEM_LOG_LIDX_MASK, | 705 | }; |
| 669 | d40c->base->virtbase + D40_DREG_PCBASE + | 706 | static const unsigned int log_map[] = { |
| 670 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | 707 | [STEDMA40_LCHAN_SRC_PHY_DST_LOG] |
| 708 | = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, | ||
| 709 | [STEDMA40_LCHAN_SRC_LOG_DST_PHY] | ||
| 710 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, | ||
| 711 | [STEDMA40_LCHAN_SRC_LOG_DST_LOG] | ||
| 712 | = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, | ||
| 713 | }; | ||
| 714 | |||
| 715 | if (d40c->log_num == D40_PHY_CHAN) | ||
| 716 | return phy_map[d40c->dma_cfg.mode_opt]; | ||
| 717 | else | ||
| 718 | return log_map[d40c->dma_cfg.mode_opt]; | ||
| 671 | } | 719 | } |
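d40_get_prmo() replaces the old bit-shuffling on channel_type with designated-initializer lookup tables indexed by the mode enum. The idiom standalone, with invented enum and register encodings:

    #include <stdio.h>

    /* Invented enum and register encodings, standing in for the
     * STEDMA40_* mode options and D40_DREG_PRMO_* values. */
    enum mode_opt { BASIC, MODULO, DOUBLE_DST };

    static const unsigned int phy_map[] = {
            [BASIC]      = 0x0,
            [MODULO]     = 0x1,
            [DOUBLE_DST] = 0x2,
    };

    int main(void)
    {
            enum mode_opt m = MODULO;

            printf("prmo encoding: %#x\n", phy_map[m]);   /* 0x1 */
            return 0;
    }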
| 672 | 720 | ||
| 673 | static int d40_config_write(struct d40_chan *d40c) | 721 | static void d40_config_write(struct d40_chan *d40c) |
| 674 | { | 722 | { |
| 675 | u32 addr_base; | 723 | u32 addr_base; |
| 676 | u32 var; | 724 | u32 var; |
| 677 | int res; | ||
| 678 | |||
| 679 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
| 680 | if (res) | ||
| 681 | return res; | ||
| 682 | 725 | ||
| 683 | /* Odd addresses are even addresses + 4 */ | 726 | /* Odd addresses are even addresses + 4 */ |
| 684 | addr_base = (d40c->phy_chan->num % 2) * 4; | 727 | addr_base = (d40c->phy_chan->num % 2) * 4; |
| @@ -688,8 +731,7 @@ static int d40_config_write(struct d40_chan *d40c) | |||
| 688 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | 731 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); |
| 689 | 732 | ||
| 690 | /* Setup operational mode option register */ | 733 | /* Setup operational mode option register */ |
| 691 | var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & | 734 | var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); |
| 692 | 0x3) << D40_CHAN_POS(d40c->phy_chan->num); | ||
| 693 | 735 | ||
| 694 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | 736 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); |
| 695 | 737 | ||
| @@ -704,41 +746,181 @@ static int d40_config_write(struct d40_chan *d40c) | |||
| 704 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 746 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
| 705 | D40_CHAN_REG_SDCFG); | 747 | D40_CHAN_REG_SDCFG); |
| 706 | 748 | ||
| 707 | d40_config_enable_lidx(d40c); | 749 | /* Set LIDX for lcla */ |
| 750 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | ||
| 751 | D40_SREG_ELEM_LOG_LIDX_MASK, | ||
| 752 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 753 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 754 | D40_CHAN_REG_SDELT); | ||
| 755 | |||
| 756 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | ||
| 757 | D40_SREG_ELEM_LOG_LIDX_MASK, | ||
| 758 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 759 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 760 | D40_CHAN_REG_SSELT); | ||
| 761 | |||
| 762 | } | ||
| 763 | } | ||
| 764 | |||
| 765 | static u32 d40_residue(struct d40_chan *d40c) | ||
| 766 | { | ||
| 767 | u32 num_elt; | ||
| 768 | |||
| 769 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 770 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | ||
| 771 | >> D40_MEM_LCSP2_ECNT_POS; | ||
| 772 | else | ||
| 773 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 774 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 775 | D40_CHAN_REG_SDELT) & | ||
| 776 | D40_SREG_ELEM_PHY_ECNT_MASK) >> | ||
| 777 | D40_SREG_ELEM_PHY_ECNT_POS; | ||
| 778 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | ||
| 779 | } | ||
| 780 | |||
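d40_residue() converts the hardware's remaining element count into bytes; data_width here is a log2 encoding (0 byte, 1 halfword, 2 word), so the multiply is effectively a shift. The arithmetic by itself:

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_elt = 12;     /* elements the hw says remain */
            unsigned int data_width = 2;   /* log2 bytes: 2 == 32-bit words */

            printf("residue: %u bytes\n", num_elt * (1u << data_width)); /* 48 */
            return 0;
    }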
| 781 | static bool d40_tx_is_linked(struct d40_chan *d40c) | ||
| 782 | { | ||
| 783 | bool is_link; | ||
| 784 | |||
| 785 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 786 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | ||
| 787 | else | ||
| 788 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 789 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 790 | D40_CHAN_REG_SDLNK) & | ||
| 791 | D40_SREG_LNK_PHYS_LNK_MASK; | ||
| 792 | return is_link; | ||
| 793 | } | ||
| 794 | |||
| 795 | static int d40_pause(struct dma_chan *chan) | ||
| 796 | { | ||
| 797 | struct d40_chan *d40c = | ||
| 798 | container_of(chan, struct d40_chan, chan); | ||
| 799 | int res = 0; | ||
| 800 | unsigned long flags; | ||
| 801 | |||
| 802 | if (!d40c->busy) | ||
| 803 | return 0; | ||
| 804 | |||
| 805 | spin_lock_irqsave(&d40c->lock, flags); | ||
| 806 | |||
| 807 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
| 808 | if (res == 0) { | ||
| 809 | if (d40c->log_num != D40_PHY_CHAN) { | ||
| 810 | d40_config_set_event(d40c, false); | ||
| 811 | /* Resume the other logical channels if any */ | ||
| 812 | if (d40_chan_has_events(d40c)) | ||
| 813 | res = d40_channel_execute_command(d40c, | ||
| 814 | D40_DMA_RUN); | ||
| 815 | } | ||
| 708 | } | 816 | } |
| 817 | |||
| 818 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 709 | return res; | 819 | return res; |
| 710 | } | 820 | } |
| 711 | 821 | ||
| 712 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 822 | static int d40_resume(struct dma_chan *chan) |
| 713 | { | 823 | { |
| 714 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { | 824 | struct d40_chan *d40c = |
| 715 | d40_phy_lli_write(d40c->base->virtbase, | 825 | container_of(chan, struct d40_chan, chan); |
| 716 | d40c->phy_chan->num, | 826 | int res = 0; |
| 717 | d40d->lli_phy.dst, | 827 | unsigned long flags; |
| 718 | d40d->lli_phy.src); | 828 | |
| 719 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { | 829 | if (!d40c->busy) |
| 720 | struct d40_log_lli *src = d40d->lli_log.src; | 830 | return 0; |
| 721 | struct d40_log_lli *dst = d40d->lli_log.dst; | 831 | |
| 722 | int s; | 832 | spin_lock_irqsave(&d40c->lock, flags); |
| 723 | 833 | ||
| 724 | src += d40d->lli_count; | 834 | if (d40c->base->rev == 0) |
| 725 | dst += d40d->lli_count; | 835 | if (d40c->log_num != D40_PHY_CHAN) { |
| 726 | s = d40_log_lli_write(d40c->lcpa, | 836 | res = d40_channel_execute_command(d40c, |
| 727 | d40c->lcla.src, d40c->lcla.dst, | 837 | D40_DMA_SUSPEND_REQ); |
| 728 | dst, src, | 838 | goto no_suspend; |
| 729 | d40c->base->plat_data->llis_per_log); | ||
| 730 | |||
| 731 | /* If s equals to zero, the job is not linked */ | ||
| 732 | if (s > 0) { | ||
| 733 | (void) dma_map_single(d40c->base->dev, d40c->lcla.src, | ||
| 734 | s * sizeof(struct d40_log_lli), | ||
| 735 | DMA_TO_DEVICE); | ||
| 736 | (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, | ||
| 737 | s * sizeof(struct d40_log_lli), | ||
| 738 | DMA_TO_DEVICE); | ||
| 739 | } | 839 | } |
| 840 | |||
| 841 | /* If bytes left to transfer or linked tx resume job */ | ||
| 842 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | ||
| 843 | |||
| 844 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 845 | d40_config_set_event(d40c, true); | ||
| 846 | |||
| 847 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | ||
| 848 | } | ||
| 849 | |||
| 850 | no_suspend: | ||
| 851 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 852 | return res; | ||
| 853 | } | ||
| 854 | |||
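The pause path has a subtlety: several logical channels share one physical channel, so pausing one means suspending the physical channel, detaching only this channel's event line, and restarting if other event lines remain. In outline, reusing the file's own helpers (a sketch, not a drop-in):

    static int example_pause_logical(struct d40_chan *d40c)
    {
            int res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

            if (res == 0) {
                    d40_config_set_event(d40c, false);  /* drop our event line */
                    if (d40_chan_has_events(d40c))      /* others still running? */
                            res = d40_channel_execute_command(d40c, D40_DMA_RUN);
            }
            return res;
    }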
| 855 | static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) | ||
| 856 | { | ||
| 857 | /* TODO: Write */ | ||
| 858 | } | ||
| 859 | |||
| 860 | static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) | ||
| 861 | { | ||
| 862 | struct d40_desc *d40d_prev = NULL; | ||
| 863 | int i; | ||
| 864 | u32 val; | ||
| 865 | |||
| 866 | if (!list_empty(&d40c->queue)) | ||
| 867 | d40d_prev = d40_last_queued(d40c); | ||
| 868 | else if (!list_empty(&d40c->active)) | ||
| 869 | d40d_prev = d40_first_active_get(d40c); | ||
| 870 | |||
| 871 | if (!d40d_prev) | ||
| 872 | return; | ||
| 873 | |||
| 874 | /* Here we try to join this job with previous jobs */ | ||
| 875 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 876 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 877 | D40_CHAN_REG_SSLNK); | ||
| 878 | |||
| 879 | /* Figure out which link we're currently transmitting */ | ||
| 880 | for (i = 0; i < d40d_prev->lli_len; i++) | ||
| 881 | if (val == d40d_prev->lli_phy.src[i].reg_lnk) | ||
| 882 | break; | ||
| 883 | |||
| 884 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 885 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 886 | D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; | ||
| 887 | |||
| 888 | if (i == (d40d_prev->lli_len - 1) && val > 0) { | ||
| 889 | /* Change the current one */ | ||
| 890 | writel(virt_to_phys(d40d->lli_phy.src), | ||
| 891 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 892 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 893 | D40_CHAN_REG_SSLNK); | ||
| 894 | writel(virt_to_phys(d40d->lli_phy.dst), | ||
| 895 | d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 896 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 897 | D40_CHAN_REG_SDLNK); | ||
| 898 | |||
| 899 | d40d->is_hw_linked = true; | ||
| 900 | |||
| 901 | } else if (i < d40d_prev->lli_len) { | ||
| 902 | (void) dma_unmap_single(d40c->base->dev, | ||
| 903 | virt_to_phys(d40d_prev->lli_phy.src), | ||
| 904 | d40d_prev->lli_pool.size, | ||
| 905 | DMA_TO_DEVICE); | ||
| 906 | |||
| 907 | /* Keep the settings */ | ||
| 908 | val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & | ||
| 909 | ~D40_SREG_LNK_PHYS_LNK_MASK; | ||
| 910 | d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = | ||
| 911 | val | virt_to_phys(d40d->lli_phy.src); | ||
| 912 | |||
| 913 | val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & | ||
| 914 | ~D40_SREG_LNK_PHYS_LNK_MASK; | ||
| 915 | d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = | ||
| 916 | val | virt_to_phys(d40d->lli_phy.dst); | ||
| 917 | |||
| 918 | (void) dma_map_single(d40c->base->dev, | ||
| 919 | d40d_prev->lli_phy.src, | ||
| 920 | d40d_prev->lli_pool.size, | ||
| 921 | DMA_TO_DEVICE); | ||
| 922 | d40d->is_hw_linked = true; | ||
| 740 | } | 923 | } |
| 741 | d40d->lli_count += d40d->lli_tx_len; | ||
| 742 | } | 924 | } |
| 743 | 925 | ||
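d40_tx_submit_phy() splices a new job onto a still-running chain by rewriting the link field of the previous job's last element while preserving the flag bits outside the address mask. The masking step modelled with an invented mask value:

    #include <stdint.h>
    #include <stdio.h>

    #define LNK_MASK 0xfffffffcu           /* invented address mask */

    int main(void)
    {
            uint32_t tail_reg_lnk = 0x00000003;  /* terminator + flag bits */
            uint32_t next_job_phys = 0x2000;     /* fake bus address */

            /* Keep the flag bits outside the mask, swap in the address. */
            tail_reg_lnk = (tail_reg_lnk & ~LNK_MASK) | next_job_phys;
            printf("patched reg_lnk: %#x\n", tail_reg_lnk);  /* 0x2003 */
            return 0;
    }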
| 744 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 926 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
| @@ -749,14 +931,28 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 749 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 931 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
| 750 | unsigned long flags; | 932 | unsigned long flags; |
| 751 | 933 | ||
| 934 | (void) d40_pause(&d40c->chan); | ||
| 935 | |||
| 752 | spin_lock_irqsave(&d40c->lock, flags); | 936 | spin_lock_irqsave(&d40c->lock, flags); |
| 753 | 937 | ||
| 754 | tx->cookie = d40_assign_cookie(d40c, d40d); | 938 | d40c->chan.cookie++; |
| 939 | |||
| 940 | if (d40c->chan.cookie < 0) | ||
| 941 | d40c->chan.cookie = 1; | ||
| 942 | |||
| 943 | d40d->txd.cookie = d40c->chan.cookie; | ||
| 944 | |||
| 945 | if (d40c->log_num == D40_PHY_CHAN) | ||
| 946 | d40_tx_submit_phy(d40c, d40d); | ||
| 947 | else | ||
| 948 | d40_tx_submit_log(d40c, d40d); | ||
| 755 | 949 | ||
| 756 | d40_desc_queue(d40c, d40d); | 950 | d40_desc_queue(d40c, d40d); |
| 757 | 951 | ||
| 758 | spin_unlock_irqrestore(&d40c->lock, flags); | 952 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 759 | 953 | ||
| 954 | (void) d40_resume(&d40c->chan); | ||
| 955 | |||
| 760 | return tx->cookie; | 956 | return tx->cookie; |
| 761 | } | 957 | } |
| 762 | 958 | ||
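d40_tx_submit() now pauses the channel, assigns the cookie open-coded, optionally hardware-links the job, and resumes. The cookie logic matters because dma_cookie_t is a signed integer: zero and negative values are reserved for error and status signalling in the dmaengine API, so the counter skips back to 1 on wrap. The same logic in isolation, as a sketch:

/* Sketch: monotonic cookie assignment with signed-wrap protection */
static dma_cookie_t assign_cookie(struct dma_chan *chan,
				  struct dma_async_tx_descriptor *txd)
{
	chan->cookie++;
	if (chan->cookie < 0)	/* wrapped: 0 and negatives are reserved */
		chan->cookie = 1;
	txd->cookie = chan->cookie;
	return txd->cookie;
}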
| @@ -796,14 +992,21 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |||
| 796 | /* Add to active queue */ | 992 | /* Add to active queue */ |
| 797 | d40_desc_submit(d40c, d40d); | 993 | d40_desc_submit(d40c, d40d); |
| 798 | 994 | ||
| 799 | /* Initiate DMA job */ | 995 | /* |
| 800 | d40_desc_load(d40c, d40d); | 996 | * If this job is already linked in hw, |
| 997 | * do not submit it. | ||
| 998 | */ | ||
| 801 | 999 | ||
| 802 | /* Start dma job */ | 1000 | if (!d40d->is_hw_linked) { |
| 803 | err = d40_start(d40c); | 1001 | /* Initiate DMA job */ |
| 1002 | d40_desc_load(d40c, d40d); | ||
| 804 | 1003 | ||
| 805 | if (err) | 1004 | /* Start dma job */ |
| 806 | return NULL; | 1005 | err = d40_start(d40c); |
| 1006 | |||
| 1007 | if (err) | ||
| 1008 | return NULL; | ||
| 1009 | } | ||
| 807 | } | 1010 | } |
| 808 | 1011 | ||
| 809 | return d40d; | 1012 | return d40d; |
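The is_hw_linked flag set at submit time is consumed here: a descriptor that was spliced into the running chain must not be loaded and started again, or the channel would be restarted mid-transfer. Condensed, with hypothetical helper names standing in for the register pokes shown earlier, the hand-off is:

/* Submit side (channel paused, lock held): splice and mark */
if (prev_job_still_running(d40c)) {
	patch_prev_link_words(d40d_prev, d40d);	/* see d40_tx_submit_phy() */
	d40d->is_hw_linked = true;
}

/* Start side (d40_queue_start): only kick jobs hw will not fetch itself */
if (!d40d->is_hw_linked) {
	d40_desc_load(d40c, d40d);
	if (d40_start(d40c))
		return NULL;
}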
| @@ -814,17 +1017,15 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
| 814 | { | 1017 | { |
| 815 | struct d40_desc *d40d; | 1018 | struct d40_desc *d40d; |
| 816 | 1019 | ||
| 817 | if (!d40c->phy_chan) | ||
| 818 | return; | ||
| 819 | |||
| 820 | /* Get first active entry from list */ | 1020 | /* Get first active entry from list */ |
| 821 | d40d = d40_first_active_get(d40c); | 1021 | d40d = d40_first_active_get(d40c); |
| 822 | 1022 | ||
| 823 | if (d40d == NULL) | 1023 | if (d40d == NULL) |
| 824 | return; | 1024 | return; |
| 825 | 1025 | ||
| 826 | if (d40d->lli_count < d40d->lli_len) { | 1026 | d40_lcla_free_all(d40c, d40d); |
| 827 | 1027 | ||
| 1028 | if (d40d->lli_current < d40d->lli_len) { | ||
| 828 | d40_desc_load(d40c, d40d); | 1029 | d40_desc_load(d40c, d40d); |
| 829 | /* Start dma job */ | 1030 | /* Start dma job */ |
| 830 | (void) d40_start(d40c); | 1031 | (void) d40_start(d40c); |
| @@ -842,7 +1043,7 @@ static void dma_tc_handle(struct d40_chan *d40c) | |||
| 842 | static void dma_tasklet(unsigned long data) | 1043 | static void dma_tasklet(unsigned long data) |
| 843 | { | 1044 | { |
| 844 | struct d40_chan *d40c = (struct d40_chan *) data; | 1045 | struct d40_chan *d40c = (struct d40_chan *) data; |
| 845 | struct d40_desc *d40d_fin; | 1046 | struct d40_desc *d40d; |
| 846 | unsigned long flags; | 1047 | unsigned long flags; |
| 847 | dma_async_tx_callback callback; | 1048 | dma_async_tx_callback callback; |
| 848 | void *callback_param; | 1049 | void *callback_param; |
| @@ -850,12 +1051,12 @@ static void dma_tasklet(unsigned long data) | |||
| 850 | spin_lock_irqsave(&d40c->lock, flags); | 1051 | spin_lock_irqsave(&d40c->lock, flags); |
| 851 | 1052 | ||
| 852 | /* Get first active entry from list */ | 1053 | /* Get first active entry from list */ |
| 853 | d40d_fin = d40_first_active_get(d40c); | 1054 | d40d = d40_first_active_get(d40c); |
| 854 | 1055 | ||
| 855 | if (d40d_fin == NULL) | 1056 | if (d40d == NULL) |
| 856 | goto err; | 1057 | goto err; |
| 857 | 1058 | ||
| 858 | d40c->completed = d40d_fin->txd.cookie; | 1059 | d40c->completed = d40d->txd.cookie; |
| 859 | 1060 | ||
| 860 | /* | 1061 | /* |
| 861 | * If terminating a channel pending_tx is set to zero. | 1062 | * If terminating a channel pending_tx is set to zero. |
| @@ -867,19 +1068,19 @@ static void dma_tasklet(unsigned long data) | |||
| 867 | } | 1068 | } |
| 868 | 1069 | ||
| 869 | /* Callback to client */ | 1070 | /* Callback to client */ |
| 870 | callback = d40d_fin->txd.callback; | 1071 | callback = d40d->txd.callback; |
| 871 | callback_param = d40d_fin->txd.callback_param; | 1072 | callback_param = d40d->txd.callback_param; |
| 872 | 1073 | ||
| 873 | if (async_tx_test_ack(&d40d_fin->txd)) { | 1074 | if (async_tx_test_ack(&d40d->txd)) { |
| 874 | d40_pool_lli_free(d40d_fin); | 1075 | d40_pool_lli_free(d40d); |
| 875 | d40_desc_remove(d40d_fin); | 1076 | d40_desc_remove(d40d); |
| 876 | /* Return desc to free-list */ | 1077 | d40_desc_free(d40c, d40d); |
| 877 | d40_desc_free(d40c, d40d_fin); | ||
| 878 | } else { | 1078 | } else { |
| 879 | if (!d40d_fin->is_in_client_list) { | 1079 | if (!d40d->is_in_client_list) { |
| 880 | d40_desc_remove(d40d_fin); | 1080 | d40_desc_remove(d40d); |
| 881 | list_add_tail(&d40d_fin->node, &d40c->client); | 1081 | d40_lcla_free_all(d40c, d40d); |
| 882 | d40d_fin->is_in_client_list = true; | 1082 | list_add_tail(&d40d->node, &d40c->client); |
| 1083 | d40d->is_in_client_list = true; | ||
| 883 | } | 1084 | } |
| 884 | } | 1085 | } |
| 885 | 1086 | ||
| @@ -890,7 +1091,7 @@ static void dma_tasklet(unsigned long data) | |||
| 890 | 1091 | ||
| 891 | spin_unlock_irqrestore(&d40c->lock, flags); | 1092 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 892 | 1093 | ||
| 893 | if (callback) | 1094 | if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) |
| 894 | callback(callback_param); | 1095 | callback(callback_param); |
| 895 | 1096 | ||
| 896 | return; | 1097 | return; |
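With the callback now gated on DMA_PREP_INTERRUPT, only clients that asked for completion notification are called back; descriptors prepared without the flag complete silently. A client that wants the callback prepares roughly as below (standard dmaengine usage of the period; buffer mapping is elided):

static void my_done(void *arg)
{
	complete(arg);		/* wake the submitting thread */
}

	dma_addr_t dst, src;	/* assumed already DMA-mapped */
	size_t len = 4096;
	struct dma_async_tx_descriptor *txd;
	struct completion done;

	init_completion(&done);
	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_PREP_INTERRUPT);
	if (txd) {
		txd->callback = my_done;
		txd->callback_param = &done;
		txd->tx_submit(txd);
		chan->device->device_issue_pending(chan);
		wait_for_completion(&done);
	}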
| @@ -919,7 +1120,6 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
| 919 | 1120 | ||
| 920 | int i; | 1121 | int i; |
| 921 | u32 regs[ARRAY_SIZE(il)]; | 1122 | u32 regs[ARRAY_SIZE(il)]; |
| 922 | u32 tmp; | ||
| 923 | u32 idx; | 1123 | u32 idx; |
| 924 | u32 row; | 1124 | u32 row; |
| 925 | long chan = -1; | 1125 | long chan = -1; |
| @@ -946,9 +1146,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
| 946 | idx = chan & (BITS_PER_LONG - 1); | 1146 | idx = chan & (BITS_PER_LONG - 1); |
| 947 | 1147 | ||
| 948 | /* ACK interrupt */ | 1148 | /* ACK interrupt */ |
| 949 | tmp = readl(base->virtbase + il[row].clr); | 1149 | writel(1 << idx, base->virtbase + il[row].clr); |
| 950 | tmp |= 1 << idx; | ||
| 951 | writel(tmp, base->virtbase + il[row].clr); | ||
| 952 | 1150 | ||
| 953 | if (il[row].offset == D40_PHY_CHAN) | 1151 | if (il[row].offset == D40_PHY_CHAN) |
| 954 | d40c = base->lookup_phy_chans[idx]; | 1152 | d40c = base->lookup_phy_chans[idx]; |
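The interrupt acknowledge shrinks from a read-modify-write to a single store, which is only correct because the clear register is write-one-to-clear: writing back everything that was read would also acknowledge other channels' interrupts that became pending in between, losing them. Assuming those W1C semantics, the difference is:

/* Racy on a write-one-to-clear register: the read-back may carry
 * other channels' pending bits, and writing them back clears them. */
tmp = readl(base->virtbase + il[row].clr);
tmp |= 1 << idx;
writel(tmp, base->virtbase + il[row].clr);

/* Safe: acknowledge exactly the event line being serviced */
writel(1 << idx, base->virtbase + il[row].clr);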
| @@ -971,24 +1169,47 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data) | |||
| 971 | return IRQ_HANDLED; | 1169 | return IRQ_HANDLED; |
| 972 | } | 1170 | } |
| 973 | 1171 | ||
| 974 | |||
| 975 | static int d40_validate_conf(struct d40_chan *d40c, | 1172 | static int d40_validate_conf(struct d40_chan *d40c, |
| 976 | struct stedma40_chan_cfg *conf) | 1173 | struct stedma40_chan_cfg *conf) |
| 977 | { | 1174 | { |
| 978 | int res = 0; | 1175 | int res = 0; |
| 979 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); | 1176 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); |
| 980 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); | 1177 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); |
| 981 | bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | 1178 | bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; |
| 982 | == STEDMA40_CHANNEL_IN_LOG_MODE; | 1179 | |
| 1180 | if (!conf->dir) { | ||
| 1181 | dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", | ||
| 1182 | __func__); | ||
| 1183 | res = -EINVAL; | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && | ||
| 1187 | d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && | ||
| 1188 | d40c->runtime_addr == 0) { | ||
| 1189 | |||
| 1190 | dev_err(&d40c->chan.dev->device, | ||
| 1191 | "[%s] Invalid TX channel address (%d)\n", | ||
| 1192 | __func__, conf->dst_dev_type); | ||
| 1193 | res = -EINVAL; | ||
| 1194 | } | ||
| 1195 | |||
| 1196 | if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && | ||
| 1197 | d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && | ||
| 1198 | d40c->runtime_addr == 0) { | ||
| 1199 | dev_err(&d40c->chan.dev->device, | ||
| 1200 | "[%s] Invalid RX channel address (%d)\n", | ||
| 1201 | __func__, conf->src_dev_type); | ||
| 1202 | res = -EINVAL; | ||
| 1203 | } | ||
| 983 | 1204 | ||
| 984 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && | 1205 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && |
| 985 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | 1206 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { |
| 986 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", | 1207 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", |
| 987 | __func__); | 1208 | __func__); |
| 988 | res = -EINVAL; | 1209 | res = -EINVAL; |
| 989 | } | 1210 | } |
| 990 | 1211 | ||
| 991 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && | 1212 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && |
| 992 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | 1213 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { |
| 993 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", | 1214 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", |
| 994 | __func__); | 1215 | __func__); |
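d40_validate_conf() now also refuses configurations without a direction, and any non-memory endpoint must resolve to a device address, either from the platform's dev_rx[]/dev_tx[] tables or from a later runtime configuration. A configuration that passes these checks for a peripheral-to-memory transfer might look like the sketch below; MY_PERIPH_RX is a placeholder for a real STEDMA40_DEV_* event line:

/* Sketch: logical RX channel, peripheral into memory */
struct stedma40_chan_cfg rx_cfg = {
	.mode		= STEDMA40_MODE_LOGICAL,
	.dir		= STEDMA40_PERIPH_TO_MEM,
	.src_dev_type	= MY_PERIPH_RX,	/* must have a dev_rx[] address */
	.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
};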
| @@ -1082,7 +1303,6 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | |||
| 1082 | 1303 | ||
| 1083 | spin_lock_irqsave(&phy->lock, flags); | 1304 | spin_lock_irqsave(&phy->lock, flags); |
| 1084 | if (!log_event_line) { | 1305 | if (!log_event_line) { |
| 1085 | /* Physical interrupts are masked per physical full channel */ | ||
| 1086 | phy->allocated_dst = D40_ALLOC_FREE; | 1306 | phy->allocated_dst = D40_ALLOC_FREE; |
| 1087 | phy->allocated_src = D40_ALLOC_FREE; | 1307 | phy->allocated_src = D40_ALLOC_FREE; |
| 1088 | is_free = true; | 1308 | is_free = true; |
| @@ -1119,10 +1339,7 @@ static int d40_allocate_channel(struct d40_chan *d40c) | |||
| 1119 | int j; | 1339 | int j; |
| 1120 | int log_num; | 1340 | int log_num; |
| 1121 | bool is_src; | 1341 | bool is_src; |
| 1122 | bool is_log = (d40c->dma_cfg.channel_type & | 1342 | bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; |
| 1123 | STEDMA40_CHANNEL_IN_OPER_MODE) | ||
| 1124 | == STEDMA40_CHANNEL_IN_LOG_MODE; | ||
| 1125 | |||
| 1126 | 1343 | ||
| 1127 | phys = d40c->base->phy_res; | 1344 | phys = d40c->base->phy_res; |
| 1128 | 1345 | ||
| @@ -1251,7 +1468,6 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
| 1251 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 1468 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
| 1252 | d40_pool_lli_free(d); | 1469 | d40_pool_lli_free(d); |
| 1253 | d40_desc_remove(d); | 1470 | d40_desc_remove(d); |
| 1254 | /* Return desc to free-list */ | ||
| 1255 | d40_desc_free(d40c, d); | 1471 | d40_desc_free(d40c, d); |
| 1256 | } | 1472 | } |
| 1257 | 1473 | ||
| @@ -1324,37 +1540,12 @@ static int d40_free_dma(struct d40_chan *d40c) | |||
| 1324 | return res; | 1540 | return res; |
| 1325 | } | 1541 | } |
| 1326 | d40c->phy_chan = NULL; | 1542 | d40c->phy_chan = NULL; |
| 1327 | /* Invalidate channel type */ | 1543 | d40c->configured = false; |
| 1328 | d40c->dma_cfg.channel_type = 0; | ||
| 1329 | d40c->base->lookup_phy_chans[phy->num] = NULL; | 1544 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
| 1330 | 1545 | ||
| 1331 | return 0; | 1546 | return 0; |
| 1332 | } | 1547 | } |
| 1333 | 1548 | ||
| 1334 | static int d40_pause(struct dma_chan *chan) | ||
| 1335 | { | ||
| 1336 | struct d40_chan *d40c = | ||
| 1337 | container_of(chan, struct d40_chan, chan); | ||
| 1338 | int res; | ||
| 1339 | unsigned long flags; | ||
| 1340 | |||
| 1341 | spin_lock_irqsave(&d40c->lock, flags); | ||
| 1342 | |||
| 1343 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | ||
| 1344 | if (res == 0) { | ||
| 1345 | if (d40c->log_num != D40_PHY_CHAN) { | ||
| 1346 | d40_config_set_event(d40c, false); | ||
| 1347 | /* Resume the other logical channels if any */ | ||
| 1348 | if (d40_chan_has_events(d40c)) | ||
| 1349 | res = d40_channel_execute_command(d40c, | ||
| 1350 | D40_DMA_RUN); | ||
| 1351 | } | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 1355 | return res; | ||
| 1356 | } | ||
| 1357 | |||
| 1358 | static bool d40_is_paused(struct d40_chan *d40c) | 1549 | static bool d40_is_paused(struct d40_chan *d40c) |
| 1359 | { | 1550 | { |
| 1360 | bool is_paused = false; | 1551 | bool is_paused = false; |
| @@ -1381,16 +1572,22 @@ static bool d40_is_paused(struct d40_chan *d40c) | |||
| 1381 | } | 1572 | } |
| 1382 | 1573 | ||
| 1383 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1574 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
| 1384 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) | 1575 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
| 1385 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1576 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
| 1386 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 1577 | status = readl(d40c->base->virtbase + D40_DREG_PCBASE + |
| 1578 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 1579 | D40_CHAN_REG_SDLNK); | ||
| 1580 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | ||
| 1387 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1581 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
| 1388 | else { | 1582 | status = readl(d40c->base->virtbase + D40_DREG_PCBASE + |
| 1583 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 1584 | D40_CHAN_REG_SSLNK); | ||
| 1585 | } else { | ||
| 1389 | dev_err(&d40c->chan.dev->device, | 1586 | dev_err(&d40c->chan.dev->device, |
| 1390 | "[%s] Unknown direction\n", __func__); | 1587 | "[%s] Unknown direction\n", __func__); |
| 1391 | goto _exit; | 1588 | goto _exit; |
| 1392 | } | 1589 | } |
| 1393 | status = d40_chan_has_events(d40c); | 1590 | |
| 1394 | status = (status & D40_EVENTLINE_MASK(event)) >> | 1591 | status = (status & D40_EVENTLINE_MASK(event)) >> |
| 1395 | D40_EVENTLINE_POS(event); | 1592 | D40_EVENTLINE_POS(event); |
| 1396 | 1593 | ||
| @@ -1403,64 +1600,6 @@ _exit: | |||
| 1403 | } | 1600 | } |
| 1404 | 1601 | ||
| 1405 | 1602 | ||
| 1406 | static bool d40_tx_is_linked(struct d40_chan *d40c) | ||
| 1407 | { | ||
| 1408 | bool is_link; | ||
| 1409 | |||
| 1410 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 1411 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | ||
| 1412 | else | ||
| 1413 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 1414 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 1415 | D40_CHAN_REG_SDLNK) & | ||
| 1416 | D40_SREG_LNK_PHYS_LNK_MASK; | ||
| 1417 | return is_link; | ||
| 1418 | } | ||
| 1419 | |||
| 1420 | static u32 d40_residue(struct d40_chan *d40c) | ||
| 1421 | { | ||
| 1422 | u32 num_elt; | ||
| 1423 | |||
| 1424 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 1425 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | ||
| 1426 | >> D40_MEM_LCSP2_ECNT_POS; | ||
| 1427 | else | ||
| 1428 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | ||
| 1429 | d40c->phy_chan->num * D40_DREG_PCDELTA + | ||
| 1430 | D40_CHAN_REG_SDELT) & | ||
| 1431 | D40_SREG_ELEM_PHY_ECNT_MASK) >> | ||
| 1432 | D40_SREG_ELEM_PHY_ECNT_POS; | ||
| 1433 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | ||
| 1434 | } | ||
| 1435 | |||
| 1436 | static int d40_resume(struct dma_chan *chan) | ||
| 1437 | { | ||
| 1438 | struct d40_chan *d40c = | ||
| 1439 | container_of(chan, struct d40_chan, chan); | ||
| 1440 | int res = 0; | ||
| 1441 | unsigned long flags; | ||
| 1442 | |||
| 1443 | spin_lock_irqsave(&d40c->lock, flags); | ||
| 1444 | |||
| 1445 | if (d40c->base->rev == 0) | ||
| 1446 | if (d40c->log_num != D40_PHY_CHAN) { | ||
| 1447 | res = d40_channel_execute_command(d40c, | ||
| 1448 | D40_DMA_SUSPEND_REQ); | ||
| 1449 | goto no_suspend; | ||
| 1450 | } | ||
| 1451 | |||
| 1452 | /* If bytes left to transfer or linked tx resume job */ | ||
| 1453 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | ||
| 1454 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 1455 | d40_config_set_event(d40c, true); | ||
| 1456 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | ||
| 1457 | } | ||
| 1458 | |||
| 1459 | no_suspend: | ||
| 1460 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 1461 | return res; | ||
| 1462 | } | ||
| 1463 | |||
| 1464 | static u32 stedma40_residue(struct dma_chan *chan) | 1603 | static u32 stedma40_residue(struct dma_chan *chan) |
| 1465 | { | 1604 | { |
| 1466 | struct d40_chan *d40c = | 1605 | struct d40_chan *d40c = |
| @@ -1475,51 +1614,6 @@ static u32 stedma40_residue(struct dma_chan *chan) | |||
| 1475 | return bytes_left; | 1614 | return bytes_left; |
| 1476 | } | 1615 | } |
| 1477 | 1616 | ||
| 1478 | /* Public DMA functions in addition to the DMA engine framework */ | ||
| 1479 | |||
| 1480 | int stedma40_set_psize(struct dma_chan *chan, | ||
| 1481 | int src_psize, | ||
| 1482 | int dst_psize) | ||
| 1483 | { | ||
| 1484 | struct d40_chan *d40c = | ||
| 1485 | container_of(chan, struct d40_chan, chan); | ||
| 1486 | unsigned long flags; | ||
| 1487 | |||
| 1488 | spin_lock_irqsave(&d40c->lock, flags); | ||
| 1489 | |||
| 1490 | if (d40c->log_num != D40_PHY_CHAN) { | ||
| 1491 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | ||
| 1492 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | ||
| 1493 | d40c->log_def.lcsp1 |= src_psize << | ||
| 1494 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | ||
| 1495 | d40c->log_def.lcsp3 |= dst_psize << | ||
| 1496 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | ||
| 1497 | goto out; | ||
| 1498 | } | ||
| 1499 | |||
| 1500 | if (src_psize == STEDMA40_PSIZE_PHY_1) | ||
| 1501 | d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | ||
| 1502 | else { | ||
| 1503 | d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | ||
| 1504 | d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | ||
| 1505 | D40_SREG_CFG_PSIZE_POS); | ||
| 1506 | d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; | ||
| 1507 | } | ||
| 1508 | |||
| 1509 | if (dst_psize == STEDMA40_PSIZE_PHY_1) | ||
| 1510 | d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | ||
| 1511 | else { | ||
| 1512 | d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | ||
| 1513 | d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | ||
| 1514 | D40_SREG_CFG_PSIZE_POS); | ||
| 1515 | d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; | ||
| 1516 | } | ||
| 1517 | out: | ||
| 1518 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 1519 | return 0; | ||
| 1520 | } | ||
| 1521 | EXPORT_SYMBOL(stedma40_set_psize); | ||
| 1522 | |||
| 1523 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | 1617 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, |
| 1524 | struct scatterlist *sgl_dst, | 1618 | struct scatterlist *sgl_dst, |
| 1525 | struct scatterlist *sgl_src, | 1619 | struct scatterlist *sgl_src, |
| @@ -1545,21 +1639,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
| 1545 | goto err; | 1639 | goto err; |
| 1546 | 1640 | ||
| 1547 | d40d->lli_len = sgl_len; | 1641 | d40d->lli_len = sgl_len; |
| 1548 | d40d->lli_tx_len = d40d->lli_len; | 1642 | d40d->lli_current = 0; |
| 1549 | d40d->txd.flags = dma_flags; | 1643 | d40d->txd.flags = dma_flags; |
| 1550 | 1644 | ||
| 1551 | if (d40c->log_num != D40_PHY_CHAN) { | 1645 | if (d40c->log_num != D40_PHY_CHAN) { |
| 1552 | if (d40d->lli_len > d40c->base->plat_data->llis_per_log) | ||
| 1553 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | ||
| 1554 | |||
| 1555 | if (sgl_len > 1) | ||
| 1556 | /* | ||
| 1557 | * Check if there is space available in lcla. If not, | ||
| 1558 | * split list into 1-length and run only in lcpa | ||
| 1559 | * space. | ||
| 1560 | */ | ||
| 1561 | if (d40_lcla_id_get(d40c) != 0) | ||
| 1562 | d40d->lli_tx_len = 1; | ||
| 1563 | 1646 | ||
| 1564 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1647 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { |
| 1565 | dev_err(&d40c->chan.dev->device, | 1648 | dev_err(&d40c->chan.dev->device, |
| @@ -1567,27 +1650,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
| 1567 | goto err; | 1650 | goto err; |
| 1568 | } | 1651 | } |
| 1569 | 1652 | ||
| 1570 | (void) d40_log_sg_to_lli(d40c->lcla.src_id, | 1653 | (void) d40_log_sg_to_lli(sgl_src, |
| 1571 | sgl_src, | ||
| 1572 | sgl_len, | 1654 | sgl_len, |
| 1573 | d40d->lli_log.src, | 1655 | d40d->lli_log.src, |
| 1574 | d40c->log_def.lcsp1, | 1656 | d40c->log_def.lcsp1, |
| 1575 | d40c->dma_cfg.src_info.data_width, | 1657 | d40c->dma_cfg.src_info.data_width); |
| 1576 | dma_flags & DMA_PREP_INTERRUPT, | ||
| 1577 | d40d->lli_tx_len, | ||
| 1578 | d40c->base->plat_data->llis_per_log); | ||
| 1579 | 1658 | ||
| 1580 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, | 1659 | (void) d40_log_sg_to_lli(sgl_dst, |
| 1581 | sgl_dst, | ||
| 1582 | sgl_len, | 1660 | sgl_len, |
| 1583 | d40d->lli_log.dst, | 1661 | d40d->lli_log.dst, |
| 1584 | d40c->log_def.lcsp3, | 1662 | d40c->log_def.lcsp3, |
| 1585 | d40c->dma_cfg.dst_info.data_width, | 1663 | d40c->dma_cfg.dst_info.data_width); |
| 1586 | dma_flags & DMA_PREP_INTERRUPT, | ||
| 1587 | d40d->lli_tx_len, | ||
| 1588 | d40c->base->plat_data->llis_per_log); | ||
| 1589 | |||
| 1590 | |||
| 1591 | } else { | 1664 | } else { |
| 1592 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 1665 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { |
| 1593 | dev_err(&d40c->chan.dev->device, | 1666 | dev_err(&d40c->chan.dev->device, |
| @@ -1599,11 +1672,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
| 1599 | sgl_len, | 1672 | sgl_len, |
| 1600 | 0, | 1673 | 0, |
| 1601 | d40d->lli_phy.src, | 1674 | d40d->lli_phy.src, |
| 1602 | d40d->lli_phy.src_addr, | 1675 | virt_to_phys(d40d->lli_phy.src), |
| 1603 | d40c->src_def_cfg, | 1676 | d40c->src_def_cfg, |
| 1604 | d40c->dma_cfg.src_info.data_width, | 1677 | d40c->dma_cfg.src_info.data_width, |
| 1605 | d40c->dma_cfg.src_info.psize, | 1678 | d40c->dma_cfg.src_info.psize); |
| 1606 | true); | ||
| 1607 | 1679 | ||
| 1608 | if (res < 0) | 1680 | if (res < 0) |
| 1609 | goto err; | 1681 | goto err; |
| @@ -1612,11 +1684,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
| 1612 | sgl_len, | 1684 | sgl_len, |
| 1613 | 0, | 1685 | 0, |
| 1614 | d40d->lli_phy.dst, | 1686 | d40d->lli_phy.dst, |
| 1615 | d40d->lli_phy.dst_addr, | 1687 | virt_to_phys(d40d->lli_phy.dst), |
| 1616 | d40c->dst_def_cfg, | 1688 | d40c->dst_def_cfg, |
| 1617 | d40c->dma_cfg.dst_info.data_width, | 1689 | d40c->dma_cfg.dst_info.data_width, |
| 1618 | d40c->dma_cfg.dst_info.psize, | 1690 | d40c->dma_cfg.dst_info.psize); |
| 1619 | true); | ||
| 1620 | 1691 | ||
| 1621 | if (res < 0) | 1692 | if (res < 0) |
| 1622 | goto err; | 1693 | goto err; |
| @@ -1633,6 +1704,8 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |||
| 1633 | 1704 | ||
| 1634 | return &d40d->txd; | 1705 | return &d40d->txd; |
| 1635 | err: | 1706 | err: |
| 1707 | if (d40d) | ||
| 1708 | d40_desc_free(d40c, d40d); | ||
| 1636 | spin_unlock_irqrestore(&d40c->lock, flags); | 1709 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 1637 | return NULL; | 1710 | return NULL; |
| 1638 | } | 1711 | } |
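Every failure exit in stedma40_memcpy_sg() now funnels through one label that returns the descriptor while the channel lock is still held, so a half-built job can neither leak nor be observed on a list. The idiom, reduced to its shape:

	spin_lock_irqsave(&d40c->lock, flags);

	d40d = d40_desc_get(d40c);
	if (!d40d)
		goto err;
	if (d40_pool_lli_alloc(d40d, sgl_len, is_log) < 0)
		goto err;
	/* ... build the LLIs ... */

	spin_unlock_irqrestore(&d40c->lock, flags);
	return &d40d->txd;

err:
	if (d40d)			/* NULL if d40_desc_get() failed */
		d40_desc_free(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return NULL;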
| @@ -1652,6 +1725,9 @@ bool stedma40_filter(struct dma_chan *chan, void *data) | |||
| 1652 | } else | 1725 | } else |
| 1653 | err = d40_config_memcpy(d40c); | 1726 | err = d40_config_memcpy(d40c); |
| 1654 | 1727 | ||
| 1728 | if (!err) | ||
| 1729 | d40c->configured = true; | ||
| 1730 | |||
| 1655 | return err == 0; | 1731 | return err == 0; |
| 1656 | } | 1732 | } |
| 1657 | EXPORT_SYMBOL(stedma40_filter); | 1733 | EXPORT_SYMBOL(stedma40_filter); |
| @@ -1668,11 +1744,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
| 1668 | 1744 | ||
| 1669 | d40c->completed = chan->cookie = 1; | 1745 | d40c->completed = chan->cookie = 1; |
| 1670 | 1746 | ||
| 1671 | /* | 1747 | /* If no dma configuration is set use default configuration (memcpy) */ |
| 1672 | * If no dma configuration is set (channel_type == 0) | 1748 | if (!d40c->configured) { |
| 1673 | * use default configuration (memcpy) | ||
| 1674 | */ | ||
| 1675 | if (d40c->dma_cfg.channel_type == 0) { | ||
| 1676 | err = d40_config_memcpy(d40c); | 1749 | err = d40_config_memcpy(d40c); |
| 1677 | if (err) { | 1750 | if (err) { |
| 1678 | dev_err(&d40c->chan.dev->device, | 1751 | dev_err(&d40c->chan.dev->device, |
| @@ -1712,14 +1785,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan) | |||
| 1712 | * resource is free. In case of multiple logical channels | 1785 | * resource is free. In case of multiple logical channels |
| 1713 | * on the same physical resource, only the first write is necessary. | 1786 | * on the same physical resource, only the first write is necessary. |
| 1714 | */ | 1787 | */ |
| 1715 | if (is_free_phy) { | 1788 | if (is_free_phy) |
| 1716 | err = d40_config_write(d40c); | 1789 | d40_config_write(d40c); |
| 1717 | if (err) { | ||
| 1718 | dev_err(&d40c->chan.dev->device, | ||
| 1719 | "[%s] Failed to configure channel\n", | ||
| 1720 | __func__); | ||
| 1721 | } | ||
| 1722 | } | ||
| 1723 | fail: | 1790 | fail: |
| 1724 | spin_unlock_irqrestore(&d40c->lock, flags); | 1791 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 1725 | return err; | 1792 | return err; |
| @@ -1790,23 +1857,21 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
| 1790 | goto err; | 1857 | goto err; |
| 1791 | } | 1858 | } |
| 1792 | d40d->lli_len = 1; | 1859 | d40d->lli_len = 1; |
| 1793 | d40d->lli_tx_len = 1; | 1860 | d40d->lli_current = 0; |
| 1794 | 1861 | ||
| 1795 | d40_log_fill_lli(d40d->lli_log.src, | 1862 | d40_log_fill_lli(d40d->lli_log.src, |
| 1796 | src, | 1863 | src, |
| 1797 | size, | 1864 | size, |
| 1798 | 0, | ||
| 1799 | d40c->log_def.lcsp1, | 1865 | d40c->log_def.lcsp1, |
| 1800 | d40c->dma_cfg.src_info.data_width, | 1866 | d40c->dma_cfg.src_info.data_width, |
| 1801 | false, true); | 1867 | true); |
| 1802 | 1868 | ||
| 1803 | d40_log_fill_lli(d40d->lli_log.dst, | 1869 | d40_log_fill_lli(d40d->lli_log.dst, |
| 1804 | dst, | 1870 | dst, |
| 1805 | size, | 1871 | size, |
| 1806 | 0, | ||
| 1807 | d40c->log_def.lcsp3, | 1872 | d40c->log_def.lcsp3, |
| 1808 | d40c->dma_cfg.dst_info.data_width, | 1873 | d40c->dma_cfg.dst_info.data_width, |
| 1809 | true, true); | 1874 | true); |
| 1810 | 1875 | ||
| 1811 | } else { | 1876 | } else { |
| 1812 | 1877 | ||
| @@ -1851,12 +1916,25 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |||
| 1851 | err_fill_lli: | 1916 | err_fill_lli: |
| 1852 | dev_err(&d40c->chan.dev->device, | 1917 | dev_err(&d40c->chan.dev->device, |
| 1853 | "[%s] Failed filling in PHY LLI\n", __func__); | 1918 | "[%s] Failed filling in PHY LLI\n", __func__); |
| 1854 | d40_pool_lli_free(d40d); | ||
| 1855 | err: | 1919 | err: |
| 1920 | if (d40d) | ||
| 1921 | d40_desc_free(d40c, d40d); | ||
| 1856 | spin_unlock_irqrestore(&d40c->lock, flags); | 1922 | spin_unlock_irqrestore(&d40c->lock, flags); |
| 1857 | return NULL; | 1923 | return NULL; |
| 1858 | } | 1924 | } |
| 1859 | 1925 | ||
| 1926 | static struct dma_async_tx_descriptor * | ||
| 1927 | d40_prep_sg(struct dma_chan *chan, | ||
| 1928 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
| 1929 | struct scatterlist *src_sg, unsigned int src_nents, | ||
| 1930 | unsigned long dma_flags) | ||
| 1931 | { | ||
| 1932 | if (dst_nents != src_nents) | ||
| 1933 | return NULL; | ||
| 1934 | |||
| 1935 | return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); | ||
| 1936 | } | ||
| 1937 | |||
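The new d40_prep_sg() hooks the driver into the generic DMA_SG capability by reusing stedma40_memcpy_sg(); it rejects lists of different lengths because that path consumes the two scatterlists entry by entry. Through the generic interface a client would do roughly:

/* Sketch: scatter-gather copy via the DMA_SG capability;
 * this driver requires dst and src to have the same nents. */
struct dma_async_tx_descriptor *txd;

txd = chan->device->device_prep_dma_sg(chan, dst_sg, nents,
					src_sg, nents,
					DMA_PREP_INTERRUPT);
if (txd) {
	txd->tx_submit(txd);
	chan->device->device_issue_pending(chan);
}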
| 1860 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | 1938 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, |
| 1861 | struct d40_chan *d40c, | 1939 | struct d40_chan *d40c, |
| 1862 | struct scatterlist *sgl, | 1940 | struct scatterlist *sgl, |
| @@ -1874,19 +1952,7 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
| 1874 | } | 1952 | } |
| 1875 | 1953 | ||
| 1876 | d40d->lli_len = sg_len; | 1954 | d40d->lli_len = sg_len; |
| 1877 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) | 1955 | d40d->lli_current = 0; |
| 1878 | d40d->lli_tx_len = d40d->lli_len; | ||
| 1879 | else | ||
| 1880 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | ||
| 1881 | |||
| 1882 | if (sg_len > 1) | ||
| 1883 | /* | ||
| 1884 | * Check if there is space available in lcla. | ||
| 1885 | * If not, split list into 1-length and run only | ||
| 1886 | * in lcpa space. | ||
| 1887 | */ | ||
| 1888 | if (d40_lcla_id_get(d40c) != 0) | ||
| 1889 | d40d->lli_tx_len = 1; | ||
| 1890 | 1956 | ||
| 1891 | if (direction == DMA_FROM_DEVICE) | 1957 | if (direction == DMA_FROM_DEVICE) |
| 1892 | if (d40c->runtime_addr) | 1958 | if (d40c->runtime_addr) |
| @@ -1902,16 +1968,13 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |||
| 1902 | else | 1968 | else |
| 1903 | return -EINVAL; | 1969 | return -EINVAL; |
| 1904 | 1970 | ||
| 1905 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 1971 | total_size = d40_log_sg_to_dev(sgl, sg_len, |
| 1906 | sgl, sg_len, | ||
| 1907 | &d40d->lli_log, | 1972 | &d40d->lli_log, |
| 1908 | &d40c->log_def, | 1973 | &d40c->log_def, |
| 1909 | d40c->dma_cfg.src_info.data_width, | 1974 | d40c->dma_cfg.src_info.data_width, |
| 1910 | d40c->dma_cfg.dst_info.data_width, | 1975 | d40c->dma_cfg.dst_info.data_width, |
| 1911 | direction, | 1976 | direction, |
| 1912 | dma_flags & DMA_PREP_INTERRUPT, | 1977 | dev_addr); |
| 1913 | dev_addr, d40d->lli_tx_len, | ||
| 1914 | d40c->base->plat_data->llis_per_log); | ||
| 1915 | 1978 | ||
| 1916 | if (total_size < 0) | 1979 | if (total_size < 0) |
| 1917 | return -EINVAL; | 1980 | return -EINVAL; |
| @@ -1937,7 +2000,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
| 1937 | } | 2000 | } |
| 1938 | 2001 | ||
| 1939 | d40d->lli_len = sgl_len; | 2002 | d40d->lli_len = sgl_len; |
| 1940 | d40d->lli_tx_len = sgl_len; | 2003 | d40d->lli_current = 0; |
| 1941 | 2004 | ||
| 1942 | if (direction == DMA_FROM_DEVICE) { | 2005 | if (direction == DMA_FROM_DEVICE) { |
| 1943 | dst_dev_addr = 0; | 2006 | dst_dev_addr = 0; |
| @@ -1958,11 +2021,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
| 1958 | sgl_len, | 2021 | sgl_len, |
| 1959 | src_dev_addr, | 2022 | src_dev_addr, |
| 1960 | d40d->lli_phy.src, | 2023 | d40d->lli_phy.src, |
| 1961 | d40d->lli_phy.src_addr, | 2024 | virt_to_phys(d40d->lli_phy.src), |
| 1962 | d40c->src_def_cfg, | 2025 | d40c->src_def_cfg, |
| 1963 | d40c->dma_cfg.src_info.data_width, | 2026 | d40c->dma_cfg.src_info.data_width, |
| 1964 | d40c->dma_cfg.src_info.psize, | 2027 | d40c->dma_cfg.src_info.psize); |
| 1965 | true); | ||
| 1966 | if (res < 0) | 2028 | if (res < 0) |
| 1967 | return res; | 2029 | return res; |
| 1968 | 2030 | ||
| @@ -1970,11 +2032,10 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |||
| 1970 | sgl_len, | 2032 | sgl_len, |
| 1971 | dst_dev_addr, | 2033 | dst_dev_addr, |
| 1972 | d40d->lli_phy.dst, | 2034 | d40d->lli_phy.dst, |
| 1973 | d40d->lli_phy.dst_addr, | 2035 | virt_to_phys(d40d->lli_phy.dst), |
| 1974 | d40c->dst_def_cfg, | 2036 | d40c->dst_def_cfg, |
| 1975 | d40c->dma_cfg.dst_info.data_width, | 2037 | d40c->dma_cfg.dst_info.data_width, |
| 1976 | d40c->dma_cfg.dst_info.psize, | 2038 | d40c->dma_cfg.dst_info.psize); |
| 1977 | true); | ||
| 1978 | if (res < 0) | 2039 | if (res < 0) |
| 1979 | return res; | 2040 | return res; |
| 1980 | 2041 | ||
| @@ -2001,17 +2062,11 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
| 2001 | return ERR_PTR(-EINVAL); | 2062 | return ERR_PTR(-EINVAL); |
| 2002 | } | 2063 | } |
| 2003 | 2064 | ||
| 2004 | if (d40c->dma_cfg.pre_transfer) | ||
| 2005 | d40c->dma_cfg.pre_transfer(chan, | ||
| 2006 | d40c->dma_cfg.pre_transfer_data, | ||
| 2007 | sg_dma_len(sgl)); | ||
| 2008 | |||
| 2009 | spin_lock_irqsave(&d40c->lock, flags); | 2065 | spin_lock_irqsave(&d40c->lock, flags); |
| 2010 | d40d = d40_desc_get(d40c); | 2066 | d40d = d40_desc_get(d40c); |
| 2011 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 2012 | 2067 | ||
| 2013 | if (d40d == NULL) | 2068 | if (d40d == NULL) |
| 2014 | return NULL; | 2069 | goto err; |
| 2015 | 2070 | ||
| 2016 | if (d40c->log_num != D40_PHY_CHAN) | 2071 | if (d40c->log_num != D40_PHY_CHAN) |
| 2017 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 2072 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, |
| @@ -2024,7 +2079,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
| 2024 | "[%s] Failed to prepare %s slave sg job: %d\n", | 2079 | "[%s] Failed to prepare %s slave sg job: %d\n", |
| 2025 | __func__, | 2080 | __func__, |
| 2026 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | 2081 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); |
| 2027 | return NULL; | 2082 | goto err; |
| 2028 | } | 2083 | } |
| 2029 | 2084 | ||
| 2030 | d40d->txd.flags = dma_flags; | 2085 | d40d->txd.flags = dma_flags; |
| @@ -2033,7 +2088,14 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |||
| 2033 | 2088 | ||
| 2034 | d40d->txd.tx_submit = d40_tx_submit; | 2089 | d40d->txd.tx_submit = d40_tx_submit; |
| 2035 | 2090 | ||
| 2091 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 2036 | return &d40d->txd; | 2092 | return &d40d->txd; |
| 2093 | |||
| 2094 | err: | ||
| 2095 | if (d40d) | ||
| 2096 | d40_desc_free(d40c, d40d); | ||
| 2097 | spin_unlock_irqrestore(&d40c->lock, flags); | ||
| 2098 | return NULL; | ||
| 2037 | } | 2099 | } |
| 2038 | 2100 | ||
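Note the widened lock scope in d40_prep_slave_sg(): the descriptor is now taken, prepared and wired up inside a single spin_lock_irqsave() region, where previously the lock was dropped right after d40_desc_get(), leaving a window in which a concurrent terminate could recycle the half-prepared descriptor. The shape of the fix, with prepare() standing in for the log/phy helpers:

/* Before (racy): lock covered only the allocation */
spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
spin_unlock_irqrestore(&d40c->lock, flags);
/* d40d prepared here with no lock held */

/* After: allocation, preparation and the error path share one
 * critical section, released only once the descriptor is complete. */
spin_lock_irqsave(&d40c->lock, flags);
d40d = d40_desc_get(d40c);
if (!d40d || prepare(d40d) < 0)
	goto err;	/* frees d40d, then unlocks */
spin_unlock_irqrestore(&d40c->lock, flags);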
| 2039 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 2101 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
| @@ -2166,25 +2228,43 @@ static void d40_set_runtime_config(struct dma_chan *chan, | |||
| 2166 | return; | 2228 | return; |
| 2167 | } | 2229 | } |
| 2168 | 2230 | ||
| 2169 | if (config_maxburst >= 16) | 2231 | if (d40c->log_num != D40_PHY_CHAN) { |
| 2170 | psize = STEDMA40_PSIZE_LOG_16; | 2232 | if (config_maxburst >= 16) |
| 2171 | else if (config_maxburst >= 8) | 2233 | psize = STEDMA40_PSIZE_LOG_16; |
| 2172 | psize = STEDMA40_PSIZE_LOG_8; | 2234 | else if (config_maxburst >= 8) |
| 2173 | else if (config_maxburst >= 4) | 2235 | psize = STEDMA40_PSIZE_LOG_8; |
| 2174 | psize = STEDMA40_PSIZE_LOG_4; | 2236 | else if (config_maxburst >= 4) |
| 2175 | else | 2237 | psize = STEDMA40_PSIZE_LOG_4; |
| 2176 | psize = STEDMA40_PSIZE_LOG_1; | 2238 | else |
| 2239 | psize = STEDMA40_PSIZE_LOG_1; | ||
| 2240 | } else { | ||
| 2241 | if (config_maxburst >= 16) | ||
| 2242 | psize = STEDMA40_PSIZE_PHY_16; | ||
| 2243 | else if (config_maxburst >= 8) | ||
| 2244 | psize = STEDMA40_PSIZE_PHY_8; | ||
| 2245 | else if (config_maxburst >= 4) | ||
| 2246 | psize = STEDMA40_PSIZE_PHY_4; | ||
| 2247 | else | ||
| 2248 | psize = STEDMA40_PSIZE_PHY_1; | ||
| 2249 | } | ||
| 2177 | 2250 | ||
| 2178 | /* Set up all the endpoint configs */ | 2251 | /* Set up all the endpoint configs */ |
| 2179 | cfg->src_info.data_width = addr_width; | 2252 | cfg->src_info.data_width = addr_width; |
| 2180 | cfg->src_info.psize = psize; | 2253 | cfg->src_info.psize = psize; |
| 2181 | cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; | 2254 | cfg->src_info.big_endian = false; |
| 2182 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2255 | cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
| 2183 | cfg->dst_info.data_width = addr_width; | 2256 | cfg->dst_info.data_width = addr_width; |
| 2184 | cfg->dst_info.psize = psize; | 2257 | cfg->dst_info.psize = psize; |
| 2185 | cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; | 2258 | cfg->dst_info.big_endian = false; |
| 2186 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; | 2259 | cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; |
| 2187 | 2260 | ||
| 2261 | /* Fill in register values */ | ||
| 2262 | if (d40c->log_num != D40_PHY_CHAN) | ||
| 2263 | d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | ||
| 2264 | else | ||
| 2265 | d40_phy_cfg(cfg, &d40c->src_def_cfg, | ||
| 2266 | &d40c->dst_def_cfg, false); | ||
| 2267 | |||
| 2188 | /* These settings will take precedence later */ | 2268 | /* These settings will take precedence later */ |
| 2189 | d40c->runtime_addr = config_addr; | 2269 | d40c->runtime_addr = config_addr; |
| 2190 | d40c->runtime_direction = config->direction; | 2270 | d40c->runtime_direction = config->direction; |
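The runtime configuration now picks burst (packet) sizes from the set that matches the channel class, STEDMA40_PSIZE_LOG_* for logical and STEDMA40_PSIZE_PHY_* for physical channels, rounding the requested maxburst down to the nearest supported step, and immediately bakes the result into the register defaults via d40_log_cfg()/d40_phy_cfg(). From the client side this is driven by an ordinary slave configuration; the FIFO address below is a placeholder:

/* Sketch: src_maxburst = 8 selects STEDMA40_PSIZE_LOG_8 on a logical
 * channel (PHY_8 on a physical one); 5..7 would round down to 4. */
struct dma_slave_config cfg = {
	.direction	= DMA_FROM_DEVICE,
	.src_addr	= MY_PERIPH_FIFO_ADDR,
	.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst	= 8,
};

chan->device->device_control(chan, DMA_SLAVE_CONFIG,
			     (unsigned long)&cfg);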
| @@ -2247,10 +2327,6 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |||
| 2247 | d40c->base = base; | 2327 | d40c->base = base; |
| 2248 | d40c->chan.device = dma; | 2328 | d40c->chan.device = dma; |
| 2249 | 2329 | ||
| 2250 | /* Invalidate lcla element */ | ||
| 2251 | d40c->lcla.src_id = -1; | ||
| 2252 | d40c->lcla.dst_id = -1; | ||
| 2253 | |||
| 2254 | spin_lock_init(&d40c->lock); | 2330 | spin_lock_init(&d40c->lock); |
| 2255 | 2331 | ||
| 2256 | d40c->log_num = D40_PHY_CHAN; | 2332 | d40c->log_num = D40_PHY_CHAN; |
| @@ -2281,6 +2357,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
| 2281 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | 2357 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; |
| 2282 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | 2358 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; |
| 2283 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | 2359 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; |
| 2360 | base->dma_slave.device_prep_dma_sg = d40_prep_sg; | ||
| 2284 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | 2361 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; |
| 2285 | base->dma_slave.device_tx_status = d40_tx_status; | 2362 | base->dma_slave.device_tx_status = d40_tx_status; |
| 2286 | base->dma_slave.device_issue_pending = d40_issue_pending; | 2363 | base->dma_slave.device_issue_pending = d40_issue_pending; |
| @@ -2301,10 +2378,12 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
| 2301 | 2378 | ||
| 2302 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2379 | dma_cap_zero(base->dma_memcpy.cap_mask); |
| 2303 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2380 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
| 2381 | dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); | ||
| 2304 | 2382 | ||
| 2305 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | 2383 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; |
| 2306 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | 2384 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; |
| 2307 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | 2385 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; |
| 2386 | base->dma_memcpy.device_prep_dma_sg = d40_prep_sg; | ||
| 2308 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | 2387 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; |
| 2309 | base->dma_memcpy.device_tx_status = d40_tx_status; | 2388 | base->dma_memcpy.device_tx_status = d40_tx_status; |
| 2310 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | 2389 | base->dma_memcpy.device_issue_pending = d40_issue_pending; |
| @@ -2331,10 +2410,12 @@ static int __init d40_dmaengine_init(struct d40_base *base, | |||
| 2331 | dma_cap_zero(base->dma_both.cap_mask); | 2410 | dma_cap_zero(base->dma_both.cap_mask); |
| 2332 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 2411 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
| 2333 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 2412 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
| 2413 | dma_cap_set(DMA_SG, base->dma_both.cap_mask); | ||
| 2334 | 2414 | ||
| 2335 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | 2415 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; |
| 2336 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | 2416 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; |
| 2337 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | 2417 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; |
| 2418 | base->dma_both.device_prep_dma_sg = d40_prep_sg; | ||
| 2338 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | 2419 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; |
| 2339 | base->dma_both.device_tx_status = d40_tx_status; | 2420 | base->dma_both.device_tx_status = d40_tx_status; |
| 2340 | base->dma_both.device_issue_pending = d40_issue_pending; | 2421 | base->dma_both.device_issue_pending = d40_issue_pending; |
| @@ -2387,9 +2468,11 @@ static int __init d40_phy_res_init(struct d40_base *base) | |||
| 2387 | 2468 | ||
| 2388 | /* Mark disabled channels as occupied */ | 2469 | /* Mark disabled channels as occupied */ |
| 2389 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { | 2470 | for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { |
| 2390 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 2471 | int chan = base->plat_data->disabled_channels[i]; |
| 2391 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 2472 | |
| 2392 | num_phy_chans_avail--; | 2473 | base->phy_res[chan].allocated_src = D40_ALLOC_PHY; |
| 2474 | base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; | ||
| 2475 | num_phy_chans_avail--; | ||
| 2393 | } | 2476 | } |
| 2394 | 2477 | ||
| 2395 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 2478 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
| @@ -2441,6 +2524,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2441 | int num_phy_chans; | 2524 | int num_phy_chans; |
| 2442 | int i; | 2525 | int i; |
| 2443 | u32 val; | 2526 | u32 val; |
| 2527 | u32 rev; | ||
| 2444 | 2528 | ||
| 2445 | clk = clk_get(&pdev->dev, NULL); | 2529 | clk = clk_get(&pdev->dev, NULL); |
| 2446 | 2530 | ||
| @@ -2479,21 +2563,26 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2479 | } | 2563 | } |
| 2480 | } | 2564 | } |
| 2481 | 2565 | ||
| 2482 | /* Get silicon revision */ | 2566 | /* Get silicon revision and designer */ |
| 2483 | val = readl(virtbase + D40_DREG_PERIPHID2); | 2567 | val = readl(virtbase + D40_DREG_PERIPHID2); |
| 2484 | 2568 | ||
| 2485 | if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { | 2569 | if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != |
| 2570 | D40_HW_DESIGNER) { | ||
| 2486 | dev_err(&pdev->dev, | 2571 | dev_err(&pdev->dev, |
| 2487 | "[%s] Unknown designer! Got %x wanted %x\n", | 2572 | "[%s] Unknown designer! Got %x wanted %x\n", |
| 2488 | __func__, val & 0xf, D40_PERIPHID2_DESIGNER); | 2573 | __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, |
| 2574 | D40_HW_DESIGNER); | ||
| 2489 | goto failure; | 2575 | goto failure; |
| 2490 | } | 2576 | } |
| 2491 | 2577 | ||
| 2578 | rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> | ||
| 2579 | D40_DREG_PERIPHID2_REV_POS; | ||
| 2580 | |||
| 2492 | /* The number of physical channels on this HW */ | 2581 | /* The number of physical channels on this HW */ |
| 2493 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2582 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
| 2494 | 2583 | ||
| 2495 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2584 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
| 2496 | (val >> 4) & 0xf, res->start); | 2585 | rev, res->start); |
| 2497 | 2586 | ||
| 2498 | plat_data = pdev->dev.platform_data; | 2587 | plat_data = pdev->dev.platform_data; |
| 2499 | 2588 | ||
| @@ -2515,7 +2604,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2515 | goto failure; | 2604 | goto failure; |
| 2516 | } | 2605 | } |
| 2517 | 2606 | ||
| 2518 | base->rev = (val >> 4) & 0xf; | 2607 | base->rev = rev; |
| 2519 | base->clk = clk; | 2608 | base->clk = clk; |
| 2520 | base->num_phy_chans = num_phy_chans; | 2609 | base->num_phy_chans = num_phy_chans; |
| 2521 | base->num_log_chans = num_log_chans; | 2610 | base->num_log_chans = num_log_chans; |
| @@ -2549,7 +2638,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2549 | if (!base->lookup_log_chans) | 2638 | if (!base->lookup_log_chans) |
| 2550 | goto failure; | 2639 | goto failure; |
| 2551 | } | 2640 | } |
| 2552 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), | 2641 | |
| 2642 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * | ||
| 2643 | sizeof(struct d40_desc *) * | ||
| 2644 | D40_LCLA_LINK_PER_EVENT_GRP, | ||
| 2553 | GFP_KERNEL); | 2645 | GFP_KERNEL); |
| 2554 | if (!base->lcla_pool.alloc_map) | 2646 | if (!base->lcla_pool.alloc_map) |
| 2555 | goto failure; | 2647 | goto failure; |
| @@ -2563,7 +2655,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |||
| 2563 | return base; | 2655 | return base; |
| 2564 | 2656 | ||
| 2565 | failure: | 2657 | failure: |
| 2566 | if (clk) { | 2658 | if (!IS_ERR(clk)) { |
| 2567 | clk_disable(clk); | 2659 | clk_disable(clk); |
| 2568 | clk_put(clk); | 2660 | clk_put(clk); |
| 2569 | } | 2661 | } |
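The failure path also stops treating the clock handle as NULL-on-error: clk_get() reports failure with an ERR_PTR() value, never NULL, so the old 'if (clk)' guard was always true and could pass a garbage pointer to clk_disable(). The correct pattern on both ends:

	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "[%s] No matching clock found\n",
			__func__);
		goto failure;
	}
	...
failure:
	if (!IS_ERR(clk)) {	/* failure is ERR_PTR, not NULL */
		clk_disable(clk);
		clk_put(clk);
	}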
| @@ -2700,8 +2792,10 @@ static int __init d40_lcla_allocate(struct d40_base *base) | |||
| 2700 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | 2792 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { |
| 2701 | base->lcla_pool.base = (void *)page_list[i]; | 2793 | base->lcla_pool.base = (void *)page_list[i]; |
| 2702 | } else { | 2794 | } else { |
| 2703 | /* After many attempts, no succees with finding the correct | 2795 | /* |
| 2704 | * alignment try with allocating a big buffer */ | 2796 | * After many attempts and no success in finding the correct |
| 2797 | * alignment, try allocating a big buffer. | ||
| 2798 | */ | ||
| 2705 | dev_warn(base->dev, | 2799 | dev_warn(base->dev, |
| 2706 | "[%s] Failed to get %d pages @ 18 bit align.\n", | 2800 | "[%s] Failed to get %d pages @ 18 bit align.\n", |
| 2707 | __func__, base->lcla_pool.pages); | 2801 | __func__, base->lcla_pool.pages); |
| @@ -2794,8 +2888,6 @@ static int __init d40_probe(struct platform_device *pdev) | |||
| 2794 | 2888 | ||
| 2795 | spin_lock_init(&base->lcla_pool.lock); | 2889 | spin_lock_init(&base->lcla_pool.lock); |
| 2796 | 2890 | ||
| 2797 | base->lcla_pool.num_blocks = base->num_phy_chans; | ||
| 2798 | |||
| 2799 | base->irq = platform_get_irq(pdev, 0); | 2891 | base->irq = platform_get_irq(pdev, 0); |
| 2800 | 2892 | ||
| 2801 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 2893 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); |
| @@ -2823,8 +2915,9 @@ failure: | |||
| 2823 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) | 2915 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
| 2824 | free_pages((unsigned long)base->lcla_pool.base, | 2916 | free_pages((unsigned long)base->lcla_pool.base, |
| 2825 | base->lcla_pool.pages); | 2917 | base->lcla_pool.pages); |
| 2826 | if (base->lcla_pool.base_unaligned) | 2918 | |
| 2827 | kfree(base->lcla_pool.base_unaligned); | 2919 | kfree(base->lcla_pool.base_unaligned); |
| 2920 | |||
| 2828 | if (base->phy_lcpa) | 2921 | if (base->phy_lcpa) |
| 2829 | release_mem_region(base->phy_lcpa, | 2922 | release_mem_region(base->phy_lcpa, |
| 2830 | base->lcpa_size); | 2923 | base->lcpa_size); |
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c index d937f76d6e2e..8557cb88b255 100644 --- a/drivers/dma/ste_dma40_ll.c +++ b/drivers/dma/ste_dma40_ll.c | |||
| @@ -1,10 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * driver/dma/ste_dma40_ll.c | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
| 3 | * | 3 | * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson |
| 4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson |
| 5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
| 6 | * Author: Per Friden <per.friden@stericsson.com> | ||
| 7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
| 8 | */ | 6 | */ |
| 9 | 7 | ||
| 10 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
| @@ -39,16 +37,13 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg, | |||
| 39 | cfg->dir == STEDMA40_PERIPH_TO_PERIPH) | 37 | cfg->dir == STEDMA40_PERIPH_TO_PERIPH) |
| 40 | l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; | 38 | l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; |
| 41 | 39 | ||
| 42 | l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS; | ||
| 43 | l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; | 40 | l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; |
| 44 | l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; | 41 | l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; |
| 45 | l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; | 42 | l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; |
| 46 | l3 |= 1 << D40_MEM_LCSP3_DTCP_POS; | ||
| 47 | 43 | ||
| 48 | l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; | 44 | l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; |
| 49 | l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 45 | l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; |
| 50 | l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; | 46 | l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; |
| 51 | l1 |= 1 << D40_MEM_LCSP1_STCP_POS; | ||
| 52 | 47 | ||
| 53 | *lcsp1 = l1; | 48 | *lcsp1 = l1; |
| 54 | *lcsp3 = l3; | 49 | *lcsp3 = l3; |
| @@ -113,13 +108,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | |||
| 113 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; | 108 | src |= 1 << D40_SREG_CFG_LOG_GIM_POS; |
| 114 | } | 109 | } |
| 115 | 110 | ||
| 116 | if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) { | 111 | if (cfg->high_priority) { |
| 117 | src |= 1 << D40_SREG_CFG_PRI_POS; | 112 | src |= 1 << D40_SREG_CFG_PRI_POS; |
| 118 | dst |= 1 << D40_SREG_CFG_PRI_POS; | 113 | dst |= 1 << D40_SREG_CFG_PRI_POS; |
| 119 | } | 114 | } |
| 120 | 115 | ||
| 121 | src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS; | 116 | if (cfg->src_info.big_endian) |
| 122 | dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS; | 117 | src |= 1 << D40_SREG_CFG_LBE_POS; |
| 118 | if (cfg->dst_info.big_endian) | ||
| 119 | dst |= 1 << D40_SREG_CFG_LBE_POS; | ||
| 123 | 120 | ||
| 124 | *src_cfg = src; | 121 | *src_cfg = src; |
| 125 | *dst_cfg = dst; | 122 | *dst_cfg = dst; |
| @@ -197,8 +194,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
| 197 | dma_addr_t lli_phys, | 194 | dma_addr_t lli_phys, |
| 198 | u32 reg_cfg, | 195 | u32 reg_cfg, |
| 199 | u32 data_width, | 196 | u32 data_width, |
| 200 | int psize, | 197 | int psize) |
| 201 | bool term_int) | ||
| 202 | { | 198 | { |
| 203 | int total_size = 0; | 199 | int total_size = 0; |
| 204 | int i; | 200 | int i; |
| @@ -238,7 +234,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
| 238 | } | 234 | } |
| 239 | 235 | ||
| 240 | return total_size; | 236 | return total_size; |
| 241 | err: | 237 | err: |
| 242 | return err; | 238 | return err; |
| 243 | } | 239 | } |
| 244 | 240 | ||
| @@ -271,11 +267,59 @@ void d40_phy_lli_write(void __iomem *virtbase, | |||
| 271 | 267 | ||
| 272 | /* DMA logical lli operations */ | 268 | /* DMA logical lli operations */ |
| 273 | 269 | ||
| 270 | static void d40_log_lli_link(struct d40_log_lli *lli_dst, | ||
| 271 | struct d40_log_lli *lli_src, | ||
| 272 | int next) | ||
| 273 | { | ||
| 274 | u32 slos = 0; | ||
| 275 | u32 dlos = 0; | ||
| 276 | |||
| 277 | if (next != -EINVAL) { | ||
| 278 | slos = next * 2; | ||
| 279 | dlos = next * 2 + 1; | ||
| 280 | } else { | ||
| 281 | lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; | ||
| 282 | lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; | ||
| 283 | } | ||
| 284 | |||
| 285 | lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) | | ||
| 286 | (slos << D40_MEM_LCSP1_SLOS_POS); | ||
| 287 | |||
| 288 | lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) | | ||
| 289 | (dlos << D40_MEM_LCSP1_SLOS_POS); | ||
| 290 | } | ||
| 291 | |||
| 292 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | ||
| 293 | struct d40_log_lli *lli_dst, | ||
| 294 | struct d40_log_lli *lli_src, | ||
| 295 | int next) | ||
| 296 | { | ||
| 297 | d40_log_lli_link(lli_dst, lli_src, next); | ||
| 298 | |||
| 299 | writel(lli_src->lcsp02, &lcpa[0].lcsp0); | ||
| 300 | writel(lli_src->lcsp13, &lcpa[0].lcsp1); | ||
| 301 | writel(lli_dst->lcsp02, &lcpa[0].lcsp2); | ||
| 302 | writel(lli_dst->lcsp13, &lcpa[0].lcsp3); | ||
| 303 | } | ||
| 304 | |||
| 305 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | ||
| 306 | struct d40_log_lli *lli_dst, | ||
| 307 | struct d40_log_lli *lli_src, | ||
| 308 | int next) | ||
| 309 | { | ||
| 310 | d40_log_lli_link(lli_dst, lli_src, next); | ||
| 311 | |||
| 312 | writel(lli_src->lcsp02, &lcla[0].lcsp02); | ||
| 313 | writel(lli_src->lcsp13, &lcla[0].lcsp13); | ||
| 314 | writel(lli_dst->lcsp02, &lcla[1].lcsp02); | ||
| 315 | writel(lli_dst->lcsp13, &lcla[1].lcsp13); | ||
| 316 | } | ||
| 317 | |||
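The new link helper encodes where the next pair of logical LLIs lives: LCLA slot numbers are interleaved, with the source LLI of slot n at offset 2*n and the destination LLI at 2*n + 1, which is exactly the lcla[0]/lcla[1] write pattern in d40_log_lli_lcla_write() above. A next value of -EINVAL means end of chain, in which case the terminal-count and interrupt bits are set instead of a link. Worked out for a hypothetical slot:

/* Sketch: link-offset encoding for interleaved src/dst LCLA entries */
int next = 3;			/* hypothetical slot of the next LLI pair */
u32 slos = next * 2;		/* source link -> even entry, lcla[6] */
u32 dlos = next * 2 + 1;	/* dest link   -> odd entry,  lcla[7] */

/* next == -EINVAL: no link is written; D40_MEM_LCSP1_SCFG_TIM_MASK and
 * D40_MEM_LCSP3_DTCP_MASK terminate the chain with an interrupt. */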
| 274 | void d40_log_fill_lli(struct d40_log_lli *lli, | 318 | void d40_log_fill_lli(struct d40_log_lli *lli, |
| 275 | dma_addr_t data, u32 data_size, | 319 | dma_addr_t data, u32 data_size, |
| 276 | u32 lli_next_off, u32 reg_cfg, | 320 | u32 reg_cfg, |
| 277 | u32 data_width, | 321 | u32 data_width, |
| 278 | bool term_int, bool addr_inc) | 322 | bool addr_inc) |
| 279 | { | 323 | { |
| 280 | lli->lcsp13 = reg_cfg; | 324 | lli->lcsp13 = reg_cfg; |
| 281 | 325 | ||
| @@ -290,165 +334,69 @@ void d40_log_fill_lli(struct d40_log_lli *lli, | |||
| 290 | if (addr_inc) | 334 | if (addr_inc) |
| 291 | lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; | 335 | lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; |
| 292 | 336 | ||
| 293 | lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; | ||
| 294 | /* If this scatter list entry is the last one, no next link */ | ||
| 295 | lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) & | ||
| 296 | D40_MEM_LCSP1_SLOS_MASK; | ||
| 297 | |||
| 298 | if (term_int) | ||
| 299 | lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; | ||
| 300 | else | ||
| 301 | lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK; | ||
| 302 | } | 337 | } |
| 303 | 338 | ||
| 304 | int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, | 339 | int d40_log_sg_to_dev(struct scatterlist *sg, |
| 305 | struct scatterlist *sg, | ||
| 306 | int sg_len, | 340 | int sg_len, |
| 307 | struct d40_log_lli_bidir *lli, | 341 | struct d40_log_lli_bidir *lli, |
| 308 | struct d40_def_lcsp *lcsp, | 342 | struct d40_def_lcsp *lcsp, |
| 309 | u32 src_data_width, | 343 | u32 src_data_width, |
| 310 | u32 dst_data_width, | 344 | u32 dst_data_width, |
| 311 | enum dma_data_direction direction, | 345 | enum dma_data_direction direction, |
| 312 | bool term_int, dma_addr_t dev_addr, int max_len, | 346 | dma_addr_t dev_addr) |
| 313 | int llis_per_log) | ||
| 314 | { | 347 | { |
| 315 | int total_size = 0; | 348 | int total_size = 0; |
| 316 | struct scatterlist *current_sg = sg; | 349 | struct scatterlist *current_sg = sg; |
| 317 | int i; | 350 | int i; |
| 318 | u32 next_lli_off_dst = 0; | ||
| 319 | u32 next_lli_off_src = 0; | ||
| 320 | 351 | ||
| 321 | for_each_sg(sg, current_sg, sg_len, i) { | 352 | for_each_sg(sg, current_sg, sg_len, i) { |
| 322 | total_size += sg_dma_len(current_sg); | 353 | total_size += sg_dma_len(current_sg); |
| 323 | 354 | ||
| 324 | /* | ||
| 325 | * If this scatter list entry is the last one or | ||
| 326 | * max length, terminate link. | ||
| 327 | */ | ||
| 328 | if (sg_len - 1 == i || ((i+1) % max_len == 0)) { | ||
| 329 | next_lli_off_src = 0; | ||
| 330 | next_lli_off_dst = 0; | ||
| 331 | } else { | ||
| 332 | if (next_lli_off_dst == 0 && | ||
| 333 | next_lli_off_src == 0) { | ||
| 334 | /* The first lli will be at next_lli_off */ | ||
| 335 | next_lli_off_dst = (lcla->dst_id * | ||
| 336 | llis_per_log + 1); | ||
| 337 | next_lli_off_src = (lcla->src_id * | ||
| 338 | llis_per_log + 1); | ||
| 339 | } else { | ||
| 340 | next_lli_off_dst++; | ||
| 341 | next_lli_off_src++; | ||
| 342 | } | ||
| 343 | } | ||
| 344 | |||
| 345 | if (direction == DMA_TO_DEVICE) { | 355 | if (direction == DMA_TO_DEVICE) { |
| 346 | d40_log_fill_lli(&lli->src[i], | 356 | d40_log_fill_lli(&lli->src[i], |
| 347 | sg_phys(current_sg), | 357 | sg_phys(current_sg), |
| 348 | sg_dma_len(current_sg), | 358 | sg_dma_len(current_sg), |
| 349 | next_lli_off_src, | ||
| 350 | lcsp->lcsp1, src_data_width, | 359 | lcsp->lcsp1, src_data_width, |
| 351 | false, | ||
| 352 | true); | 360 | true); |
| 353 | d40_log_fill_lli(&lli->dst[i], | 361 | d40_log_fill_lli(&lli->dst[i], |
| 354 | dev_addr, | 362 | dev_addr, |
| 355 | sg_dma_len(current_sg), | 363 | sg_dma_len(current_sg), |
| 356 | next_lli_off_dst, | ||
| 357 | lcsp->lcsp3, dst_data_width, | 364 | lcsp->lcsp3, dst_data_width, |
| 358 | /* No next == terminal interrupt */ | ||
| 359 | term_int && !next_lli_off_dst, | ||
| 360 | false); | 365 | false); |
| 361 | } else { | 366 | } else { |
| 362 | d40_log_fill_lli(&lli->dst[i], | 367 | d40_log_fill_lli(&lli->dst[i], |
| 363 | sg_phys(current_sg), | 368 | sg_phys(current_sg), |
| 364 | sg_dma_len(current_sg), | 369 | sg_dma_len(current_sg), |
| 365 | next_lli_off_dst, | ||
| 366 | lcsp->lcsp3, dst_data_width, | 370 | lcsp->lcsp3, dst_data_width, |
| 367 | /* No next == terminal interrupt */ | ||
| 368 | term_int && !next_lli_off_dst, | ||
| 369 | true); | 371 | true); |
| 370 | d40_log_fill_lli(&lli->src[i], | 372 | d40_log_fill_lli(&lli->src[i], |
| 371 | dev_addr, | 373 | dev_addr, |
| 372 | sg_dma_len(current_sg), | 374 | sg_dma_len(current_sg), |
| 373 | next_lli_off_src, | ||
| 374 | lcsp->lcsp1, src_data_width, | 375 | lcsp->lcsp1, src_data_width, |
| 375 | false, | ||
| 376 | false); | 376 | false); |
| 377 | } | 377 | } |
| 378 | } | 378 | } |
| 379 | return total_size; | 379 | return total_size; |
| 380 | } | 380 | } |
| 381 | 381 | ||
| 382 | int d40_log_sg_to_lli(int lcla_id, | 382 | int d40_log_sg_to_lli(struct scatterlist *sg, |
| 383 | struct scatterlist *sg, | ||
| 384 | int sg_len, | 383 | int sg_len, |
| 385 | struct d40_log_lli *lli_sg, | 384 | struct d40_log_lli *lli_sg, |
| 386 | u32 lcsp13, /* src or dst */ | 385 | u32 lcsp13, /* src or dst */ |
| 387 | u32 data_width, | 386 | u32 data_width) |
| 388 | bool term_int, int max_len, int llis_per_log) | ||
| 389 | { | 387 | { |
| 390 | int total_size = 0; | 388 | int total_size = 0; |
| 391 | struct scatterlist *current_sg = sg; | 389 | struct scatterlist *current_sg = sg; |
| 392 | int i; | 390 | int i; |
| 393 | u32 next_lli_off = 0; | ||
| 394 | 391 | ||
| 395 | for_each_sg(sg, current_sg, sg_len, i) { | 392 | for_each_sg(sg, current_sg, sg_len, i) { |
| 396 | total_size += sg_dma_len(current_sg); | 393 | total_size += sg_dma_len(current_sg); |
| 397 | 394 | ||
| 398 | /* | ||
| 399 | * If this scatter list entry is the last one or | ||
| 400 | * max length, terminate link. | ||
| 401 | */ | ||
| 402 | if (sg_len - 1 == i || ((i+1) % max_len == 0)) | ||
| 403 | next_lli_off = 0; | ||
| 404 | else { | ||
| 405 | if (next_lli_off == 0) | ||
| 406 | /* The first lli will be at next_lli_off */ | ||
| 407 | next_lli_off = lcla_id * llis_per_log + 1; | ||
| 408 | else | ||
| 409 | next_lli_off++; | ||
| 410 | } | ||
| 411 | |||
| 412 | d40_log_fill_lli(&lli_sg[i], | 395 | d40_log_fill_lli(&lli_sg[i], |
| 413 | sg_phys(current_sg), | 396 | sg_phys(current_sg), |
| 414 | sg_dma_len(current_sg), | 397 | sg_dma_len(current_sg), |
| 415 | next_lli_off, | ||
| 416 | lcsp13, data_width, | 398 | lcsp13, data_width, |
| 417 | term_int && !next_lli_off, | ||
| 418 | true); | 399 | true); |
| 419 | } | 400 | } |
| 420 | return total_size; | 401 | return total_size; |
| 421 | } | 402 | } |
| 422 | |||
| 423 | int d40_log_lli_write(struct d40_log_lli_full *lcpa, | ||
| 424 | struct d40_log_lli *lcla_src, | ||
| 425 | struct d40_log_lli *lcla_dst, | ||
| 426 | struct d40_log_lli *lli_dst, | ||
| 427 | struct d40_log_lli *lli_src, | ||
| 428 | int llis_per_log) | ||
| 429 | { | ||
| 430 | u32 slos; | ||
| 431 | u32 dlos; | ||
| 432 | int i; | ||
| 433 | |||
| 434 | writel(lli_src->lcsp02, &lcpa->lcsp0); | ||
| 435 | writel(lli_src->lcsp13, &lcpa->lcsp1); | ||
| 436 | writel(lli_dst->lcsp02, &lcpa->lcsp2); | ||
| 437 | writel(lli_dst->lcsp13, &lcpa->lcsp3); | ||
| 438 | |||
| 439 | slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK; | ||
| 440 | dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK; | ||
| 441 | |||
| 442 | for (i = 0; (i < llis_per_log) && slos && dlos; i++) { | ||
| 443 | writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02); | ||
| 444 | writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13); | ||
| 445 | writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02); | ||
| 446 | writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13); | ||
| 447 | |||
| 448 | slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; | ||
| 449 | dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; | ||
| 450 | } | ||
| 451 | |||
| 452 | return i; | ||
| 453 | |||
| 454 | } | ||
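With d40_log_lli_write() removed, a caller now pairs the two new helpers itself: the first src/dst LLI pair is pushed through the LCPA slot and any following pairs are spilled into LCLA slots, two words per LLI as written above. A minimal sketch of such a call site; d40_lli_queue_log() and its argument layout are assumptions for illustration, not part of this patch, and the next argument is treated as an opaque link index maintained by the driver:

	/*
	 * Hedged sketch, not the patch's actual call site: lli holds
	 * lli_len src/dst pairs built by d40_log_sg_to_dev(); each LCLA
	 * slot takes one pair (src at [0], dst at [1], as written above).
	 */
	static void d40_lli_queue_log(struct d40_log_lli_full *lcpa,
				      struct d40_log_lli *lcla,
				      struct d40_log_lli_bidir *lli,
				      int lli_len, int first_link)
	{
		int link = (lli_len > 1) ? first_link : 0; /* 0 == no next LLI */
		int i;

		/* The first transfer is programmed directly into LCPA */
		d40_log_lli_lcpa_write(lcpa, &lli->dst[0], &lli->src[0], link);

		/* Remaining transfers go to LCLA, one src/dst pair per slot */
		for (i = 1; i < lli_len; i++) {
			link = (i == lli_len - 1) ? 0 : link + 1;
			d40_log_lli_lcla_write(&lcla[2 * i],
					       &lli->dst[i], &lli->src[i], link);
		}
	}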
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 9c0fa2f5fe57..9e419b907544 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h | |||
| @@ -1,10 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * driver/dma/ste_dma40_ll.h | 2 | * Copyright (C) ST-Ericsson SA 2007-2010 |
| 3 | * | 3 | * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA |
| 4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA |
| 5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
| 6 | * Author: Per Friden <per.friden@stericsson.com> | ||
| 7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | ||
| 8 | */ | 6 | */ |
| 9 | #ifndef STE_DMA40_LL_H | 7 | #ifndef STE_DMA40_LL_H |
| 10 | #define STE_DMA40_LL_H | 8 | #define STE_DMA40_LL_H |
| @@ -132,6 +130,13 @@ | |||
| 132 | #define D40_DREG_PRMSO 0x014 | 130 | #define D40_DREG_PRMSO 0x014 |
| 133 | #define D40_DREG_PRMOE 0x018 | 131 | #define D40_DREG_PRMOE 0x018 |
| 134 | #define D40_DREG_PRMOO 0x01C | 132 | #define D40_DREG_PRMOO 0x01C |
| 133 | #define D40_DREG_PRMO_PCHAN_BASIC 0x1 | ||
| 134 | #define D40_DREG_PRMO_PCHAN_MODULO 0x2 | ||
| 135 | #define D40_DREG_PRMO_PCHAN_DOUBLE_DST 0x3 | ||
| 136 | #define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG 0x1 | ||
| 137 | #define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY 0x2 | ||
| 138 | #define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG 0x3 | ||
| 139 | |||
| 135 | #define D40_DREG_LCPA 0x020 | 140 | #define D40_DREG_LCPA 0x020 |
| 136 | #define D40_DREG_LCLA 0x024 | 141 | #define D40_DREG_LCLA 0x024 |
| 137 | #define D40_DREG_ACTIVE 0x050 | 142 | #define D40_DREG_ACTIVE 0x050 |
| @@ -163,6 +168,9 @@ | |||
| 163 | #define D40_DREG_PERIPHID0 0xFE0 | 168 | #define D40_DREG_PERIPHID0 0xFE0 |
| 164 | #define D40_DREG_PERIPHID1 0xFE4 | 169 | #define D40_DREG_PERIPHID1 0xFE4 |
| 165 | #define D40_DREG_PERIPHID2 0xFE8 | 170 | #define D40_DREG_PERIPHID2 0xFE8 |
| 171 | #define D40_DREG_PERIPHID2_REV_POS 4 | ||
| 172 | #define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS) | ||
| 173 | #define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf | ||
| 166 | #define D40_DREG_PERIPHID3 0xFEC | 174 | #define D40_DREG_PERIPHID3 0xFEC |
| 167 | #define D40_DREG_CELLID0 0xFF0 | 175 | #define D40_DREG_CELLID0 0xFF0 |
| 168 | #define D40_DREG_CELLID1 0xFF4 | 176 | #define D40_DREG_CELLID1 0xFF4 |
| @@ -199,8 +207,6 @@ struct d40_phy_lli { | |||
| 199 | * | 207 | * |
| 200 | * @src: Register settings for src channel. | 208 | * @src: Register settings for src channel. |
| 201 | * @dst: Register settings for dst channel. | 209 | * @dst: Register settings for dst channel. |
| 202 | * @dst_addr: Physical destination address. | ||
| 203 | * @src_addr: Physical source address. | ||
| 204 | * | 210 | * |
| 205 | * All DMA transfers have a source and a destination. | 211 | * All DMA transfers have a source and a destination. |
| 206 | */ | 212 | */ |
| @@ -208,8 +214,6 @@ struct d40_phy_lli { | |||
| 208 | struct d40_phy_lli_bidir { | 214 | struct d40_phy_lli_bidir { |
| 209 | struct d40_phy_lli *src; | 215 | struct d40_phy_lli *src; |
| 210 | struct d40_phy_lli *dst; | 216 | struct d40_phy_lli *dst; |
| 211 | dma_addr_t dst_addr; | ||
| 212 | dma_addr_t src_addr; | ||
| 213 | }; | 217 | }; |
| 214 | 218 | ||
| 215 | 219 | ||
| @@ -271,29 +275,16 @@ struct d40_def_lcsp { | |||
| 271 | u32 lcsp1; | 275 | u32 lcsp1; |
| 272 | }; | 276 | }; |
| 273 | 277 | ||
| 274 | /** | ||
| 275 | * struct d40_lcla_elem - Info for one LCA element. | ||
| 276 | * | ||
| 277 | * @src_id: logical channel src id | ||
| 278 | * @dst_id: logical channel dst id | ||
| 279 | * @src: LCPA formated src parameters | ||
| 280 | * @dst: LCPA formated dst parameters | ||
| 281 | * | ||
| 282 | */ | ||
| 283 | struct d40_lcla_elem { | ||
| 284 | int src_id; | ||
| 285 | int dst_id; | ||
| 286 | struct d40_log_lli *src; | ||
| 287 | struct d40_log_lli *dst; | ||
| 288 | }; | ||
| 289 | |||
| 290 | /* Physical channels */ | 278 | /* Physical channels */ |
| 291 | 279 | ||
| 292 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, | 280 | void d40_phy_cfg(struct stedma40_chan_cfg *cfg, |
| 293 | u32 *src_cfg, u32 *dst_cfg, bool is_log); | 281 | u32 *src_cfg, |
| 282 | u32 *dst_cfg, | ||
| 283 | bool is_log); | ||
| 294 | 284 | ||
| 295 | void d40_log_cfg(struct stedma40_chan_cfg *cfg, | 285 | void d40_log_cfg(struct stedma40_chan_cfg *cfg, |
| 296 | u32 *lcsp1, u32 *lcsp2); | 286 | u32 *lcsp1, |
| 287 | u32 *lcsp2); | ||
| 297 | 288 | ||
| 298 | int d40_phy_sg_to_lli(struct scatterlist *sg, | 289 | int d40_phy_sg_to_lli(struct scatterlist *sg, |
| 299 | int sg_len, | 290 | int sg_len, |
| @@ -302,8 +293,7 @@ int d40_phy_sg_to_lli(struct scatterlist *sg, | |||
| 302 | dma_addr_t lli_phys, | 293 | dma_addr_t lli_phys, |
| 303 | u32 reg_cfg, | 294 | u32 reg_cfg, |
| 304 | u32 data_width, | 295 | u32 data_width, |
| 305 | int psize, | 296 | int psize); |
| 306 | bool term_int); | ||
| 307 | 297 | ||
| 308 | int d40_phy_fill_lli(struct d40_phy_lli *lli, | 298 | int d40_phy_fill_lli(struct d40_phy_lli *lli, |
| 309 | dma_addr_t data, | 299 | dma_addr_t data, |
| @@ -323,35 +313,35 @@ void d40_phy_lli_write(void __iomem *virtbase, | |||
| 323 | /* Logical channels */ | 313 | /* Logical channels */ |
| 324 | 314 | ||
| 325 | void d40_log_fill_lli(struct d40_log_lli *lli, | 315 | void d40_log_fill_lli(struct d40_log_lli *lli, |
| 326 | dma_addr_t data, u32 data_size, | 316 | dma_addr_t data, |
| 327 | u32 lli_next_off, u32 reg_cfg, | 317 | u32 data_size, |
| 318 | u32 reg_cfg, | ||
| 328 | u32 data_width, | 319 | u32 data_width, |
| 329 | bool term_int, bool addr_inc); | 320 | bool addr_inc); |
| 330 | 321 | ||
| 331 | int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, | 322 | int d40_log_sg_to_dev(struct scatterlist *sg, |
| 332 | struct scatterlist *sg, | ||
| 333 | int sg_len, | 323 | int sg_len, |
| 334 | struct d40_log_lli_bidir *lli, | 324 | struct d40_log_lli_bidir *lli, |
| 335 | struct d40_def_lcsp *lcsp, | 325 | struct d40_def_lcsp *lcsp, |
| 336 | u32 src_data_width, | 326 | u32 src_data_width, |
| 337 | u32 dst_data_width, | 327 | u32 dst_data_width, |
| 338 | enum dma_data_direction direction, | 328 | enum dma_data_direction direction, |
| 339 | bool term_int, dma_addr_t dev_addr, int max_len, | 329 | dma_addr_t dev_addr); |
| 340 | int llis_per_log); | 330 | |
| 341 | 331 | int d40_log_sg_to_lli(struct scatterlist *sg, | |
| 342 | int d40_log_lli_write(struct d40_log_lli_full *lcpa, | ||
| 343 | struct d40_log_lli *lcla_src, | ||
| 344 | struct d40_log_lli *lcla_dst, | ||
| 345 | struct d40_log_lli *lli_dst, | ||
| 346 | struct d40_log_lli *lli_src, | ||
| 347 | int llis_per_log); | ||
| 348 | |||
| 349 | int d40_log_sg_to_lli(int lcla_id, | ||
| 350 | struct scatterlist *sg, | ||
| 351 | int sg_len, | 332 | int sg_len, |
| 352 | struct d40_log_lli *lli_sg, | 333 | struct d40_log_lli *lli_sg, |
| 353 | u32 lcsp13, /* src or dst */ | 334 | u32 lcsp13, /* src or dst */ |
| 354 | u32 data_width, | 335 | u32 data_width); |
| 355 | bool term_int, int max_len, int llis_per_log); | 336 | |
| 337 | void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, | ||
| 338 | struct d40_log_lli *lli_dst, | ||
| 339 | struct d40_log_lli *lli_src, | ||
| 340 | int next); | ||
| 341 | |||
| 342 | void d40_log_lli_lcla_write(struct d40_log_lli *lcla, | ||
| 343 | struct d40_log_lli *lli_dst, | ||
| 344 | struct d40_log_lli *lli_src, | ||
| 345 | int next); | ||
| 356 | 346 | ||
| 357 | #endif /* STE_DMA40_LLI_H */ | 347 | #endif /* STE_DMA40_LLI_H */ |
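With the lcla_id, max_len, llis_per_log and term_int parameters gone, filling a logical LLI array from a scatterlist reduces to one call per direction. A hedged sketch of a prep path using the new signature; d40d, d40c and log_def are hypothetical driver structures, and the lcsp1/lcsp3 defaults are assumed to come from d40_log_cfg():

	/* Sketch only: build src and dst LLI chains for a logical transfer */
	int size;

	size = d40_log_sg_to_lli(sg_src, sg_len,
				 d40d->lli_log.src,
				 d40c->log_def.lcsp1,	/* src register defaults */
				 src_data_width);

	(void) d40_log_sg_to_lli(sg_dst, sg_len,
				 d40d->lli_log.dst,
				 d40c->log_def.lcsp3,	/* dst register defaults */
				 dst_data_width);
	/* size now holds the total transfer length in bytes */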
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 2ec1ed56f204..3b88a4e7c98a 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
| @@ -759,7 +759,7 @@ static int __devinit td_probe(struct platform_device *pdev) | |||
| 759 | pdata->channels + i; | 759 | pdata->channels + i; |
| 760 | 760 | ||
| 761 | /* even channels are RX, odd are TX */ | 761 | /* even channels are RX, odd are TX */ |
| 762 | if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) { | 762 | if ((i % 2) == pchan->rx) { |
| 763 | dev_err(&pdev->dev, "Wrong channel configuration\n"); | 763 | dev_err(&pdev->dev, "Wrong channel configuration\n"); |
| 764 | err = -EINVAL; | 764 | err = -EINVAL; |
| 765 | goto err_tasklet_kill; | 765 | goto err_tasklet_kill; |
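The rewritten condition relies on pchan->rx being a plain 0/1 value; the four cases of the original compound test then collapse into one equality, as the truth table below shows (this assumes rx is a bool normalized to 0 or 1):

	/*
	 * Error cases: even channels (i % 2 == 0) must be RX, odd must be TX.
	 *   i % 2 == 0 && !pchan->rx  ->  0 == 0  ->  wrong configuration
	 *   i % 2 == 1 &&  pchan->rx  ->  1 == 1  ->  wrong configuration
	 * The two valid combinations give 0 == 1 or 1 == 0, i.e. false,
	 * matching the original ((i % 2) && rx) || (!(i % 2) && !rx).
	 */
	if ((i % 2) == pchan->rx)
		dev_err(&pdev->dev, "Wrong channel configuration\n");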
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h new file mode 100644 index 000000000000..521a0f8974ac --- /dev/null +++ b/include/linux/amba/pl08x.h | |||
| @@ -0,0 +1,222 @@ | |||
| 1 | /* | ||
| 2 | * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2005 ARM Ltd | ||
| 5 | * Copyright (C) 2010 ST-Ericsson SA | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * pl08x information required by platform code | ||
| 12 | * | ||
| 13 | * Please credit ARM.com | ||
| 14 | * Documentation: ARM DDI 0196D | ||
| 15 | * | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef AMBA_PL08X_H | ||
| 19 | #define AMBA_PL08X_H | ||
| 20 | |||
| 21 | /* We need sizes of structs from this header */ | ||
| 22 | #include <linux/dmaengine.h> | ||
| 23 | #include <linux/interrupt.h> | ||
| 24 | |||
| 25 | /** | ||
| 26 | * struct pl08x_channel_data - data structure to pass info between | ||
| 27 | * platform and PL08x driver regarding channel configuration | ||
| 28 | * @bus_id: name of this device channel, not just a device name since | ||
| 29 | * devices may have more than one channel e.g. "foo_tx" | ||
| 30 | * @min_signal: the minimum DMA signal number to be muxed in for this | ||
| 31 | * channel (for platforms supporting muxed signals). If you have | ||
| 32 | * static assignments, make sure this is set to the assigned signal | ||
| 33 | * number. The PL08x has 16 possible signals, numbered 0 thru 15, so | ||
| 34 | * when these are not enough they often get muxed (in hardware), | ||
| 35 | * disabling simultaneous use of the same channel for two devices. | ||
| 36 | * @max_signal: the maximum DMA signal number to be muxed in for | ||
| 37 | * the channel. Set to the same as min_signal for | ||
| 38 | * devices with static assignments | ||
| 39 | * @muxval: a number usually used to poke into some mux register to | ||
| 40 | * mux in the signal to this channel | ||
| 41 | * @cctl: default options for the channel control register | ||
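| | * @ccfg: default options for the channel configuration register | ||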
| 42 | * @addr: source/target address in physical memory for this DMA channel, | ||
| 43 | * can be the address of a FIFO register for burst requests for example. | ||
| 44 | * This can be left undefined if the PrimeCell API is used for configuring | ||
| 45 | * this. | ||
| 46 | * @circular_buffer: whether the buffer passed in is circular and | ||
| 47 | * shall simply be looped over continuously until the transfer | ||
| 48 | * is terminated | ||
| 49 | * @single: the device connected to this channel will request single | ||
| 50 | * DMA transfers, not bursts. (Bursts are default.) | ||
| 51 | */ | ||
| 52 | struct pl08x_channel_data { | ||
| 53 | char *bus_id; | ||
| 54 | int min_signal; | ||
| 55 | int max_signal; | ||
| 56 | u32 muxval; | ||
| 57 | u32 cctl; | ||
| 58 | u32 ccfg; | ||
| 59 | dma_addr_t addr; | ||
| 60 | bool circular_buffer; | ||
| 61 | bool single; | ||
| 62 | }; | ||
| 63 | |||
| 64 | /** | ||
| 65 | * struct pl08x_bus_data - information about the source or destination | ||
| 66 | * buses for a transfer | ||
| 67 | * @addr: current address | ||
| 68 | * @maxwidth: the maximum width of a transfer on this bus | ||
| 69 | * @buswidth: the width of this bus in bytes: 1, 2 or 4 | ||
| 70 | * @fill_bytes: bytes required to fill to the next bus memory | ||
| 71 | * boundary | ||
| 72 | */ | ||
| 73 | struct pl08x_bus_data { | ||
| 74 | dma_addr_t addr; | ||
| 75 | u8 maxwidth; | ||
| 76 | u8 buswidth; | ||
| 77 | u32 fill_bytes; | ||
| 78 | }; | ||
| 79 | |||
| 80 | /** | ||
| 81 | * struct pl08x_phy_chan - holder for the physical channels | ||
| 82 | * @id: physical index to this channel | ||
| 83 | * @lock: a lock to use when altering an instance of this struct | ||
| 84 | * @signal: the physical signal (aka channel) serving this | ||
| 85 | * physical channel right now | ||
| 86 | * @serving: the virtual channel currently being served by this | ||
| 87 | * physical channel | ||
| 88 | */ | ||
| 89 | struct pl08x_phy_chan { | ||
| 90 | unsigned int id; | ||
| 91 | void __iomem *base; | ||
| 92 | spinlock_t lock; | ||
| 93 | int signal; | ||
| 94 | struct pl08x_dma_chan *serving; | ||
| 95 | u32 csrc; | ||
| 96 | u32 cdst; | ||
| 97 | u32 clli; | ||
| 98 | u32 cctl; | ||
| 99 | u32 ccfg; | ||
| 100 | }; | ||
| 101 | |||
| 102 | /** | ||
| 103 | * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor | ||
| 104 | * @llis_bus: DMA memory address (physical) start for the LLIs | ||
| 105 | * @llis_va: virtual memory address start for the LLIs | ||
| 106 | */ | ||
| 107 | struct pl08x_txd { | ||
| 108 | struct dma_async_tx_descriptor tx; | ||
| 109 | struct list_head node; | ||
| 110 | enum dma_data_direction direction; | ||
| 111 | struct pl08x_bus_data srcbus; | ||
| 112 | struct pl08x_bus_data dstbus; | ||
| 113 | int len; | ||
| 114 | dma_addr_t llis_bus; | ||
| 115 | void *llis_va; | ||
| 116 | struct pl08x_channel_data *cd; | ||
| 117 | bool active; | ||
| 118 | /* | ||
| 119 | * Settings to be put into the physical channel when we | ||
| 120 | * trigger this txd | ||
| 121 | */ | ||
| 122 | u32 csrc; | ||
| 123 | u32 cdst; | ||
| 124 | u32 clli; | ||
| 125 | u32 cctl; | ||
| 126 | }; | ||
| 127 | |||
| 128 | /** | ||
| 129 | * struct pl08x_dma_chan_state - holds the PL08x specific virtual | ||
| 130 | * channel states | ||
| 131 | * @PL08X_CHAN_IDLE: the channel is idle | ||
| 132 | * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport | ||
| 133 | * channel and is running a transfer on it | ||
| 134 | * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport | ||
| 135 | * channel, but the transfer is currently paused | ||
| 136 | * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport | ||
| 137 | * channel to become available (only pertains to memcpy channels) | ||
| 138 | */ | ||
| 139 | enum pl08x_dma_chan_state { | ||
| 140 | PL08X_CHAN_IDLE, | ||
| 141 | PL08X_CHAN_RUNNING, | ||
| 142 | PL08X_CHAN_PAUSED, | ||
| 143 | PL08X_CHAN_WAITING, | ||
| 144 | }; | ||
| 145 | |||
| 146 | /** | ||
| 147 | * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel | ||
| 148 | * @chan: wrapped abstract channel | ||
| 149 | * @phychan: the physical channel utilized by this channel, if there is one | ||
| 150 | * @tasklet: tasklet scheduled by the IRQ to handle actual work etc | ||
| 151 | * @name: name of channel | ||
| 152 | * @cd: channel platform data | ||
| 153 | * @runtime_addr: address for RX/TX according to the runtime config | ||
| 154 | * @runtime_direction: current direction of this channel according to | ||
| 155 | * runtime config | ||
| 156 | * @lc: last completed transaction on this channel | ||
| 157 | * @desc_list: queued transactions pending on this channel | ||
| 158 | * @at: active transaction on this channel | ||
| 159 | * @lockflags: sometimes we let a lock be held across two function calls, | ||
| 160 | * especially prep/submit, and then the IRQ flags need to be stored | ||
| 161 | * in the channel state, here | ||
| 162 | * @lock: a lock for this channel data | ||
| 163 | * @host: a pointer to the host (internal use) | ||
| 164 | * @state: whether the channel is idle, paused, running etc | ||
| 165 | * @slave: whether this channel is a device (slave) or for memcpy | ||
| 166 | * @waiting: a TX descriptor on this channel which is waiting for | ||
| 167 | * a physical channel to become available | ||
| 168 | */ | ||
| 169 | struct pl08x_dma_chan { | ||
| 170 | struct dma_chan chan; | ||
| 171 | struct pl08x_phy_chan *phychan; | ||
| 172 | struct tasklet_struct tasklet; | ||
| 173 | char *name; | ||
| 174 | struct pl08x_channel_data *cd; | ||
| 175 | dma_addr_t runtime_addr; | ||
| 176 | enum dma_data_direction runtime_direction; | ||
| 177 | atomic_t last_issued; | ||
| 178 | dma_cookie_t lc; | ||
| 179 | struct list_head desc_list; | ||
| 180 | struct pl08x_txd *at; | ||
| 181 | unsigned long lockflags; | ||
| 182 | spinlock_t lock; | ||
| 183 | void *host; | ||
| 184 | enum pl08x_dma_chan_state state; | ||
| 185 | bool slave; | ||
| 186 | struct pl08x_txd *waiting; | ||
| 187 | }; | ||
| 188 | |||
| 189 | /** | ||
| 190 | * struct pl08x_platform_data - the platform configuration for the | ||
| 191 | * PL08x PrimeCells. | ||
| 192 | * @slave_channels: the channels defined for the different devices on the | ||
| 193 | * platform, all inclusive, including multiplexed channels. The available | ||
| 194 | * physical channels will be multiplexed around these signals as they | ||
| 195 | * are requested, just enumerate all possible channels. | ||
| 196 | * @get_signal: request a physical signal to be used for a DMA | ||
| 197 | * transfer immediately: if multiplexing or similar is blocking | ||
| 198 | * the use of the channel, the request can be denied by returning | ||
| 199 | * less than zero, else it returns the allocated signal number | ||
| 200 | * @put_signal: indicate to the platform that this physical signal is not | ||
| 201 | * running any DMA transfer and multiplexing can be recycled | ||
| 202 | * @bus_bit_lli: Bit[0] of the LLI address indicates which AHB bus master | ||
| 203 | * the LLI addresses are on: 0 for master 1, 1 for master 2. | ||
| 204 | */ | ||
| 205 | struct pl08x_platform_data { | ||
| 206 | struct pl08x_channel_data *slave_channels; | ||
| 207 | unsigned int num_slave_channels; | ||
| 208 | struct pl08x_channel_data memcpy_channel; | ||
| 209 | int (*get_signal)(struct pl08x_dma_chan *); | ||
| 210 | void (*put_signal)(struct pl08x_dma_chan *); | ||
| 211 | }; | ||
| 212 | |||
| 213 | #ifdef CONFIG_AMBA_PL08X | ||
| 214 | bool pl08x_filter_id(struct dma_chan *chan, void *chan_id); | ||
| 215 | #else | ||
| 216 | static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) | ||
| 217 | { | ||
| 218 | return false; | ||
| 219 | } | ||
| 220 | #endif | ||
| 221 | |||
| 222 | #endif /* AMBA_PL08X_H */ | ||
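To make the platform contract concrete, a board file might define its channel data and hooks roughly as below. This is a hedged sketch: every name and value (board_slave_channels, the UART bus_id, the FIFO address) is a hypothetical board-specific choice, not something mandated by this header:

	static struct pl08x_channel_data board_slave_channels[] = {
		{
			.bus_id = "uart0_tx",
			.min_signal = 0,	/* statically assigned signal */
			.max_signal = 0,	/* same as min_signal: no muxing */
			.addr = 0x80120000,	/* hypothetical UART TX FIFO */
			.single = true,		/* device requests single transfers */
		},
	};

	/* Static assignment: hand out min_signal, nothing to recycle */
	static int board_get_signal(struct pl08x_dma_chan *ch)
	{
		return ch->cd->min_signal;
	}

	static void board_put_signal(struct pl08x_dma_chan *ch)
	{
	}

	static struct pl08x_platform_data pl08x_pd = {
		.slave_channels		= board_slave_channels,
		.num_slave_channels	= ARRAY_SIZE(board_slave_channels),
		.get_signal		= board_get_signal,
		.put_signal		= board_put_signal,
	};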
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e2106495cc11..9d8688b92d8b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -64,13 +64,15 @@ enum dma_transaction_type { | |||
| 64 | DMA_PQ_VAL, | 64 | DMA_PQ_VAL, |
| 65 | DMA_MEMSET, | 65 | DMA_MEMSET, |
| 66 | DMA_INTERRUPT, | 66 | DMA_INTERRUPT, |
| 67 | DMA_SG, | ||
| 67 | DMA_PRIVATE, | 68 | DMA_PRIVATE, |
| 68 | DMA_ASYNC_TX, | 69 | DMA_ASYNC_TX, |
| 69 | DMA_SLAVE, | 70 | DMA_SLAVE, |
| 71 | DMA_CYCLIC, | ||
| 70 | }; | 72 | }; |
| 71 | 73 | ||
| 72 | /* last transaction type for creation of the capabilities mask */ | 74 | /* last transaction type for creation of the capabilities mask */ |
| 73 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) | 75 | #define DMA_TX_TYPE_END (DMA_CYCLIC + 1) |
| 74 | 76 | ||
| 75 | 77 | ||
| 76 | /** | 78 | /** |
| @@ -119,12 +121,15 @@ enum dma_ctrl_flags { | |||
| 119 | * configuration data in statically from the platform). An additional | 121 | * configuration data in statically from the platform). An additional |
| 120 | * argument of struct dma_slave_config must be passed in with this | 122 | * argument of struct dma_slave_config must be passed in with this |
| 121 | * command. | 123 | * command. |
| 124 | * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller | ||
| 125 | * into external start mode. | ||
| 122 | */ | 126 | */ |
| 123 | enum dma_ctrl_cmd { | 127 | enum dma_ctrl_cmd { |
| 124 | DMA_TERMINATE_ALL, | 128 | DMA_TERMINATE_ALL, |
| 125 | DMA_PAUSE, | 129 | DMA_PAUSE, |
| 126 | DMA_RESUME, | 130 | DMA_RESUME, |
| 127 | DMA_SLAVE_CONFIG, | 131 | DMA_SLAVE_CONFIG, |
| 132 | FSLDMA_EXTERNAL_START, | ||
| 128 | }; | 133 | }; |
| 129 | 134 | ||
| 130 | /** | 135 | /** |
| @@ -316,14 +321,14 @@ struct dma_async_tx_descriptor { | |||
| 316 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 321 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
| 317 | dma_async_tx_callback callback; | 322 | dma_async_tx_callback callback; |
| 318 | void *callback_param; | 323 | void *callback_param; |
| 319 | #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | 324 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
| 320 | struct dma_async_tx_descriptor *next; | 325 | struct dma_async_tx_descriptor *next; |
| 321 | struct dma_async_tx_descriptor *parent; | 326 | struct dma_async_tx_descriptor *parent; |
| 322 | spinlock_t lock; | 327 | spinlock_t lock; |
| 323 | #endif | 328 | #endif |
| 324 | }; | 329 | }; |
| 325 | 330 | ||
| 326 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | 331 | #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
| 327 | static inline void txd_lock(struct dma_async_tx_descriptor *txd) | 332 | static inline void txd_lock(struct dma_async_tx_descriptor *txd) |
| 328 | { | 333 | { |
| 329 | } | 334 | } |
| @@ -422,6 +427,9 @@ struct dma_tx_state { | |||
| 422 | * @device_prep_dma_memset: prepares a memset operation | 427 | * @device_prep_dma_memset: prepares a memset operation |
| 423 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 428 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
| 424 | * @device_prep_slave_sg: prepares a slave dma operation | 429 | * @device_prep_slave_sg: prepares a slave dma operation |
| 430 | * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. | ||
| 431 | * The function takes a buffer of size buf_len. The callback function will | ||
| 432 | * be called after period_len bytes have been transferred. | ||
| 425 | * @device_control: manipulate all pending operations on a channel, returns | 433 | * @device_control: manipulate all pending operations on a channel, returns |
| 426 | * zero or error code | 434 | * zero or error code |
| 427 | * @device_tx_status: poll for transaction completion, the optional | 435 | * @device_tx_status: poll for transaction completion, the optional |
| @@ -473,11 +481,19 @@ struct dma_device { | |||
| 473 | unsigned long flags); | 481 | unsigned long flags); |
| 474 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 482 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
| 475 | struct dma_chan *chan, unsigned long flags); | 483 | struct dma_chan *chan, unsigned long flags); |
| 484 | struct dma_async_tx_descriptor *(*device_prep_dma_sg)( | ||
| 485 | struct dma_chan *chan, | ||
| 486 | struct scatterlist *dst_sg, unsigned int dst_nents, | ||
| 487 | struct scatterlist *src_sg, unsigned int src_nents, | ||
| 488 | unsigned long flags); | ||
| 476 | 489 | ||
| 477 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | 490 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( |
| 478 | struct dma_chan *chan, struct scatterlist *sgl, | 491 | struct dma_chan *chan, struct scatterlist *sgl, |
| 479 | unsigned int sg_len, enum dma_data_direction direction, | 492 | unsigned int sg_len, enum dma_data_direction direction, |
| 480 | unsigned long flags); | 493 | unsigned long flags); |
| 494 | struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( | ||
| 495 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | ||
| 496 | size_t period_len, enum dma_data_direction direction); | ||
| 481 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 497 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
| 482 | unsigned long arg); | 498 | unsigned long arg); |
| 483 | 499 | ||
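As the kernel-doc above describes, a cyclic transfer reuses one buffer as a ring and fires the callback every period_len bytes, which is exactly the shape audio drivers need. A hedged usage sketch; chan, buf_phys, period_bytes, substream and the callback are placeholders, not names from this patch:

	/* Ring of 4 periods; callback runs after every period_len bytes */
	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
						    4 * period_bytes, /* buf_len */
						    period_bytes,     /* period_len */
						    DMA_TO_DEVICE);
	if (!desc)
		return -EBUSY;

	desc->callback = audio_period_elapsed;	/* hypothetical handler */
	desc->callback_param = substream;
	dmaengine_submit(desc);
	chan->device->device_issue_pending(chan);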
| @@ -487,6 +503,40 @@ struct dma_device { | |||
| 487 | void (*device_issue_pending)(struct dma_chan *chan); | 503 | void (*device_issue_pending)(struct dma_chan *chan); |
| 488 | }; | 504 | }; |
| 489 | 505 | ||
| 506 | static inline int dmaengine_device_control(struct dma_chan *chan, | ||
| 507 | enum dma_ctrl_cmd cmd, | ||
| 508 | unsigned long arg) | ||
| 509 | { | ||
| 510 | return chan->device->device_control(chan, cmd, arg); | ||
| 511 | } | ||
| 512 | |||
| 513 | static inline int dmaengine_slave_config(struct dma_chan *chan, | ||
| 514 | struct dma_slave_config *config) | ||
| 515 | { | ||
| 516 | return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, | ||
| 517 | (unsigned long)config); | ||
| 518 | } | ||
| 519 | |||
| 520 | static inline int dmaengine_terminate_all(struct dma_chan *chan) | ||
| 521 | { | ||
| 522 | return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); | ||
| 523 | } | ||
| 524 | |||
| 525 | static inline int dmaengine_pause(struct dma_chan *chan) | ||
| 526 | { | ||
| 527 | return dmaengine_device_control(chan, DMA_PAUSE, 0); | ||
| 528 | } | ||
| 529 | |||
| 530 | static inline int dmaengine_resume(struct dma_chan *chan) | ||
| 531 | { | ||
| 532 | return dmaengine_device_control(chan, DMA_RESUME, 0); | ||
| 533 | } | ||
| 534 | |||
| 535 | static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc) | ||
| 536 | { | ||
| 537 | return desc->tx_submit(desc); | ||
| 538 | } | ||
| 539 | |||
| 490 | static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) | 540 | static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) |
| 491 | { | 541 | { |
| 492 | size_t mask; | 542 | size_t mask; |
| @@ -606,11 +656,11 @@ static inline void net_dmaengine_put(void) | |||
| 606 | #ifdef CONFIG_ASYNC_TX_DMA | 656 | #ifdef CONFIG_ASYNC_TX_DMA |
| 607 | #define async_dmaengine_get() dmaengine_get() | 657 | #define async_dmaengine_get() dmaengine_get() |
| 608 | #define async_dmaengine_put() dmaengine_put() | 658 | #define async_dmaengine_put() dmaengine_put() |
| 609 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | 659 | #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH |
| 610 | #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) | 660 | #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) |
| 611 | #else | 661 | #else |
| 612 | #define async_dma_find_channel(type) dma_find_channel(type) | 662 | #define async_dma_find_channel(type) dma_find_channel(type) |
| 613 | #endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ | 663 | #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */ |
| 614 | #else | 664 | #else |
| 615 | static inline void async_dmaengine_get(void) | 665 | static inline void async_dmaengine_get(void) |
| 616 | { | 666 | { |
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h index d9d08b6269b6..10496bd24c5c 100644 --- a/include/linux/intel_mid_dma.h +++ b/include/linux/intel_mid_dma.h | |||
| @@ -27,14 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #include <linux/dmaengine.h> | 28 | #include <linux/dmaengine.h> |
| 29 | 29 | ||
| 30 | /*DMA transaction width, src and dstn width would be same | 30 | #define DMA_PREP_CIRCULAR_LIST (1 << 10) |
| 31 | The DMA length must be width aligned, | ||
| 32 | for 32 bit width the length must be 32 bit (4bytes) aligned only*/ | ||
| 33 | enum intel_mid_dma_width { | ||
| 34 | LNW_DMA_WIDTH_8BIT = 0x0, | ||
| 35 | LNW_DMA_WIDTH_16BIT = 0x1, | ||
| 36 | LNW_DMA_WIDTH_32BIT = 0x2, | ||
| 37 | }; | ||
| 38 | 31 | ||
| 39 | /*DMA mode configurations*/ | 32 | /*DMA mode configurations*/ |
| 40 | enum intel_mid_dma_mode { | 33 | enum intel_mid_dma_mode { |
| @@ -69,18 +62,15 @@ enum intel_mid_dma_msize { | |||
| 69 | * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) | 62 | * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) |
| 70 | * @src_msize: Source DMA burst size | 63 | * @src_msize: Source DMA burst size |
| 71 | * @dst_msize: Dst DMA burst size | 64 | * @dst_msize: Dst DMA burst size |
| 65 | * @per_addr: Peripheral address | ||
| 72 | * @device_instance: DMA peripheral device instance, we can have multiple | 66 | * @device_instance: DMA peripheral device instance, we can have multiple |
| 73 | * peripheral devices connected to a single DMAC | 67 | * peripheral devices connected to a single DMAC |
| 74 | */ | 68 | */ |
| 75 | struct intel_mid_dma_slave { | 69 | struct intel_mid_dma_slave { |
| 76 | enum dma_data_direction dirn; | ||
| 77 | enum intel_mid_dma_width src_width; /*width of DMA src txn*/ | ||
| 78 | enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/ | ||
| 79 | enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ | 70 | enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ |
| 80 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | 71 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ |
| 81 | enum intel_mid_dma_msize src_msize; /*size if src burst*/ | ||
| 82 | enum intel_mid_dma_msize dst_msize; /*size of dst burst*/ | ||
| 83 | unsigned int device_instance; /*0, 1 for peripheral instance*/ | 72 | unsigned int device_instance; /*0, 1 for peripheral instance*/ |
| 73 | struct dma_slave_config dma_slave; | ||
| 84 | }; | 74 | }; |
| 85 | 75 | ||
| 86 | #endif /*__INTEL_MID_DMA_H__*/ | 76 | #endif /*__INTEL_MID_DMA_H__*/ |
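With the private width and burst-size fields gone, clients describe those parameters through the embedded dma_slave_config instead and keep only the controller-specific bits in intel_mid_dma_slave. A hedged sketch; the field values are illustrative, and LNW_DMA_HW_HS and LNW_DMA_MEM_TO_PER are assumed enumerator names from the enums this header retains:

	struct intel_mid_dma_slave mid_slave = {
		.hs_mode	 = LNW_DMA_HW_HS,	/* hardware handshaking */
		.cfg_mode	 = LNW_DMA_MEM_TO_PER,	/* memory to peripheral */
		.device_instance = 0,
	};

	/* Generic transfer parameters now live in the embedded struct */
	mid_slave.dma_slave.direction	   = DMA_TO_DEVICE;
	mid_slave.dma_slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	mid_slave.dma_slave.dst_maxburst   = 16;

	chan->private = &mid_slave;	/* consumed by the driver at prep time */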
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 995840664a5f..28b42b9274d0 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -1217,6 +1217,19 @@ config ATOMIC64_SELFTEST | |||
| 1217 | 1217 | ||
| 1218 | If unsure, say N. | 1218 | If unsure, say N. |
| 1219 | 1219 | ||
| 1220 | config ASYNC_RAID6_TEST | ||
| 1221 | tristate "Self test for hardware accelerated raid6 recovery" | ||
| 1222 | depends on ASYNC_RAID6_RECOV | ||
| 1223 | select ASYNC_MEMCPY | ||
| 1224 | ---help--- | ||
| 1225 | This is a one-shot self test that permutes through the | ||
| 1226 | recovery of all the possible two-disk failure scenarios for an | ||
| 1227 | N-disk array. Recovery is performed with the asynchronous | ||
| 1228 | raid6 recovery routines, and will optionally use an offload | ||
| 1229 | engine if one is available. | ||
| 1230 | |||
| 1231 | If unsure, say N. | ||
| 1232 | |||
| 1220 | source "samples/Kconfig" | 1233 | source "samples/Kconfig" |
| 1221 | 1234 | ||
| 1222 | source "lib/Kconfig.kgdb" | 1235 | source "lib/Kconfig.kgdb" |
