 drivers/dma/fsldma.c | 550
 1 file changed, 275 insertions(+), 275 deletions(-)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 6a905929ef01..7b5f88cb495b 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
 #include <asm/fsldma.h>
 #include "fsldma.h"
 
-static void dma_init(struct fsldma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *chan)
 {
        /* Reset the channel */
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, 0, 32);
+       DMA_OUT(chan, &chan->regs->mr, 0, 32);
 
-       switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+       switch (chan->feature & FSL_DMA_IP_MASK) {
        case FSL_DMA_IP_85XX:
                /* Set the channel to below modes:
                 * EIE - Error interrupt enable
                 * EOSIE - End of segments interrupt enable (basic mode)
                 * EOLNIE - End of links interrupt enable
                 */
-               DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EIE
+               DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
                        | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
                break;
        case FSL_DMA_IP_83XX:
@@ -57,154 +57,154 @@ static void dma_init(struct fsldma_chan *fsl_chan)
                 * EOTIE - End-of-transfer interrupt enable
                 * PRC_RM - PCI read multiple
                 */
-               DMA_OUT(fsl_chan, &fsl_chan->regs->mr, FSL_DMA_MR_EOTIE
+               DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
                        | FSL_DMA_MR_PRC_RM, 32);
                break;
        }
 
 }
 
-static void set_sr(struct fsldma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *chan, u32 val)
 {
-       DMA_OUT(fsl_chan, &fsl_chan->regs->sr, val, 32);
+       DMA_OUT(chan, &chan->regs->sr, val, 32);
 }
 
-static u32 get_sr(struct fsldma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *chan)
 {
-       return DMA_IN(fsl_chan, &fsl_chan->regs->sr, 32);
+       return DMA_IN(chan, &chan->regs->sr, 32);
 }
 
-static void set_desc_cnt(struct fsldma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *chan,
                                struct fsl_dma_ld_hw *hw, u32 count)
 {
-       hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+       hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static void set_desc_src(struct fsldma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *chan,
                                struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
        u64 snoop_bits;
 
-       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+       snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
                ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-       hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+       hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static void set_desc_dst(struct fsldma_chan *fsl_chan,
+static void set_desc_dst(struct fsldma_chan *chan,
                                struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
        u64 snoop_bits;
 
-       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+       snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
                ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-       hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dst, 64);
+       hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static void set_desc_next(struct fsldma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *chan,
                                struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
        u64 snoop_bits;
 
-       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+       snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
                ? FSL_DMA_SNEN : 0;
-       hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
+       hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
 }
 
-static void set_cdar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
 {
-       DMA_OUT(fsl_chan, &fsl_chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
+       DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
 }
 
-static dma_addr_t get_cdar(struct fsldma_chan *fsl_chan)
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
 {
-       return DMA_IN(fsl_chan, &fsl_chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
+       return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
 }
 
-static void set_ndar(struct fsldma_chan *fsl_chan, dma_addr_t addr)
+static void set_ndar(struct fsldma_chan *chan, dma_addr_t addr)
 {
-       DMA_OUT(fsl_chan, &fsl_chan->regs->ndar, addr, 64);
+       DMA_OUT(chan, &chan->regs->ndar, addr, 64);
 }
 
-static dma_addr_t get_ndar(struct fsldma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *chan)
 {
-       return DMA_IN(fsl_chan, &fsl_chan->regs->ndar, 64);
+       return DMA_IN(chan, &chan->regs->ndar, 64);
 }
 
-static u32 get_bcr(struct fsldma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *chan)
 {
-       return DMA_IN(fsl_chan, &fsl_chan->regs->bcr, 32);
+       return DMA_IN(chan, &chan->regs->bcr, 32);
 }
 
-static int dma_is_idle(struct fsldma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *chan)
 {
-       u32 sr = get_sr(fsl_chan);
+       u32 sr = get_sr(chan);
        return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
 }
 
-static void dma_start(struct fsldma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *chan)
 {
        u32 mode;
 
-       mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+       mode = DMA_IN(chan, &chan->regs->mr, 32);
 
-       if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
-               if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
-                       DMA_OUT(fsl_chan, &fsl_chan->regs->bcr, 0, 32);
+       if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+               if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+                       DMA_OUT(chan, &chan->regs->bcr, 0, 32);
                        mode |= FSL_DMA_MR_EMP_EN;
                } else {
                        mode &= ~FSL_DMA_MR_EMP_EN;
                }
        }
 
-       if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
+       if (chan->feature & FSL_DMA_CHAN_START_EXT)
                mode |= FSL_DMA_MR_EMS_EN;
        else
                mode |= FSL_DMA_MR_CS;
 
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
 
-static void dma_halt(struct fsldma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *chan)
 {
        u32 mode;
        int i;
 
-       mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+       mode = DMA_IN(chan, &chan->regs->mr, 32);
        mode |= FSL_DMA_MR_CA;
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
        mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 
        for (i = 0; i < 100; i++) {
-               if (dma_is_idle(fsl_chan))
+               if (dma_is_idle(chan))
                        break;
                udelay(10);
        }
 
-       if (i >= 100 && !dma_is_idle(fsl_chan))
-               dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+       if (i >= 100 && !dma_is_idle(chan))
+               dev_err(chan->dev, "DMA halt timeout!\n");
 }
 
-static void set_ld_eol(struct fsldma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *chan,
                                struct fsl_desc_sw *desc)
 {
        u64 snoop_bits;
 
-       snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+       snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
                ? FSL_DMA_SNEN : 0;
 
-       desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
-               DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+       desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+               DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
                | snoop_bits, 64);
 }
 
-static void append_ld_queue(struct fsldma_chan *fsl_chan,
+static void append_ld_queue(struct fsldma_chan *chan,
                                struct fsl_desc_sw *new_desc)
 {
-       struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
+       struct fsl_desc_sw *queue_tail = to_fsl_desc(chan->ld_queue.prev);
 
-       if (list_empty(&fsl_chan->ld_queue))
+       if (list_empty(&chan->ld_queue))
                return;
 
        /* Link to the new descriptor physical address and
@@ -214,15 +214,15 @@ static void append_ld_queue(struct fsldma_chan *fsl_chan,
         *
         * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
         */
-       queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
+       queue_tail->hw.next_ln_addr = CPU_TO_DMA(chan,
                        new_desc->async_tx.phys | FSL_DMA_EOSIE |
-                       (((fsl_chan->feature & FSL_DMA_IP_MASK)
+                       (((chan->feature & FSL_DMA_IP_MASK)
                        == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
 }
 
 /**
  * fsl_chan_set_src_loop_size - Set source address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @size : Address loop size, 0 for disable loop
  *
  * The set source address hold transfer size. The source
@@ -231,11 +231,11 @@ static void append_ld_queue(struct fsldma_chan *fsl_chan,
  * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
  * SA + 1 ... and so on.
  */
-static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
 {
        u32 mode;
 
-       mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+       mode = DMA_IN(chan, &chan->regs->mr, 32);
 
        switch (size) {
        case 0:
@@ -249,12 +249,12 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
                break;
        }
 
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
 
 /**
  * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @size : Address loop size, 0 for disable loop
  *
  * The set destination address hold transfer size. The destination
@@ -263,11 +263,11 @@ static void fsl_chan_set_src_loop_size(struct fsldma_chan *fsl_chan, int size)
  * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
  * TA + 1 ... and so on.
  */
-static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
 {
        u32 mode;
 
-       mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+       mode = DMA_IN(chan, &chan->regs->mr, 32);
 
        switch (size) {
        case 0:
@@ -281,12 +281,12 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
                break;
        }
 
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
 
 /**
  * fsl_chan_set_request_count - Set DMA Request Count for external control
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @size : Number of bytes to transfer in a single request
  *
  * The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -296,38 +296,38 @@ static void fsl_chan_set_dst_loop_size(struct fsldma_chan *fsl_chan, int size)
  *
  * A size of 0 disables external pause control. The maximum size is 1024.
  */
-static void fsl_chan_set_request_count(struct fsldma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
 {
        u32 mode;
 
        BUG_ON(size > 1024);
 
-       mode = DMA_IN(fsl_chan, &fsl_chan->regs->mr, 32);
+       mode = DMA_IN(chan, &chan->regs->mr, 32);
        mode |= (__ilog2(size) << 24) & 0x0f000000;
 
-       DMA_OUT(fsl_chan, &fsl_chan->regs->mr, mode, 32);
+       DMA_OUT(chan, &chan->regs->mr, mode, 32);
 }
 
 /**
  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @enable : 0 is disabled, 1 is enabled.
  *
  * The Freescale DMA channel can be controlled by the external signal DREQ#.
  * The DMA Request Count feature should be used in addition to this feature
  * to set the number of bytes to transfer before pausing the channel.
  */
-static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
 {
        if (enable)
-               fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+               chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
        else
-               fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+               chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
 }
 
 /**
  * fsl_chan_toggle_ext_start - Toggle channel external start status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  * @enable : 0 is disabled, 1 is enabled.
  *
  * If enable the external start, the channel can be started by an
@@ -335,26 +335,26 @@ static void fsl_chan_toggle_ext_pause(struct fsldma_chan *fsl_chan, int enable)
  * transfer immediately. The DMA channel will wait for the
  * control pin asserted.
  */
-static void fsl_chan_toggle_ext_start(struct fsldma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
 {
        if (enable)
-               fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+               chan->feature |= FSL_DMA_CHAN_START_EXT;
        else
-               fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+               chan->feature &= ~FSL_DMA_CHAN_START_EXT;
 }
 
 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-       struct fsldma_chan *fsl_chan = to_fsl_chan(tx->chan);
+       struct fsldma_chan *chan = to_fsl_chan(tx->chan);
        struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
        struct fsl_desc_sw *child;
        unsigned long flags;
        dma_cookie_t cookie;
 
        /* cookie increment and adding to ld_queue must be atomic */
-       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+       spin_lock_irqsave(&chan->desc_lock, flags);
 
-       cookie = fsl_chan->common.cookie;
+       cookie = chan->common.cookie;
        list_for_each_entry(child, &desc->tx_list, node) {
                cookie++;
                if (cookie < 0)
@@ -363,33 +363,33 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
                desc->async_tx.cookie = cookie;
        }
 
-       fsl_chan->common.cookie = cookie;
-       append_ld_queue(fsl_chan, desc);
-       list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
+       chan->common.cookie = cookie;
+       append_ld_queue(chan, desc);
+       list_splice_init(&desc->tx_list, chan->ld_queue.prev);
 
-       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
 
        return cookie;
 }
 
 /**
  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  *
  * Return - The descriptor allocated. NULL for failed.
  */
 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
-                                       struct fsldma_chan *fsl_chan)
+                                       struct fsldma_chan *chan)
 {
        dma_addr_t pdesc;
        struct fsl_desc_sw *desc_sw;
 
-       desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
+       desc_sw = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
        if (desc_sw) {
                memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
                INIT_LIST_HEAD(&desc_sw->tx_list);
                dma_async_tx_descriptor_init(&desc_sw->async_tx,
-                                               &fsl_chan->common);
+                                               &chan->common);
                desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
                desc_sw->async_tx.phys = pdesc;
        }
@@ -400,29 +400,29 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
 
 /**
  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  *
  * This function will create a dma pool for descriptor allocation.
  *
  * Return - The number of descriptors allocated.
  */
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
 {
-       struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+       struct fsldma_chan *chan = to_fsl_chan(dchan);
 
        /* Has this channel already been allocated? */
-       if (fsl_chan->desc_pool)
+       if (chan->desc_pool)
                return 1;
 
        /* We need the descriptor to be aligned to 32bytes
         * for meeting FSL DMA specification requirement.
         */
-       fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
-                       fsl_chan->dev, sizeof(struct fsl_desc_sw),
+       chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+                       chan->dev, sizeof(struct fsl_desc_sw),
                        32, 0);
-       if (!fsl_chan->desc_pool) {
-               dev_err(fsl_chan->dev, "No memory for channel %d "
-                       "descriptor dma pool.\n", fsl_chan->id);
+       if (!chan->desc_pool) {
+               dev_err(chan->dev, "No memory for channel %d "
+                       "descriptor dma pool.\n", chan->id);
                return 0;
        }
 
@@ -431,45 +431,45 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
 
 /**
  * fsl_dma_free_chan_resources - Free all resources of the channel.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  */
-static void fsl_dma_free_chan_resources(struct dma_chan *chan)
+static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
 {
-       struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+       struct fsldma_chan *chan = to_fsl_chan(dchan);
        struct fsl_desc_sw *desc, *_desc;
        unsigned long flags;
 
-       dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
-       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
-       list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+       dev_dbg(chan->dev, "Free all channel resources.\n");
+       spin_lock_irqsave(&chan->desc_lock, flags);
+       list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
 #ifdef FSL_DMA_LD_DEBUG
-               dev_dbg(fsl_chan->dev,
+               dev_dbg(chan->dev,
                                "LD %p will be released.\n", desc);
 #endif
                list_del(&desc->node);
                /* free link descriptor */
-               dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+               dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
-       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-       dma_pool_destroy(fsl_chan->desc_pool);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
+       dma_pool_destroy(chan->desc_pool);
 
-       fsl_chan->desc_pool = NULL;
+       chan->desc_pool = NULL;
 }
 
 static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
+fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
 {
-       struct fsldma_chan *fsl_chan;
+       struct fsldma_chan *chan;
        struct fsl_desc_sw *new;
 
-       if (!chan)
+       if (!dchan)
                return NULL;
 
-       fsl_chan = to_fsl_chan(chan);
+       chan = to_fsl_chan(dchan);
 
-       new = fsl_dma_alloc_descriptor(fsl_chan);
+       new = fsl_dma_alloc_descriptor(chan);
        if (!new) {
-               dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+               dev_err(chan->dev, "No free memory for link descriptor\n");
                return NULL;
        }
 
@@ -480,51 +480,51 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
        list_add_tail(&new->node, &new->tx_list);
 
        /* Set End-of-link to the last link descriptor of new list*/
-       set_ld_eol(fsl_chan, new);
+       set_ld_eol(chan, new);
 
        return &new->async_tx;
 }
 
 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
-       struct dma_chan *chan, dma_addr_t dma_dst, dma_addr_t dma_src,
+       struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
        size_t len, unsigned long flags)
 {
-       struct fsldma_chan *fsl_chan;
+       struct fsldma_chan *chan;
        struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
        struct list_head *list;
        size_t copy;
 
-       if (!chan)
+       if (!dchan)
                return NULL;
 
        if (!len)
                return NULL;
 
-       fsl_chan = to_fsl_chan(chan);
+       chan = to_fsl_chan(dchan);
 
        do {
 
                /* Allocate the link descriptor from DMA pool */
-               new = fsl_dma_alloc_descriptor(fsl_chan);
+               new = fsl_dma_alloc_descriptor(chan);
                if (!new) {
-                       dev_err(fsl_chan->dev,
+                       dev_err(chan->dev,
                                        "No free memory for link descriptor\n");
                        goto fail;
                }
 #ifdef FSL_DMA_LD_DEBUG
-               dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+               dev_dbg(chan->dev, "new link desc alloc %p\n", new);
 #endif
 
                copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
 
-               set_desc_cnt(fsl_chan, &new->hw, copy);
-               set_desc_src(fsl_chan, &new->hw, dma_src);
-               set_desc_dst(fsl_chan, &new->hw, dma_dst);
+               set_desc_cnt(chan, &new->hw, copy);
+               set_desc_src(chan, &new->hw, dma_src);
+               set_desc_dst(chan, &new->hw, dma_dst);
 
                if (!first)
                        first = new;
                else
-                       set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+                       set_desc_next(chan, &prev->hw, new->async_tx.phys);
 
                new->async_tx.cookie = 0;
                async_tx_ack(&new->async_tx);
@@ -542,7 +542,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
        new->async_tx.cookie = -EBUSY;
 
        /* Set End-of-link to the last link descriptor of new list*/
-       set_ld_eol(fsl_chan, new);
+       set_ld_eol(chan, new);
 
        return &first->async_tx;
 
@@ -553,7 +553,7 @@ fail:
        list = &first->tx_list;
        list_for_each_entry_safe_reverse(new, prev, list, node) {
                list_del(&new->node);
-               dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+               dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
        }
 
        return NULL;
@@ -572,10 +572,10 @@ fail:
  * chan->private variable.
  */
 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
-       struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+       struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_data_direction direction, unsigned long flags)
 {
-       struct fsldma_chan *fsl_chan;
+       struct fsldma_chan *chan;
        struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
        struct fsl_dma_slave *slave;
        struct list_head *tx_list;
@@ -588,14 +588,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
        struct fsl_dma_hw_addr *hw;
        dma_addr_t dma_dst, dma_src;
 
-       if (!chan)
+       if (!dchan)
                return NULL;
 
-       if (!chan->private)
+       if (!dchan->private)
                return NULL;
 
-       fsl_chan = to_fsl_chan(chan);
-       slave = chan->private;
+       chan = to_fsl_chan(dchan);
+       slave = dchan->private;
 
        if (list_empty(&slave->addresses))
                return NULL;
@@ -644,14 +644,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
                        }
 
                        /* Allocate the link descriptor from DMA pool */
-                       new = fsl_dma_alloc_descriptor(fsl_chan);
+                       new = fsl_dma_alloc_descriptor(chan);
                        if (!new) {
-                               dev_err(fsl_chan->dev, "No free memory for "
+                               dev_err(chan->dev, "No free memory for "
                                                "link descriptor\n");
                                goto fail;
                        }
 #ifdef FSL_DMA_LD_DEBUG
-                       dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+                       dev_dbg(chan->dev, "new link desc alloc %p\n", new);
 #endif
 
                        /*
@@ -678,9 +678,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
                        }
 
                        /* Fill in the descriptor */
-                       set_desc_cnt(fsl_chan, &new->hw, copy);
-                       set_desc_src(fsl_chan, &new->hw, dma_src);
-                       set_desc_dst(fsl_chan, &new->hw, dma_dst);
+                       set_desc_cnt(chan, &new->hw, copy);
+                       set_desc_src(chan, &new->hw, dma_src);
+                       set_desc_dst(chan, &new->hw, dma_dst);
 
                        /*
                         * If this is not the first descriptor, chain the
@@ -689,7 +689,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
                        if (!first) {
                                first = new;
                        } else {
-                               set_desc_next(fsl_chan, &prev->hw,
+                               set_desc_next(chan, &prev->hw,
                                                new->async_tx.phys);
                        }
 
@@ -715,23 +715,23 @@ finished:
        new->async_tx.cookie = -EBUSY;
 
        /* Set End-of-link to the last link descriptor of new list */
-       set_ld_eol(fsl_chan, new);
+       set_ld_eol(chan, new);
 
        /* Enable extra controller features */
-       if (fsl_chan->set_src_loop_size)
-               fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+       if (chan->set_src_loop_size)
+               chan->set_src_loop_size(chan, slave->src_loop_size);
 
-       if (fsl_chan->set_dst_loop_size)
-               fsl_chan->set_dst_loop_size(fsl_chan, slave->dst_loop_size);
+       if (chan->set_dst_loop_size)
+               chan->set_dst_loop_size(chan, slave->dst_loop_size);
 
-       if (fsl_chan->toggle_ext_start)
-               fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+       if (chan->toggle_ext_start)
+               chan->toggle_ext_start(chan, slave->external_start);
 
-       if (fsl_chan->toggle_ext_pause)
-               fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+       if (chan->toggle_ext_pause)
+               chan->toggle_ext_pause(chan, slave->external_pause);
 
-       if (fsl_chan->set_request_count)
-               fsl_chan->set_request_count(fsl_chan, slave->request_count);
+       if (chan->set_request_count)
+               chan->set_request_count(chan, slave->request_count);
 
        return &first->async_tx;
 
@@ -751,62 +751,62 @@ fail:
        tx_list = &first->tx_list;
        list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
                list_del_init(&new->node);
-               dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
+               dma_pool_free(chan->desc_pool, new, new->async_tx.phys);
        }
 
        return NULL;
 }
 
-static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
-       struct fsldma_chan *fsl_chan;
+       struct fsldma_chan *chan;
        struct fsl_desc_sw *desc, *tmp;
        unsigned long flags;
 
-       if (!chan)
+       if (!dchan)
                return;
 
-       fsl_chan = to_fsl_chan(chan);
+       chan = to_fsl_chan(dchan);
 
        /* Halt the DMA engine */
-       dma_halt(fsl_chan);
+       dma_halt(chan);
 
-       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+       spin_lock_irqsave(&chan->desc_lock, flags);
 
        /* Remove and free all of the descriptors in the LD queue */
-       list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
+       list_for_each_entry_safe(desc, tmp, &chan->ld_queue, node) {
                list_del(&desc->node);
-               dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+               dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
 
-       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  */
-static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
 {
        struct fsl_desc_sw *cur_desc, *desc;
        dma_addr_t ld_phy;
 
-       ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+       ld_phy = get_cdar(chan) & FSL_DMA_NLDA_MASK;
 
        if (ld_phy) {
                cur_desc = NULL;
-               list_for_each_entry(desc, &fsl_chan->ld_queue, node)
+               list_for_each_entry(desc, &chan->ld_queue, node)
                        if (desc->async_tx.phys == ld_phy) {
                                cur_desc = desc;
                                break;
                        }
 
                if (cur_desc && cur_desc->async_tx.cookie) {
-                       if (dma_is_idle(fsl_chan))
-                               fsl_chan->completed_cookie =
+                       if (dma_is_idle(chan))
+                               chan->completed_cookie =
                                        cur_desc->async_tx.cookie;
                        else
-                               fsl_chan->completed_cookie =
+                               chan->completed_cookie =
                                        cur_desc->async_tx.cookie - 1;
                }
        }
@@ -814,27 +814,27 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *fsl_chan)
 
 /**
  * fsl_chan_ld_cleanup - Clean up link descriptors
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  *
  * This function clean up the ld_queue of DMA channel.
  * If 'in_intr' is set, the function will move the link descriptor to
  * the recycle list. Otherwise, free it directly.
  */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
 {
        struct fsl_desc_sw *desc, *_desc;
        unsigned long flags;
 
-       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+       spin_lock_irqsave(&chan->desc_lock, flags);
 
-       dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
-                       fsl_chan->completed_cookie);
-       list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+       dev_dbg(chan->dev, "chan completed_cookie = %d\n",
+                       chan->completed_cookie);
+       list_for_each_entry_safe(desc, _desc, &chan->ld_queue, node) {
                dma_async_tx_callback callback;
                void *callback_param;
 
                if (dma_async_is_complete(desc->async_tx.cookie,
-                       fsl_chan->completed_cookie, fsl_chan->common.cookie)
+                       chan->completed_cookie, chan->common.cookie)
                                == DMA_IN_PROGRESS)
                        break;
 
@@ -844,119 +844,119 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *fsl_chan)
                /* Remove from ld_queue list */
                list_del(&desc->node);
 
-               dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
+               dev_dbg(chan->dev, "link descriptor %p will be recycle.\n",
                                desc);
-               dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+               dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
 
                /* Run the link descriptor callback function */
                if (callback) {
-                       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-                       dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
+                       spin_unlock_irqrestore(&chan->desc_lock, flags);
+                       dev_dbg(chan->dev, "link descriptor %p callback\n",
                                        desc);
                        callback(callback_param);
-                       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+                       spin_lock_irqsave(&chan->desc_lock, flags);
                }
        }
-       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
  * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  */
-static void fsl_chan_xfer_ld_queue(struct fsldma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
        struct list_head *ld_node;
        dma_addr_t next_dst_addr;
        unsigned long flags;
 
-       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+       spin_lock_irqsave(&chan->desc_lock, flags);
 
-       if (!dma_is_idle(fsl_chan))
+       if (!dma_is_idle(chan))
                goto out_unlock;
 
-       dma_halt(fsl_chan);
+       dma_halt(chan);
 
        /* If there are some link descriptors
         * not transfered in queue. We need to start it.
         */
 
        /* Find the first un-transfer desciptor */
-       for (ld_node = fsl_chan->ld_queue.next;
-               (ld_node != &fsl_chan->ld_queue)
+       for (ld_node = chan->ld_queue.next;
+               (ld_node != &chan->ld_queue)
                        && (dma_async_is_complete(
                                to_fsl_desc(ld_node)->async_tx.cookie,
-                               fsl_chan->completed_cookie,
-                               fsl_chan->common.cookie) == DMA_SUCCESS);
+                               chan->completed_cookie,
+                               chan->common.cookie) == DMA_SUCCESS);
                ld_node = ld_node->next);
 
-       if (ld_node != &fsl_chan->ld_queue) {
+       if (ld_node != &chan->ld_queue) {
                /* Get the ld start address from ld_queue */
                next_dst_addr = to_fsl_desc(ld_node)->async_tx.phys;
-               dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
+               dev_dbg(chan->dev, "xfer LDs staring from 0x%llx\n",
                                (unsigned long long)next_dst_addr);
-               set_cdar(fsl_chan, next_dst_addr);
-               dma_start(fsl_chan);
+               set_cdar(chan, next_dst_addr);
+               dma_start(chan);
        } else {
-               set_cdar(fsl_chan, 0);
-               set_ndar(fsl_chan, 0);
+               set_cdar(chan, 0);
+               set_ndar(chan, 0);
        }
 
 out_unlock:
-       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  */
-static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
-       struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+       struct fsldma_chan *chan = to_fsl_chan(dchan);
 
 #ifdef FSL_DMA_LD_DEBUG
        struct fsl_desc_sw *ld;
        unsigned long flags;
 
-       spin_lock_irqsave(&fsl_chan->desc_lock, flags);
-       if (list_empty(&fsl_chan->ld_queue)) {
-               spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       spin_lock_irqsave(&chan->desc_lock, flags);
+       if (list_empty(&chan->ld_queue)) {
+               spin_unlock_irqrestore(&chan->desc_lock, flags);
                return;
        }
 
-       dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
-       list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
+       dev_dbg(chan->dev, "--memcpy issue--\n");
+       list_for_each_entry(ld, &chan->ld_queue, node) {
                int i;
-               dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
-                               fsl_chan->id, ld->async_tx.phys);
+               dev_dbg(chan->dev, "Ch %d, LD %08x\n",
+                               chan->id, ld->async_tx.phys);
                for (i = 0; i < 8; i++)
-                       dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
+                       dev_dbg(chan->dev, "LD offset %d: %08x\n",
                                        i, *(((u32 *)&ld->hw) + i));
        }
-       dev_dbg(fsl_chan->dev, "----------------\n");
-       spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+       dev_dbg(chan->dev, "----------------\n");
+       spin_unlock_irqrestore(&chan->desc_lock, flags);
 #endif
 
-       fsl_chan_xfer_ld_queue(fsl_chan);
+       fsl_chan_xfer_ld_queue(chan);
 }
 
 /**
  * fsl_dma_is_complete - Determine the DMA status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  */
-static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
+static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
                                        dma_cookie_t cookie,
                                        dma_cookie_t *done,
                                        dma_cookie_t *used)
 {
-       struct fsldma_chan *fsl_chan = to_fsl_chan(chan);
+       struct fsldma_chan *chan = to_fsl_chan(dchan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
 
-       fsl_chan_ld_cleanup(fsl_chan);
+       fsl_chan_ld_cleanup(chan);
 
-       last_used = chan->cookie;
-       last_complete = fsl_chan->completed_cookie;
+       last_used = dchan->cookie;
+       last_complete = chan->completed_cookie;
 
        if (done)
                *done = last_complete;
| 973 | 973 | ||
| 974 | static irqreturn_t fsldma_chan_irq(int irq, void *data) | 974 | static irqreturn_t fsldma_chan_irq(int irq, void *data) |
| 975 | { | 975 | { |
| 976 | struct fsldma_chan *fsl_chan = data; | 976 | struct fsldma_chan *chan = data; |
| 977 | u32 stat; | ||
| 978 | int update_cookie = 0; | 977 | int update_cookie = 0; |
| 979 | int xfer_ld_q = 0; | 978 | int xfer_ld_q = 0; |
| 979 | u32 stat; | ||
| 980 | 980 | ||
| 981 | stat = get_sr(fsl_chan); | 981 | stat = get_sr(chan); |
| 982 | dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", | 982 | dev_dbg(chan->dev, "event: channel %d, stat = 0x%x\n", |
| 983 | fsl_chan->id, stat); | 983 | chan->id, stat); |
| 984 | set_sr(fsl_chan, stat); /* Clear the event register */ | 984 | set_sr(chan, stat); /* Clear the event register */ |
| 985 | 985 | ||
| 986 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | 986 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
| 987 | if (!stat) | 987 | if (!stat) |
| 988 | return IRQ_NONE; | 988 | return IRQ_NONE; |
| 989 | 989 | ||
| 990 | if (stat & FSL_DMA_SR_TE) | 990 | if (stat & FSL_DMA_SR_TE) |
| 991 | dev_err(fsl_chan->dev, "Transfer Error!\n"); | 991 | dev_err(chan->dev, "Transfer Error!\n"); |
| 992 | 992 | ||
| 993 | /* Programming Error | 993 | /* Programming Error |
| 994 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will | 994 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will |
| 995 | * triger a PE interrupt. | 995 | * triger a PE interrupt. |
| 996 | */ | 996 | */ |
| 997 | if (stat & FSL_DMA_SR_PE) { | 997 | if (stat & FSL_DMA_SR_PE) { |
| 998 | dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); | 998 | dev_dbg(chan->dev, "event: Programming Error INT\n"); |
| 999 | if (get_bcr(fsl_chan) == 0) { | 999 | if (get_bcr(chan) == 0) { |
| 1000 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | 1000 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. |
| 1001 | * Now, update the completed cookie, and continue the | 1001 | * Now, update the completed cookie, and continue the |
| 1002 | * next uncompleted transfer. | 1002 | * next uncompleted transfer. |
@@ -1011,10 +1011,10 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
         * we will recycle the used descriptor.
         */
        if (stat & FSL_DMA_SR_EOSI) {
-               dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
-               dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
-                       (unsigned long long)get_cdar(fsl_chan),
-                       (unsigned long long)get_ndar(fsl_chan));
+               dev_dbg(chan->dev, "event: End-of-segments INT\n");
+               dev_dbg(chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
+                       (unsigned long long)get_cdar(chan),
+                       (unsigned long long)get_ndar(chan));
                stat &= ~FSL_DMA_SR_EOSI;
                update_cookie = 1;
        }
@@ -1023,7 +1023,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
         * and start the next transfer if it exist.
         */
        if (stat & FSL_DMA_SR_EOCDI) {
-               dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+               dev_dbg(chan->dev, "event: End-of-Chain link INT\n");
                stat &= ~FSL_DMA_SR_EOCDI;
                update_cookie = 1;
                xfer_ld_q = 1;
| @@ -1034,28 +1034,28 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) | |||
| 1034 | * prepare next transfer. | 1034 | * prepare next transfer. |
| 1035 | */ | 1035 | */ |
| 1036 | if (stat & FSL_DMA_SR_EOLNI) { | 1036 | if (stat & FSL_DMA_SR_EOLNI) { |
| 1037 | dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); | 1037 | dev_dbg(chan->dev, "event: End-of-link INT\n"); |
| 1038 | stat &= ~FSL_DMA_SR_EOLNI; | 1038 | stat &= ~FSL_DMA_SR_EOLNI; |
| 1039 | xfer_ld_q = 1; | 1039 | xfer_ld_q = 1; |
| 1040 | } | 1040 | } |
| 1041 | 1041 | ||
| 1042 | if (update_cookie) | 1042 | if (update_cookie) |
| 1043 | fsl_dma_update_completed_cookie(fsl_chan); | 1043 | fsl_dma_update_completed_cookie(chan); |
| 1044 | if (xfer_ld_q) | 1044 | if (xfer_ld_q) |
| 1045 | fsl_chan_xfer_ld_queue(fsl_chan); | 1045 | fsl_chan_xfer_ld_queue(chan); |
| 1046 | if (stat) | 1046 | if (stat) |
| 1047 | dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", | 1047 | dev_dbg(chan->dev, "event: unhandled sr 0x%02x\n", |
| 1048 | stat); | 1048 | stat); |
| 1049 | 1049 | ||
| 1050 | dev_dbg(fsl_chan->dev, "event: Exit\n"); | 1050 | dev_dbg(chan->dev, "event: Exit\n"); |
| 1051 | tasklet_schedule(&fsl_chan->tasklet); | 1051 | tasklet_schedule(&chan->tasklet); |
| 1052 | return IRQ_HANDLED; | 1052 | return IRQ_HANDLED; |
| 1053 | } | 1053 | } |
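
The handler above follows a clear-as-you-go dispatch: each status bit is tested, acted on, and then cleared from the local stat copy, so any bit still set at the end is logged as unhandled before heavy cleanup is deferred to the tasklet. A minimal userspace sketch of that pattern follows; the bit positions are illustrative stand-ins for the real FSL_DMA_SR_* flags, not the hardware encoding.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the FSL_DMA_SR_* status bits. */
    #define SR_TE    (1u << 7)
    #define SR_EOSI  (1u << 5)
    #define SR_EOCDI (1u << 2)
    #define SR_EOLNI (1u << 1)

    static void handle_status(uint32_t stat)
    {
        int update_cookie = 0, xfer_ld_q = 0;

        if (stat & SR_TE)
            fprintf(stderr, "Transfer Error!\n");

        if (stat & SR_EOSI) {           /* end of segments */
            update_cookie = 1;
            stat &= ~SR_EOSI;
        }
        if (stat & SR_EOCDI) {          /* end of chain */
            update_cookie = 1;
            xfer_ld_q = 1;
            stat &= ~SR_EOCDI;
        }
        if (stat & SR_EOLNI) {          /* end of link */
            xfer_ld_q = 1;
            stat &= ~SR_EOLNI;
        }

        /* Anything still set was not recognized. */
        if (stat)
            printf("unhandled sr 0x%02x\n", (unsigned)stat);

        if (update_cookie)
            printf("would update the completed cookie\n");
        if (xfer_ld_q)
            printf("would kick the link-descriptor queue\n");
    }

    int main(void)
    {
        handle_status(SR_EOSI | SR_EOCDI | (1u << 6));
        return 0;
    }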
| 1054 | 1054 | ||
| 1055 | static void dma_do_tasklet(unsigned long data) | 1055 | static void dma_do_tasklet(unsigned long data) |
| 1056 | { | 1056 | { |
| 1057 | struct fsldma_chan *fsl_chan = (struct fsldma_chan *)data; | 1057 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
| 1058 | fsl_chan_ld_cleanup(fsl_chan); | 1058 | fsl_chan_ld_cleanup(chan); |
| 1059 | } | 1059 | } |
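
dma_do_tasklet() is the deferred half of the interrupt path: fsldma_chan_irq() acknowledges the hardware status and schedules the tasklet, and the tasklet walks the descriptor lists outside hard-IRQ context. The fragment below sketches that split against the classic (pre-5.9) tasklet API this era of driver uses; my_chan, my_do_tasklet, and my_chan_irq are hypothetical names, and the code assumes an in-kernel build rather than a standalone program.

    #include <linux/interrupt.h>

    struct my_chan {
        struct tasklet_struct tasklet;
        /* ... registers, descriptor lists, locks ... */
    };

    /* Bottom half: the safe place for list walks and client callbacks. */
    static void my_do_tasklet(unsigned long data)
    {
        struct my_chan *chan = (struct my_chan *)data;

        /* clean up completed descriptors for chan here */
        (void)chan;
    }

    static void my_chan_setup(struct my_chan *chan)
    {
        tasklet_init(&chan->tasklet, my_do_tasklet, (unsigned long)chan);
    }

    static irqreturn_t my_chan_irq(int irq, void *data)
    {
        struct my_chan *chan = data;

        /* ack/record hardware status first, then defer the rest */
        tasklet_schedule(&chan->tasklet);
        return IRQ_HANDLED;
    }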
| 1060 | 1060 | ||
| 1061 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) | 1061 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) |
| @@ -1171,24 +1171,24 @@ out_unwind: | |||
| 1171 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | 1171 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, |
| 1172 | struct device_node *node, u32 feature, const char *compatible) | 1172 | struct device_node *node, u32 feature, const char *compatible) |
| 1173 | { | 1173 | { |
| 1174 | struct fsldma_chan *fchan; | 1174 | struct fsldma_chan *chan; |
| 1175 | struct resource res; | 1175 | struct resource res; |
| 1176 | int err; | 1176 | int err; |
| 1177 | 1177 | ||
| 1178 | /* alloc channel */ | 1178 | /* alloc channel */ |
| 1179 | fchan = kzalloc(sizeof(*fchan), GFP_KERNEL); | 1179 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
| 1180 | if (!fchan) { | 1180 | if (!chan) { |
| 1181 | dev_err(fdev->dev, "no free memory for DMA channels!\n"); | 1181 | dev_err(fdev->dev, "no free memory for DMA channels!\n"); |
| 1182 | err = -ENOMEM; | 1182 | err = -ENOMEM; |
| 1183 | goto out_return; | 1183 | goto out_return; |
| 1184 | } | 1184 | } |
| 1185 | 1185 | ||
| 1186 | /* ioremap registers for use */ | 1186 | /* ioremap registers for use */ |
| 1187 | fchan->regs = of_iomap(node, 0); | 1187 | chan->regs = of_iomap(node, 0); |
| 1188 | if (!fchan->regs) { | 1188 | if (!chan->regs) { |
| 1189 | dev_err(fdev->dev, "unable to ioremap registers\n"); | 1189 | dev_err(fdev->dev, "unable to ioremap registers\n"); |
| 1190 | err = -ENOMEM; | 1190 | err = -ENOMEM; |
| 1191 | goto out_free_fchan; | 1191 | goto out_free_chan; |
| 1192 | } | 1192 | } |
| 1193 | 1193 | ||
| 1194 | err = of_address_to_resource(node, 0, &res); | 1194 | err = of_address_to_resource(node, 0, &res); |
| @@ -1197,74 +1197,74 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | |||
| 1197 | goto out_iounmap_regs; | 1197 | goto out_iounmap_regs; |
| 1198 | } | 1198 | } |
| 1199 | 1199 | ||
| 1200 | fchan->feature = feature; | 1200 | chan->feature = feature; |
| 1201 | if (!fdev->feature) | 1201 | if (!fdev->feature) |
| 1202 | fdev->feature = fchan->feature; | 1202 | fdev->feature = chan->feature; |
| 1203 | 1203 | ||
| 1204 | /* | 1204 | /* |
| 1205 | * If the DMA device's feature is different from the feature | 1205 | * If the DMA device's feature is different from the feature |
| 1206 | * of its channels, report the bug | 1206 | * of its channels, report the bug |
| 1207 | */ | 1207 | */ |
| 1208 | WARN_ON(fdev->feature != fchan->feature); | 1208 | WARN_ON(fdev->feature != chan->feature); |
| 1209 | 1209 | ||
| 1210 | fchan->dev = fdev->dev; | 1210 | chan->dev = fdev->dev; |
| 1211 | fchan->id = ((res.start - 0x100) & 0xfff) >> 7; | 1211 | chan->id = ((res.start - 0x100) & 0xfff) >> 7; |
| 1212 | if (fchan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | 1212 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { |
| 1213 | dev_err(fdev->dev, "too many channels for device\n"); | 1213 | dev_err(fdev->dev, "too many channels for device\n"); |
| 1214 | err = -EINVAL; | 1214 | err = -EINVAL; |
| 1215 | goto out_iounmap_regs; | 1215 | goto out_iounmap_regs; |
| 1216 | } | 1216 | } |
| 1217 | 1217 | ||
| 1218 | fdev->chan[fchan->id] = fchan; | 1218 | fdev->chan[chan->id] = chan; |
| 1219 | tasklet_init(&fchan->tasklet, dma_do_tasklet, (unsigned long)fchan); | 1219 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
| 1220 | 1220 | ||
| 1221 | /* Initialize the channel */ | 1221 | /* Initialize the channel */ |
| 1222 | dma_init(fchan); | 1222 | dma_init(chan); |
| 1223 | 1223 | ||
| 1224 | /* Clear cdar registers */ | 1224 | /* Clear cdar registers */ |
| 1225 | set_cdar(fchan, 0); | 1225 | set_cdar(chan, 0); |
| 1226 | 1226 | ||
| 1227 | switch (fchan->feature & FSL_DMA_IP_MASK) { | 1227 | switch (chan->feature & FSL_DMA_IP_MASK) { |
| 1228 | case FSL_DMA_IP_85XX: | 1228 | case FSL_DMA_IP_85XX: |
| 1229 | fchan->toggle_ext_pause = fsl_chan_toggle_ext_pause; | 1229 | chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; |
| 1230 | case FSL_DMA_IP_83XX: | 1230 | case FSL_DMA_IP_83XX: |
| 1231 | fchan->toggle_ext_start = fsl_chan_toggle_ext_start; | 1231 | chan->toggle_ext_start = fsl_chan_toggle_ext_start; |
| 1232 | fchan->set_src_loop_size = fsl_chan_set_src_loop_size; | 1232 | chan->set_src_loop_size = fsl_chan_set_src_loop_size; |
| 1233 | fchan->set_dst_loop_size = fsl_chan_set_dst_loop_size; | 1233 | chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; |
| 1234 | fchan->set_request_count = fsl_chan_set_request_count; | 1234 | chan->set_request_count = fsl_chan_set_request_count; |
| 1235 | } | 1235 | } |
| 1236 | 1236 | ||
| 1237 | spin_lock_init(&fchan->desc_lock); | 1237 | spin_lock_init(&chan->desc_lock); |
| 1238 | INIT_LIST_HEAD(&fchan->ld_queue); | 1238 | INIT_LIST_HEAD(&chan->ld_queue); |
| 1239 | 1239 | ||
| 1240 | fchan->common.device = &fdev->common; | 1240 | chan->common.device = &fdev->common; |
| 1241 | 1241 | ||
| 1242 | /* find the IRQ line, if it exists in the device tree */ | 1242 | /* find the IRQ line, if it exists in the device tree */ |
| 1243 | fchan->irq = irq_of_parse_and_map(node, 0); | 1243 | chan->irq = irq_of_parse_and_map(node, 0); |
| 1244 | 1244 | ||
| 1245 | /* Add the channel to DMA device channel list */ | 1245 | /* Add the channel to DMA device channel list */ |
| 1246 | list_add_tail(&fchan->common.device_node, &fdev->common.channels); | 1246 | list_add_tail(&chan->common.device_node, &fdev->common.channels); |
| 1247 | fdev->common.chancnt++; | 1247 | fdev->common.chancnt++; |
| 1248 | 1248 | ||
| 1249 | dev_info(fdev->dev, "#%d (%s), irq %d\n", fchan->id, compatible, | 1249 | dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, |
| 1250 | fchan->irq != NO_IRQ ? fchan->irq : fdev->irq); | 1250 | chan->irq != NO_IRQ ? chan->irq : fdev->irq); |
| 1251 | 1251 | ||
| 1252 | return 0; | 1252 | return 0; |
| 1253 | 1253 | ||
| 1254 | out_iounmap_regs: | 1254 | out_iounmap_regs: |
| 1255 | iounmap(fchan->regs); | 1255 | iounmap(chan->regs); |
| 1256 | out_free_fchan: | 1256 | out_free_chan: |
| 1257 | kfree(fchan); | 1257 | kfree(chan); |
| 1258 | out_return: | 1258 | out_return: |
| 1259 | return err; | 1259 | return err; |
| 1260 | } | 1260 | } |
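
Two details in the probe above are easy to miss. First, the switch intentionally falls through: FSL_DMA_IP_85XX channels get toggle_ext_pause and then everything the FSL_DMA_IP_83XX case sets. Second, chan->id is derived from the register block's physical address: channel register sets start at offset 0x100 from the controller base with an 0x80-byte stride (0x100, 0x180, 0x200, 0x280 on these parts; treat that layout as an assumption about this particular controller), so masking the low 12 bits and shifting right by 7 recovers the index. A compilable check of the arithmetic:

    #include <stdio.h>

    /* Mirror of the probe's id computation. */
    static int chan_id(unsigned long start)
    {
        return (int)(((start - 0x100) & 0xfff) >> 7);
    }

    int main(void)
    {
        const unsigned long offs[] = { 0x100, 0x180, 0x200, 0x280 };

        for (int i = 0; i < 4; i++)
            printf("offset 0x%lx -> channel id %d\n",
                   offs[i], chan_id(offs[i]));
        /* prints channel ids 0, 1, 2, 3 */
        return 0;
    }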
| 1261 | 1261 | ||
| 1262 | static void fsl_dma_chan_remove(struct fsldma_chan *fchan) | 1262 | static void fsl_dma_chan_remove(struct fsldma_chan *chan) |
| 1263 | { | 1263 | { |
| 1264 | irq_dispose_mapping(fchan->irq); | 1264 | irq_dispose_mapping(chan->irq); |
| 1265 | list_del(&fchan->common.device_node); | 1265 | list_del(&chan->common.device_node); |
| 1266 | iounmap(fchan->regs); | 1266 | iounmap(chan->regs); |
| 1267 | kfree(fchan); | 1267 | kfree(chan); |
| 1268 | } | 1268 | } |
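
fsl_dma_chan_remove() is the mirror image of the probe's error labels: out_iounmap_regs and out_free_chan undo, in reverse order, exactly the steps that succeeded before the failing one, while remove performs the full teardown. A self-contained userspace analog of the goto-unwind idiom, with calloc/malloc/free standing in for kzalloc(), of_iomap(), iounmap(), and kfree():

    #include <stdlib.h>

    /* Goto-unwind: each label frees only what was acquired before
     * the jump, in reverse order of acquisition. */
    static int setup_and_teardown(void)
    {
        char *chan, *regs;
        int err;

        chan = calloc(1, 64);            /* stands in for kzalloc()  */
        if (!chan) {
            err = -1;
            goto out_return;
        }

        regs = malloc(128);              /* stands in for of_iomap() */
        if (!regs) {
            err = -1;
            goto out_free_chan;
        }

        /* ... register the channel here; a later failure would jump
         * to a label that first frees regs, then falls through ... */

        free(regs);                      /* "remove": full teardown  */
        free(chan);
        return 0;

    out_free_chan:
        free(chan);
    out_return:
        return err;
    }

    int main(void)
    {
        return setup_and_teardown();
    }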
| 1269 | 1269 | ||
| 1270 | static int __devinit fsldma_of_probe(struct of_device *op, | 1270 | static int __devinit fsldma_of_probe(struct of_device *op, |
