about summary refs log tree commit diff stats
path: root/drivers/dma/dma-jz4780.c
diff options
context:
space:
mode:
author	Alex Smith <alex.smith@imgtec.com>	2015-07-24 12:24:21 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2015-08-18 12:58:49 -0400
commit	dc578f314e2471ca93a4c1f80988ecc781836f72 (patch)
tree	0862d31388141b3f6228685e1c5cdaeb3a5880d7 /drivers/dma/dma-jz4780.c
parent	46fa516869f4b57f9eb63db02c76642abfb9f682 (diff)
dmaengine: jz4780: Fall back on smaller transfer sizes where necessary
For some reason the controller does not support 8 byte transfers (but does support all other powers of 2 up to 128). In this case fall back to 4 bytes. In addition, fall back to 128 bytes when any larger power of 2 would be possible within the alignment constraints, as this is the maximum supported. It makes no sense to outright reject 8 or >128 bytes just because the alignment constraints make those the maximum possible size given the parameters for the transaction. For instance, this can result in a DMA from/to an 8 byte aligned address failing. It is perfectly safe to fall back to smaller transfer sizes, the only consequence is reduced transfer efficiency, which is far better than not allowing the transfer at all. Signed-off-by: Alex Smith <alex.smith@imgtec.com> Cc: Vinod Koul <vinod.koul@intel.com> Cc: Zubair Lutfullah Kakakhel <Zubair.Kakakhel@imgtec.com> Cc: dmaengine@vger.kernel.org Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/dma-jz4780.c')
-rw-r--r--drivers/dma/dma-jz4780.c37
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index fc933a268986..7af886fc47af 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -214,11 +214,25 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(desc);
 }
 
-static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
+static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
 {
-	*ord = ffs(val) - 1;
+	int ord = ffs(val) - 1;
 
-	switch (*ord) {
+	/*
+	 * 8 byte transfer sizes unsupported so fall back on 4. If it's larger
+	 * than the maximum, just limit it. It is perfectly safe to fall back
+	 * in this way since we won't exceed the maximum burst size supported
+	 * by the device, the only effect is reduced efficiency. This is better
+	 * than refusing to perform the request at all.
+	 */
+	if (ord == 3)
+		ord = 2;
+	else if (ord > 7)
+		ord = 7;
+
+	*shift = ord;
+
+	switch (ord) {
 	case 0:
 		return JZ_DMA_SIZE_1_BYTE;
 	case 1:
@@ -231,10 +245,8 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, int *ord)
 		return JZ_DMA_SIZE_32_BYTE;
 	case 6:
 		return JZ_DMA_SIZE_64_BYTE;
-	case 7:
-		return JZ_DMA_SIZE_128_BYTE;
 	default:
-		return -EINVAL;
+		return JZ_DMA_SIZE_128_BYTE;
 	}
 }
 
@@ -244,7 +256,6 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 {
 	struct dma_slave_config *config = &jzchan->config;
 	uint32_t width, maxburst, tsz;
-	int ord;
 
 	if (direction == DMA_MEM_TO_DEV) {
 		desc->dcm = JZ_DMA_DCM_SAI;
@@ -271,8 +282,8 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	 * divisible by the transfer size, and we must not use more than the
 	 * maximum burst specified by the user.
 	 */
-	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst), &ord);
-	jzchan->transfer_shift = ord;
+	tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+				       &jzchan->transfer_shift);
 
 	switch (width) {
 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -289,7 +300,7 @@ static uint32_t jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
 	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
 	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;
 
-	desc->dtc = len >> ord;
+	desc->dtc = len >> jzchan->transfer_shift;
 }
 
 static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
@@ -391,15 +402,13 @@ struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
 	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
 	struct jz4780_dma_desc *desc;
 	uint32_t tsz;
-	int ord;
 
 	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
 	if (!desc)
 		return NULL;
 
-	tsz = jz4780_dma_transfer_size(dest | src | len, &ord);
-	if (tsz < 0)
-		return ERR_PTR(tsz);
+	tsz = jz4780_dma_transfer_size(dest | src | len,
+				       &jzchan->transfer_shift);
 
 	desc->desc[0].dsa = src;
 	desc->desc[0].dta = dest;