diff options
author | Rameshwar Prasad Sahu <rsahu@apm.com> | 2015-06-02 05:03:33 -0400 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2015-06-02 09:34:10 -0400 |
commit | 6d0767c10f491034c875c1af11058f03caef34b1 (patch) | |
tree | bcce538365f999fe3ecbb789ddd814d7445442b6 /drivers/dma/xgene-dma.c | |
parent | 19d643d68bd678449d63209dff53b4585df9f149 (diff) |
dmaengine: xgene-dma: Fix "incorrect type in assignment" warnings
This patch fixes sparse warnings like incorrect type in assignment
(different base types), cast to restricted __le64.
Reported-by: kbuild test robot <fengguang.wu@intel.com>
Signed-off-by: Rameshwar Prasad Sahu <rsahu@apm.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/xgene-dma.c')
-rwxr-xr-x | drivers/dma/xgene-dma.c | 173 |
1 file changed, 66 insertions, 107 deletions
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index f52e37502254..620fd55ec766 100755 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c | |||
@@ -124,32 +124,8 @@ | |||
124 | #define XGENE_DMA_DESC_ELERR_POS 46 | 124 | #define XGENE_DMA_DESC_ELERR_POS 46 |
125 | #define XGENE_DMA_DESC_RTYPE_POS 56 | 125 | #define XGENE_DMA_DESC_RTYPE_POS 56 |
126 | #define XGENE_DMA_DESC_LERR_POS 60 | 126 | #define XGENE_DMA_DESC_LERR_POS 60 |
127 | #define XGENE_DMA_DESC_FLYBY_POS 4 | ||
128 | #define XGENE_DMA_DESC_BUFLEN_POS 48 | 127 | #define XGENE_DMA_DESC_BUFLEN_POS 48 |
129 | #define XGENE_DMA_DESC_HOENQ_NUM_POS 48 | 128 | #define XGENE_DMA_DESC_HOENQ_NUM_POS 48 |
130 | |||
131 | #define XGENE_DMA_DESC_NV_SET(m) \ | ||
132 | (((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT) | ||
133 | #define XGENE_DMA_DESC_IN_SET(m) \ | ||
134 | (((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT) | ||
135 | #define XGENE_DMA_DESC_RTYPE_SET(m, v) \ | ||
136 | (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS)) | ||
137 | #define XGENE_DMA_DESC_BUFADDR_SET(m, v) \ | ||
138 | (((u64 *)(m))[0] |= (v)) | ||
139 | #define XGENE_DMA_DESC_BUFLEN_SET(m, v) \ | ||
140 | (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS)) | ||
141 | #define XGENE_DMA_DESC_C_SET(m) \ | ||
142 | (((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT) | ||
143 | #define XGENE_DMA_DESC_FLYBY_SET(m, v) \ | ||
144 | (((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS)) | ||
145 | #define XGENE_DMA_DESC_MULTI_SET(m, v, i) \ | ||
146 | (((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8))) | ||
147 | #define XGENE_DMA_DESC_DR_SET(m) \ | ||
148 | (((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT) | ||
149 | #define XGENE_DMA_DESC_DST_ADDR_SET(m, v) \ | ||
150 | (((u64 *)(m))[3] |= (v)) | ||
151 | #define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v) \ | ||
152 | (((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS)) | ||
153 | #define XGENE_DMA_DESC_ELERR_RD(m) \ | 129 | #define XGENE_DMA_DESC_ELERR_RD(m) \ |
154 | (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3) | 130 | (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3) |
155 | #define XGENE_DMA_DESC_LERR_RD(m) \ | 131 | #define XGENE_DMA_DESC_LERR_RD(m) \ |
@@ -158,14 +134,7 @@ | |||
158 | (((elerr) << 4) | (lerr)) | 134 | (((elerr) << 4) | (lerr)) |
159 | 135 | ||
160 | /* X-Gene DMA descriptor empty s/w signature */ | 136 | /* X-Gene DMA descriptor empty s/w signature */ |
161 | #define XGENE_DMA_DESC_EMPTY_INDEX 0 | ||
162 | #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL | 137 | #define XGENE_DMA_DESC_EMPTY_SIGNATURE ~0ULL |
163 | #define XGENE_DMA_DESC_SET_EMPTY(m) \ | ||
164 | (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] = \ | ||
165 | XGENE_DMA_DESC_EMPTY_SIGNATURE) | ||
166 | #define XGENE_DMA_DESC_IS_EMPTY(m) \ | ||
167 | (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] == \ | ||
168 | XGENE_DMA_DESC_EMPTY_SIGNATURE) | ||
169 | 138 | ||
170 | /* X-Gene DMA configurable parameters defines */ | 139 | /* X-Gene DMA configurable parameters defines */ |
171 | #define XGENE_DMA_RING_NUM 512 | 140 | #define XGENE_DMA_RING_NUM 512 |
@@ -184,7 +153,7 @@ | |||
184 | #define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */ | 153 | #define XGENE_DMA_XOR_ALIGNMENT 6 /* 64 Bytes */ |
185 | #define XGENE_DMA_MAX_XOR_SRC 5 | 154 | #define XGENE_DMA_MAX_XOR_SRC 5 |
186 | #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0 | 155 | #define XGENE_DMA_16K_BUFFER_LEN_CODE 0x0 |
187 | #define XGENE_DMA_INVALID_LEN_CODE 0x7800 | 156 | #define XGENE_DMA_INVALID_LEN_CODE 0x7800000000000000ULL |
188 | 157 | ||
189 | /* X-Gene DMA descriptor error codes */ | 158 | /* X-Gene DMA descriptor error codes */ |
190 | #define ERR_DESC_AXI 0x01 | 159 | #define ERR_DESC_AXI 0x01 |
@@ -214,10 +183,10 @@ | |||
214 | #define ERR_DESC_SRC_INT 0xB | 183 | #define ERR_DESC_SRC_INT 0xB |
215 | 184 | ||
216 | /* X-Gene DMA flyby operation code */ | 185 | /* X-Gene DMA flyby operation code */ |
217 | #define FLYBY_2SRC_XOR 0x8 | 186 | #define FLYBY_2SRC_XOR 0x80 |
218 | #define FLYBY_3SRC_XOR 0x9 | 187 | #define FLYBY_3SRC_XOR 0x90 |
219 | #define FLYBY_4SRC_XOR 0xA | 188 | #define FLYBY_4SRC_XOR 0xA0 |
220 | #define FLYBY_5SRC_XOR 0xB | 189 | #define FLYBY_5SRC_XOR 0xB0 |
221 | 190 | ||
222 | /* X-Gene DMA SW descriptor flags */ | 191 | /* X-Gene DMA SW descriptor flags */ |
223 | #define XGENE_DMA_FLAG_64B_DESC BIT(0) | 192 | #define XGENE_DMA_FLAG_64B_DESC BIT(0) |
@@ -238,10 +207,10 @@ | |||
238 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) | 207 | dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) |
239 | 208 | ||
240 | struct xgene_dma_desc_hw { | 209 | struct xgene_dma_desc_hw { |
241 | u64 m0; | 210 | __le64 m0; |
242 | u64 m1; | 211 | __le64 m1; |
243 | u64 m2; | 212 | __le64 m2; |
244 | u64 m3; | 213 | __le64 m3; |
245 | }; | 214 | }; |
246 | 215 | ||
247 | enum xgene_dma_ring_cfgsize { | 216 | enum xgene_dma_ring_cfgsize { |
@@ -388,18 +357,11 @@ static bool is_pq_enabled(struct xgene_dma *pdma) | |||
388 | return !(val & XGENE_DMA_PQ_DISABLE_MASK); | 357 | return !(val & XGENE_DMA_PQ_DISABLE_MASK); |
389 | } | 358 | } |
390 | 359 | ||
391 | static void xgene_dma_cpu_to_le64(u64 *desc, int count) | 360 | static u64 xgene_dma_encode_len(size_t len) |
392 | { | ||
393 | int i; | ||
394 | |||
395 | for (i = 0; i < count; i++) | ||
396 | desc[i] = cpu_to_le64(desc[i]); | ||
397 | } | ||
398 | |||
399 | static u16 xgene_dma_encode_len(u32 len) | ||
400 | { | 361 | { |
401 | return (len < XGENE_DMA_MAX_BYTE_CNT) ? | 362 | return (len < XGENE_DMA_MAX_BYTE_CNT) ? |
402 | len : XGENE_DMA_16K_BUFFER_LEN_CODE; | 363 | ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) : |
364 | XGENE_DMA_16K_BUFFER_LEN_CODE; | ||
403 | } | 365 | } |
404 | 366 | ||
405 | static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) | 367 | static u8 xgene_dma_encode_xor_flyby(u32 src_cnt) |
@@ -424,34 +386,50 @@ static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring) | |||
424 | return XGENE_DMA_RING_DESC_CNT(ring_state); | 386 | return XGENE_DMA_RING_DESC_CNT(ring_state); |
425 | } | 387 | } |
426 | 388 | ||
427 | static void xgene_dma_set_src_buffer(void *ext8, size_t *len, | 389 | static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len, |
428 | dma_addr_t *paddr) | 390 | dma_addr_t *paddr) |
429 | { | 391 | { |
430 | size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ? | 392 | size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ? |
431 | *len : XGENE_DMA_MAX_BYTE_CNT; | 393 | *len : XGENE_DMA_MAX_BYTE_CNT; |
432 | 394 | ||
433 | XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr); | 395 | *ext8 |= cpu_to_le64(*paddr); |
434 | XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes)); | 396 | *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes)); |
435 | *len -= nbytes; | 397 | *len -= nbytes; |
436 | *paddr += nbytes; | 398 | *paddr += nbytes; |
437 | } | 399 | } |
438 | 400 | ||
439 | static void xgene_dma_invalidate_buffer(void *ext8) | 401 | static void xgene_dma_invalidate_buffer(__le64 *ext8) |
440 | { | 402 | { |
441 | XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE); | 403 | *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE); |
442 | } | 404 | } |
443 | 405 | ||
444 | static void *xgene_dma_lookup_ext8(u64 *desc, int idx) | 406 | static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx) |
445 | { | 407 | { |
446 | return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1); | 408 | switch (idx) { |
409 | case 0: | ||
410 | return &desc->m1; | ||
411 | case 1: | ||
412 | return &desc->m0; | ||
413 | case 2: | ||
414 | return &desc->m3; | ||
415 | case 3: | ||
416 | return &desc->m2; | ||
417 | default: | ||
418 | pr_err("Invalid dma descriptor index\n"); | ||
419 | } | ||
420 | |||
421 | return NULL; | ||
447 | } | 422 | } |
448 | 423 | ||
449 | static void xgene_dma_init_desc(void *desc, u16 dst_ring_num) | 424 | static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc, |
425 | u16 dst_ring_num) | ||
450 | { | 426 | { |
451 | XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */ | 427 | desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT); |
452 | XGENE_DMA_DESC_IN_SET(desc); | 428 | desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA << |
453 | XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num); | 429 | XGENE_DMA_DESC_RTYPE_POS); |
454 | XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA); | 430 | desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT); |
431 | desc->m3 |= cpu_to_le64((u64)dst_ring_num << | ||
432 | XGENE_DMA_DESC_HOENQ_NUM_POS); | ||
455 | } | 433 | } |
456 | 434 | ||
457 | static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, | 435 | static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, |
@@ -459,7 +437,7 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, | |||
459 | dma_addr_t dst, dma_addr_t src, | 437 | dma_addr_t dst, dma_addr_t src, |
460 | size_t len) | 438 | size_t len) |
461 | { | 439 | { |
462 | void *desc1, *desc2; | 440 | struct xgene_dma_desc_hw *desc1, *desc2; |
463 | int i; | 441 | int i; |
464 | 442 | ||
465 | /* Get 1st descriptor */ | 443 | /* Get 1st descriptor */ |
@@ -467,23 +445,21 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, | |||
467 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); | 445 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); |
468 | 446 | ||
469 | /* Set destination address */ | 447 | /* Set destination address */ |
470 | XGENE_DMA_DESC_DR_SET(desc1); | 448 | desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); |
471 | XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst); | 449 | desc1->m3 |= cpu_to_le64(dst); |
472 | 450 | ||
473 | /* Set 1st source address */ | 451 | /* Set 1st source address */ |
474 | xgene_dma_set_src_buffer(desc1 + 8, &len, &src); | 452 | xgene_dma_set_src_buffer(&desc1->m1, &len, &src); |
475 | 453 | ||
476 | if (len <= 0) { | 454 | if (!len) |
477 | desc2 = NULL; | 455 | return; |
478 | goto skip_additional_src; | ||
479 | } | ||
480 | 456 | ||
481 | /* | 457 | /* |
482 | * We need to split this source buffer, | 458 | * We need to split this source buffer, |
483 | * and need to use 2nd descriptor | 459 | * and need to use 2nd descriptor |
484 | */ | 460 | */ |
485 | desc2 = &desc_sw->desc2; | 461 | desc2 = &desc_sw->desc2; |
486 | XGENE_DMA_DESC_NV_SET(desc1); | 462 | desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); |
487 | 463 | ||
488 | /* Set 2nd to 5th source address */ | 464 | /* Set 2nd to 5th source address */ |
489 | for (i = 0; i < 4 && len; i++) | 465 | for (i = 0; i < 4 && len; i++) |
@@ -496,12 +472,6 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan, | |||
496 | 472 | ||
497 | /* Updated flag that we have prepared 64B descriptor */ | 473 | /* Updated flag that we have prepared 64B descriptor */ |
498 | desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; | 474 | desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC; |
499 | |||
500 | skip_additional_src: | ||
501 | /* Hardware stores descriptor in little endian format */ | ||
502 | xgene_dma_cpu_to_le64(desc1, 4); | ||
503 | if (desc2) | ||
504 | xgene_dma_cpu_to_le64(desc2, 4); | ||
505 | } | 475 | } |
506 | 476 | ||
507 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, | 477 | static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, |
@@ -510,7 +480,7 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, | |||
510 | u32 src_cnt, size_t *nbytes, | 480 | u32 src_cnt, size_t *nbytes, |
511 | const u8 *scf) | 481 | const u8 *scf) |
512 | { | 482 | { |
513 | void *desc1, *desc2; | 483 | struct xgene_dma_desc_hw *desc1, *desc2; |
514 | size_t len = *nbytes; | 484 | size_t len = *nbytes; |
515 | int i; | 485 | int i; |
516 | 486 | ||
@@ -521,28 +491,24 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan, | |||
521 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); | 491 | xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num); |
522 | 492 | ||
523 | /* Set destination address */ | 493 | /* Set destination address */ |
524 | XGENE_DMA_DESC_DR_SET(desc1); | 494 | desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT); |
525 | XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst); | 495 | desc1->m3 |= cpu_to_le64(*dst); |
526 | 496 | ||
527 | /* We have multiple source addresses, so need to set NV bit*/ | 497 | /* We have multiple source addresses, so need to set NV bit*/ |
528 | XGENE_DMA_DESC_NV_SET(desc1); | 498 | desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT); |
529 | 499 | ||
530 | /* Set flyby opcode */ | 500 | /* Set flyby opcode */ |
531 | XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt)); | 501 | desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt)); |
532 | 502 | ||
533 | /* Set 1st to 5th source addresses */ | 503 | /* Set 1st to 5th source addresses */ |
534 | for (i = 0; i < src_cnt; i++) { | 504 | for (i = 0; i < src_cnt; i++) { |
535 | len = *nbytes; | 505 | len = *nbytes; |
536 | xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) : | 506 | xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 : |
537 | xgene_dma_lookup_ext8(desc2, i - 1), | 507 | xgene_dma_lookup_ext8(desc2, i - 1), |
538 | &len, &src[i]); | 508 | &len, &src[i]); |
539 | XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i); | 509 | desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8))); |
540 | } | 510 | } |
541 | 511 | ||
542 | /* Hardware stores descriptor in little endian format */ | ||
543 | xgene_dma_cpu_to_le64(desc1, 4); | ||
544 | xgene_dma_cpu_to_le64(desc2, 4); | ||
545 | |||
546 | /* Update meta data */ | 512 | /* Update meta data */ |
547 | *nbytes = len; | 513 | *nbytes = len; |
548 | *dst += XGENE_DMA_MAX_BYTE_CNT; | 514 | *dst += XGENE_DMA_MAX_BYTE_CNT; |
@@ -738,7 +704,7 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring, | |||
738 | * xgene_chan_xfer_ld_pending - push any pending transactions to hw | 704 | * xgene_chan_xfer_ld_pending - push any pending transactions to hw |
739 | * @chan : X-Gene DMA channel | 705 | * @chan : X-Gene DMA channel |
740 | * | 706 | * |
741 | * LOCKING: must hold chan->desc_lock | 707 | * LOCKING: must hold chan->lock |
742 | */ | 708 | */ |
743 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) | 709 | static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan) |
744 | { | 710 | { |
@@ -808,7 +774,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) | |||
808 | desc_hw = &ring->desc_hw[ring->head]; | 774 | desc_hw = &ring->desc_hw[ring->head]; |
809 | 775 | ||
810 | /* Check if this descriptor has been completed */ | 776 | /* Check if this descriptor has been completed */ |
811 | if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw))) | 777 | if (unlikely(le64_to_cpu(desc_hw->m0) == |
778 | XGENE_DMA_DESC_EMPTY_SIGNATURE)) | ||
812 | break; | 779 | break; |
813 | 780 | ||
814 | if (++ring->head == ring->slots) | 781 | if (++ring->head == ring->slots) |
@@ -842,7 +809,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan) | |||
842 | iowrite32(-1, ring->cmd); | 809 | iowrite32(-1, ring->cmd); |
843 | 810 | ||
844 | /* Mark this hw descriptor as processed */ | 811 | /* Mark this hw descriptor as processed */ |
845 | XGENE_DMA_DESC_SET_EMPTY(desc_hw); | 812 | desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); |
846 | 813 | ||
847 | xgene_dma_run_tx_complete_actions(chan, desc_sw); | 814 | xgene_dma_run_tx_complete_actions(chan, desc_sw); |
848 | 815 | ||
@@ -889,7 +856,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan) | |||
889 | * @chan: X-Gene DMA channel | 856 | * @chan: X-Gene DMA channel |
890 | * @list: the list to free | 857 | * @list: the list to free |
891 | * | 858 | * |
892 | * LOCKING: must hold chan->desc_lock | 859 | * LOCKING: must hold chan->lock |
893 | */ | 860 | */ |
894 | static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, | 861 | static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, |
895 | struct list_head *list) | 862 | struct list_head *list) |
@@ -900,15 +867,6 @@ static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan, | |||
900 | xgene_dma_clean_descriptor(chan, desc); | 867 | xgene_dma_clean_descriptor(chan, desc); |
901 | } | 868 | } |
902 | 869 | ||
903 | static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan, | ||
904 | struct list_head *list) | ||
905 | { | ||
906 | struct xgene_dma_desc_sw *desc, *_desc; | ||
907 | |||
908 | list_for_each_entry_safe(desc, _desc, list, node) | ||
909 | xgene_dma_clean_descriptor(chan, desc); | ||
910 | } | ||
911 | |||
912 | static void xgene_dma_free_chan_resources(struct dma_chan *dchan) | 870 | static void xgene_dma_free_chan_resources(struct dma_chan *dchan) |
913 | { | 871 | { |
914 | struct xgene_dma_chan *chan = to_dma_chan(dchan); | 872 | struct xgene_dma_chan *chan = to_dma_chan(dchan); |
@@ -985,7 +943,7 @@ fail: | |||
985 | if (!first) | 943 | if (!first) |
986 | return NULL; | 944 | return NULL; |
987 | 945 | ||
988 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | 946 | xgene_dma_free_desc_list(chan, &first->tx_list); |
989 | return NULL; | 947 | return NULL; |
990 | } | 948 | } |
991 | 949 | ||
@@ -1093,7 +1051,7 @@ fail: | |||
1093 | if (!first) | 1051 | if (!first) |
1094 | return NULL; | 1052 | return NULL; |
1095 | 1053 | ||
1096 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | 1054 | xgene_dma_free_desc_list(chan, &first->tx_list); |
1097 | return NULL; | 1055 | return NULL; |
1098 | } | 1056 | } |
1099 | 1057 | ||
@@ -1141,7 +1099,7 @@ fail: | |||
1141 | if (!first) | 1099 | if (!first) |
1142 | return NULL; | 1100 | return NULL; |
1143 | 1101 | ||
1144 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | 1102 | xgene_dma_free_desc_list(chan, &first->tx_list); |
1145 | return NULL; | 1103 | return NULL; |
1146 | } | 1104 | } |
1147 | 1105 | ||
@@ -1218,7 +1176,7 @@ fail: | |||
1218 | if (!first) | 1176 | if (!first) |
1219 | return NULL; | 1177 | return NULL; |
1220 | 1178 | ||
1221 | xgene_dma_free_tx_desc_list(chan, &first->tx_list); | 1179 | xgene_dma_free_desc_list(chan, &first->tx_list); |
1222 | return NULL; | 1180 | return NULL; |
1223 | } | 1181 | } |
1224 | 1182 | ||
@@ -1316,7 +1274,6 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring) | |||
1316 | { | 1274 | { |
1317 | void *ring_cfg = ring->state; | 1275 | void *ring_cfg = ring->state; |
1318 | u64 addr = ring->desc_paddr; | 1276 | u64 addr = ring->desc_paddr; |
1319 | void *desc; | ||
1320 | u32 i, val; | 1277 | u32 i, val; |
1321 | 1278 | ||
1322 | ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; | 1279 | ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE; |
@@ -1358,8 +1315,10 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring) | |||
1358 | 1315 | ||
1359 | /* Set empty signature to DMA Rx ring descriptors */ | 1316 | /* Set empty signature to DMA Rx ring descriptors */ |
1360 | for (i = 0; i < ring->slots; i++) { | 1317 | for (i = 0; i < ring->slots; i++) { |
1318 | struct xgene_dma_desc_hw *desc; | ||
1319 | |||
1361 | desc = &ring->desc_hw[i]; | 1320 | desc = &ring->desc_hw[i]; |
1362 | XGENE_DMA_DESC_SET_EMPTY(desc); | 1321 | desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE); |
1363 | } | 1322 | } |
1364 | 1323 | ||
1365 | /* Enable DMA Rx ring interrupt */ | 1324 | /* Enable DMA Rx ring interrupt */ |