Diffstat (limited to 'drivers/dma/xgene-dma.c')
 drivers/dma/xgene-dma.c | 176 ++++++++++++++++----------------------
 1 file changed, 69 insertions(+), 107 deletions(-)
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
old mode 100755
new mode 100644
index f52e37502254..dff22ab01851
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
 #define XGENE_DMA_MEM_RAM_SHUTDOWN      0xD070
 #define XGENE_DMA_BLK_MEM_RDY           0xD074
 #define XGENE_DMA_BLK_MEM_RDY_VAL       0xFFFFFFFF
+#define XGENE_DMA_RING_CMD_SM_OFFSET    0x8000
 
 /* X-Gene SoC EFUSE csr register and bit defination */
 #define XGENE_SOC_JTAG1_SHADOW          0x18
@@ -124,32 +125,8 @@
 #define XGENE_DMA_DESC_ELERR_POS        46
 #define XGENE_DMA_DESC_RTYPE_POS        56
 #define XGENE_DMA_DESC_LERR_POS         60
-#define XGENE_DMA_DESC_FLYBY_POS        4
 #define XGENE_DMA_DESC_BUFLEN_POS       48
 #define XGENE_DMA_DESC_HOENQ_NUM_POS    48
-
-#define XGENE_DMA_DESC_NV_SET(m) \
-        (((u64 *)(m))[0] |= XGENE_DMA_DESC_NV_BIT)
-#define XGENE_DMA_DESC_IN_SET(m) \
-        (((u64 *)(m))[0] |= XGENE_DMA_DESC_IN_BIT)
-#define XGENE_DMA_DESC_RTYPE_SET(m, v) \
-        (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_RTYPE_POS))
-#define XGENE_DMA_DESC_BUFADDR_SET(m, v) \
-        (((u64 *)(m))[0] |= (v))
-#define XGENE_DMA_DESC_BUFLEN_SET(m, v) \
-        (((u64 *)(m))[0] |= ((u64)(v) << XGENE_DMA_DESC_BUFLEN_POS))
-#define XGENE_DMA_DESC_C_SET(m) \
-        (((u64 *)(m))[1] |= XGENE_DMA_DESC_C_BIT)
-#define XGENE_DMA_DESC_FLYBY_SET(m, v) \
-        (((u64 *)(m))[2] |= ((v) << XGENE_DMA_DESC_FLYBY_POS))
-#define XGENE_DMA_DESC_MULTI_SET(m, v, i) \
-        (((u64 *)(m))[2] |= ((u64)(v) << (((i) + 1) * 8)))
-#define XGENE_DMA_DESC_DR_SET(m) \
-        (((u64 *)(m))[2] |= XGENE_DMA_DESC_DR_BIT)
-#define XGENE_DMA_DESC_DST_ADDR_SET(m, v) \
-        (((u64 *)(m))[3] |= (v))
-#define XGENE_DMA_DESC_H0ENQ_NUM_SET(m, v) \
-        (((u64 *)(m))[3] |= ((u64)(v) << XGENE_DMA_DESC_HOENQ_NUM_POS))
 #define XGENE_DMA_DESC_ELERR_RD(m) \
         (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
 #define XGENE_DMA_DESC_LERR_RD(m) \
@@ -158,14 +135,7 @@
         (((elerr) << 4) | (lerr))
 
 /* X-Gene DMA descriptor empty s/w signature */
-#define XGENE_DMA_DESC_EMPTY_INDEX      0
 #define XGENE_DMA_DESC_EMPTY_SIGNATURE  ~0ULL
-#define XGENE_DMA_DESC_SET_EMPTY(m) \
-        (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] = \
-         XGENE_DMA_DESC_EMPTY_SIGNATURE)
-#define XGENE_DMA_DESC_IS_EMPTY(m) \
-        (((u64 *)(m))[XGENE_DMA_DESC_EMPTY_INDEX] == \
-         XGENE_DMA_DESC_EMPTY_SIGNATURE)
 
 /* X-Gene DMA configurable parameters defines */
 #define XGENE_DMA_RING_NUM              512
@@ -184,7 +154,7 @@
 #define XGENE_DMA_XOR_ALIGNMENT         6       /* 64 Bytes */
 #define XGENE_DMA_MAX_XOR_SRC           5
 #define XGENE_DMA_16K_BUFFER_LEN_CODE   0x0
-#define XGENE_DMA_INVALID_LEN_CODE      0x7800
+#define XGENE_DMA_INVALID_LEN_CODE      0x7800000000000000ULL
 
 /* X-Gene DMA descriptor error codes */
 #define ERR_DESC_AXI                    0x01
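
Note: the widened XGENE_DMA_INVALID_LEN_CODE above is the old 16-bit code pre-shifted into the descriptor's buffer-length field, so it can now be OR'd into an extension word directly. A minimal sketch of the equivalence, assuming the 48-bit XGENE_DMA_DESC_BUFLEN_POS from this file; the helper is hypothetical and only illustrates the relationship:

static inline void xgene_dma_check_invalid_len_code(void)
{
        /* old code 0x7800 moved up into bits 48+ of the 64-bit word */
        BUILD_BUG_ON(XGENE_DMA_INVALID_LEN_CODE !=
                     (0x7800ULL << XGENE_DMA_DESC_BUFLEN_POS));
}
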
@@ -214,10 +184,10 @@
 #define ERR_DESC_SRC_INT                0xB
 
 /* X-Gene DMA flyby operation code */
-#define FLYBY_2SRC_XOR                  0x8
-#define FLYBY_3SRC_XOR                  0x9
-#define FLYBY_4SRC_XOR                  0xA
-#define FLYBY_5SRC_XOR                  0xB
+#define FLYBY_2SRC_XOR                  0x80
+#define FLYBY_3SRC_XOR                  0x90
+#define FLYBY_4SRC_XOR                  0xA0
+#define FLYBY_5SRC_XOR                  0xB0
 
 /* X-Gene DMA SW descriptor flags */
 #define XGENE_DMA_FLAG_64B_DESC         BIT(0)
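
Note: with the XGENE_DMA_DESC_FLYBY_SET() macro and its 4-bit shift removed, the flyby opcodes are stored pre-shifted and OR'd into m2 as-is. A minimal sketch of the relationship, assuming the removed XGENE_DMA_DESC_FLYBY_POS value of 4; the helper is hypothetical:

static inline void xgene_dma_check_flyby_codes(void)
{
        /* each new opcode equals the old nibble shifted by the old FLYBY_POS */
        BUILD_BUG_ON(FLYBY_2SRC_XOR != (0x8 << 4));
        BUILD_BUG_ON(FLYBY_3SRC_XOR != (0x9 << 4));
        BUILD_BUG_ON(FLYBY_4SRC_XOR != (0xA << 4));
        BUILD_BUG_ON(FLYBY_5SRC_XOR != (0xB << 4));
}
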
@@ -238,10 +208,10 @@
         dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
 
 struct xgene_dma_desc_hw {
-        u64 m0;
-        u64 m1;
-        u64 m2;
-        u64 m3;
+        __le64 m0;
+        __le64 m1;
+        __le64 m2;
+        __le64 m3;
 };
 
 enum xgene_dma_ring_cfgsize {
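
Note: typing the descriptor words as __le64 lets sparse flag any access that bypasses the endianness helpers; every read-modify-write now goes through cpu_to_le64()/le64_to_cpu(). A minimal sketch of the access pattern used throughout the rest of the patch (the helper names are hypothetical; XGENE_DMA_DESC_IN_BIT is an existing constant in this file):

static inline void xgene_dma_desc_set_in(struct xgene_dma_desc_hw *desc)
{
        /* no-op byte swap on little-endian CPUs, real swap on big-endian */
        desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
}

static inline bool xgene_dma_desc_in_is_set(struct xgene_dma_desc_hw *desc)
{
        return le64_to_cpu(desc->m0) & XGENE_DMA_DESC_IN_BIT;
}
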
@@ -388,18 +358,11 @@ static bool is_pq_enabled(struct xgene_dma *pdma)
         return !(val & XGENE_DMA_PQ_DISABLE_MASK);
 }
 
-static void xgene_dma_cpu_to_le64(u64 *desc, int count)
-{
-        int i;
-
-        for (i = 0; i < count; i++)
-                desc[i] = cpu_to_le64(desc[i]);
-}
-
-static u16 xgene_dma_encode_len(u32 len)
+static u64 xgene_dma_encode_len(size_t len)
 {
         return (len < XGENE_DMA_MAX_BYTE_CNT) ?
-                len : XGENE_DMA_16K_BUFFER_LEN_CODE;
+                ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
+                XGENE_DMA_16K_BUFFER_LEN_CODE;
 }
 
 static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
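
Note: xgene_dma_encode_len() now returns a u64 that is already positioned in the buffer-length field, so callers just OR the result into an extension word. A hedged usage sketch, assuming XGENE_DMA_MAX_BYTE_CNT is the driver's 16 KB limit (0x4000); the function below is illustrative only:

static void xgene_dma_encode_len_example(void)
{
        u64 enc;

        /* partial buffer: the byte count is shifted into bits 48 and up */
        enc = xgene_dma_encode_len(0x1000);     /* == 0x1000ULL << 48 */

        /* full 16 KB buffer: the special 16K code (0x0) is used instead */
        enc = xgene_dma_encode_len(0x4000);     /* == XGENE_DMA_16K_BUFFER_LEN_CODE */
        (void)enc;
}
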
@@ -424,34 +387,50 @@ static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
         return XGENE_DMA_RING_DESC_CNT(ring_state);
 }
 
-static void xgene_dma_set_src_buffer(void *ext8, size_t *len,
+static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
                                      dma_addr_t *paddr)
 {
         size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
                         *len : XGENE_DMA_MAX_BYTE_CNT;
 
-        XGENE_DMA_DESC_BUFADDR_SET(ext8, *paddr);
-        XGENE_DMA_DESC_BUFLEN_SET(ext8, xgene_dma_encode_len(nbytes));
+        *ext8 |= cpu_to_le64(*paddr);
+        *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
         *len -= nbytes;
         *paddr += nbytes;
 }
 
-static void xgene_dma_invalidate_buffer(void *ext8)
+static void xgene_dma_invalidate_buffer(__le64 *ext8)
 {
-        XGENE_DMA_DESC_BUFLEN_SET(ext8, XGENE_DMA_INVALID_LEN_CODE);
+        *ext8 |= cpu_to_le64(XGENE_DMA_INVALID_LEN_CODE);
 }
 
-static void *xgene_dma_lookup_ext8(u64 *desc, int idx)
+static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
 {
-        return (idx % 2) ? (desc + idx - 1) : (desc + idx + 1);
+        switch (idx) {
+        case 0:
+                return &desc->m1;
+        case 1:
+                return &desc->m0;
+        case 2:
+                return &desc->m3;
+        case 3:
+                return &desc->m2;
+        default:
+                pr_err("Invalid dma descriptor index\n");
+        }
+
+        return NULL;
 }
 
-static void xgene_dma_init_desc(void *desc, u16 dst_ring_num)
+static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
+                                u16 dst_ring_num)
 {
-        XGENE_DMA_DESC_C_SET(desc); /* Coherent IO */
-        XGENE_DMA_DESC_IN_SET(desc);
-        XGENE_DMA_DESC_H0ENQ_NUM_SET(desc, dst_ring_num);
-        XGENE_DMA_DESC_RTYPE_SET(desc, XGENE_DMA_RING_OWNER_DMA);
+        desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
+        desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
+                                XGENE_DMA_DESC_RTYPE_POS);
+        desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
+        desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
+                                XGENE_DMA_DESC_HOENQ_NUM_POS);
 }
 
 static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
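
Note: the switch in xgene_dma_lookup_ext8() expresses the same word swapping as the old pointer arithmetic, only against named struct members: extension buffer 0 lives in m1, 1 in m0, 2 in m3 and 3 in m2. A minimal sketch of the equivalent computation (the helper name is hypothetical):

static __le64 *xgene_dma_lookup_ext8_by_arith(struct xgene_dma_desc_hw *desc,
                                              int idx)
{
        __le64 *m = &desc->m0;

        /* same mapping as the switch above: 0->m1, 1->m0, 2->m3, 3->m2 */
        return (idx % 2) ? (m + idx - 1) : (m + idx + 1);
}
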
@@ -459,7 +438,7 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
                                     dma_addr_t dst, dma_addr_t src,
                                     size_t len)
 {
-        void *desc1, *desc2;
+        struct xgene_dma_desc_hw *desc1, *desc2;
         int i;
 
         /* Get 1st descriptor */
@@ -467,23 +446,21 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
         xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
 
         /* Set destination address */
-        XGENE_DMA_DESC_DR_SET(desc1);
-        XGENE_DMA_DESC_DST_ADDR_SET(desc1, dst);
+        desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
+        desc1->m3 |= cpu_to_le64(dst);
 
         /* Set 1st source address */
-        xgene_dma_set_src_buffer(desc1 + 8, &len, &src);
+        xgene_dma_set_src_buffer(&desc1->m1, &len, &src);
 
-        if (len <= 0) {
-                desc2 = NULL;
-                goto skip_additional_src;
-        }
+        if (!len)
+                return;
 
         /*
          * We need to split this source buffer,
          * and need to use 2nd descriptor
          */
         desc2 = &desc_sw->desc2;
-        XGENE_DMA_DESC_NV_SET(desc1);
+        desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
 
         /* Set 2nd to 5th source address */
         for (i = 0; i < 4 && len; i++)
@@ -496,12 +473,6 @@ static void xgene_dma_prep_cpy_desc(struct xgene_dma_chan *chan,
 
         /* Updated flag that we have prepared 64B descriptor */
         desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
-
-skip_additional_src:
-        /* Hardware stores descriptor in little endian format */
-        xgene_dma_cpu_to_le64(desc1, 4);
-        if (desc2)
-                xgene_dma_cpu_to_le64(desc2, 4);
 }
 
 static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
@@ -510,7 +481,7 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
                                     u32 src_cnt, size_t *nbytes,
                                     const u8 *scf)
 {
-        void *desc1, *desc2;
+        struct xgene_dma_desc_hw *desc1, *desc2;
         size_t len = *nbytes;
         int i;
 
@@ -521,28 +492,24 @@ static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
         xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
 
         /* Set destination address */
-        XGENE_DMA_DESC_DR_SET(desc1);
-        XGENE_DMA_DESC_DST_ADDR_SET(desc1, *dst);
+        desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
+        desc1->m3 |= cpu_to_le64(*dst);
 
         /* We have multiple source addresses, so need to set NV bit*/
-        XGENE_DMA_DESC_NV_SET(desc1);
+        desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
 
         /* Set flyby opcode */
-        XGENE_DMA_DESC_FLYBY_SET(desc1, xgene_dma_encode_xor_flyby(src_cnt));
+        desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));
 
         /* Set 1st to 5th source addresses */
         for (i = 0; i < src_cnt; i++) {
                 len = *nbytes;
-                xgene_dma_set_src_buffer((i == 0) ? (desc1 + 8) :
+                xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
                                          xgene_dma_lookup_ext8(desc2, i - 1),
                                          &len, &src[i]);
-                XGENE_DMA_DESC_MULTI_SET(desc1, scf[i], i);
+                desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
         }
 
-        /* Hardware stores descriptor in little endian format */
-        xgene_dma_cpu_to_le64(desc1, 4);
-        xgene_dma_cpu_to_le64(desc2, 4);
-
         /* Update meta data */
         *nbytes = len;
         *dst += XGENE_DMA_MAX_BYTE_CNT;
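
Note: because every field is now written with cpu_to_le64() at the point of assignment, the trailing whole-descriptor byte swap (xgene_dma_cpu_to_le64()) is no longer needed here or in the memcpy path above. A hedged sketch of what the loop builds up in m2 for a three-source XOR, using constants from this file (the function name and the scf[] values are illustrative):

static void xgene_dma_example_xor_m2(struct xgene_dma_desc_hw *desc1,
                                     const u8 *scf)
{
        desc1->m2 |= cpu_to_le64(FLYBY_3SRC_XOR);       /* opcode in the low byte */
        desc1->m2 |= cpu_to_le64(scf[0] << 8);          /* 1st source coefficient */
        desc1->m2 |= cpu_to_le64(scf[1] << 16);         /* 2nd source coefficient */
        desc1->m2 |= cpu_to_le64(scf[2] << 24);         /* 3rd source coefficient */
}
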
@@ -738,7 +705,7 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
  * xgene_chan_xfer_ld_pending - push any pending transactions to hw
  * @chan : X-Gene DMA channel
  *
- * LOCKING: must hold chan->desc_lock
+ * LOCKING: must hold chan->lock
  */
 static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 {
@@ -808,7 +775,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                 desc_hw = &ring->desc_hw[ring->head];
 
                 /* Check if this descriptor has been completed */
-                if (unlikely(XGENE_DMA_DESC_IS_EMPTY(desc_hw)))
+                if (unlikely(le64_to_cpu(desc_hw->m0) ==
+                             XGENE_DMA_DESC_EMPTY_SIGNATURE))
                         break;
 
                 if (++ring->head == ring->slots)
@@ -842,7 +810,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
                 iowrite32(-1, ring->cmd);
 
                 /* Mark this hw descriptor as processed */
-                XGENE_DMA_DESC_SET_EMPTY(desc_hw);
+                desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 
                 xgene_dma_run_tx_complete_actions(chan, desc_sw);
 
@@ -889,7 +857,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
  * @chan: X-Gene DMA channel
  * @list: the list to free
  *
- * LOCKING: must hold chan->desc_lock
+ * LOCKING: must hold chan->lock
  */
 static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
                                      struct list_head *list)
@@ -900,15 +868,6 @@ static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
                 xgene_dma_clean_descriptor(chan, desc);
 }
 
-static void xgene_dma_free_tx_desc_list(struct xgene_dma_chan *chan,
-                                        struct list_head *list)
-{
-        struct xgene_dma_desc_sw *desc, *_desc;
-
-        list_for_each_entry_safe(desc, _desc, list, node)
-                xgene_dma_clean_descriptor(chan, desc);
-}
-
 static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
 {
         struct xgene_dma_chan *chan = to_dma_chan(dchan);
@@ -985,7 +944,7 @@ fail:
         if (!first)
                 return NULL;
 
-        xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+        xgene_dma_free_desc_list(chan, &first->tx_list);
         return NULL;
 }
 
@@ -1093,7 +1052,7 @@ fail:
         if (!first)
                 return NULL;
 
-        xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+        xgene_dma_free_desc_list(chan, &first->tx_list);
         return NULL;
 }
 
@@ -1141,7 +1100,7 @@ fail:
         if (!first)
                 return NULL;
 
-        xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+        xgene_dma_free_desc_list(chan, &first->tx_list);
         return NULL;
 }
 
@@ -1218,7 +1177,7 @@ fail:
         if (!first)
                 return NULL;
 
-        xgene_dma_free_tx_desc_list(chan, &first->tx_list);
+        xgene_dma_free_desc_list(chan, &first->tx_list);
         return NULL;
 }
 
@@ -1316,7 +1275,6 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
 {
         void *ring_cfg = ring->state;
         u64 addr = ring->desc_paddr;
-        void *desc;
         u32 i, val;
 
         ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
@@ -1358,8 +1316,10 @@ static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
 
         /* Set empty signature to DMA Rx ring descriptors */
         for (i = 0; i < ring->slots; i++) {
+                struct xgene_dma_desc_hw *desc;
+
                 desc = &ring->desc_hw[i];
-                XGENE_DMA_DESC_SET_EMPTY(desc);
+                desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
         }
 
         /* Enable DMA Rx ring interrupt */
@@ -1928,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
                 return -ENOMEM;
         }
 
+        pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
+
         /* Get efuse csr region */
         res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
         if (!res) {