author     Rafał Miłecki <zajec5@gmail.com>        2013-09-15 17:13:18 -0400
committer  David S. Miller <davem@davemloft.net>   2013-09-16 21:30:59 -0400
commit     9900303ec7698d82c8f7a018b134a7c1ec557499
tree       8355a5903e2a1b829874c6f9daf1dbef892e2cc3 /drivers/net
parent     3647268dedfe95945be05c9dde2bdbda7cfc6e91
bgmac: implement unaligned addressing for DMA rings that support it
This is an important patch for new devices that support unaligned addressing. Those devices suffer from a backward-compatibility bug in the DMA engine: in theory we should be able to use the old mechanism, but in practice the DMA address seems to be randomly copied into the status register when the hardware reaches the end of a ring. That breaks reading the slot number from the status register, so we can't use DMA anymore.

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
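For reference, the slot decode this patch introduces can be summarised in the sketch below. It only restates what the bgmac_dma_tx_free() hunk does after the change; the helper name is illustrative and not part of the driver, while bgmac_read(), BGMAC_DMA_TX_STATUS, BGMAC_DMA_TX_STATDPTR and struct bgmac_dma_desc are the driver's existing symbols:

        /* Illustrative helper, not part of the driver: derive the first slot
         * that the hardware has not consumed yet. ring->index_base holds
         * lower_32_bits(ring->dma_base) for rings using unaligned addressing
         * and 0 otherwise, so the subtraction is a no-op on old-style rings.
         */
        static u32 bgmac_dma_tx_empty_slot(struct bgmac *bgmac,
                                           struct bgmac_dma_ring *ring)
        {
                u32 empty_slot;

                empty_slot = bgmac_read(bgmac,
                                        ring->mmio_base + BGMAC_DMA_TX_STATUS);
                empty_slot &= BGMAC_DMA_TX_STATDPTR;    /* descriptor offset bits */
                empty_slot -= ring->index_base;         /* undo the base offset */
                empty_slot &= BGMAC_DMA_TX_STATDPTR;    /* re-mask after wrap-around */
                return empty_slot / sizeof(struct bgmac_dma_desc);
        }

The RX path applies the same subtract-and-remask sequence in bgmac_dma_rx_read().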
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c   38
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h    2
2 files changed, 30 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 59f3e0ce56df..249468f95365 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -157,6 +157,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
         if (++ring->end >= BGMAC_TX_RING_SLOTS)
                 ring->end = 0;
         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
+                    ring->index_base +
                     ring->end * sizeof(struct bgmac_dma_desc));
 
         /* Always keep one slot free to allow detecting bugged calls. */
@@ -181,6 +182,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
         /* The last slot that hardware didn't consume yet */
         empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
         empty_slot &= BGMAC_DMA_TX_STATDPTR;
+        empty_slot -= ring->index_base;
+        empty_slot &= BGMAC_DMA_TX_STATDPTR;
         empty_slot /= sizeof(struct bgmac_dma_desc);
 
         while (ring->start != empty_slot) {
@@ -274,6 +277,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
 
         end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
         end_slot &= BGMAC_DMA_RX_STATDPTR;
+        end_slot -= ring->index_base;
+        end_slot &= BGMAC_DMA_RX_STATDPTR;
         end_slot /= sizeof(struct bgmac_dma_desc);
 
         ring->end = end_slot;
@@ -418,9 +423,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
                 ring = &bgmac->tx_ring[i];
                 ring->num_slots = BGMAC_TX_RING_SLOTS;
                 ring->mmio_base = ring_base[i];
-                if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
-                        bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-                                   ring->mmio_base);
 
                 /* Alloc ring of descriptors */
                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -435,6 +437,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
                 if (ring->dma_base & 0xC0000000)
                         bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+                ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+                                                      BGMAC_DMA_RING_TX);
+                if (ring->unaligned)
+                        ring->index_base = lower_32_bits(ring->dma_base);
+                else
+                        ring->index_base = 0;
+
                 /* No need to alloc TX slots yet */
         }
 
@@ -444,9 +453,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
                 ring = &bgmac->rx_ring[i];
                 ring->num_slots = BGMAC_RX_RING_SLOTS;
                 ring->mmio_base = ring_base[i];
-                if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
-                        bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-                                   ring->mmio_base);
 
                 /* Alloc ring of descriptors */
                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
@@ -462,6 +468,13 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
                 if (ring->dma_base & 0xC0000000)
                         bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
+                ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
+                                                      BGMAC_DMA_RING_RX);
+                if (ring->unaligned)
+                        ring->index_base = lower_32_bits(ring->dma_base);
+                else
+                        ring->index_base = 0;
+
                 /* Alloc RX slots */
                 for (j = 0; j < ring->num_slots; j++) {
                         err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
@@ -489,12 +502,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
                 ring = &bgmac->tx_ring[i];
 
-                /* We don't implement unaligned addressing, so enable first */
-                bgmac_dma_tx_enable(bgmac, ring);
+                if (!ring->unaligned)
+                        bgmac_dma_tx_enable(bgmac, ring);
                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
                             lower_32_bits(ring->dma_base));
                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
                             upper_32_bits(ring->dma_base));
+                if (ring->unaligned)
+                        bgmac_dma_tx_enable(bgmac, ring);
 
                 ring->start = 0;
                 ring->end = 0;  /* Points the slot that should *not* be read */
@@ -505,12 +520,14 @@ static void bgmac_dma_init(struct bgmac *bgmac)
 
                 ring = &bgmac->rx_ring[i];
 
-                /* We don't implement unaligned addressing, so enable first */
-                bgmac_dma_rx_enable(bgmac, ring);
+                if (!ring->unaligned)
+                        bgmac_dma_rx_enable(bgmac, ring);
                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
                             lower_32_bits(ring->dma_base));
                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
                             upper_32_bits(ring->dma_base));
+                if (ring->unaligned)
+                        bgmac_dma_rx_enable(bgmac, ring);
 
                 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
                      j++, dma_desc++) {
@@ -531,6 +548,7 @@ static void bgmac_dma_init(struct bgmac *bgmac)
         }
 
         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+                    ring->index_base +
                     ring->num_slots * sizeof(struct bgmac_dma_desc));
 
         ring->start = 0;
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 12a35cf9bb81..66c8afbdc8c7 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -384,6 +384,8 @@ struct bgmac_dma_ring {
         u16 mmio_base;
         struct bgmac_dma_desc *cpu_base;
         dma_addr_t dma_base;
+        u32 index_base; /* Used for unaligned rings only, otherwise 0 */
+        bool unaligned;
 
         struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
 };