author    Divy Le Ray <divy@chelsio.com>          2009-05-28 07:23:02 -0400
committer David S. Miller <davem@davemloft.net>   2009-05-29 04:54:37 -0400
commit    10b6d95612672f89deb39b5a60fb677c78ba4844 (patch)
tree      5b853a9db086d4c0894d93f54e5bf666f0b8fa70 /drivers/net/cxgb3/sge.c
parent    4d3383d0adb6d1047fb9ee3edd9dc05e4d2184f0 (diff)
cxgb3: fix dma mapping regression
Commit 5e68b772e6efd189d6aca76f6872fb75d51ace60 ("cxgb3: map entire Rx page, feed map+offset to Rx ring") introduced a regression on platforms that define DECLARE_PCI_UNMAP_ADDR() and its related macros as no-ops. Rx descriptors are fed with a page buffer bus address plus a page chunk offset. The page buffer bus address is set and retrieved through pci_unmap_addr_set() and pci_unmap_addr(), but these calls compile away on x86 (when CONFIG_DMA_API_DEBUG is not set), so the HW ends up with a bogus bus address.

This patch saves the page buffer bus address unconditionally, on all platforms.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
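To see why the old code broke, consider how the no-op variants of these macros compile away. The sketch below is illustrative only, not copied from the patch or from any particular arch header; it mimics the no-op case the commit message describes:

/* Illustrative sketch: on platforms where pci_unmap_page() needs no
 * saved state, the compat macros declare no storage, discard stores,
 * and return a constant on reads.
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)               /* declares no member */
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)         do { } while (0)
#define pci_unmap_addr(PTR, ADDR_NAME)                  (0)

/* Simplified page-chunk state (hypothetical name): with the no-op macro,
 * no "mapping" field exists in the struct at all.
 */
struct pg_chunk_sketch {
        struct page *page;
        unsigned int offset;
        DECLARE_PCI_UNMAP_ADDR(mapping) /* expands to nothing */
};

/* The pre-fix refill path effectively computed
 *      mapping = pci_unmap_addr(&sd->pg_chunk, mapping) + sd->pg_chunk.offset;
 * which, under the no-op macros, is 0 + offset: the bogus bus address
 * that was fed to the HW.
 */

On configurations that do keep the unmap state (for example with CONFIG_DMA_API_DEBUG set), the same macros store and return a real dma_addr_t, which is why the bug only surfaced where they compile away.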
Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--  drivers/net/cxgb3/sge.c | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 26d3587f3399..b3ee2bc1a005 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -355,7 +355,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 		(*d->pg_chunk.p_cnt)--;
 		if (!*d->pg_chunk.p_cnt)
 			pci_unmap_page(pdev,
-				       pci_unmap_addr(&d->pg_chunk, mapping),
+				       d->pg_chunk.mapping,
 				       q->alloc_size, PCI_DMA_FROMDEVICE);
 
 		put_page(d->pg_chunk.page);
@@ -454,7 +454,7 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
 		q->pg_chunk.offset = 0;
 		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
 				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
+		q->pg_chunk.mapping = mapping;
 	}
 	sd->pg_chunk = q->pg_chunk;
 
@@ -511,8 +511,7 @@ static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 nomem:				q->alloc_failed++;
 			break;
 		}
-		mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
-			  sd->pg_chunk.offset;
+		mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
 		pci_unmap_addr_set(sd, dma_addr, mapping);
 
 		add_one_rx_chunk(mapping, d, q->gen);
@@ -881,7 +880,7 @@ recycle:
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt)
 		pci_unmap_page(adap->pdev,
-			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
 			       PCI_DMA_FROMDEVICE);
 	if (!skb) {
@@ -2096,7 +2095,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	(*sd->pg_chunk.p_cnt)--;
 	if (!*sd->pg_chunk.p_cnt)
 		pci_unmap_page(adap->pdev,
-			       pci_unmap_addr(&sd->pg_chunk, mapping),
+			       sd->pg_chunk.mapping,
 			       fl->alloc_size,
 			       PCI_DMA_FROMDEVICE);
 
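The hunks above rely on pg_chunk carrying a real mapping field on every platform. That companion change would land in drivers/net/cxgb3/adapter.h, which is outside this diffstat (limited to sge.c); the sketch below shows the assumed result, with field names taken from the code above except va and p_cnt's exact placement, which are assumptions:

/* Assumed companion change (not shown in this diffstat): replace the
 * compile-away DECLARE_PCI_UNMAP_ADDR(mapping) state with an
 * unconditional dma_addr_t, so the bus address survives on all platforms.
 */
struct fl_pg_chunk {
	struct page *page;
	void *va;
	unsigned int offset;
	unsigned long *p_cnt;
	dma_addr_t mapping;	/* always stored, never compiled away */
};

With the field always present, plain assignment and field reads (q->pg_chunk.mapping = mapping; sd->pg_chunk.mapping + offset) replace the macro calls in the Rx paths, while pci_unmap_addr_set(sd, dma_addr, mapping) is kept where the saved address is only needed for unmapping.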