author	FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2010-04-12 10:32:12 -0400
committer	David S. Miller <davem@davemloft.net>	2010-04-13 05:54:18 -0400
commit	56e3b9df1376fa865ae929909b02f6840207520f (patch)
tree	de3fcfa6fe70449792dc02081db86c1aec235f78 /drivers/net/cxgb3
parent	094f92a61aa044142c231e04c35c00a9cc70adbc (diff)
cxgb3: use the DMA state API instead of the pci equivalents
This replaces the PCI DMA state API (include/linux/pci-dma.h) with the DMA equivalents, since the PCI DMA state API will become obsolete. No functional change.

For further information about the background: http://marc.info/?l=linux-netdev&m=127037540020276&w=2

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
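For reference, the conversion is a one-for-one rename of the unmap-state bookkeeping macros. A minimal sketch of the before/after pattern, assuming the generic macros from <linux/dma-mapping.h>; the struct and helper names below are illustrative, not taken from the driver:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

struct example_sw_desc {			/* illustrative only */
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* was DECLARE_PCI_UNMAP_ADDR(dma_addr) */
};

static void example_save_mapping(struct example_sw_desc *sd, dma_addr_t mapping)
{
	/* was pci_unmap_addr_set(sd, dma_addr, mapping) */
	dma_unmap_addr_set(sd, dma_addr, mapping);
}

static dma_addr_t example_get_mapping(const struct example_sw_desc *sd)
{
	/* was pci_unmap_addr(sd, dma_addr) */
	return dma_unmap_addr(sd, dma_addr);
}

Note that the streaming-DMA calls themselves (pci_unmap_single(), pci_dma_sync_single_for_cpu(), PCI_DMA_FROMDEVICE, and so on) are left untouched by this patch; only the state-tracking macros change.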
Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--	drivers/net/cxgb3/sge.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7fab3f5..5962b911b5bd 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
 		struct sk_buff *skb;
 		struct fl_pg_chunk pg_chunk;
 	};
-	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {		/* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
 	 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
 	 */
 	struct dummy {
-		DECLARE_PCI_UNMAP_ADDR(addr);
+		DEFINE_DMA_UNMAP_ADDR(addr);
 	};
 
 	return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
 		put_page(d->pg_chunk.page);
 		d->pg_chunk.page = NULL;
 	} else {
-		pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
+		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
 				 q->buf_size, PCI_DMA_FROMDEVICE);
 		kfree_skb(d->skb);
 		d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
 	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
 		return -ENOMEM;
 
-	pci_unmap_addr_set(sd, dma_addr, mapping);
+	dma_unmap_addr_set(sd, dma_addr, mapping);
 
 	d->addr_lo = cpu_to_be32(mapping);
 	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
 				break;
 			}
 			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
-			pci_unmap_addr_set(sd, dma_addr, mapping);
+			dma_unmap_addr_set(sd, dma_addr, mapping);
 
 			add_one_rx_chunk(mapping, d, q->gen);
 			pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
 		if (likely(skb != NULL)) {
 			__skb_put(skb, len);
 			pci_dma_sync_single_for_cpu(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
+					    dma_unmap_addr(sd, dma_addr), len,
 					    PCI_DMA_FROMDEVICE);
 			memcpy(skb->data, sd->skb->data, len);
 			pci_dma_sync_single_for_device(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr), len,
+					    dma_unmap_addr(sd, dma_addr), len,
 					    PCI_DMA_FROMDEVICE);
 		} else if (!drop_thres)
 			goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
 		goto recycle;
 
 use_orig_buf:
-	pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
 			 fl->buf_size, PCI_DMA_FROMDEVICE);
 	skb = sd->skb;
 	skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
 	struct sk_buff *newskb, *skb;
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 
-	dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
+	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
 
 	newskb = skb = q->pg_skb;
 	if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 		fl->credits--;
 
 		pci_dma_sync_single_for_cpu(adap->pdev,
-					    pci_unmap_addr(sd, dma_addr),
+					    dma_unmap_addr(sd, dma_addr),
 					    fl->buf_size - SGE_PG_RSVD,
 					    PCI_DMA_FROMDEVICE);
 
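The need_skb_unmap() hunk above depends on the state macros compiling away when the platform does not track DMA unmap state. A simplified sketch of how the generic macros are conditionalized; the kernel's actual definitions live in include/linux/dma-mapping.h and may differ in detail:

/* sketch only, not the verbatim kernel source */
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#endif

With CONFIG_NEED_DMA_MAP_STATE unset, DEFINE_DMA_UNMAP_ADDR() expands to nothing, struct dummy in need_skb_unmap() becomes empty, and sizeof(struct dummy) evaluates to 0 under GNU C, so the unmapping paths are optimized out at compile time, exactly as they were with the PCI variants.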