about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/chelsio/cxgb4
diff options
context:
space:
mode:
authorIan Campbell <Ian.Campbell@citrix.com>2011-10-19 19:01:46 -0400
committerDavid S. Miller <davem@davemloft.net>2011-10-21 02:52:52 -0400
commite91b0f2491f7a7b21c4e562df09f3dbe551f0fe2 (patch)
tree2299183e6254d11cb040cfaa52ad3990c5abefdc /drivers/net/ethernet/chelsio/cxgb4
parent311761c8a553adaa3ad7482b1fdde1ce9042d3e2 (diff)
cxgb4: convert to SKB paged frag API.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Dimitris Michailidis <dm@chelsio.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4')
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c45
2 files changed, 24 insertions, 23 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 223a7f72343b..0fe18850c838 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -326,7 +326,7 @@ struct sge_fl { /* SGE free-buffer queue state */
326 326
327/* A packet gather list */ 327/* A packet gather list */
328struct pkt_gl { 328struct pkt_gl {
329 skb_frag_t frags[MAX_SKB_FRAGS]; 329 struct page_frag frags[MAX_SKB_FRAGS];
330 void *va; /* virtual address of first byte */ 330 void *va; /* virtual address of first byte */
331 unsigned int nfrags; /* # of fragments */ 331 unsigned int nfrags; /* # of fragments */
332 unsigned int tot_len; /* total length of fragments */ 332 unsigned int tot_len; /* total length of fragments */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 14f31d3a18d7..ddc16985d0f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -215,8 +215,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
215 end = &si->frags[si->nr_frags]; 215 end = &si->frags[si->nr_frags];
216 216
217 for (fp = si->frags; fp < end; fp++) { 217 for (fp = si->frags; fp < end; fp++) {
218 *++addr = dma_map_page(dev, fp->page, fp->page_offset, 218 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
219 skb_frag_size(fp), DMA_TO_DEVICE); 219 DMA_TO_DEVICE);
220 if (dma_mapping_error(dev, *addr)) 220 if (dma_mapping_error(dev, *addr))
221 goto unwind; 221 goto unwind;
222 } 222 }
@@ -1409,22 +1409,23 @@ int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1409} 1409}
1410EXPORT_SYMBOL(cxgb4_ofld_send); 1410EXPORT_SYMBOL(cxgb4_ofld_send);
1411 1411
1412static inline void copy_frags(struct skb_shared_info *ssi, 1412static inline void copy_frags(struct sk_buff *skb,
1413 const struct pkt_gl *gl, unsigned int offset) 1413 const struct pkt_gl *gl, unsigned int offset)
1414{ 1414{
1415 unsigned int n; 1415 int i;
1416 1416
1417 /* usually there's just one frag */ 1417 /* usually there's just one frag */
1418 ssi->frags[0].page = gl->frags[0].page; 1418 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1419 ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; 1419 gl->frags[0].offset + offset,
1420 skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - offset); 1420 gl->frags[0].size - offset);
1421 ssi->nr_frags = gl->nfrags; 1421 skb_shinfo(skb)->nr_frags = gl->nfrags;
1422 n = gl->nfrags - 1; 1422 for (i = 1; i < gl->nfrags; i++)
1423 if (n) 1423 __skb_fill_page_desc(skb, i, gl->frags[i].page,
1424 memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t)); 1424 gl->frags[i].offset,
1425 gl->frags[i].size);
1425 1426
1426 /* get a reference to the last page, we don't own it */ 1427 /* get a reference to the last page, we don't own it */
1427 get_page(gl->frags[n].page); 1428 get_page(gl->frags[gl->nfrags - 1].page);
1428} 1429}
1429 1430
1430/** 1431/**
@@ -1459,7 +1460,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1459 __skb_put(skb, pull_len); 1460 __skb_put(skb, pull_len);
1460 skb_copy_to_linear_data(skb, gl->va, pull_len); 1461 skb_copy_to_linear_data(skb, gl->va, pull_len);
1461 1462
1462 copy_frags(skb_shinfo(skb), gl, pull_len); 1463 copy_frags(skb, gl, pull_len);
1463 skb->len = gl->tot_len; 1464 skb->len = gl->tot_len;
1464 skb->data_len = skb->len - pull_len; 1465 skb->data_len = skb->len - pull_len;
1465 skb->truesize += skb->data_len; 1466 skb->truesize += skb->data_len;
@@ -1478,7 +1479,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1478static void t4_pktgl_free(const struct pkt_gl *gl) 1479static void t4_pktgl_free(const struct pkt_gl *gl)
1479{ 1480{
1480 int n; 1481 int n;
1481 const skb_frag_t *p; 1482 const struct page_frag *p;
1482 1483
1483 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) 1484 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1484 put_page(p->page); 1485 put_page(p->page);
@@ -1522,7 +1523,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1522 return; 1523 return;
1523 } 1524 }
1524 1525
1525 copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD); 1526 copy_frags(skb, gl, RX_PKT_PAD);
1526 skb->len = gl->tot_len - RX_PKT_PAD; 1527 skb->len = gl->tot_len - RX_PKT_PAD;
1527 skb->data_len = skb->len; 1528 skb->data_len = skb->len;
1528 skb->truesize += skb->data_len; 1529 skb->truesize += skb->data_len;
@@ -1698,7 +1699,7 @@ static int process_responses(struct sge_rspq *q, int budget)
1698 rmb(); 1699 rmb();
1699 rsp_type = RSPD_TYPE(rc->type_gen); 1700 rsp_type = RSPD_TYPE(rc->type_gen);
1700 if (likely(rsp_type == RSP_TYPE_FLBUF)) { 1701 if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1701 skb_frag_t *fp; 1702 struct page_frag *fp;
1702 struct pkt_gl si; 1703 struct pkt_gl si;
1703 const struct rx_sw_desc *rsd; 1704 const struct rx_sw_desc *rsd;
1704 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; 1705 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
@@ -1717,9 +1718,9 @@ static int process_responses(struct sge_rspq *q, int budget)
1717 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; 1718 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1718 bufsz = get_buf_size(rsd); 1719 bufsz = get_buf_size(rsd);
1719 fp->page = rsd->page; 1720 fp->page = rsd->page;
1720 fp->page_offset = q->offset; 1721 fp->offset = q->offset;
1721 skb_frag_size_set(fp, min(bufsz, len)); 1722 fp->size = min(bufsz, len);
1722 len -= skb_frag_size(fp); 1723 len -= fp->size;
1723 if (!len) 1724 if (!len)
1724 break; 1725 break;
1725 unmap_rx_buf(q->adap, &rxq->fl); 1726 unmap_rx_buf(q->adap, &rxq->fl);
@@ -1731,16 +1732,16 @@ static int process_responses(struct sge_rspq *q, int budget)
1731 */ 1732 */
1732 dma_sync_single_for_cpu(q->adap->pdev_dev, 1733 dma_sync_single_for_cpu(q->adap->pdev_dev,
1733 get_buf_addr(rsd), 1734 get_buf_addr(rsd),
1734 skb_frag_size(fp), DMA_FROM_DEVICE); 1735 fp->size, DMA_FROM_DEVICE);
1735 1736
1736 si.va = page_address(si.frags[0].page) + 1737 si.va = page_address(si.frags[0].page) +
1737 si.frags[0].page_offset; 1738 si.frags[0].offset;
1738 prefetch(si.va); 1739 prefetch(si.va);
1739 1740
1740 si.nfrags = frags + 1; 1741 si.nfrags = frags + 1;
1741 ret = q->handler(q, q->cur_desc, &si); 1742 ret = q->handler(q, q->cur_desc, &si);
1742 if (likely(ret == 0)) 1743 if (likely(ret == 0))
1743 q->offset += ALIGN(skb_frag_size(fp), FL_ALIGN); 1744 q->offset += ALIGN(fp->size, FL_ALIGN);
1744 else 1745 else
1745 restore_rx_bufs(&si, &rxq->fl, frags); 1746 restore_rx_bufs(&si, &rxq->fl, frags);
1746 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 1747 } else if (likely(rsp_type == RSP_TYPE_CPL)) {