author		Ian Campbell <Ian.Campbell@citrix.com>		2011-10-19 19:01:47 -0400
committer	David S. Miller <davem@davemloft.net>		2011-10-21 02:52:52 -0400
commit		a0006a86cb19543f126bb2ee3d37baef82080763 (patch)
tree		f207f7ef9ffbca68c997ffcaa120924a2a38fe16 /drivers/net/ethernet/chelsio/cxgb4vf
parent		e91b0f2491f7a7b21c4e562df09f3dbe551f0fe2 (diff)
cxgb4vf: convert to SKB paged frag API.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Casey Leedom <leedom@chelsio.com>
Cc: netdev@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
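
At a glance, the conversion swaps open-coded skb_frag_t field access for the
paged-frag helpers. A minimal before/after sketch of the DMA-mapping call in
map_skb(), lifted from the sge.c hunk below with surrounding code elided:

	/* before: reach into skb_frag_t fields directly */
	*++addr = dma_map_page(dev, fp->page, fp->page_offset,
			       skb_frag_size(fp), DMA_TO_DEVICE);

	/* after: skb_frag_dma_map() resolves page and offset itself */
	*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
				   DMA_TO_DEVICE);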
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4vf')
 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h |  2 +-
 drivers/net/ethernet/chelsio/cxgb4vf/sge.c     | 92 ++++++++++-----------
 2 files changed, 41 insertions(+), 53 deletions(-)
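
The driver's private gather list (struct pkt_gl) likewise stops borrowing
skb_frag_t and carries struct page_frag entries, whose fields
process_responses() fills in directly. As used by this patch the relevant
fields are page, offset and size; a sketch of the layout (the field types are
an assumption here, see the kernel's skbuff.h of this era for the
authoritative definition):

	struct page_frag {
		struct page *page;	/* fragment page, fp->page */
		__u32 offset;		/* fp->offset, was fp->page_offset */
		__u32 size;		/* fp->size, was skb_frag_size(fp) */
	};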
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 594334d5c711..611396c4b381 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -144,7 +144,7 @@ struct sge_fl {
  * An ingress packet gather list.
  */
 struct pkt_gl {
-	skb_frag_t frags[MAX_SKB_FRAGS];
+	struct page_frag frags[MAX_SKB_FRAGS];
 	void *va;			/* virtual address of first byte */
 	unsigned int nfrags;		/* # of fragments */
 	unsigned int tot_len;		/* total length of fragments */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index c2d456d90c00..8d5d55ad102d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -296,8 +296,8 @@ static int map_skb(struct device *dev, const struct sk_buff *skb,
 	si = skb_shinfo(skb);
 	end = &si->frags[si->nr_frags];
 	for (fp = si->frags; fp < end; fp++) {
-		*++addr = dma_map_page(dev, fp->page, fp->page_offset,
-				       skb_frag_size(fp), DMA_TO_DEVICE);
+		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
+					   DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, *addr))
 			goto unwind;
 	}
@@ -1357,6 +1357,35 @@ out_free:
 }
 
 /**
+ * copy_frags - copy fragments from gather list into skb_shared_info
+ * @skb: destination skb
+ * @gl: source internal packet gather list
+ * @offset: packet start offset in first page
+ *
+ * Copy an internal packet gather list into a Linux skb_shared_info
+ * structure.
+ */
+static inline void copy_frags(struct sk_buff *skb,
+			      const struct pkt_gl *gl,
+			      unsigned int offset)
+{
+	int i;
+
+	/* usually there's just one frag */
+	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
+			     gl->frags[0].offset + offset,
+			     gl->frags[0].size - offset);
+	skb_shinfo(skb)->nr_frags = gl->nfrags;
+	for (i = 1; i < gl->nfrags; i++)
+		__skb_fill_page_desc(skb, i, gl->frags[i].page,
+				     gl->frags[i].offset,
+				     gl->frags[i].size);
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+/**
  * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
  * @gl: the gather list
  * @skb_len: size of sk_buff main body if it carries fragments
@@ -1369,7 +1398,6 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
 				  unsigned int skb_len, unsigned int pull_len)
 {
 	struct sk_buff *skb;
-	struct skb_shared_info *ssi;
 
 	/*
 	 * If the ingress packet is small enough, allocate an skb large enough
@@ -1396,21 +1424,10 @@ struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
 		__skb_put(skb, pull_len);
 		skb_copy_to_linear_data(skb, gl->va, pull_len);
 
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
-		skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len);
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-
+		copy_frags(skb, gl, pull_len);
 		skb->len = gl->tot_len;
 		skb->data_len = skb->len - pull_len;
 		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
 	}
 
 out:
@@ -1434,35 +1451,6 @@ void t4vf_pktgl_free(const struct pkt_gl *gl)
 }
 
 /**
- * copy_frags - copy fragments from gather list into skb_shared_info
- * @si: destination skb shared info structure
- * @gl: source internal packet gather list
- * @offset: packet start offset in first page
- *
- * Copy an internal packet gather list into a Linux skb_shared_info
- * structure.
- */
-static inline void copy_frags(struct skb_shared_info *si,
-			      const struct pkt_gl *gl,
-			      unsigned int offset)
-{
-	unsigned int n;
-
-	/* usually there's just one frag */
-	si->frags[0].page = gl->frags[0].page;
-	si->frags[0].page_offset = gl->frags[0].page_offset + offset;
-	skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset);
-	si->nr_frags = gl->nfrags;
-
-	n = gl->nfrags - 1;
-	if (n)
-		memcpy(&si->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
-
-	/* get a reference to the last page, we don't own it */
-	get_page(gl->frags[n].page);
-}
-
-/**
  * do_gro - perform Generic Receive Offload ingress packet processing
  * @rxq: ingress RX Ethernet Queue
  * @gl: gather list for ingress packet
@@ -1484,7 +1472,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb_shinfo(skb), gl, PKTSHIFT);
+	copy_frags(skb, gl, PKTSHIFT);
 	skb->len = gl->tot_len - PKTSHIFT;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
@@ -1667,7 +1655,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
 		rmb();
 		rsp_type = RSPD_TYPE(rc->type_gen);
 		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
-			skb_frag_t *fp;
+			struct page_frag *fp;
 			struct pkt_gl gl;
 			const struct rx_sw_desc *sdesc;
 			u32 bufsz, frag;
@@ -1701,9 +1689,9 @@ int process_responses(struct sge_rspq *rspq, int budget)
 				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
 				bufsz = get_buf_size(sdesc);
 				fp->page = sdesc->page;
-				fp->page_offset = rspq->offset;
-				skb_frag_size_set(fp, min(bufsz, len));
-				len -= skb_frag_size(fp);
+				fp->offset = rspq->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
 				if (!len)
 					break;
 				unmap_rx_buf(rspq->adapter, &rxq->fl);
@@ -1717,9 +1705,9 @@ int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
 						get_buf_addr(sdesc),
-						skb_frag_size(fp), DMA_FROM_DEVICE);
+						fp->size, DMA_FROM_DEVICE);
 			gl.va = (page_address(gl.frags[0].page) +
-				 gl.frags[0].page_offset);
+				 gl.frags[0].offset);
 			prefetch(gl.va);
 
 			/*
@@ -1728,7 +1716,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
 			if (likely(ret == 0))
-				rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN);
+				rspq->offset += ALIGN(fp->size, FL_ALIGN);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {