Diffstat (limited to 'drivers/net/cxgb4vf/sge.c')
-rw-r--r--  drivers/net/cxgb4vf/sge.c | 122
1 file changed, 75 insertions(+), 47 deletions(-)
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864ddafb..ecf0770bf0f 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
          */
         RX_COPY_THRES = 256,
         RX_PULL_LEN = 128,
-};
 
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+        /*
+         * Main body length for sk_buffs used for RX Ethernet packets with
+         * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+         * pskb_may_pull() some room.
+         */
+        RX_SKB_LEN = 512,
+};
 
 /*
  * Software state per TX descriptor.
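
The new comment states the RX_SKB_LEN >= RX_PULL_LEN relationship only informally. A minimal sketch of how that note could be enforced at build time with the kernel's BUILD_BUG_ON() follows; it is not part of the patch, and placing it in the driver's SGE initialization path is an assumption:

        /* Illustrative only, not in the patch: fail the build if the sk_buff
         * main body could not hold the bytes pulled into it. */
        BUILD_BUG_ON(RX_SKB_LEN < RX_PULL_LEN);
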
@@ -1355,6 +1356,67 @@ out_free:
 }
 
 /**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list.  Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+                                  unsigned int skb_len, unsigned int pull_len)
+{
+        struct sk_buff *skb;
+        struct skb_shared_info *ssi;
+
+        /*
+         * If the ingress packet is small enough, allocate an skb large enough
+         * for all of the data and copy it inline.  Otherwise, allocate an skb
+         * with enough room to pull in the header and reference the rest of
+         * the data via the skb fragment list.
+         *
+         * Below we rely on RX_COPY_THRES being less than the smallest Rx
+         * buffer size, which is expected since buffers are at least
+         * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+         * fragment.
+         */
+        if (gl->tot_len <= RX_COPY_THRES) {
+                /* small packets have only one fragment */
+                skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+                if (unlikely(!skb))
+                        goto out;
+                __skb_put(skb, gl->tot_len);
+                skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+        } else {
+                skb = alloc_skb(skb_len, GFP_ATOMIC);
+                if (unlikely(!skb))
+                        goto out;
+                __skb_put(skb, pull_len);
+                skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+                ssi = skb_shinfo(skb);
+                ssi->frags[0].page = gl->frags[0].page;
+                ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+                ssi->frags[0].size = gl->frags[0].size - pull_len;
+                if (gl->nfrags > 1)
+                        memcpy(&ssi->frags[1], &gl->frags[1],
+                               (gl->nfrags-1) * sizeof(skb_frag_t));
+                ssi->nr_frags = gl->nfrags;
+
+                skb->len = gl->tot_len;
+                skb->data_len = skb->len - pull_len;
+                skb->truesize += skb->data_len;
+
+                /* Get a reference for the last page, we don't own it */
+                get_page(gl->frags[gl->nfrags - 1].page);
+        }
+
+out:
+        return skb;
+}
+
+/**
  * t4vf_pktgl_free - free a packet gather list
  * @gl: the gather list
  *
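
To make the fragment-path accounting in t4vf_pktgl_to_skb() above concrete, here is a small worked example written as a comment; the 1600-byte packet and single-fragment layout are assumptions, while RX_SKB_LEN = 512 and RX_PULL_LEN = 128 come from the patch:

        /*
         * Hypothetical 1600-byte ingress packet held in one page-sized
         * free-list buffer, built with skb_len = RX_SKB_LEN (512) and
         * pull_len = RX_PULL_LEN (128):
         *
         *   linear area   : 128 bytes copied from gl->va into the 512-byte skb
         *   frags[0]      : 1600 - 128 = 1472 bytes, page_offset advanced by 128
         *   skb->len      : 1600 (= gl->tot_len)
         *   skb->data_len : 1600 - 128 = 1472, also added to skb->truesize
         */
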
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 {
         struct sk_buff *skb;
         struct port_info *pi;
-        struct skb_shared_info *ssi;
         const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
         bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-        unsigned int len = be16_to_cpu(pkt->len);
         struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
         /*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
         }
 
         /*
-         * If the ingress packet is small enough, allocate an skb large enough
-         * for all of the data and copy it inline.  Otherwise, allocate an skb
-         * with enough room to pull in the header and reference the rest of
-         * the data via the skb fragment list.
+         * Convert the Packet Gather List into an skb.
          */
-        if (len <= RX_COPY_THRES) {
-                /* small packets have only one fragment */
-                skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-                if (!skb)
-                        goto nomem;
-                __skb_put(skb, gl->frags[0].size);
-                skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-        } else {
-                skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-                if (!skb)
-                        goto nomem;
-                __skb_put(skb, RX_PKT_PULL_LEN);
-                skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-                ssi = skb_shinfo(skb);
-                ssi->frags[0].page = gl->frags[0].page;
-                ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-                                             RX_PKT_PULL_LEN);
-                ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-                if (gl->nfrags > 1)
-                        memcpy(&ssi->frags[1], &gl->frags[1],
-                               (gl->nfrags-1) * sizeof(skb_frag_t));
-                ssi->nr_frags = gl->nfrags;
-                skb->len = len + PKTSHIFT;
-                skb->data_len = skb->len - RX_PKT_PULL_LEN;
-                skb->truesize += skb->data_len;
-
-                /* Get a reference for the last page, we don't own it */
-                get_page(gl->frags[gl->nfrags - 1].page);
+        skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+        if (unlikely(!skb)) {
+                t4vf_pktgl_free(gl);
+                rxq->stats.rx_drops++;
+                return 0;
         }
-
         __skb_pull(skb, PKTSHIFT);
         skb->protocol = eth_type_trans(skb, rspq->netdev);
         skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
         netif_receive_skb(skb);
 
         return 0;
-
-nomem:
-        t4vf_pktgl_free(gl);
-        rxq->stats.rx_drops++;
-        return 0;
 }
 
 /**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
                                 }
                                 len = RSPD_LEN(len);
                         }
+                        gl.tot_len = len;
 
                         /*
                          * Gather packet fragments.
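
Both t4vf_ethrx_handler() and the new helper now consume the packet only through the gather list, which is why process_responses() must record the total response length in gl.tot_len before dispatching to the handler. For reference, an abridged sketch of struct pkt_gl limited to the fields the patch touches; the field names are taken from the code above, while the array bound and the complete definition in the driver's headers are assumptions:

        struct pkt_gl {
                skb_frag_t frags[MAX_SKB_FRAGS]; /* per-fragment page, page_offset, size */
                void *va;                        /* start of the packet data, used for inline copies */
                unsigned int nfrags;             /* number of valid entries in frags[] */
                unsigned int tot_len;            /* total bytes across all fragments */
        };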