Diffstat (limited to 'drivers/net/cxgb4vf/sge.c')
-rw-r--r-- | drivers/net/cxgb4vf/sge.c | 147 |
1 files changed, 89 insertions, 58 deletions
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index eb5a1c9cb2d3..5fd75fdaa631 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -41,6 +41,7 @@
41 | #include <net/ipv6.h> | 41 | #include <net/ipv6.h> |
42 | #include <net/tcp.h> | 42 | #include <net/tcp.h> |
43 | #include <linux/dma-mapping.h> | 43 | #include <linux/dma-mapping.h> |
44 | #include <linux/prefetch.h> | ||
44 | 45 | ||
45 | #include "t4vf_common.h" | 46 | #include "t4vf_common.h" |
46 | #include "t4vf_defs.h" | 47 | #include "t4vf_defs.h" |
@@ -154,13 +155,14 @@ enum {
154 | */ | 155 | */ |
155 | RX_COPY_THRES = 256, | 156 | RX_COPY_THRES = 256, |
156 | RX_PULL_LEN = 128, | 157 | RX_PULL_LEN = 128, |
157 | }; | ||
158 | 158 | ||
159 | /* | 159 | /* |
160 | * Can't define this in the above enum because PKTSHIFT isn't a constant in | 160 | * Main body length for sk_buffs used for RX Ethernet packets with |
161 | * the VF Driver ... | 161 | * fragments. Should be >= RX_PULL_LEN but possibly bigger to give |
162 | */ | 162 | * pskb_may_pull() some room. |
163 | #define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) | 163 | */ |
164 | RX_SKB_LEN = 512, | ||
165 | }; | ||
164 | 166 | ||
165 | /* | 167 | /* |
166 | * Software state per TX descriptor. | 168 | * Software state per TX descriptor. |
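The new RX_SKB_LEN constant is only safe while it stays at or above RX_PULL_LEN, as the comment in the hunk above requires. A compile-time guard in the kernel's usual style would pin that down (illustrative sketch, not part of this patch):

	/*
	 * Hypothetical guard: BUILD_BUG_ON() breaks the build when its
	 * condition is true, so this enforces RX_SKB_LEN >= RX_PULL_LEN.
	 */
	static inline void rx_skb_len_sanity_check(void)
	{
		BUILD_BUG_ON(RX_SKB_LEN < RX_PULL_LEN);
	}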
@@ -223,8 +225,8 @@ static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
223 | /** | 225 | /** |
224 | * need_skb_unmap - does the platform need unmapping of sk_buffs? | 226 | * need_skb_unmap - does the platform need unmapping of sk_buffs? |
225 | * | 227 | * |
226 | * Returns true if the platfrom needs sk_buff unmapping. The compiler | 228 | * Returns true if the platform needs sk_buff unmapping. The compiler |
227 | * optimizes away unecessary code if this returns true. | 229 | * optimizes away unnecessary code if this returns true. |
228 | */ | 230 | */ |
229 | static inline int need_skb_unmap(void) | 231 | static inline int need_skb_unmap(void) |
230 | { | 232 | { |
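For context on why the compiler can drop the unmap path: need_skb_unmap() normally folds to a compile-time constant. A sketch of the usual pattern (the function body is not shown in this hunk, so treat this as an approximation):

	static inline int need_skb_unmap(void)
	{
		/*
		 * CONFIG_NEED_DMA_MAP_STATE is set only on platforms that
		 * must remember DMA addresses for a later unmap; elsewhere
		 * this returns a constant 0 and every
		 * "if (need_skb_unmap()) ..." block is optimized away.
		 */
	#ifdef CONFIG_NEED_DMA_MAP_STATE
		return 1;
	#else
		return 0;
	#endif
	}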
@@ -266,7 +268,7 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
266 | * | 268 | * |
267 | * Tests specified Free List to see whether the number of buffers | 269 | * Tests specified Free List to see whether the number of buffers |
268 | * available to the hardware has falled below our "starvation" | 270 | * available to the hardware has falled below our "starvation" |
269 | * threshhold. | 271 | * threshold. |
270 | */ | 272 | */ |
271 | static inline bool fl_starving(const struct sge_fl *fl) | 273 | static inline bool fl_starving(const struct sge_fl *fl) |
272 | { | 274 | { |
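The starvation test itself falls outside this hunk; roughly, it compares the buffers still available to the hardware against a fixed low-water mark (field and macro names follow the driver's conventions, but verify against the source before relying on them):

	static inline bool fl_starving(const struct sge_fl *fl)
	{
		/* buffers available minus not-yet-pushed credits vs. threshold */
		return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
	}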
@@ -1148,7 +1150,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1148 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { | 1150 | if (unlikely(credits < ETHTXQ_STOP_THRES)) { |
1149 | /* | 1151 | /* |
1150 | * After we're done injecting the Work Request for this | 1152 | * After we're done injecting the Work Request for this |
1151 | * packet, we'll be below our "stop threshhold" so stop the TX | 1153 | * packet, we'll be below our "stop threshold" so stop the TX |
1152 | * Queue now and schedule a request for an SGE Egress Queue | 1154 | * Queue now and schedule a request for an SGE Egress Queue |
1153 | * Update message. The queue will get started later on when | 1155 | * Update message. The queue will get started later on when |
1154 | * the firmware processes this Work Request and sends us an | 1156 | * the firmware processes this Work Request and sends us an |
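The stop-and-notify step this comment describes looks roughly like the following (helper and flag names are cxgb4-family conventions, shown here as an assumption rather than quoted from this file):

	netif_tx_stop_queue(txq->txq);		/* stop feeding the hardware */
	txq->q.stops++;
	/* ask firmware for an Egress Queue Update so we know when to restart */
	wr_mid |= FW_WR_EQUIQ | FW_WR_EQUEQ;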
@@ -1355,6 +1357,67 @@ out_free:
1355 | } | 1357 | } |
1356 | 1358 | ||
1357 | /** | 1359 | /** |
1360 | * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list | ||
1361 | * @gl: the gather list | ||
1362 | * @skb_len: size of sk_buff main body if it carries fragments | ||
1363 | * @pull_len: amount of data to move to the sk_buff's main body | ||
1364 | * | ||
1365 | * Builds an sk_buff from the given packet gather list. Returns the | ||
1366 | * sk_buff or %NULL if sk_buff allocation failed. | ||
1367 | */ | ||
1368 | struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, | ||
1369 | unsigned int skb_len, unsigned int pull_len) | ||
1370 | { | ||
1371 | struct sk_buff *skb; | ||
1372 | struct skb_shared_info *ssi; | ||
1373 | |||
1374 | /* | ||
1375 | * If the ingress packet is small enough, allocate an skb large enough | ||
1376 | * for all of the data and copy it inline. Otherwise, allocate an skb | ||
1377 | * with enough room to pull in the header and reference the rest of | ||
1378 | * the data via the skb fragment list. | ||
1379 | * | ||
1380 | * Below we rely on RX_COPY_THRES being less than the smallest Rx | ||
1381 | * buffer size, which is expected since buffers are at least | ||
1382 | * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one | ||
1383 | * fragment. | ||
1384 | */ | ||
1385 | if (gl->tot_len <= RX_COPY_THRES) { | ||
1386 | /* small packets have only one fragment */ | ||
1387 | skb = alloc_skb(gl->tot_len, GFP_ATOMIC); | ||
1388 | if (unlikely(!skb)) | ||
1389 | goto out; | ||
1390 | __skb_put(skb, gl->tot_len); | ||
1391 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | ||
1392 | } else { | ||
1393 | skb = alloc_skb(skb_len, GFP_ATOMIC); | ||
1394 | if (unlikely(!skb)) | ||
1395 | goto out; | ||
1396 | __skb_put(skb, pull_len); | ||
1397 | skb_copy_to_linear_data(skb, gl->va, pull_len); | ||
1398 | |||
1399 | ssi = skb_shinfo(skb); | ||
1400 | ssi->frags[0].page = gl->frags[0].page; | ||
1401 | ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; | ||
1402 | ssi->frags[0].size = gl->frags[0].size - pull_len; | ||
1403 | if (gl->nfrags > 1) | ||
1404 | memcpy(&ssi->frags[1], &gl->frags[1], | ||
1405 | (gl->nfrags-1) * sizeof(skb_frag_t)); | ||
1406 | ssi->nr_frags = gl->nfrags; | ||
1407 | |||
1408 | skb->len = gl->tot_len; | ||
1409 | skb->data_len = skb->len - pull_len; | ||
1410 | skb->truesize += skb->data_len; | ||
1411 | |||
1412 | /* Get a reference for the last page, we don't own it */ | ||
1413 | get_page(gl->frags[gl->nfrags - 1].page); | ||
1414 | } | ||
1415 | |||
1416 | out: | ||
1417 | return skb; | ||
1418 | } | ||
1419 | |||
1420 | /** | ||
1358 | * t4vf_pktgl_free - free a packet gather list | 1421 | * t4vf_pktgl_free - free a packet gather list |
1359 | * @gl: the gather list | 1422 | * @gl: the gather list |
1360 | * | 1423 | * |
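On allocation failure t4vf_pktgl_to_skb() returns NULL and the caller still owns the gather list; the RX handler rewritten later in this patch uses it exactly that way:

	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
	if (unlikely(!skb)) {
		t4vf_pktgl_free(gl);		/* drop the packet ... */
		rxq->stats.rx_drops++;		/* ... and count the drop */
		return 0;
	}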
@@ -1463,10 +1526,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1463 | { | 1526 | { |
1464 | struct sk_buff *skb; | 1527 | struct sk_buff *skb; |
1465 | struct port_info *pi; | 1528 | struct port_info *pi; |
1466 | struct skb_shared_info *ssi; | ||
1467 | const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; | 1529 | const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; |
1468 | bool csum_ok = pkt->csum_calc && !pkt->err_vec; | 1530 | bool csum_ok = pkt->csum_calc && !pkt->err_vec; |
1469 | unsigned int len = be16_to_cpu(pkt->len); | ||
1470 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); | 1531 | struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); |
1471 | 1532 | ||
1472 | /* | 1533 | /* |
@@ -1481,51 +1542,22 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1481 | } | 1542 | } |
1482 | 1543 | ||
1483 | /* | 1544 | /* |
1484 | * If the ingress packet is small enough, allocate an skb large enough | 1545 | * Convert the Packet Gather List into an skb. |
1485 | * for all of the data and copy it inline. Otherwise, allocate an skb | ||
1486 | * with enough room to pull in the header and reference the rest of | ||
1487 | * the data via the skb fragment list. | ||
1488 | */ | 1546 | */ |
1489 | if (len <= RX_COPY_THRES) { | 1547 | skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); |
1490 | /* small packets have only one fragment */ | 1548 | if (unlikely(!skb)) { |
1491 | skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); | 1549 | t4vf_pktgl_free(gl); |
1492 | if (!skb) | 1550 | rxq->stats.rx_drops++; |
1493 | goto nomem; | 1551 | return 0; |
1494 | __skb_put(skb, gl->frags[0].size); | ||
1495 | skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size); | ||
1496 | } else { | ||
1497 | skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC); | ||
1498 | if (!skb) | ||
1499 | goto nomem; | ||
1500 | __skb_put(skb, RX_PKT_PULL_LEN); | ||
1501 | skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN); | ||
1502 | |||
1503 | ssi = skb_shinfo(skb); | ||
1504 | ssi->frags[0].page = gl->frags[0].page; | ||
1505 | ssi->frags[0].page_offset = (gl->frags[0].page_offset + | ||
1506 | RX_PKT_PULL_LEN); | ||
1507 | ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN; | ||
1508 | if (gl->nfrags > 1) | ||
1509 | memcpy(&ssi->frags[1], &gl->frags[1], | ||
1510 | (gl->nfrags-1) * sizeof(skb_frag_t)); | ||
1511 | ssi->nr_frags = gl->nfrags; | ||
1512 | skb->len = len + PKTSHIFT; | ||
1513 | skb->data_len = skb->len - RX_PKT_PULL_LEN; | ||
1514 | skb->truesize += skb->data_len; | ||
1515 | |||
1516 | /* Get a reference for the last page, we don't own it */ | ||
1517 | get_page(gl->frags[gl->nfrags - 1].page); | ||
1518 | } | 1552 | } |
1519 | |||
1520 | __skb_pull(skb, PKTSHIFT); | 1553 | __skb_pull(skb, PKTSHIFT); |
1521 | skb->protocol = eth_type_trans(skb, rspq->netdev); | 1554 | skb->protocol = eth_type_trans(skb, rspq->netdev); |
1522 | skb_record_rx_queue(skb, rspq->idx); | 1555 | skb_record_rx_queue(skb, rspq->idx); |
1523 | skb->dev->last_rx = jiffies; /* XXX removed 2.6.29 */ | ||
1524 | pi = netdev_priv(skb->dev); | 1556 | pi = netdev_priv(skb->dev); |
1525 | rxq->stats.pkts++; | 1557 | rxq->stats.pkts++; |
1526 | 1558 | ||
1527 | if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec && | 1559 | if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) && |
1528 | (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { | 1560 | !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) { |
1529 | if (!pkt->ip_frag) | 1561 | if (!pkt->ip_frag) |
1530 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1562 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1531 | else { | 1563 | else { |
@@ -1535,8 +1567,11 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1535 | } | 1567 | } |
1536 | rxq->stats.rx_cso++; | 1568 | rxq->stats.rx_cso++; |
1537 | } else | 1569 | } else |
1538 | skb->ip_summed = CHECKSUM_NONE; | 1570 | skb_checksum_none_assert(skb); |
1539 | 1571 | ||
1572 | /* | ||
1573 | * Deliver the packet to the stack. | ||
1574 | */ | ||
1540 | if (unlikely(pkt->vlan_ex)) { | 1575 | if (unlikely(pkt->vlan_ex)) { |
1541 | struct vlan_group *grp = pi->vlan_grp; | 1576 | struct vlan_group *grp = pi->vlan_grp; |
1542 | 1577 | ||
@@ -1550,11 +1585,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1550 | netif_receive_skb(skb); | 1585 | netif_receive_skb(skb); |
1551 | 1586 | ||
1552 | return 0; | 1587 | return 0; |
1553 | |||
1554 | nomem: | ||
1555 | t4vf_pktgl_free(gl); | ||
1556 | rxq->stats.rx_drops++; | ||
1557 | return 0; | ||
1558 | } | 1588 | } |
1559 | 1589 | ||
1560 | /** | 1590 | /** |
@@ -1680,6 +1710,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
1680 | } | 1710 | } |
1681 | len = RSPD_LEN(len); | 1711 | len = RSPD_LEN(len); |
1682 | } | 1712 | } |
1713 | gl.tot_len = len; | ||
1683 | 1714 | ||
1684 | /* | 1715 | /* |
1685 | * Gather packet fragments. | 1716 | * Gather packet fragments. |
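From the fields the code above touches (va, frags[], nfrags, and now tot_len), the gather list is shaped roughly as follows (inferred from usage, not copied from the driver's header):

	struct pkt_gl {
		skb_frag_t frags[MAX_SKB_FRAGS];	/* packet page fragments */
		void *va;			/* virtual address of first byte */
		unsigned int nfrags;		/* number of fragments in use */
		unsigned int tot_len;		/* total length, set above */
	};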
@@ -2116,7 +2147,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2116 | 2147 | ||
2117 | /* | 2148 | /* |
2118 | * Calculate the size of the hardware free list ring plus | 2149 | * Calculate the size of the hardware free list ring plus |
2119 | * status page (which the SGE will place at the end of the | 2150 | * Status Page (which the SGE will place after the end of the |
2120 | * free list ring) in Egress Queue Units. | 2151 | * free list ring) in Egress Queue Units. |
2121 | */ | 2152 | */ |
2122 | flsz = (fl->size / FL_PER_EQ_UNIT + | 2153 | flsz = (fl->size / FL_PER_EQ_UNIT + |
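A worked example of that sizing, under the usual T4 assumptions (64-byte Egress Queue Units, eight 8-byte free-list pointers per unit, one unit of Status Page):

	/*
	 * flsz = fl->size / FL_PER_EQ_UNIT + STAT_LEN / EQ_UNIT
	 *      = 1024    / 8              + 64       / 64
	 *      = 129 Egress Queue Units for a 1024-entry free list.
	 */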
@@ -2213,8 +2244,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2213 | struct port_info *pi = netdev_priv(dev); | 2244 | struct port_info *pi = netdev_priv(dev); |
2214 | 2245 | ||
2215 | /* | 2246 | /* |
2216 | * Calculate the size of the hardware TX Queue (including the | 2247 | * Calculate the size of the hardware TX Queue (including the Status |
2217 | * status age on the end) in units of TX Descriptors. | 2248 | * Page on the end of the TX Queue) in units of TX Descriptors. |
2218 | */ | 2249 | */ |
2219 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); | 2250 | nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); |
2220 | 2251 | ||
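As a worked example of the TX sizing (assuming sizeof(struct tx_desc) == 64 and STAT_LEN == L1_CACHE_BYTES == 64):

	/*
	 * nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc)
	 *          = 1024        + 64 / 64
	 *          = 1025 descriptors: the Status Page costs exactly one
	 *            extra TX Descriptor slot at the end of the ring.
	 */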