author     Ron Mercer <ron.mercer@qlogic.com>      2010-01-02 05:37:43 -0500
committer  David S. Miller <davem@davemloft.net>   2010-01-06 23:30:34 -0500
commit     4f848c0a9c265cb3457fbf842dbffd28e82a44fd (patch)
tree       6e66ce964ae5fd99f26f10901c1e4b01df4edb35 /drivers/net/qlge
parent     572c526fb19a9a24098de814ab0601c1ce1bac82 (diff)
qlge: Add RX frame handlers for non-split frames.
New handlers are added here to handle:
1) Small frames (<256 bytes) in a single small buffer. Allocate a new
skb and copy the frame into it.
2) Large frames (>256 bytes) in a page chunk. Allocate an skb, tack the
page chunk onto its frags, and post it to napi_gro_receive().
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/qlge')
-rw-r--r--   drivers/net/qlge/qlge_main.c   261
1 file changed, 257 insertions(+), 4 deletions(-)
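
Path 1) above is the classic copy-break receive pattern: the small DMA buffer stays posted and the frame is copied into a freshly allocated skb. The sketch below is illustrative only and not part of the patch; the function name is invented, and the real handler, ql_process_mac_rx_skb() in the diff, additionally handles error flags, the ethtool self-test, VLAN tags, and hardware checksum results. All kernel calls used here (netdev_alloc_skb(), skb_reserve(), skb_put(), eth_type_trans(), napi_gro_receive()) are real APIs of this era.

/* Hypothetical sketch of the copy-break path (cf. ql_process_mac_rx_skb). */
static void rx_copy_break_sketch(struct net_device *ndev,
                                 struct napi_struct *napi,
                                 const void *rx_buf, u32 length)
{
        struct sk_buff *skb;

        /* NET_IP_ALIGN offsets the 14-byte Ethernet header so the IP
         * header that follows it lands on an aligned address.
         */
        skb = netdev_alloc_skb(ndev, length + NET_IP_ALIGN);
        if (!skb)
                return;         /* the real handler also bumps rx_dropped */
        skb_reserve(skb, NET_IP_ALIGN);

        /* Copy out of the receive buffer so it can be reposted at once. */
        memcpy(skb_put(skb, length), rx_buf, length);

        skb->protocol = eth_type_trans(skb, ndev);
        skb->ip_summed = CHECKSUM_NONE; /* unless hardware verified it */
        napi_gro_receive(napi, skb);
}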
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c3034786965d..109bd0abe9b4 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1433,6 +1433,209 @@ map_error:
         return NETDEV_TX_BUSY;
 }
 
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+                                   struct rx_ring *rx_ring,
+                                   struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                   u32 length,
+                                   u16 vlan_id)
+{
+        struct net_device *ndev = qdev->ndev;
+        struct sk_buff *skb = NULL;
+        void *addr;
+        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+        struct napi_struct *napi = &rx_ring->napi;
+
+        skb = netdev_alloc_skb(ndev, length);
+        if (!skb) {
+                QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
+                        "need to unwind!.\n");
+                rx_ring->rx_dropped++;
+                put_page(lbq_desc->p.pg_chunk.page);
+                return;
+        }
+
+        addr = lbq_desc->p.pg_chunk.va;
+        prefetch(addr);
+
+
+        /* Frame error, so drop the packet. */
+        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+                QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+                        ib_mac_rsp->flags2);
+                rx_ring->rx_errors++;
+                goto err_out;
+        }
+
+        /* The max framesize filter on this chip is set higher than
+         * MTU since FCoE uses 2k frames.
+         */
+        if (skb->len > ndev->mtu + ETH_HLEN) {
+                QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
+                rx_ring->rx_dropped++;
+                goto err_out;
+        }
+        memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+        QPRINTK(qdev, RX_STATUS, DEBUG,
+                "%d bytes of headers and data in large. Chain "
+                "page to new skb and pull tail.\n", length);
+        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+                           lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+                           length-ETH_HLEN);
+        skb->len += length-ETH_HLEN;
+        skb->data_len += length-ETH_HLEN;
+        skb->truesize += length-ETH_HLEN;
+
+        rx_ring->rx_packets++;
+        rx_ring->rx_bytes += skb->len;
+        skb->protocol = eth_type_trans(skb, ndev);
+        skb->ip_summed = CHECKSUM_NONE;
+
+        if (qdev->rx_csum &&
+            !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+                /* TCP frame. */
+                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                        QPRINTK(qdev, RX_STATUS, DEBUG,
+                                "TCP checksum done!\n");
+                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                           (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+                        /* Unfragmented ipv4 UDP frame. */
+                        struct iphdr *iph = (struct iphdr *) skb->data;
+                        if (!(iph->frag_off &
+                              cpu_to_be16(IP_MF|IP_OFFSET))) {
+                                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                                QPRINTK(qdev, RX_STATUS, DEBUG,
+                                        "TCP checksum done!\n");
+                        }
+                }
+        }
+
+        skb_record_rx_queue(skb, rx_ring->cq_id);
+        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+                if (qdev->vlgrp && (vlan_id != 0xffff))
+                        vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
+                else
+                        napi_gro_receive(napi, skb);
+        } else {
+                if (qdev->vlgrp && (vlan_id != 0xffff))
+                        vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+                else
+                        netif_receive_skb(skb);
+        }
+        return;
+err_out:
+        dev_kfree_skb_any(skb);
+        put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+                                  struct rx_ring *rx_ring,
+                                  struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                  u32 length,
+                                  u16 vlan_id)
+{
+        struct net_device *ndev = qdev->ndev;
+        struct sk_buff *skb = NULL;
+        struct sk_buff *new_skb = NULL;
+        struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+        skb = sbq_desc->p.skb;
+        /* Allocate new_skb and copy */
+        new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+        if (new_skb == NULL) {
+                QPRINTK(qdev, PROBE, ERR,
+                        "No skb available, drop the packet.\n");
+                rx_ring->rx_dropped++;
+                return;
+        }
+        skb_reserve(new_skb, NET_IP_ALIGN);
+        memcpy(skb_put(new_skb, length), skb->data, length);
+        skb = new_skb;
+
+        /* Frame error, so drop the packet. */
+        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+                QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+                        ib_mac_rsp->flags2);
+                dev_kfree_skb_any(skb);
+                rx_ring->rx_errors++;
+                return;
+        }
+
+        /* loopback self test for ethtool */
+        if (test_bit(QL_SELFTEST, &qdev->flags)) {
+                ql_check_lb_frame(qdev, skb);
+                dev_kfree_skb_any(skb);
+                return;
+        }
+
+        /* The max framesize filter on this chip is set higher than
+         * MTU since FCoE uses 2k frames.
+         */
+        if (skb->len > ndev->mtu + ETH_HLEN) {
+                dev_kfree_skb_any(skb);
+                rx_ring->rx_dropped++;
+                return;
+        }
+
+        prefetch(skb->data);
+        skb->dev = ndev;
+        if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+                QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
+                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                        IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                        IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+                        (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+                        IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+        }
+        if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+                QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+
+        rx_ring->rx_packets++;
+        rx_ring->rx_bytes += skb->len;
+        skb->protocol = eth_type_trans(skb, ndev);
+        skb->ip_summed = CHECKSUM_NONE;
+
+        /* If rx checksum is on, and there are no
+         * csum or frame errors.
+         */
+        if (qdev->rx_csum &&
+            !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+                /* TCP frame. */
+                if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+                        QPRINTK(qdev, RX_STATUS, DEBUG,
+                                "TCP checksum done!\n");
+                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+                           (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+                        /* Unfragmented ipv4 UDP frame. */
+                        struct iphdr *iph = (struct iphdr *) skb->data;
+                        if (!(iph->frag_off &
+                              cpu_to_be16(IP_MF|IP_OFFSET))) {
+                                skb->ip_summed = CHECKSUM_UNNECESSARY;
+                                QPRINTK(qdev, RX_STATUS, DEBUG,
+                                        "TCP checksum done!\n");
+                        }
+                }
+        }
+
+        skb_record_rx_queue(skb, rx_ring->cq_id);
+        if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+                if (qdev->vlgrp && (vlan_id != 0xffff))
+                        vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+                                         vlan_id, skb);
+                else
+                        napi_gro_receive(&rx_ring->napi, skb);
+        } else {
+                if (qdev->vlgrp && (vlan_id != 0xffff))
+                        vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+                else
+                        netif_receive_skb(skb);
+        }
+}
+
 static void ql_realign_skb(struct sk_buff *skb, int len)
 {
         void *temp_addr = skb->data;
@@ -1646,14 +1849,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 }
 
 /* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
-                                   struct rx_ring *rx_ring,
-                                   struct ib_mac_iocb_rsp *ib_mac_rsp)
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
+                                         struct rx_ring *rx_ring,
+                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                         u16 vlan_id)
 {
         struct net_device *ndev = qdev->ndev;
         struct sk_buff *skb = NULL;
-        u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
-                        IB_MAC_IOCB_RSP_VLAN_MASK)
 
         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
 
@@ -1753,6 +1955,57 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
         }
 }
 
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+                                            struct rx_ring *rx_ring,
+                                            struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+        u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+        QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+        if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+                /* The data and headers are split into
+                 * separate buffers.
+                 */
+                ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+                                             vlan_id);
+        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+                /* The data fit in a single small buffer.
+                 * Allocate a new skb, copy the data and
+                 * return the buffer to the free pool.
+                 */
+                ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+                                      length, vlan_id);
+        } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+                /* Non-TCP packet in a page chunk. Allocate an
+                 * skb, tack it on frags, and send it up.
+                 */
+                ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+                                       length, vlan_id);
+        } else {
+                struct bq_desc *lbq_desc;
+
+                /* Free small buffer that holds the IAL */
+                lbq_desc = ql_get_curr_sbuf(rx_ring);
+                QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
+                        length, qdev->ndev->mtu);
+
+                /* Unwind the large buffers for this frame. */
+                while (length > 0) {
+                        lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+                        length -= (length < rx_ring->lbq_buf_size) ?
+                                length : rx_ring->lbq_buf_size;
+                        put_page(lbq_desc->p.pg_chunk.page);
+                }
+        }
+
+        return (unsigned long)length;
+}
+
 /* Process an outbound completion from an rx ring. */
 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
                                    struct ob_mac_iocb_rsp *mac_rsp)