about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2012-01-30 21:59:39 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2012-02-10 18:51:33 -0500
commitf56e0cb1fea6aa3caace1c1ddde3f847793dcf38 (patch)
tree26bee86bc3df96516492a9f77cc452caa7056cda /drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
parentf990b79bc80ca7a23b8a6c33241c439072d0b85b (diff)
ixgbe: Add function for testing status bits in Rx descriptor
This change adds a small function for testing Rx status bits in the descriptor. The advantage to this is that we can avoid unnecessary byte swaps on big endian systems.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c51
1 file changed, 22 insertions, 29 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 538577b08e25..b0469ddb158c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1019,25 +1019,23 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
1019 * ixgbe_receive_skb - Send a completed packet up the stack 1019 * ixgbe_receive_skb - Send a completed packet up the stack
1020 * @adapter: board private structure 1020 * @adapter: board private structure
1021 * @skb: packet to send up 1021 * @skb: packet to send up
1022 * @status: hardware indication of status of receive
1023 * @rx_ring: rx descriptor ring (for a specific queue) to setup 1022 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1024 * @rx_desc: rx descriptor 1023 * @rx_desc: rx descriptor
1025 **/ 1024 **/
1026static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, 1025static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
1027 struct sk_buff *skb, u8 status, 1026 struct sk_buff *skb,
1028 struct ixgbe_ring *ring, 1027 struct ixgbe_ring *ring,
1029 union ixgbe_adv_rx_desc *rx_desc) 1028 union ixgbe_adv_rx_desc *rx_desc)
1030{ 1029{
1031 struct ixgbe_adapter *adapter = q_vector->adapter; 1030 struct ixgbe_adapter *adapter = q_vector->adapter;
1032 struct napi_struct *napi = &q_vector->napi;
1033 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
1034 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
1035 1031
1036 if (is_vlan && (tag & VLAN_VID_MASK)) 1032 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1037 __vlan_hwaccel_put_tag(skb, tag); 1033 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1034 __vlan_hwaccel_put_tag(skb, vid);
1035 }
1038 1036
1039 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 1037 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1040 napi_gro_receive(napi, skb); 1038 napi_gro_receive(&q_vector->napi, skb);
1041 else 1039 else
1042 netif_rx(skb); 1040 netif_rx(skb);
1043} 1041}
@@ -1047,12 +1045,10 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
1047 * @adapter: address of board private structure 1045 * @adapter: address of board private structure
1048 * @status_err: hardware indication of status of receive 1046 * @status_err: hardware indication of status of receive
1049 * @skb: skb currently being received and modified 1047 * @skb: skb currently being received and modified
1050 * @status_err: status error value of last descriptor in packet
1051 **/ 1048 **/
1052static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 1049static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1053 union ixgbe_adv_rx_desc *rx_desc, 1050 union ixgbe_adv_rx_desc *rx_desc,
1054 struct sk_buff *skb, 1051 struct sk_buff *skb)
1055 u32 status_err)
1056{ 1052{
1057 skb->ip_summed = CHECKSUM_NONE; 1053 skb->ip_summed = CHECKSUM_NONE;
1058 1054
@@ -1061,16 +1057,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1061 return; 1057 return;
1062 1058
1063 /* if IP and error */ 1059 /* if IP and error */
1064 if ((status_err & IXGBE_RXD_STAT_IPCS) && 1060 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1065 (status_err & IXGBE_RXDADV_ERR_IPE)) { 1061 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1066 adapter->hw_csum_rx_error++; 1062 adapter->hw_csum_rx_error++;
1067 return; 1063 return;
1068 } 1064 }
1069 1065
1070 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 1066 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1071 return; 1067 return;
1072 1068
1073 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 1069 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1074 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1070 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1075 1071
1076 /* 1072 /*
@@ -1091,6 +1087,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1091 1087
1092static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) 1088static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1093{ 1089{
1090 rx_ring->next_to_use = val;
1094 /* 1091 /*
1095 * Force memory writes to complete before letting h/w 1092 * Force memory writes to complete before letting h/w
1096 * know there are new descriptors to fetch. (Only 1093 * know there are new descriptors to fetch. (Only
@@ -1219,10 +1216,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1219 1216
1220 i += rx_ring->count; 1217 i += rx_ring->count;
1221 1218
1222 if (rx_ring->next_to_use != i) { 1219 if (rx_ring->next_to_use != i)
1223 rx_ring->next_to_use = i;
1224 ixgbe_release_rx_desc(rx_ring, i); 1220 ixgbe_release_rx_desc(rx_ring, i);
1225 }
1226} 1221}
1227 1222
1228static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) 1223static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
@@ -1469,15 +1464,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1469#ifdef IXGBE_FCOE 1464#ifdef IXGBE_FCOE
1470 int ddp_bytes = 0; 1465 int ddp_bytes = 0;
1471#endif /* IXGBE_FCOE */ 1466#endif /* IXGBE_FCOE */
1472 u32 staterr;
1473 u16 i; 1467 u16 i;
1474 u16 cleaned_count = 0; 1468 u16 cleaned_count = 0;
1475 1469
1476 i = rx_ring->next_to_clean; 1470 i = rx_ring->next_to_clean;
1477 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1471 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1478 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1479 1472
1480 while (staterr & IXGBE_RXD_STAT_DD) { 1473 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1481 u32 upper_len = 0; 1474 u32 upper_len = 0;
1482 1475
1483 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1476 rmb(); /* read descriptor and rx_buffer_info after status DD */
@@ -1553,12 +1546,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1553 prefetch(next_rxd); 1546 prefetch(next_rxd);
1554 cleaned_count++; 1547 cleaned_count++;
1555 1548
1556 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 1549 if ((!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) {
1557 struct ixgbe_rx_buffer *next_buffer; 1550 struct ixgbe_rx_buffer *next_buffer;
1558 u32 nextp; 1551 u32 nextp;
1559 1552
1560 if (IXGBE_CB(skb)->append_cnt) { 1553 if (IXGBE_CB(skb)->append_cnt) {
1561 nextp = staterr & IXGBE_RXDADV_NEXTP_MASK; 1554 nextp = le32_to_cpu(
1555 rx_desc->wb.upper.status_error);
1562 nextp >>= IXGBE_RXDADV_NEXTP_SHIFT; 1556 nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
1563 } else { 1557 } else {
1564 nextp = i; 1558 nextp = i;
@@ -1597,12 +1591,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1597 ixgbe_update_rsc_stats(rx_ring, skb); 1591 ixgbe_update_rsc_stats(rx_ring, skb);
1598 1592
1599 /* ERR_MASK will only have valid bits if EOP set */ 1593 /* ERR_MASK will only have valid bits if EOP set */
1600 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 1594 if (unlikely(ixgbe_test_staterr(rx_desc,
1595 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
1601 dev_kfree_skb_any(skb); 1596 dev_kfree_skb_any(skb);
1602 goto next_desc; 1597 goto next_desc;
1603 } 1598 }
1604 1599
1605 ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); 1600 ixgbe_rx_checksum(adapter, rx_desc, skb);
1606 if (adapter->netdev->features & NETIF_F_RXHASH) 1601 if (adapter->netdev->features & NETIF_F_RXHASH)
1607 ixgbe_rx_hash(rx_desc, skb); 1602 ixgbe_rx_hash(rx_desc, skb);
1608 1603
@@ -1614,15 +1609,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1614#ifdef IXGBE_FCOE 1609#ifdef IXGBE_FCOE
1615 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1610 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1616 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { 1611 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
1617 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, 1612 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1618 staterr);
1619 if (!ddp_bytes) { 1613 if (!ddp_bytes) {
1620 dev_kfree_skb_any(skb); 1614 dev_kfree_skb_any(skb);
1621 goto next_desc; 1615 goto next_desc;
1622 } 1616 }
1623 } 1617 }
1624#endif /* IXGBE_FCOE */ 1618#endif /* IXGBE_FCOE */
1625 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 1619 ixgbe_receive_skb(q_vector, skb, rx_ring, rx_desc);
1626 1620
1627 budget--; 1621 budget--;
1628next_desc: 1622next_desc:
@@ -1637,7 +1631,6 @@ next_desc:
1637 1631
1638 /* use prefetched values */ 1632 /* use prefetched values */
1639 rx_desc = next_rxd; 1633 rx_desc = next_rxd;
1640 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1641 } 1634 }
1642 1635
1643 rx_ring->next_to_clean = i; 1636 rx_ring->next_to_clean = i;