about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorAlexander Duyck <alexander.h.duyck@intel.com>2012-01-30 21:59:39 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2012-02-10 18:51:33 -0500
commitf56e0cb1fea6aa3caace1c1ddde3f847793dcf38 (patch)
tree26bee86bc3df96516492a9f77cc452caa7056cda /drivers
parentf990b79bc80ca7a23b8a6c33241c439072d0b85b (diff)
ixgbe: Add function for testing status bits in Rx descriptor
This change adds a small function for testing Rx status bits in the descriptor. The advantage to this is that we can avoid unnecessary byte swaps on big endian systems.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c59
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c51
3 files changed, 65 insertions, 55 deletions
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index fca055362847..260e17637886 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -329,6 +329,13 @@ struct ixgbe_q_vector {
329#define IXGBE_10K_ITR 400 329#define IXGBE_10K_ITR 400
330#define IXGBE_8K_ITR 500 330#define IXGBE_8K_ITR 500
331 331
332/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
333static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
334 const u32 stat_err_bits)
335{
336 return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
337}
338
332static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) 339static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
333{ 340{
334 u16 ntc = ring->next_to_clean; 341 u16 ntc = ring->next_to_clean;
@@ -618,8 +625,7 @@ extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
618extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); 625extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
619extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 626extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
620 union ixgbe_adv_rx_desc *rx_desc, 627 union ixgbe_adv_rx_desc *rx_desc,
621 struct sk_buff *skb, 628 struct sk_buff *skb);
622 u32 staterr);
623extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, 629extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
624 struct scatterlist *sgl, unsigned int sgc); 630 struct scatterlist *sgl, unsigned int sgc);
625extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, 631extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 4bc794249801..da7da752b6b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -357,22 +357,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
357 */ 357 */
358int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, 358int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
359 union ixgbe_adv_rx_desc *rx_desc, 359 union ixgbe_adv_rx_desc *rx_desc,
360 struct sk_buff *skb, 360 struct sk_buff *skb)
361 u32 staterr)
362{ 361{
363 u16 xid;
364 u32 fctl;
365 u32 fceofe, fcerr, fcstat;
366 int rc = -EINVAL; 362 int rc = -EINVAL;
367 struct ixgbe_fcoe *fcoe; 363 struct ixgbe_fcoe *fcoe;
368 struct ixgbe_fcoe_ddp *ddp; 364 struct ixgbe_fcoe_ddp *ddp;
369 struct fc_frame_header *fh; 365 struct fc_frame_header *fh;
370 struct fcoe_crc_eof *crc; 366 struct fcoe_crc_eof *crc;
367 __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
368 __le32 ddp_err;
369 u32 fctl;
370 u16 xid;
371 371
372 fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR); 372 if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
373 fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE); 373 skb->ip_summed = CHECKSUM_NONE;
374 if (fcerr == IXGBE_FCERR_BADCRC)
375 skb_checksum_none_assert(skb);
376 else 374 else
377 skb->ip_summed = CHECKSUM_UNNECESSARY; 375 skb->ip_summed = CHECKSUM_UNNECESSARY;
378 376
@@ -382,6 +380,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
382 else 380 else
383 fh = (struct fc_frame_header *)(skb->data + 381 fh = (struct fc_frame_header *)(skb->data +
384 sizeof(struct fcoe_hdr)); 382 sizeof(struct fcoe_hdr));
383
385 fctl = ntoh24(fh->fh_f_ctl); 384 fctl = ntoh24(fh->fh_f_ctl);
386 if (fctl & FC_FC_EX_CTX) 385 if (fctl & FC_FC_EX_CTX)
387 xid = be16_to_cpu(fh->fh_ox_id); 386 xid = be16_to_cpu(fh->fh_ox_id);
@@ -396,27 +395,39 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
396 if (!ddp->udl) 395 if (!ddp->udl)
397 goto ddp_out; 396 goto ddp_out;
398 397
399 if (fcerr | fceofe) 398 ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
399 IXGBE_RXDADV_ERR_FCERR);
400 if (ddp_err)
400 goto ddp_out; 401 goto ddp_out;
401 402
402 fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT); 403 switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
403 if (fcstat) { 404 /* return 0 to bypass going to ULD for DDPed data */
405 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
404 /* update length of DDPed data */ 406 /* update length of DDPed data */
405 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 407 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
406 /* unmap the sg list when FCP_RSP is received */ 408 rc = 0;
407 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) { 409 break;
408 pci_unmap_sg(adapter->pdev, ddp->sgl, 410 /* unmap the sg list when FCPRSP is received */
409 ddp->sgc, DMA_FROM_DEVICE); 411 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
410 ddp->err = (fcerr | fceofe); 412 pci_unmap_sg(adapter->pdev, ddp->sgl,
411 ddp->sgl = NULL; 413 ddp->sgc, DMA_FROM_DEVICE);
412 ddp->sgc = 0; 414 ddp->err = ddp_err;
413 } 415 ddp->sgl = NULL;
414 /* return 0 to bypass going to ULD for DDPed data */ 416 ddp->sgc = 0;
415 if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP) 417 /* fall through */
416 rc = 0; 418 /* if DDP length is present pass it through to ULD */
417 else if (ddp->len) 419 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
420 /* update length of DDPed data */
421 ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
422 if (ddp->len)
418 rc = ddp->len; 423 rc = ddp->len;
424 break;
425 /* no match will return as an error */
426 case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
427 default:
428 break;
419 } 429 }
430
420 /* In target mode, check the last data frame of the sequence. 431 /* In target mode, check the last data frame of the sequence.
421 * For DDP in target mode, data is already DDPed but the header 432 * For DDP in target mode, data is already DDPed but the header
422 * indication of the last data frame ould allow is to tell if we 433 * indication of the last data frame ould allow is to tell if we
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 538577b08e25..b0469ddb158c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1019,25 +1019,23 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
1019 * ixgbe_receive_skb - Send a completed packet up the stack 1019 * ixgbe_receive_skb - Send a completed packet up the stack
1020 * @adapter: board private structure 1020 * @adapter: board private structure
1021 * @skb: packet to send up 1021 * @skb: packet to send up
1022 * @status: hardware indication of status of receive
1023 * @rx_ring: rx descriptor ring (for a specific queue) to setup 1022 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1024 * @rx_desc: rx descriptor 1023 * @rx_desc: rx descriptor
1025 **/ 1024 **/
1026static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, 1025static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
1027 struct sk_buff *skb, u8 status, 1026 struct sk_buff *skb,
1028 struct ixgbe_ring *ring, 1027 struct ixgbe_ring *ring,
1029 union ixgbe_adv_rx_desc *rx_desc) 1028 union ixgbe_adv_rx_desc *rx_desc)
1030{ 1029{
1031 struct ixgbe_adapter *adapter = q_vector->adapter; 1030 struct ixgbe_adapter *adapter = q_vector->adapter;
1032 struct napi_struct *napi = &q_vector->napi;
1033 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
1034 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
1035 1031
1036 if (is_vlan && (tag & VLAN_VID_MASK)) 1032 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1037 __vlan_hwaccel_put_tag(skb, tag); 1033 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1034 __vlan_hwaccel_put_tag(skb, vid);
1035 }
1038 1036
1039 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 1037 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1040 napi_gro_receive(napi, skb); 1038 napi_gro_receive(&q_vector->napi, skb);
1041 else 1039 else
1042 netif_rx(skb); 1040 netif_rx(skb);
1043} 1041}
@@ -1047,12 +1045,10 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
1047 * @adapter: address of board private structure 1045 * @adapter: address of board private structure
1048 * @status_err: hardware indication of status of receive 1046 * @status_err: hardware indication of status of receive
1049 * @skb: skb currently being received and modified 1047 * @skb: skb currently being received and modified
1050 * @status_err: status error value of last descriptor in packet
1051 **/ 1048 **/
1052static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter, 1049static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1053 union ixgbe_adv_rx_desc *rx_desc, 1050 union ixgbe_adv_rx_desc *rx_desc,
1054 struct sk_buff *skb, 1051 struct sk_buff *skb)
1055 u32 status_err)
1056{ 1052{
1057 skb->ip_summed = CHECKSUM_NONE; 1053 skb->ip_summed = CHECKSUM_NONE;
1058 1054
@@ -1061,16 +1057,16 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1061 return; 1057 return;
1062 1058
1063 /* if IP and error */ 1059 /* if IP and error */
1064 if ((status_err & IXGBE_RXD_STAT_IPCS) && 1060 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
1065 (status_err & IXGBE_RXDADV_ERR_IPE)) { 1061 ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
1066 adapter->hw_csum_rx_error++; 1062 adapter->hw_csum_rx_error++;
1067 return; 1063 return;
1068 } 1064 }
1069 1065
1070 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 1066 if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
1071 return; 1067 return;
1072 1068
1073 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 1069 if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
1074 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1070 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1075 1071
1076 /* 1072 /*
@@ -1091,6 +1087,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1091 1087
1092static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) 1088static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1093{ 1089{
1090 rx_ring->next_to_use = val;
1094 /* 1091 /*
1095 * Force memory writes to complete before letting h/w 1092 * Force memory writes to complete before letting h/w
1096 * know there are new descriptors to fetch. (Only 1093 * know there are new descriptors to fetch. (Only
@@ -1219,10 +1216,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1219 1216
1220 i += rx_ring->count; 1217 i += rx_ring->count;
1221 1218
1222 if (rx_ring->next_to_use != i) { 1219 if (rx_ring->next_to_use != i)
1223 rx_ring->next_to_use = i;
1224 ixgbe_release_rx_desc(rx_ring, i); 1220 ixgbe_release_rx_desc(rx_ring, i);
1225 }
1226} 1221}
1227 1222
1228static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc) 1223static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
@@ -1469,15 +1464,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1469#ifdef IXGBE_FCOE 1464#ifdef IXGBE_FCOE
1470 int ddp_bytes = 0; 1465 int ddp_bytes = 0;
1471#endif /* IXGBE_FCOE */ 1466#endif /* IXGBE_FCOE */
1472 u32 staterr;
1473 u16 i; 1467 u16 i;
1474 u16 cleaned_count = 0; 1468 u16 cleaned_count = 0;
1475 1469
1476 i = rx_ring->next_to_clean; 1470 i = rx_ring->next_to_clean;
1477 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1471 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1478 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1479 1472
1480 while (staterr & IXGBE_RXD_STAT_DD) { 1473 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1481 u32 upper_len = 0; 1474 u32 upper_len = 0;
1482 1475
1483 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1476 rmb(); /* read descriptor and rx_buffer_info after status DD */
@@ -1553,12 +1546,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1553 prefetch(next_rxd); 1546 prefetch(next_rxd);
1554 cleaned_count++; 1547 cleaned_count++;
1555 1548
1556 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 1549 if ((!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) {
1557 struct ixgbe_rx_buffer *next_buffer; 1550 struct ixgbe_rx_buffer *next_buffer;
1558 u32 nextp; 1551 u32 nextp;
1559 1552
1560 if (IXGBE_CB(skb)->append_cnt) { 1553 if (IXGBE_CB(skb)->append_cnt) {
1561 nextp = staterr & IXGBE_RXDADV_NEXTP_MASK; 1554 nextp = le32_to_cpu(
1555 rx_desc->wb.upper.status_error);
1562 nextp >>= IXGBE_RXDADV_NEXTP_SHIFT; 1556 nextp >>= IXGBE_RXDADV_NEXTP_SHIFT;
1563 } else { 1557 } else {
1564 nextp = i; 1558 nextp = i;
@@ -1597,12 +1591,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1597 ixgbe_update_rsc_stats(rx_ring, skb); 1591 ixgbe_update_rsc_stats(rx_ring, skb);
1598 1592
1599 /* ERR_MASK will only have valid bits if EOP set */ 1593 /* ERR_MASK will only have valid bits if EOP set */
1600 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 1594 if (unlikely(ixgbe_test_staterr(rx_desc,
1595 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
1601 dev_kfree_skb_any(skb); 1596 dev_kfree_skb_any(skb);
1602 goto next_desc; 1597 goto next_desc;
1603 } 1598 }
1604 1599
1605 ixgbe_rx_checksum(adapter, rx_desc, skb, staterr); 1600 ixgbe_rx_checksum(adapter, rx_desc, skb);
1606 if (adapter->netdev->features & NETIF_F_RXHASH) 1601 if (adapter->netdev->features & NETIF_F_RXHASH)
1607 ixgbe_rx_hash(rx_desc, skb); 1602 ixgbe_rx_hash(rx_desc, skb);
1608 1603
@@ -1614,15 +1609,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1614#ifdef IXGBE_FCOE 1609#ifdef IXGBE_FCOE
1615 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1610 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1616 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) { 1611 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
1617 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb, 1612 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
1618 staterr);
1619 if (!ddp_bytes) { 1613 if (!ddp_bytes) {
1620 dev_kfree_skb_any(skb); 1614 dev_kfree_skb_any(skb);
1621 goto next_desc; 1615 goto next_desc;
1622 } 1616 }
1623 } 1617 }
1624#endif /* IXGBE_FCOE */ 1618#endif /* IXGBE_FCOE */
1625 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 1619 ixgbe_receive_skb(q_vector, skb, rx_ring, rx_desc);
1626 1620
1627 budget--; 1621 budget--;
1628next_desc: 1622next_desc:
@@ -1637,7 +1631,6 @@ next_desc:
1637 1631
1638 /* use prefetched values */ 1632 /* use prefetched values */
1639 rx_desc = next_rxd; 1633 rx_desc = next_rxd;
1640 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1641 } 1634 }
1642 1635
1643 rx_ring->next_to_clean = i; 1636 rx_ring->next_to_clean = i;