aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/broadcom
diff options
context:
space:
mode:
authorDmitry Kravkov <dmitry@broadcom.com>2012-02-20 04:59:08 -0500
committerDavid S. Miller <davem@davemloft.net>2012-02-20 19:34:07 -0500
commit621b4d66b27e70ba9a0e8fa4676d9c4f916c8343 (patch)
tree41ebada1c68d784b97ca792b74a73a2f72ac4876 /drivers/net/ethernet/broadcom
parent2b68c18194b078489a84023564bcf7464b6c7b37 (diff)
use FW 7.2.16
The patch integrates FW 7.2.16 HSI and implements the driver part of the GRO flow. FW 7.2.16 adds the ability to aggregate packets for GRO (and not just LRO) and also fixes some bugs. 1. Added new aggregation mode: GRO. In this mode packets are aggregated such that the original packets can be reconstructed by the OS. 2. 57712 HW bug workaround - initialized all CAM TM registers to 0x32. 3. Adding the FCoE statistics structures to the BNX2X HSI. 4. Wrong configuration of TX HW input buffer size may cause a theoretical performance effect. Performed configuration fix. 5. FCOE - Arrival of packets beyond task IO size can lead to crash. Fix firmware data-in flow. 6. iSCSI - In rare cases of on-chip termination the graceful termination timer hangs, and the termination doesn't complete. Firmware fix to MSL timer tolerance. 7. iSCSI - Chip hangs when target sends FIN out-of-order or with isles open at the initiator side. Firmware implementation corrected to drop FIN received out-of-order or with isles still open. 8. iSCSI - Chip hangs in case of a retransmission not aligned to 4 bytes from the beginning of the iSCSI PDU. Firmware implementation corrected to support arbitrarily aligned retransmissions. 9. iSCSI - Arrival of target-initiated NOP-IN during intense ISCSI traffic might lead to crash. Firmware fix to relevant flow. Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com> Signed-off-by: Eilon Greenstein <eilong@broadcom.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c149
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h57
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h1
8 files changed, 177 insertions, 82 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 604060ab8872..3cf9df833cb4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -445,6 +445,8 @@ struct bnx2x_agg_info {
445 u16 vlan_tag; 445 u16 vlan_tag;
446 u16 len_on_bd; 446 u16 len_on_bd;
447 u32 rxhash; 447 u32 rxhash;
448 u16 gro_size;
449 u16 full_page;
448}; 450};
449 451
450#define Q_STATS_OFFSET32(stat_name) \ 452#define Q_STATS_OFFSET32(stat_name) \
@@ -473,6 +475,11 @@ struct bnx2x_fp_txdata {
473 int txq_index; 475 int txq_index;
474}; 476};
475 477
478enum bnx2x_tpa_mode_t {
479 TPA_MODE_LRO,
480 TPA_MODE_GRO
481};
482
476struct bnx2x_fastpath { 483struct bnx2x_fastpath {
477 struct bnx2x *bp; /* parent */ 484 struct bnx2x *bp; /* parent */
478 485
@@ -489,6 +496,8 @@ struct bnx2x_fastpath {
489 496
490 dma_addr_t status_blk_mapping; 497 dma_addr_t status_blk_mapping;
491 498
499 enum bnx2x_tpa_mode_t mode;
500
492 u8 max_cos; /* actual number of active tx coses */ 501 u8 max_cos; /* actual number of active tx coses */
493 struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; 502 struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
494 503
@@ -1199,6 +1208,8 @@ struct bnx2x {
1199#define ETH_MIN_PACKET_SIZE 60 1208#define ETH_MIN_PACKET_SIZE 60
1200#define ETH_MAX_PACKET_SIZE 1500 1209#define ETH_MAX_PACKET_SIZE 1500
1201#define ETH_MAX_JUMBO_PACKET_SIZE 9600 1210#define ETH_MAX_JUMBO_PACKET_SIZE 9600
1211/* TCP with Timestamp Option (32) + IPv6 (40) */
1212#define ETH_MAX_TPA_HEADER_SIZE 72
1202 1213
1203 /* Max supported alignment is 256 (8 shift) */ 1214 /* Max supported alignment is 256 (8 shift) */
1204#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) 1215#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1269,6 +1280,7 @@ struct bnx2x {
1269#define NO_MCP_FLAG (1 << 9) 1280#define NO_MCP_FLAG (1 << 9)
1270 1281
1271#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 1282#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
1283#define GRO_ENABLE_FLAG (1 << 10)
1272#define MF_FUNC_DIS (1 << 11) 1284#define MF_FUNC_DIS (1 << 11)
1273#define OWN_CNIC_IRQ (1 << 12) 1285#define OWN_CNIC_IRQ (1 << 12)
1274#define NO_ISCSI_OOO_FLAG (1 << 13) 1286#define NO_ISCSI_OOO_FLAG (1 << 13)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index aa14502289ce..0a45251edb8f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -209,13 +209,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
209 fp->last_max_sge = idx; 209 fp->last_max_sge = idx;
210} 210}
211 211
212static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp, 212static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
213 struct eth_fast_path_rx_cqe *fp_cqe) 213 u16 sge_len,
214 struct eth_end_agg_rx_cqe *cqe)
214{ 215{
215 struct bnx2x *bp = fp->bp; 216 struct bnx2x *bp = fp->bp;
216 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
217 le16_to_cpu(fp_cqe->len_on_bd)) >>
218 SGE_PAGE_SHIFT;
219 u16 last_max, last_elem, first_elem; 217 u16 last_max, last_elem, first_elem;
220 u16 delta = 0; 218 u16 delta = 0;
221 u16 i; 219 u16 i;
@@ -226,15 +224,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
226 /* First mark all used pages */ 224 /* First mark all used pages */
227 for (i = 0; i < sge_len; i++) 225 for (i = 0; i < sge_len; i++)
228 BIT_VEC64_CLEAR_BIT(fp->sge_mask, 226 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
229 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i]))); 227 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
230 228
231 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n", 229 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
232 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); 230 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
233 231
234 /* Here we assume that the last SGE index is the biggest */ 232 /* Here we assume that the last SGE index is the biggest */
235 prefetch((void *)(fp->sge_mask)); 233 prefetch((void *)(fp->sge_mask));
236 bnx2x_update_last_max_sge(fp, 234 bnx2x_update_last_max_sge(fp,
237 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1])); 235 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
238 236
239 last_max = RX_SGE(fp->last_max_sge); 237 last_max = RX_SGE(fp->last_max_sge);
240 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT; 238 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -328,6 +326,12 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
328 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd); 326 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
329 tpa_info->placement_offset = cqe->placement_offset; 327 tpa_info->placement_offset = cqe->placement_offset;
330 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe); 328 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
329 if (fp->mode == TPA_MODE_GRO) {
330 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
331 tpa_info->full_page =
332 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
333 tpa_info->gro_size = gro_size;
334 }
331 335
332#ifdef BNX2X_STOP_ON_ERROR 336#ifdef BNX2X_STOP_ON_ERROR
333 fp->tpa_queue_used |= (1 << queue); 337 fp->tpa_queue_used |= (1 << queue);
@@ -384,25 +388,40 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
384} 388}
385 389
386static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 390static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
387 u16 queue, struct sk_buff *skb, 391 struct bnx2x_agg_info *tpa_info,
392 u16 pages,
393 struct sk_buff *skb,
388 struct eth_end_agg_rx_cqe *cqe, 394 struct eth_end_agg_rx_cqe *cqe,
389 u16 cqe_idx) 395 u16 cqe_idx)
390{ 396{
391 struct sw_rx_page *rx_pg, old_rx_pg; 397 struct sw_rx_page *rx_pg, old_rx_pg;
392 u32 i, frag_len, frag_size, pages; 398 u32 i, frag_len, frag_size;
393 int err; 399 int err, j, frag_id = 0;
394 int j;
395 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
396 u16 len_on_bd = tpa_info->len_on_bd; 400 u16 len_on_bd = tpa_info->len_on_bd;
401 u16 full_page = 0, gro_size = 0;
397 402
398 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd; 403 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
399 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT; 404
405 if (fp->mode == TPA_MODE_GRO) {
406 gro_size = tpa_info->gro_size;
407 full_page = tpa_info->full_page;
408 }
400 409
401 /* This is needed in order to enable forwarding support */ 410 /* This is needed in order to enable forwarding support */
402 if (frag_size) 411 if (frag_size) {
403 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, 412 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
404 tpa_info->parsing_flags, len_on_bd); 413 tpa_info->parsing_flags, len_on_bd);
405 414
415 /* set for GRO */
416 if (fp->mode == TPA_MODE_GRO)
417 skb_shinfo(skb)->gso_type =
418 (GET_FLAG(tpa_info->parsing_flags,
419 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
420 PRS_FLAG_OVERETH_IPV6) ?
421 SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
422 }
423
424
406#ifdef BNX2X_STOP_ON_ERROR 425#ifdef BNX2X_STOP_ON_ERROR
407 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { 426 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
408 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", 427 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
@@ -419,7 +438,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
419 438
420 /* FW gives the indices of the SGE as if the ring is an array 439 /* FW gives the indices of the SGE as if the ring is an array
421 (meaning that "next" element will consume 2 indices) */ 440 (meaning that "next" element will consume 2 indices) */
422 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE)); 441 if (fp->mode == TPA_MODE_GRO)
442 frag_len = min_t(u32, frag_size, (u32)full_page);
443 else /* LRO */
444 frag_len = min_t(u32, frag_size,
445 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
446
423 rx_pg = &fp->rx_page_ring[sge_idx]; 447 rx_pg = &fp->rx_page_ring[sge_idx];
424 old_rx_pg = *rx_pg; 448 old_rx_pg = *rx_pg;
425 449
@@ -435,9 +459,21 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
435 dma_unmap_page(&bp->pdev->dev, 459 dma_unmap_page(&bp->pdev->dev,
436 dma_unmap_addr(&old_rx_pg, mapping), 460 dma_unmap_addr(&old_rx_pg, mapping),
437 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE); 461 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
438
439 /* Add one frag and update the appropriate fields in the skb */ 462 /* Add one frag and update the appropriate fields in the skb */
440 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); 463 if (fp->mode == TPA_MODE_LRO)
464 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
465 else { /* GRO */
466 int rem;
467 int offset = 0;
468 for (rem = frag_len; rem > 0; rem -= gro_size) {
469 int len = rem > gro_size ? gro_size : rem;
470 skb_fill_page_desc(skb, frag_id++,
471 old_rx_pg.page, offset, len);
472 if (offset)
473 get_page(old_rx_pg.page);
474 offset += len;
475 }
476 }
441 477
442 skb->data_len += frag_len; 478 skb->data_len += frag_len;
443 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE; 479 skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
@@ -449,18 +485,17 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
449 return 0; 485 return 0;
450} 486}
451 487
452static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, 488static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
453 u16 queue, struct eth_end_agg_rx_cqe *cqe, 489 struct bnx2x_agg_info *tpa_info,
454 u16 cqe_idx) 490 u16 pages,
491 struct eth_end_agg_rx_cqe *cqe,
492 u16 cqe_idx)
455{ 493{
456 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
457 struct sw_rx_bd *rx_buf = &tpa_info->first_buf; 494 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
458 u32 pad = tpa_info->placement_offset; 495 u8 pad = tpa_info->placement_offset;
459 u16 len = tpa_info->len_on_bd; 496 u16 len = tpa_info->len_on_bd;
460 struct sk_buff *skb = NULL; 497 struct sk_buff *skb = NULL;
461 u8 *data = rx_buf->data; 498 u8 *new_data, *data = rx_buf->data;
462 /* alloc new skb */
463 u8 *new_data;
464 u8 old_tpa_state = tpa_info->tpa_state; 499 u8 old_tpa_state = tpa_info->tpa_state;
465 500
466 tpa_info->tpa_state = BNX2X_TPA_STOP; 501 tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -500,7 +535,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
500 skb->protocol = eth_type_trans(skb, bp->dev); 535 skb->protocol = eth_type_trans(skb, bp->dev);
501 skb->ip_summed = CHECKSUM_UNNECESSARY; 536 skb->ip_summed = CHECKSUM_UNNECESSARY;
502 537
503 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) { 538 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
539 skb, cqe, cqe_idx)) {
504 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN) 540 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
505 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag); 541 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
506 napi_gro_receive(&fp->napi, skb); 542 napi_gro_receive(&fp->napi, skb);
@@ -565,7 +601,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
565 struct eth_fast_path_rx_cqe *cqe_fp; 601 struct eth_fast_path_rx_cqe *cqe_fp;
566 u8 cqe_fp_flags; 602 u8 cqe_fp_flags;
567 enum eth_rx_cqe_type cqe_fp_type; 603 enum eth_rx_cqe_type cqe_fp_type;
568 u16 len, pad; 604 u16 len, pad, queue;
569 u8 *data; 605 u8 *data;
570 606
571#ifdef BNX2X_STOP_ON_ERROR 607#ifdef BNX2X_STOP_ON_ERROR
@@ -586,17 +622,21 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
586 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), 622 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
587 cqe_fp_flags, cqe_fp->status_flags, 623 cqe_fp_flags, cqe_fp->status_flags,
588 le32_to_cpu(cqe_fp->rss_hash_result), 624 le32_to_cpu(cqe_fp->rss_hash_result),
589 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len)); 625 le16_to_cpu(cqe_fp->vlan_tag),
626 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
590 627
591 /* is this a slowpath msg? */ 628 /* is this a slowpath msg? */
592 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) { 629 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
593 bnx2x_sp_event(fp, cqe); 630 bnx2x_sp_event(fp, cqe);
594 goto next_cqe; 631 goto next_cqe;
595 } 632 }
633
596 rx_buf = &fp->rx_buf_ring[bd_cons]; 634 rx_buf = &fp->rx_buf_ring[bd_cons];
597 data = rx_buf->data; 635 data = rx_buf->data;
598 636
599 if (!CQE_TYPE_FAST(cqe_fp_type)) { 637 if (!CQE_TYPE_FAST(cqe_fp_type)) {
638 struct bnx2x_agg_info *tpa_info;
639 u16 frag_size, pages;
600#ifdef BNX2X_STOP_ON_ERROR 640#ifdef BNX2X_STOP_ON_ERROR
601 /* sanity check */ 641 /* sanity check */
602 if (fp->disable_tpa && 642 if (fp->disable_tpa &&
@@ -616,28 +656,38 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
616 bnx2x_tpa_start(fp, queue, 656 bnx2x_tpa_start(fp, queue,
617 bd_cons, bd_prod, 657 bd_cons, bd_prod,
618 cqe_fp); 658 cqe_fp);
659
619 goto next_rx; 660 goto next_rx;
620 } else {
621 u16 queue =
622 cqe->end_agg_cqe.queue_index;
623 DP(NETIF_MSG_RX_STATUS,
624 "calling tpa_stop on queue %d\n",
625 queue);
626 661
627 bnx2x_tpa_stop(bp, fp, queue, 662 }
628 &cqe->end_agg_cqe, 663 queue = cqe->end_agg_cqe.queue_index;
629 comp_ring_cons); 664 tpa_info = &fp->tpa_info[queue];
665 DP(NETIF_MSG_RX_STATUS,
666 "calling tpa_stop on queue %d\n",
667 queue);
668
669 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
670 tpa_info->len_on_bd;
671
672 if (fp->mode == TPA_MODE_GRO)
673 pages = (frag_size + tpa_info->full_page - 1) /
674 tpa_info->full_page;
675 else
676 pages = SGE_PAGE_ALIGN(frag_size) >>
677 SGE_PAGE_SHIFT;
678
679 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
680 &cqe->end_agg_cqe, comp_ring_cons);
630#ifdef BNX2X_STOP_ON_ERROR 681#ifdef BNX2X_STOP_ON_ERROR
631 if (bp->panic) 682 if (bp->panic)
632 return 0; 683 return 0;
633#endif 684#endif
634 685
635 bnx2x_update_sge_prod(fp, cqe_fp); 686 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
636 goto next_cqe; 687 goto next_cqe;
637 }
638 } 688 }
639 /* non TPA */ 689 /* non TPA */
640 len = le16_to_cpu(cqe_fp->pkt_len); 690 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
641 pad = cqe_fp->placement_offset; 691 pad = cqe_fp->placement_offset;
642 dma_sync_single_for_cpu(&bp->pdev->dev, 692 dma_sync_single_for_cpu(&bp->pdev->dev,
643 dma_unmap_addr(rx_buf, mapping), 693 dma_unmap_addr(rx_buf, mapping),
@@ -3440,13 +3490,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3440} 3490}
3441 3491
3442netdev_features_t bnx2x_fix_features(struct net_device *dev, 3492netdev_features_t bnx2x_fix_features(struct net_device *dev,
3443 netdev_features_t features) 3493 netdev_features_t features)
3444{ 3494{
3445 struct bnx2x *bp = netdev_priv(dev); 3495 struct bnx2x *bp = netdev_priv(dev);
3446 3496
3447 /* TPA requires Rx CSUM offloading */ 3497 /* TPA requires Rx CSUM offloading */
3448 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) 3498 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
3449 features &= ~NETIF_F_LRO; 3499 features &= ~NETIF_F_LRO;
3500 features &= ~NETIF_F_GRO;
3501 }
3450 3502
3451 return features; 3503 return features;
3452} 3504}
@@ -3462,6 +3514,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
3462 else 3514 else
3463 flags &= ~TPA_ENABLE_FLAG; 3515 flags &= ~TPA_ENABLE_FLAG;
3464 3516
3517 if (features & NETIF_F_GRO)
3518 flags |= GRO_ENABLE_FLAG;
3519 else
3520 flags &= ~GRO_ENABLE_FLAG;
3521
3465 if (features & NETIF_F_LOOPBACK) { 3522 if (features & NETIF_F_LOOPBACK) {
3466 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { 3523 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3467 bp->link_params.loopback_mode = LOOPBACK_BMAC; 3524 bp->link_params.loopback_mode = LOOPBACK_BMAC;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index f978c6a1aef1..d2093ee9b85e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -534,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
534 */ 534 */
535int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type); 535int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
536#endif 536#endif
537
537netdev_features_t bnx2x_fix_features(struct net_device *dev, 538netdev_features_t bnx2x_fix_features(struct net_device *dev,
538 netdev_features_t features); 539 netdev_features_t features);
539int bnx2x_set_features(struct net_device *dev, netdev_features_t features); 540int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
540 541
541/** 542/**
@@ -1491,6 +1492,18 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
1491 return max_cfg; 1492 return max_cfg;
1492} 1493}
1493 1494
1495/* checks if HW supports GRO for given MTU */
1496static inline bool bnx2x_mtu_allows_gro(int mtu)
1497{
1498 /* gro frags per page */
1499 int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
1500
1501 /*
1502 * 1. number of frags should not grow above MAX_SKB_FRAGS
1503 * 2. frag must fit the page
1504 */
1505 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1506}
1494/** 1507/**
1495 * bnx2x_bz_fp - zero content of the fastpath structure. 1508 * bnx2x_bz_fp - zero content of the fastpath structure.
1496 * 1509 *
@@ -1556,7 +1569,14 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
1556 * set the tpa flag for each queue. The tpa flag determines the queue 1569 * set the tpa flag for each queue. The tpa flag determines the queue
1557 * minimal size so it must be set prior to queue memory allocation 1570 * minimal size so it must be set prior to queue memory allocation
1558 */ 1571 */
1559 fp->disable_tpa = (bp->flags & TPA_ENABLE_FLAG) == 0; 1572 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1573 (bp->flags & GRO_ENABLE_FLAG &&
1574 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1575 if (bp->flags & TPA_ENABLE_FLAG)
1576 fp->mode = TPA_MODE_LRO;
1577 else if (bp->flags & GRO_ENABLE_FLAG)
1578 fp->mode = TPA_MODE_GRO;
1579
1560#ifdef BCM_CNIC 1580#ifdef BCM_CNIC
1561 /* We don't want TPA on an FCoE L2 ring */ 1581 /* We don't want TPA on an FCoE L2 ring */
1562 if (IS_FCOE_FP(fp)) 1582 if (IS_FCOE_FP(fp))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 7e57fa40d2e3..9c24d536edef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1931,7 +1931,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
1931 if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS)) 1931 if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
1932 goto test_loopback_rx_exit; 1932 goto test_loopback_rx_exit;
1933 1933
1934 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 1934 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
1935 if (len != pkt_size) 1935 if (len != pkt_size)
1936 goto test_loopback_rx_exit; 1936 goto test_loopback_rx_exit;
1937 1937
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 78b77de728b0..a1413ad7757d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -34,9 +34,10 @@ struct license_key {
34}; 34};
35 35
36 36
37#define PORT_0 0 37#define PORT_0 0
38#define PORT_1 1 38#define PORT_1 1
39#define PORT_MAX 2 39#define PORT_MAX 2
40#define NVM_PATH_MAX 2
40 41
41/**************************************************************************** 42/****************************************************************************
42 * Shared HW configuration * 43 * Shared HW configuration *
@@ -618,12 +619,6 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
618 #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000 619 #define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
619 #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000 620 #define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
620 621
621 /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
622 #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
623 #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
624 #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
625 #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
626
627 /* Determine the Serdes electrical interface */ 622 /* Determine the Serdes electrical interface */
628 #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000 623 #define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
629 #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24 624 #define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
@@ -898,11 +893,6 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
898 #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000 893 #define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
899 #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100 894 #define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
900 895
901 #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
902 #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
903 #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
904 #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
905
906 #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000 896 #define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
907 #define PORT_FEATURE_EN_SIZE_SHIFT 24 897 #define PORT_FEATURE_EN_SIZE_SHIFT 24
908 #define PORT_FEATURE_WOL_ENABLED 0x01000000 898 #define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -1139,9 +1129,6 @@ struct shm_dev_info { /* size */
1139 1129
1140#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS) 1130#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
1141 1131
1142/* LED Blink rate that will achieve ~15.9Hz */
1143#define LED_BLINK_RATE_VAL 480
1144
1145/**************************************************************************** 1132/****************************************************************************
1146 * Driver <-> FW Mailbox * 1133 * Driver <-> FW Mailbox *
1147 ****************************************************************************/ 1134 ****************************************************************************/
@@ -1407,7 +1394,7 @@ struct port_mf_cfg {
1407 #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0 1394 #define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
1408 #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK 1395 #define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
1409 1396
1410 u32 reserved[3]; 1397 u32 reserved[1];
1411 1398
1412}; 1399};
1413 1400
@@ -1493,7 +1480,8 @@ struct func_ext_cfg {
1493struct mf_cfg { 1480struct mf_cfg {
1494 1481
1495 struct shared_mf_cfg shared_mf_config; /* 0x4 */ 1482 struct shared_mf_cfg shared_mf_config; /* 0x4 */
1496 struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */ 1483 /* 0x8*2*2=0x20 */
1484 struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
1497 /* for all chips, there are 8 mf functions */ 1485 /* for all chips, there are 8 mf functions */
1498 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */ 1486 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
1499 /* 1487 /*
@@ -2002,6 +1990,7 @@ struct shmem2_region {
2002#define DRV_INFO_CONTROL_VER_SHIFT 0 1990#define DRV_INFO_CONTROL_VER_SHIFT 0
2003#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00 1991#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
2004#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8 1992#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
1993 u32 ibft_host_addr; /* initialized by option ROM */
2005}; 1994};
2006 1995
2007 1996
@@ -2700,8 +2689,8 @@ union drv_info_to_mcp {
2700 struct iscsi_stats_info iscsi_stat; 2689 struct iscsi_stats_info iscsi_stat;
2701}; 2690};
2702#define BCM_5710_FW_MAJOR_VERSION 7 2691#define BCM_5710_FW_MAJOR_VERSION 7
2703#define BCM_5710_FW_MINOR_VERSION 0 2692#define BCM_5710_FW_MINOR_VERSION 2
2704#define BCM_5710_FW_REVISION_VERSION 29 2693#define BCM_5710_FW_REVISION_VERSION 16
2705#define BCM_5710_FW_ENGINEERING_VERSION 0 2694#define BCM_5710_FW_ENGINEERING_VERSION 0
2706#define BCM_5710_FW_COMPILE_FLAGS 1 2695#define BCM_5710_FW_COMPILE_FLAGS 1
2707 2696
@@ -3308,8 +3297,10 @@ struct client_init_rx_data {
3308#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0 3297#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
3309#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1) 3298#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
3310#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1 3299#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
3311#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2) 3300#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
3312#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2 3301#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
3302#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
3303#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
3313 u8 vmqueue_mode_en_flg; 3304 u8 vmqueue_mode_en_flg;
3314 u8 extra_data_over_sgl_en_flg; 3305 u8 extra_data_over_sgl_en_flg;
3315 u8 cache_line_alignment_log_size; 3306 u8 cache_line_alignment_log_size;
@@ -3324,7 +3315,7 @@ struct client_init_rx_data {
3324 u8 outer_vlan_removal_enable_flg; 3315 u8 outer_vlan_removal_enable_flg;
3325 u8 status_block_id; 3316 u8 status_block_id;
3326 u8 rx_sb_index_number; 3317 u8 rx_sb_index_number;
3327 u8 reserved0; 3318 u8 dont_verify_rings_pause_thr_flg;
3328 u8 max_tpa_queues; 3319 u8 max_tpa_queues;
3329 u8 silent_vlan_removal_flg; 3320 u8 silent_vlan_removal_flg;
3330 __le16 max_bytes_on_bd; 3321 __le16 max_bytes_on_bd;
@@ -3657,7 +3648,7 @@ struct eth_fast_path_rx_cqe {
3657 u8 placement_offset; 3648 u8 placement_offset;
3658 __le32 rss_hash_result; 3649 __le32 rss_hash_result;
3659 __le16 vlan_tag; 3650 __le16 vlan_tag;
3660 __le16 pkt_len; 3651 __le16 pkt_len_or_gro_seg_len;
3661 __le16 len_on_bd; 3652 __le16 len_on_bd;
3662 struct parsing_flags pars_flags; 3653 struct parsing_flags pars_flags;
3663 union eth_sgl_or_raw_data sgl_or_raw_data; 3654 union eth_sgl_or_raw_data sgl_or_raw_data;
@@ -4215,6 +4206,15 @@ enum set_mac_action_type {
4215 4206
4216 4207
4217/* 4208/*
4209 * Ethernet TPA Modes
4210 */
4211enum tpa_mode {
4212 TPA_LRO,
4213 TPA_GRO,
4214 MAX_TPA_MODE};
4215
4216
4217/*
4218 * tpa update ramrod data 4218 * tpa update ramrod data
4219 */ 4219 */
4220struct tpa_update_ramrod_data { 4220struct tpa_update_ramrod_data {
@@ -4224,7 +4224,8 @@ struct tpa_update_ramrod_data {
4224 u8 max_tpa_queues; 4224 u8 max_tpa_queues;
4225 u8 max_sges_for_packet; 4225 u8 max_sges_for_packet;
4226 u8 complete_on_both_clients; 4226 u8 complete_on_both_clients;
4227 __le16 reserved1; 4227 u8 dont_verify_rings_pause_thr_flg;
4228 u8 tpa_mode;
4228 __le16 sge_buff_size; 4229 __le16 sge_buff_size;
4229 __le16 max_agg_size; 4230 __le16 max_agg_size;
4230 __le32 sge_page_base_lo; 4231 __le32 sge_page_base_lo;
@@ -4447,13 +4448,13 @@ enum common_spqe_cmd_id {
4447 RAMROD_CMD_ID_COMMON_UNUSED, 4448 RAMROD_CMD_ID_COMMON_UNUSED,
4448 RAMROD_CMD_ID_COMMON_FUNCTION_START, 4449 RAMROD_CMD_ID_COMMON_FUNCTION_START,
4449 RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 4450 RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
4451 RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
4450 RAMROD_CMD_ID_COMMON_CFC_DEL, 4452 RAMROD_CMD_ID_COMMON_CFC_DEL,
4451 RAMROD_CMD_ID_COMMON_CFC_DEL_WB, 4453 RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
4452 RAMROD_CMD_ID_COMMON_STAT_QUERY, 4454 RAMROD_CMD_ID_COMMON_STAT_QUERY,
4453 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 4455 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
4454 RAMROD_CMD_ID_COMMON_START_TRAFFIC, 4456 RAMROD_CMD_ID_COMMON_START_TRAFFIC,
4455 RAMROD_CMD_ID_COMMON_RESERVED1, 4457 RAMROD_CMD_ID_COMMON_RESERVED1,
4456 RAMROD_CMD_ID_COMMON_RESERVED2,
4457 MAX_COMMON_SPQE_CMD_ID 4458 MAX_COMMON_SPQE_CMD_ID
4458}; 4459};
4459 4460
@@ -4733,8 +4734,8 @@ enum event_ring_opcode {
4733 EVENT_RING_OPCODE_MALICIOUS_VF, 4734 EVENT_RING_OPCODE_MALICIOUS_VF,
4734 EVENT_RING_OPCODE_FORWARD_SETUP, 4735 EVENT_RING_OPCODE_FORWARD_SETUP,
4735 EVENT_RING_OPCODE_RSS_UPDATE_RULES, 4736 EVENT_RING_OPCODE_RSS_UPDATE_RULES,
4737 EVENT_RING_OPCODE_FUNCTION_UPDATE,
4736 EVENT_RING_OPCODE_RESERVED1, 4738 EVENT_RING_OPCODE_RESERVED1,
4737 EVENT_RING_OPCODE_RESERVED2,
4738 EVENT_RING_OPCODE_SET_MAC, 4739 EVENT_RING_OPCODE_SET_MAC,
4739 EVENT_RING_OPCODE_CLASSIFICATION_RULES, 4740 EVENT_RING_OPCODE_CLASSIFICATION_RULES,
4740 EVENT_RING_OPCODE_FILTERS_RULES, 4741 EVENT_RING_OPCODE_FILTERS_RULES,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9831d8c416a9..816e7d42f61c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2702,6 +2702,8 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2702 if (!fp->disable_tpa) { 2702 if (!fp->disable_tpa) {
2703 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2703 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2704 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); 2704 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
2705 if (fp->mode == TPA_MODE_GRO)
2706 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
2705 } 2707 }
2706 2708
2707 if (leading) { 2709 if (leading) {
@@ -10167,10 +10169,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10167 10169
10168 /* Set TPA flags */ 10170 /* Set TPA flags */
10169 if (bp->disable_tpa) { 10171 if (bp->disable_tpa) {
10170 bp->flags &= ~TPA_ENABLE_FLAG; 10172 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
10171 bp->dev->features &= ~NETIF_F_LRO; 10173 bp->dev->features &= ~NETIF_F_LRO;
10172 } else { 10174 } else {
10173 bp->flags |= TPA_ENABLE_FLAG; 10175 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
10174 bp->dev->features |= NETIF_F_LRO; 10176 bp->dev->features |= NETIF_F_LRO;
10175 } 10177 }
10176 10178
@@ -10716,8 +10718,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10716 dev->priv_flags |= IFF_UNICAST_FLT; 10718 dev->priv_flags |= IFF_UNICAST_FLT;
10717 10719
10718 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 10720 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
10719 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO | 10721 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
10720 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; 10722 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
10723 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
10721 10724
10722 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 10725 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
10723 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 10726 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index ac15f747f8da..adfae6bb430b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4431,9 +4431,10 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4431 struct client_init_rx_data *rx_data, 4431 struct client_init_rx_data *rx_data,
4432 unsigned long *flags) 4432 unsigned long *flags)
4433{ 4433{
4434 /* Rx data */
4435 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) * 4434 rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4436 CLIENT_INIT_RX_DATA_TPA_EN_IPV4; 4435 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4436 rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
4437 CLIENT_INIT_RX_DATA_TPA_MODE;
4437 rx_data->vmqueue_mode_en_flg = 0; 4438 rx_data->vmqueue_mode_en_flg = 0;
4438 4439
4439 rx_data->cache_line_alignment_log_size = 4440 rx_data->cache_line_alignment_log_size =
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 71e039b618a7..685d42e1b2fc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -774,6 +774,7 @@ enum bnx2x_queue_cmd {
774enum { 774enum {
775 BNX2X_Q_FLG_TPA, 775 BNX2X_Q_FLG_TPA,
776 BNX2X_Q_FLG_TPA_IPV6, 776 BNX2X_Q_FLG_TPA_IPV6,
777 BNX2X_Q_FLG_TPA_GRO,
777 BNX2X_Q_FLG_STATS, 778 BNX2X_Q_FLG_STATS,
778 BNX2X_Q_FLG_ZERO_STATS, 779 BNX2X_Q_FLG_ZERO_STATS,
779 BNX2X_Q_FLG_ACTIVE, 780 BNX2X_Q_FLG_ACTIVE,