-rw-r--r--  drivers/net/ethernet/brocade/bna/bfi_enet.h       3
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_hw_defs.h    4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_tx_rx.c     66
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h     49
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c         333
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.h          13
6 files changed, 343 insertions(+), 125 deletions(-)
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index 7d10e335c27d..ae072dc5d238 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -472,7 +472,8 @@ enum bfi_enet_hds_type {
 
 struct bfi_enet_rx_cfg {
         u8              rxq_type;
-        u8              rsvd[3];
+        u8              rsvd[1];
+        u16             frame_size;
 
         struct {
                 u8      max_header_size;
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index af3f7bb0b3b8..2702d02e98d9 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -322,6 +322,10 @@ do { \
 #define BNA_CQ_EF_REMOTE        (1 << 19)
 
 #define BNA_CQ_EF_LOCAL         (1 << 20)
+/* CAT2 ASIC does not use bit 21 as per the SPEC.
+ * Bit 31 is set in every end of frame completion
+ */
+#define BNA_CQ_EF_EOP           (1 << 31)
 
 /* Data structures */
 
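
BNA_CQ_EF_EOP is what lets the receive path recognize the completion that closes a frame once a frame can be spread across several small buffers. A minimal userspace sketch of that test, assuming only the flag value above (cmpl_closes_frame() is a hypothetical helper, not a driver function):

        #include <arpa/inet.h>  /* ntohl(); the driver uses the kernel's byte-order helpers */
        #include <stdbool.h>
        #include <stdint.h>

        #define BNA_CQ_EF_EOP   (1u << 31)

        /* Mirrors the check bnad_cq_process() makes on each completion: the
         * hardware writes flags in big-endian, and bit 31 marks the completion
         * that ends the frame.
         */
        static bool cmpl_closes_frame(uint32_t flags_be)
        {
                return (ntohl(flags_be) & BNA_CQ_EF_EOP) != 0;
        }
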
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index e02c1265e323..85e63546abe3 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1811,6 +1811,7 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
         cfg_req->mh.num_entries = htons(
                 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
 
+        cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
         cfg_req->num_queue_sets = rx->num_paths;
         for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
              i < rx->num_paths;
@@ -1832,8 +1833,17 @@ bna_bfi_rx_enet_start(struct bna_rx *rx)
                 /* Large/Single RxQ */
                 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
                                         &q0->qpt);
-                q0->buffer_size =
-                        bna_enet_mtu_get(&rx->bna->enet);
+                if (q0->multi_buffer)
+                        /* multi-buffer is enabled by allocating
+                         * a new rx with new set of resources.
+                         * q0->buffer_size should be initialized to
+                         * fragment size.
+                         */
+                        cfg_req->rx_cfg.multi_buffer =
+                                                BNA_STATUS_T_ENABLED;
+                else
+                        q0->buffer_size =
+                                bna_enet_mtu_get(&rx->bna->enet);
                 cfg_req->q_cfg[i].ql.rx_buffer_size =
                         htons((u16)q0->buffer_size);
                 break;
@@ -2383,8 +2393,8 @@ bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
         u32 hq_depth;
         u32 dq_depth;
 
-        dq_depth = q_cfg->q_depth;
-        hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
+        dq_depth = q_cfg->q0_depth;
+        hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
         cq_depth = dq_depth + hq_depth;
 
         BNA_TO_POWER_OF_2_HIGH(cq_depth);
@@ -2501,10 +2511,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
         struct bna_rxq *q0;
         struct bna_rxq *q1;
         struct bna_intr_info *intr_info;
-        u32 page_count;
+        struct bna_mem_descr *hqunmap_mem;
+        struct bna_mem_descr *dqunmap_mem;
         struct bna_mem_descr *ccb_mem;
         struct bna_mem_descr *rcb_mem;
-        struct bna_mem_descr *unmapq_mem;
         struct bna_mem_descr *cqpt_mem;
         struct bna_mem_descr *cswqpt_mem;
         struct bna_mem_descr *cpage_mem;
@@ -2514,8 +2524,10 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
         struct bna_mem_descr *dsqpt_mem;
         struct bna_mem_descr *hpage_mem;
         struct bna_mem_descr *dpage_mem;
-        int i;
-        int dpage_count, hpage_count, rcb_idx;
+        u32 dpage_count, hpage_count;
+        u32 hq_idx, dq_idx, rcb_idx;
+        u32 cq_depth, i;
+        u32 page_count;
 
         if (!bna_rx_res_check(rx_mod, rx_cfg))
                 return NULL;
@@ -2523,7 +2535,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
         intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
         ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
         rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
-        unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
+        dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
+        hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
         cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
         cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
         cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
@@ -2575,7 +2588,8 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
         }
 
         rx->num_paths = rx_cfg->num_paths;
-        for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
+        for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
+             i < rx->num_paths; i++) {
                 rxp = bna_rxp_get(rx_mod);
                 list_add_tail(&rxp->qe, &rx->rxp_q);
                 rxp->type = rx_cfg->rxp_type;
@@ -2618,9 +2632,13 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
                 q0->rxp = rxp;
 
                 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-                q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-                rcb_idx++;
-                q0->rcb->q_depth = rx_cfg->q_depth;
+                q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
+                rcb_idx++; dq_idx++;
+                q0->rcb->q_depth = rx_cfg->q0_depth;
+                q0->q_depth = rx_cfg->q0_depth;
+                q0->multi_buffer = rx_cfg->q0_multi_buf;
+                q0->buffer_size = rx_cfg->q0_buf_size;
+                q0->num_vecs = rx_cfg->q0_num_vecs;
                 q0->rcb->rxq = q0;
                 q0->rcb->bnad = bna->bnad;
                 q0->rcb->id = 0;
@@ -2640,15 +2658,18 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
                 q1->rxp = rxp;
 
                 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
-                q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
-                rcb_idx++;
-                q1->rcb->q_depth = rx_cfg->q_depth;
+                q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
+                rcb_idx++; hq_idx++;
+                q1->rcb->q_depth = rx_cfg->q1_depth;
+                q1->q_depth = rx_cfg->q1_depth;
+                q1->multi_buffer = BNA_STATUS_T_DISABLED;
+                q1->num_vecs = 1;
                 q1->rcb->rxq = q1;
                 q1->rcb->bnad = bna->bnad;
                 q1->rcb->id = 1;
                 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
                         rx_cfg->hds_config.forced_offset
-                        : rx_cfg->small_buff_size;
+                        : rx_cfg->q1_buf_size;
                 q1->rx_packets = q1->rx_bytes = 0;
                 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
 
@@ -2663,9 +2684,14 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
                 /* Setup CQ */
 
                 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
-                rxp->cq.ccb->q_depth = rx_cfg->q_depth +
-                        ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
-                        0 : rx_cfg->q_depth);
+                cq_depth = rx_cfg->q0_depth +
+                        ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
+                        0 : rx_cfg->q1_depth);
+                /* if multi-buffer is enabled sum of q0_depth
+                 * and q1_depth need not be a power of 2
+                 */
+                BNA_TO_POWER_OF_2_HIGH(cq_depth);
+                rxp->cq.ccb->q_depth = cq_depth;
                 rxp->cq.ccb->cq = &rxp->cq;
                 rxp->cq.ccb->rcb[0] = q0->rcb;
                 q0->rcb->ccb = rxp->cq.ccb;
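
With multi-buffer enabled the data-queue depth is scaled by the number of vectors per frame, so q0_depth + q1_depth is in general no longer a power of two; the hunk above therefore rounds the CQ depth up before storing it in the CCB. A rough standalone sketch of that rounding, with round_up_pow2() standing in for the driver's BNA_TO_POWER_OF_2_HIGH macro and the depths chosen only for illustration:

        #include <stdint.h>
        #include <stdio.h>

        /* Stand-in for BNA_TO_POWER_OF_2_HIGH(): round up to the next power of two. */
        static uint32_t round_up_pow2(uint32_t v)
        {
                uint32_t p = 1;

                while (p < v)
                        p <<= 1;
                return p;
        }

        int main(void)
        {
                uint32_t q0_depth = 2048;       /* e.g. rxq_depth 512 * q0_num_vecs 4 */
                uint32_t q1_depth = 512;        /* small/header queue depth */
                uint32_t cq_depth = round_up_pow2(q0_depth + q1_depth);

                printf("cq_depth = %u\n", cq_depth);    /* 2560 rounds up to 4096 */
                return 0;
        }
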
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index acedac2ae1c5..621547cd3504 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -109,20 +109,21 @@ enum bna_tx_res_req_type {
 enum bna_rx_mem_type {
         BNA_RX_RES_MEM_T_CCB = 0,       /* CQ context */
         BNA_RX_RES_MEM_T_RCB = 1,       /* CQ context */
-        BNA_RX_RES_MEM_T_UNMAPQ = 2,    /* UnmapQ for RxQs */
-        BNA_RX_RES_MEM_T_CQPT = 3,      /* CQ QPT */
-        BNA_RX_RES_MEM_T_CSWQPT = 4,    /* S/W QPT */
-        BNA_RX_RES_MEM_T_CQPT_PAGE = 5, /* CQPT page */
-        BNA_RX_RES_MEM_T_HQPT = 6,      /* RX QPT */
-        BNA_RX_RES_MEM_T_DQPT = 7,      /* RX QPT */
-        BNA_RX_RES_MEM_T_HSWQPT = 8,    /* RX s/w QPT */
-        BNA_RX_RES_MEM_T_DSWQPT = 9,    /* RX s/w QPT */
-        BNA_RX_RES_MEM_T_DPAGE = 10,    /* RX s/w QPT */
-        BNA_RX_RES_MEM_T_HPAGE = 11,    /* RX s/w QPT */
-        BNA_RX_RES_MEM_T_IBIDX = 12,
-        BNA_RX_RES_MEM_T_RIT = 13,
-        BNA_RX_RES_T_INTR = 14,         /* Rx interrupts */
-        BNA_RX_RES_T_MAX = 15
+        BNA_RX_RES_MEM_T_UNMAPHQ = 2,
+        BNA_RX_RES_MEM_T_UNMAPDQ = 3,
+        BNA_RX_RES_MEM_T_CQPT = 4,
+        BNA_RX_RES_MEM_T_CSWQPT = 5,
+        BNA_RX_RES_MEM_T_CQPT_PAGE = 6,
+        BNA_RX_RES_MEM_T_HQPT = 7,
+        BNA_RX_RES_MEM_T_DQPT = 8,
+        BNA_RX_RES_MEM_T_HSWQPT = 9,
+        BNA_RX_RES_MEM_T_DSWQPT = 10,
+        BNA_RX_RES_MEM_T_DPAGE = 11,
+        BNA_RX_RES_MEM_T_HPAGE = 12,
+        BNA_RX_RES_MEM_T_IBIDX = 13,
+        BNA_RX_RES_MEM_T_RIT = 14,
+        BNA_RX_RES_T_INTR = 15,
+        BNA_RX_RES_T_MAX = 16
 };
 
 enum bna_tx_type {
@@ -583,6 +584,8 @@ struct bna_rxq {
 
         int buffer_size;
         int q_depth;
+        u32 num_vecs;
+        enum bna_status multi_buffer;
 
         struct bna_qpt qpt;
         struct bna_rcb *rcb;
@@ -632,6 +635,8 @@ struct bna_ccb {
         struct bna_rcb *rcb[2];
         void *ctrl;             /* For bnad */
         struct bna_pkt_rate pkt_rate;
+        u32 pkts_una;
+        u32 bytes_per_intr;
 
         /* Control path */
         struct bna_cq *cq;
@@ -671,14 +676,22 @@ struct bna_rx_config {
         int num_paths;
         enum bna_rxp_type rxp_type;
         int paused;
-        int q_depth;
         int coalescing_timeo;
         /*
          * Small/Large (or Header/Data) buffer size to be configured
-         * for SLR and HDS queue type. Large buffer size comes from
-         * enet->mtu.
+         * for SLR and HDS queue type.
          */
-        int small_buff_size;
+        u32 frame_size;
+
+        /* header or small queue */
+        u32 q1_depth;
+        u32 q1_buf_size;
+
+        /* data or large queue */
+        u32 q0_depth;
+        u32 q0_buf_size;
+        u32 q0_num_vecs;
+        enum bna_status q0_multi_buf;
 
         enum bna_status rss_status;
         struct bna_rss_config rss_config;
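
The old single q_depth/small_buff_size pair becomes independent q0_* (data/large) and q1_* (header/small) parameters, so a caller now describes both queues explicitly. A trimmed-down sketch of how the new fields end up populated for a jumbo, multi-buffer SLR configuration; the struct below is a stand-in rather than the real bna_rx_config, and the values mirror what bnad_init_rx_config() appears to pick for a 9000-byte MTU with rxq_depth 512 (the 128-byte small-buffer size is an assumption for BFI_SMALL_RXBUF_SIZE):

        #include <stdint.h>

        /* Trimmed-down stand-in for struct bna_rx_config, keeping only the new fields. */
        struct rx_cfg_sketch {
                uint32_t frame_size;
                uint32_t q0_depth, q0_buf_size, q0_num_vecs;    /* data/large queue */
                uint32_t q1_depth, q1_buf_size;                 /* header/small queue */
                int      q0_multi_buf;                          /* enabled/disabled */
        };

        static const struct rx_cfg_sketch jumbo_multi_buf = {
                .frame_size   = 9022,           /* 9000-byte MTU + Ethernet/VLAN/FCS overhead */
                .q0_buf_size  = 2048,           /* per-fragment buffer */
                .q0_num_vecs  = 4,              /* fragments per frame, multiple of 2 */
                .q0_depth     = 512 * 4,
                .q0_multi_buf = 1,
                .q1_depth     = 512,
                .q1_buf_size  = 128,            /* assumed value of BFI_SMALL_RXBUF_SIZE */
        };
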
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index c1357eac3500..0e9f8f5b6009 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -282,27 +282,32 @@ static int
 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
 {
         struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
-        int mtu, order;
+        int order;
 
         bnad_rxq_alloc_uninit(bnad, rcb);
 
-        mtu = bna_enet_mtu_get(&bnad->bna.enet);
-        order = get_order(mtu);
+        order = get_order(rcb->rxq->buffer_size);
+
+        unmap_q->type = BNAD_RXBUF_PAGE;
 
         if (bna_is_small_rxq(rcb->id)) {
                 unmap_q->alloc_order = 0;
                 unmap_q->map_size = rcb->rxq->buffer_size;
         } else {
-                unmap_q->alloc_order = order;
-                unmap_q->map_size =
-                        (rcb->rxq->buffer_size > 2048) ?
-                        PAGE_SIZE << order : 2048;
+                if (rcb->rxq->multi_buffer) {
+                        unmap_q->alloc_order = 0;
+                        unmap_q->map_size = rcb->rxq->buffer_size;
+                        unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
+                } else {
+                        unmap_q->alloc_order = order;
+                        unmap_q->map_size =
+                                (rcb->rxq->buffer_size > 2048) ?
+                                PAGE_SIZE << order : 2048;
+                }
         }
 
         BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
 
-        unmap_q->type = BNAD_RXBUF_PAGE;
-
         return 0;
 }
 
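
The practical effect of the new else-branch is that a multi-buffer data queue maps fixed 2048-byte fragments out of order-0 pages instead of one high-order buffer per frame. A small worked comparison, assuming 4 KiB pages and a 9022-byte frame (order_for() below only approximates get_order()):

        #include <stdio.h>

        #define PAGE_SIZE_SKETCH 4096u  /* assume 4 KiB pages */

        /* Stand-in for get_order(): smallest order with (PAGE_SIZE << order) >= size. */
        static unsigned int order_for(unsigned int size)
        {
                unsigned int order = 0;

                while ((PAGE_SIZE_SKETCH << order) < size)
                        order++;
                return order;
        }

        int main(void)
        {
                unsigned int frame = 9022;                      /* 9000-byte MTU */

                /* single-buffer path: one high-order buffer per frame */
                unsigned int order = order_for(frame);
                printf("single-buffer: order=%u map_size=%u\n",
                       order, PAGE_SIZE_SKETCH << order);       /* order=2, 16384 */

                /* multi-buffer path: order-0 pages carved into 2048-byte fragments,
                 * so (PAGE_SIZE << 0) % map_size == 0 keeps the BUG_ON() satisfied
                 */
                printf("multi-buffer:  order=0 map_size=2048 (%u frags/page)\n",
                       PAGE_SIZE_SKETCH / 2048);                /* 2 frags/page */
                return 0;
        }
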
@@ -345,10 +350,10 @@ bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
         for (i = 0; i < rcb->q_depth; i++) {
                 struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
 
-                if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                        bnad_rxq_cleanup_page(bnad, unmap);
-                else
+                if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                         bnad_rxq_cleanup_skb(bnad, unmap);
+                else
+                        bnad_rxq_cleanup_page(bnad, unmap);
         }
         bnad_rxq_alloc_uninit(bnad, rcb);
 }
@@ -480,10 +485,10 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
         if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
                 return;
 
-        if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                bnad_rxq_refill_page(bnad, rcb, to_alloc);
-        else
+        if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
+        else
+                bnad_rxq_refill_page(bnad, rcb, to_alloc);
 }
 
 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
@@ -500,62 +505,91 @@ bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
         BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
 
-static inline struct sk_buff *
-bnad_cq_prepare_skb(struct bnad_rx_ctrl *rx_ctrl,
-                    struct bnad_rx_unmap_q *unmap_q,
-                    struct bnad_rx_unmap *unmap,
-                    u32 length, u32 flags)
+static void
+bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
+                    u32 sop_ci, u32 nvecs)
 {
-        struct bnad *bnad = rx_ctrl->bnad;
-        struct sk_buff *skb;
+        struct bnad_rx_unmap_q *unmap_q;
+        struct bnad_rx_unmap *unmap;
+        u32 ci, vec;
 
-        if (BNAD_RXBUF_IS_PAGE(unmap_q->type)) {
-                skb = napi_get_frags(&rx_ctrl->napi);
-                if (unlikely(!skb))
-                        return NULL;
+        unmap_q = rcb->unmap_q;
+        for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
+                unmap = &unmap_q->unmap[ci];
+                BNA_QE_INDX_INC(ci, rcb->q_depth);
+
+                if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+                        bnad_rxq_cleanup_skb(bnad, unmap);
+                else
+                        bnad_rxq_cleanup_page(bnad, unmap);
+        }
+}
+
+static void
+bnad_cq_setup_skb_frags(struct bna_rcb *rcb, struct sk_buff *skb,
+                        u32 sop_ci, u32 nvecs, u32 last_fraglen)
+{
+        struct bnad *bnad;
+        u32 ci, vec, len, totlen = 0;
+        struct bnad_rx_unmap_q *unmap_q;
+        struct bnad_rx_unmap *unmap;
+
+        unmap_q = rcb->unmap_q;
+        bnad = rcb->bnad;
+        for (vec = 1, ci = sop_ci; vec <= nvecs; vec++) {
+                unmap = &unmap_q->unmap[ci];
+                BNA_QE_INDX_INC(ci, rcb->q_depth);
 
                 dma_unmap_page(&bnad->pcidev->dev,
                                 dma_unmap_addr(&unmap->vector, dma_addr),
                                 unmap->vector.len, DMA_FROM_DEVICE);
+
+                len = (vec == nvecs) ?
+                        last_fraglen : unmap->vector.len;
+                totlen += len;
+
                 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                                unmap->page, unmap->page_offset, length);
-                skb->len += length;
-                skb->data_len += length;
-                skb->truesize += length;
+                                unmap->page, unmap->page_offset, len);
 
                 unmap->page = NULL;
                 unmap->vector.len = 0;
-
-                return skb;
         }
 
-        skb = unmap->skb;
-        BUG_ON(!skb);
+        skb->len += totlen;
+        skb->data_len += totlen;
+        skb->truesize += totlen;
+}
+
+static inline void
+bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
+                  struct bnad_rx_unmap *unmap, u32 len)
+{
+        prefetch(skb->data);
 
         dma_unmap_single(&bnad->pcidev->dev,
                         dma_unmap_addr(&unmap->vector, dma_addr),
                         unmap->vector.len, DMA_FROM_DEVICE);
 
-        skb_put(skb, length);
-
+        skb_put(skb, len);
         skb->protocol = eth_type_trans(skb, bnad->netdev);
 
         unmap->skb = NULL;
         unmap->vector.len = 0;
-        return skb;
 }
 
 static u32
 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 {
-        struct bna_cq_entry *cq, *cmpl;
+        struct bna_cq_entry *cq, *cmpl, *next_cmpl;
         struct bna_rcb *rcb = NULL;
         struct bnad_rx_unmap_q *unmap_q;
-        struct bnad_rx_unmap *unmap;
-        struct sk_buff *skb;
+        struct bnad_rx_unmap *unmap = NULL;
+        struct sk_buff *skb = NULL;
         struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
         struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
-        u32 packets = 0, length = 0, flags, masked_flags;
+        u32 packets = 0, len = 0, totlen = 0;
+        u32 pi, vec, sop_ci = 0, nvecs = 0;
+        u32 flags, masked_flags;
 
         prefetch(bnad->netdev);
 
@@ -563,9 +597,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
         cmpl = &cq[ccb->producer_index];
 
         while (cmpl->valid && (packets < budget)) {
-                packets++;
-                flags = ntohl(cmpl->flags);
-                length = ntohs(cmpl->length);
                 BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
 
                 if (bna_is_small_rxq(cmpl->rxq_id))
@@ -574,25 +605,68 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                         rcb = ccb->rcb[0];
 
                 unmap_q = rcb->unmap_q;
-                unmap = &unmap_q->unmap[rcb->consumer_index];
 
-                if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
-                                      BNA_CQ_EF_FCS_ERROR |
-                                      BNA_CQ_EF_TOO_LONG))) {
-                        if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                                bnad_rxq_cleanup_page(bnad, unmap);
-                        else
-                                bnad_rxq_cleanup_skb(bnad, unmap);
+                /* start of packet ci */
+                sop_ci = rcb->consumer_index;
+
+                if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
+                        unmap = &unmap_q->unmap[sop_ci];
+                        skb = unmap->skb;
+                } else {
+                        skb = napi_get_frags(&rx_ctrl->napi);
+                        if (unlikely(!skb))
+                                break;
+                }
+                prefetch(skb);
+
+                flags = ntohl(cmpl->flags);
+                len = ntohs(cmpl->length);
+                totlen = len;
+                nvecs = 1;
+
+                /* Check all the completions for this frame.
+                 * busy-wait doesn't help much, break here.
+                 */
+                if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
+                    (flags & BNA_CQ_EF_EOP) == 0) {
+                        pi = ccb->producer_index;
+                        do {
+                                BNA_QE_INDX_INC(pi, ccb->q_depth);
+                                next_cmpl = &cq[pi];
 
+                                if (!next_cmpl->valid)
+                                        break;
+
+                                len = ntohs(next_cmpl->length);
+                                flags = ntohl(next_cmpl->flags);
+
+                                nvecs++;
+                                totlen += len;
+                        } while ((flags & BNA_CQ_EF_EOP) == 0);
+
+                        if (!next_cmpl->valid)
+                                break;
+                }
+
+                /* TODO: BNA_CQ_EF_LOCAL ? */
+                if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+                                        BNA_CQ_EF_FCS_ERROR |
+                                        BNA_CQ_EF_TOO_LONG))) {
+                        bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
                         rcb->rxq->rx_packets_with_error++;
+
                         goto next;
                 }
 
-                skb = bnad_cq_prepare_skb(ccb->ctrl, unmap_q, unmap,
-                                length, flags);
+                if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
+                        bnad_cq_setup_skb(bnad, skb, unmap, len);
+                else
+                        bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
 
-                if (unlikely(!skb))
-                        break;
+                packets++;
+                rcb->rxq->rx_packets++;
+                rcb->rxq->rx_bytes += totlen;
+                ccb->bytes_per_intr += totlen;
 
                 masked_flags = flags & flags_cksum_prot_mask;
 
@@ -606,21 +680,21 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
                 else
                         skb_checksum_none_assert(skb);
 
-                rcb->rxq->rx_packets++;
-                rcb->rxq->rx_bytes += length;
-
                 if (flags & BNA_CQ_EF_VLAN)
                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
 
-                if (BNAD_RXBUF_IS_PAGE(unmap_q->type))
-                        napi_gro_frags(&rx_ctrl->napi);
-                else
+                if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
                         netif_receive_skb(skb);
+                else
+                        napi_gro_frags(&rx_ctrl->napi);
 
 next:
-                cmpl->valid = 0;
-                BNA_QE_INDX_INC(rcb->consumer_index, rcb->q_depth);
-                BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+                BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
+                for (vec = 0; vec < nvecs; vec++) {
+                        cmpl = &cq[ccb->producer_index];
+                        cmpl->valid = 0;
+                        BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
+                }
                 cmpl = &cq[ccb->producer_index];
         }
 
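
Since a frame can now occupy several RxQ entries and the same number of CQ entries, the loop above advances the RxQ consumer index by nvecs in one step and then invalidates nvecs completions one by one. A standalone sketch of that wrap-around arithmetic, where the two macros approximate BNA_QE_INDX_ADD/BNA_QE_INDX_INC and the depths are assumed to be powers of two so the wrap is a simple mask:

        #include <stdint.h>
        #include <stdio.h>

        /* Approximations of the driver's index helpers; power-of-two depth assumed. */
        #define QE_INDX_INC(idx, depth)      ((idx) = ((idx) + 1) & ((depth) - 1))
        #define QE_INDX_ADD(idx, num, depth) ((idx) = ((idx) + (num)) & ((depth) - 1))

        int main(void)
        {
                uint32_t rcb_ci = 2046, ccb_pi = 1022;
                uint32_t rcb_depth = 2048, ccb_depth = 1024;
                uint32_t nvecs = 4;             /* one frame spread over 4 buffers */

                QE_INDX_ADD(rcb_ci, nvecs, rcb_depth);          /* 2046 -> 2 (wraps) */
                for (uint32_t vec = 0; vec < nvecs; vec++)
                        QE_INDX_INC(ccb_pi, ccb_depth);         /* 1022 -> 2 (wraps) */

                printf("rcb_ci=%u ccb_pi=%u\n", rcb_ci, ccb_pi);
                return 0;
        }
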
@@ -1930,6 +2004,7 @@ err_return:
 static void
 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
 {
+        memset(rx_config, 0, sizeof(*rx_config));
         rx_config->rx_type = BNA_RX_T_REGULAR;
         rx_config->num_paths = bnad->num_rxp_per_rx;
         rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
@@ -1950,10 +2025,39 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
                 memset(&rx_config->rss_config, 0,
                         sizeof(rx_config->rss_config));
         }
+
+        rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
+        rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
+
+        /* BNA_RXP_SINGLE - one data-buffer queue
+         * BNA_RXP_SLR - one small-buffer and one large-buffer queues
+         * BNA_RXP_HDS - one header-buffer and one data-buffer queues
+         */
+        /* TODO: configurable param for queue type */
         rx_config->rxp_type = BNA_RXP_SLR;
-        rx_config->q_depth = bnad->rxq_depth;
 
-        rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;
+        if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+            rx_config->frame_size > 4096) {
+                /* though size_routing_enable is set in SLR,
+                 * small packets may get routed to same rxq.
+                 * set buf_size to 2048 instead of PAGE_SIZE.
+                 */
+                rx_config->q0_buf_size = 2048;
+                /* this should be in multiples of 2 */
+                rx_config->q0_num_vecs = 4;
+                rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
+                rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
+        } else {
+                rx_config->q0_buf_size = rx_config->frame_size;
+                rx_config->q0_num_vecs = 1;
+                rx_config->q0_depth = bnad->rxq_depth;
+        }
+
+        /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
+        if (rx_config->rxp_type == BNA_RXP_SLR) {
+                rx_config->q1_depth = bnad->rxq_depth;
+                rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
+        }
 
         rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
 }
@@ -1969,6 +2073,49 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
 }
 
 /* Called with mutex_lock(&bnad->conf_mutex) held */
+u32
+bnad_reinit_rx(struct bnad *bnad)
+{
+        struct net_device *netdev = bnad->netdev;
+        u32 err = 0, current_err = 0;
+        u32 rx_id = 0, count = 0;
+        unsigned long flags;
+
+        /* destroy and create new rx objects */
+        for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+                if (!bnad->rx_info[rx_id].rx)
+                        continue;
+                bnad_destroy_rx(bnad, rx_id);
+        }
+
+        spin_lock_irqsave(&bnad->bna_lock, flags);
+        bna_enet_mtu_set(&bnad->bna.enet,
+                         BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
+        spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+        for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
+                count++;
+                current_err = bnad_setup_rx(bnad, rx_id);
+                if (current_err && !err) {
+                        err = current_err;
+                        pr_err("RXQ:%u setup failed\n", rx_id);
+                }
+        }
+
+        /* restore rx configuration */
+        if (bnad->rx_info[0].rx && !err) {
+                bnad_restore_vlans(bnad, 0);
+                bnad_enable_default_bcast(bnad);
+                spin_lock_irqsave(&bnad->bna_lock, flags);
+                bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
+                spin_unlock_irqrestore(&bnad->bna_lock, flags);
+                bnad_set_rx_mode(netdev);
+        }
+
+        return count;
+}
+
+/* Called with bnad_conf_lock() held */
 void
 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
 {
@@ -2047,13 +2194,19 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
         /* Fill Unmap Q memory requirements */
-        BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
-                        rx_config->num_paths +
-                        ((rx_config->rxp_type == BNA_RXP_SINGLE) ?
-                        0 : rx_config->num_paths),
-                        ((bnad->rxq_depth * sizeof(struct bnad_rx_unmap)) +
-                        sizeof(struct bnad_rx_unmap_q)));
-
+        BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
+                        rx_config->num_paths,
+                        (rx_config->q0_depth *
+                        sizeof(struct bnad_rx_unmap)) +
+                        sizeof(struct bnad_rx_unmap_q));
+
+        if (rx_config->rxp_type != BNA_RXP_SINGLE) {
+                BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
+                        rx_config->num_paths,
+                        (rx_config->q1_depth *
+                        sizeof(struct bnad_rx_unmap) +
+                        sizeof(struct bnad_rx_unmap_q)));
+        }
         /* Allocate resource */
         err = bnad_rx_res_alloc(bnad, res_info, rx_id);
         if (err)
@@ -2548,7 +2701,6 @@ bnad_open(struct net_device *netdev)
         int err;
         struct bnad *bnad = netdev_priv(netdev);
         struct bna_pause_config pause_config;
-        int mtu;
         unsigned long flags;
 
         mutex_lock(&bnad->conf_mutex);
@@ -2567,10 +2719,9 @@ bnad_open(struct net_device *netdev)
         pause_config.tx_pause = 0;
         pause_config.rx_pause = 0;
 
-        mtu = ETH_HLEN + VLAN_HLEN + bnad->netdev->mtu + ETH_FCS_LEN;
-
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bna_enet_mtu_set(&bnad->bna.enet, mtu, NULL);
+        bna_enet_mtu_set(&bnad->bna.enet,
+                         BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
         bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
         bna_enet_enable(&bnad->bna.enet);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3092,14 +3243,14 @@ bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
 }
 
 static int
-bnad_mtu_set(struct bnad *bnad, int mtu)
+bnad_mtu_set(struct bnad *bnad, int frame_size)
 {
         unsigned long flags;
 
         init_completion(&bnad->bnad_completions.mtu_comp);
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bna_enet_mtu_set(&bnad->bna.enet, mtu, bnad_cb_enet_mtu_set);
+        bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
         wait_for_completion(&bnad->bnad_completions.mtu_comp);
@@ -3110,18 +3261,34 @@ bnad_mtu_set(struct bnad *bnad, int mtu)
 static int
 bnad_change_mtu(struct net_device *netdev, int new_mtu)
 {
-        int err, mtu = netdev->mtu;
+        int err, mtu;
         struct bnad *bnad = netdev_priv(netdev);
+        u32 rx_count = 0, frame, new_frame;
 
         if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU)
                 return -EINVAL;
 
         mutex_lock(&bnad->conf_mutex);
 
+        mtu = netdev->mtu;
         netdev->mtu = new_mtu;
 
-        mtu = ETH_HLEN + VLAN_HLEN + new_mtu + ETH_FCS_LEN;
-        err = bnad_mtu_set(bnad, mtu);
+        frame = BNAD_FRAME_SIZE(mtu);
+        new_frame = BNAD_FRAME_SIZE(new_mtu);
+
+        /* check if multi-buffer needs to be enabled */
+        if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
+            netif_running(bnad->netdev)) {
+                /* only when transition is over 4K */
+                if ((frame <= 4096 && new_frame > 4096) ||
+                    (frame > 4096 && new_frame <= 4096))
+                        rx_count = bnad_reinit_rx(bnad);
+        }
+
+        /* rx_count > 0 - new rx created
+         *      - Linux set err = 0 and return
+         */
+        err = bnad_mtu_set(bnad, new_frame);
         if (err)
                 err = -EBUSY;
 
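
The MTU handler only tears down and rebuilds the RX path when the frame size crosses the 4096-byte boundary, because that is where the data queue switches between the single-buffer and multi-buffer layouts. The decision reduces to roughly the predicate below; the 4096 threshold and the frame-size formula come from the patch, needs_reinit() is a hypothetical name, and the CAT2/netif_running() conditions are left out of the sketch:

        #include <stdbool.h>
        #include <stdint.h>

        /* ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN, as in BNAD_FRAME_SIZE() */
        #define FRAME_SIZE(mtu) (14u + 4u + (uint32_t)(mtu) + 4u)

        /* Returns true only when the old and new frame sizes sit on opposite
         * sides of the 4 KiB threshold, i.e. when the RX queues must be
         * recreated with the other buffer layout.
         */
        static bool needs_reinit(int old_mtu, int new_mtu)
        {
                uint32_t frame = FRAME_SIZE(old_mtu);
                uint32_t new_frame = FRAME_SIZE(new_mtu);

                return (frame <= 4096 && new_frame > 4096) ||
                       (frame > 4096 && new_frame <= 4096);
        }
        /* e.g. needs_reinit(1500, 9000) -> true, needs_reinit(4000, 4050) -> false */
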
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 9d10c69f5889..2e6b943262a2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -105,6 +105,9 @@ struct bnad_rx_ctrl {
 #define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx)
 #define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx)
 
+#define BNAD_FRAME_SIZE(_mtu) \
+        (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN)
+
 /*
  * DATA STRUCTURES
  */
@@ -241,12 +244,13 @@ struct bnad_rx_unmap {
 
 enum bnad_rxbuf_type {
         BNAD_RXBUF_NONE = 0,
-        BNAD_RXBUF_SKB = 1,
+        BNAD_RXBUF_SK_BUFF = 1,
         BNAD_RXBUF_PAGE = 2,
-        BNAD_RXBUF_MULTI = 3
+        BNAD_RXBUF_MULTI_BUFF = 3
 };
 
-#define BNAD_RXBUF_IS_PAGE(_type)       ((_type) == BNAD_RXBUF_PAGE)
+#define BNAD_RXBUF_IS_SK_BUFF(_type)    ((_type) == BNAD_RXBUF_SK_BUFF)
+#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF)
 
 struct bnad_rx_unmap_q {
         int                     reuse_pi;
@@ -256,6 +260,9 @@ struct bnad_rx_unmap_q {
         struct bnad_rx_unmap    unmap[0];
 };
 
+#define BNAD_PCI_DEV_IS_CAT2(_bnad) \
+        ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2)
+
 /* Bit mask values for bnad->cfg_flags */
 #define BNAD_CF_DIM_ENABLED             0x01    /* DIM */
 #define BNAD_CF_PROMISC                 0x02