Diffstat (limited to 'drivers/net/bnx2x/bnx2x_cmn.c')
 -rw-r--r--  drivers/net/bnx2x/bnx2x_cmn.c | 402 ++++++++++++++++++++++++++++--------
 1 file changed, 324 insertions(+), 78 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 8853ae2a042..218a7ad7cdd 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -27,6 +27,49 @@
 
 static int bnx2x_setup_irqs(struct bnx2x *bp);
 
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct napi_struct orig_napi = fp->napi;
+	/* bzero bnx2x_fastpath contents */
+	memset(fp, 0, sizeof(*fp));
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @from:	source FP index
+ * @to:		destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+	struct bnx2x_fastpath *from_fp = &bp->fp[from];
+	struct bnx2x_fastpath *to_fp = &bp->fp[to];
+	struct napi_struct orig_napi = to_fp->napi;
+	/* Move bnx2x_fastpath contents */
+	memcpy(to_fp, from_fp, sizeof(*to_fp));
+	to_fp->index = to;
+
+	/* Restore the NAPI object as it has been already initialized */
+	to_fp->napi = orig_napi;
+}
+
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
  */
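Both helpers above use the same save/restore idiom: the one field that must outlive the wipe (the napi object, which is initialized only once) is copied to a stack variable, the whole structure is overwritten, and the saved value is written back. A minimal standalone sketch of the idiom, with a hypothetical struct foo standing in for bnx2x_fastpath (not part of the patch):

    #include <string.h>

    struct napi_like { void *dev; int weight; };  /* stand-in for struct napi_struct */

    struct foo {
        struct napi_like napi;  /* initialized once, must survive a reset */
        int stats[8];           /* per-load state, safe to zero */
    };

    static void foo_bz(struct foo *f)
    {
        struct napi_like saved = f->napi;  /* save by value */

        memset(f, 0, sizeof(*f));          /* wipe the whole structure */
        f->napi = saved;                   /* put the live object back */
    }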
@@ -881,55 +924,6 @@ void __bnx2x_link_report(struct bnx2x *bp)
 	}
 }
 
-/* Returns the number of actually allocated BDs */
-static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
-				     int rx_ring_size)
-{
-	struct bnx2x *bp = fp->bp;
-	u16 ring_prod, cqe_ring_prod;
-	int i;
-
-	fp->rx_comp_cons = 0;
-	cqe_ring_prod = ring_prod = 0;
-	for (i = 0; i < rx_ring_size; i++) {
-		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
-			BNX2X_ERR("was only able to allocate "
-				  "%d rx skbs on queue[%d]\n", i, fp->index);
-			fp->eth_q_stats.rx_skb_alloc_failed++;
-			break;
-		}
-		ring_prod = NEXT_RX_IDX(ring_prod);
-		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
-		WARN_ON(ring_prod <= i);
-	}
-
-	fp->rx_bd_prod = ring_prod;
-	/* Limit the CQE producer by the CQE ring size */
-	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
-				 cqe_ring_prod);
-	fp->rx_pkt = fp->rx_calls = 0;
-
-	return i;
-}
-
-static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
-{
-	struct bnx2x *bp = fp->bp;
-	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
-			   MAX_RX_AVAIL/bp->num_queues;
-
-	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
-
-	bnx2x_alloc_rx_bds(fp, rx_ring_size);
-
-	/* Warning!
-	 * this will generate an interrupt (to the TSTORM)
-	 * must only be done after chip is initialized
-	 */
-	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
-			     fp->rx_sge_prod);
-}
-
 void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
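Note that bnx2x_alloc_rx_bds() is removed here but still called from the new bnx2x_alloc_fp_mem_at() later in this patch, so it has presumably moved out of this file (the diffstat above is limited to bnx2x_cmn.c). The ring-sizing rule from bnx2x_alloc_rx_bd_ring() reappears there with a TPA-dependent lower bound; as a standalone sketch (the macro values are placeholders, not the driver's real constants):

    #define MAX_RX_AVAIL        4096  /* placeholder, not the driver value */
    #define MIN_RX_SIZE_TPA       72  /* placeholder */
    #define MIN_RX_SIZE_NONTPA    10  /* placeholder */

    /* Per-queue rx ring size: an explicit request wins, else share the budget,
     * but never go below the firmware minimum for the queue's TPA mode. */
    static int rx_ring_size_for(int requested, int num_queues, int disable_tpa)
    {
        int size = requested ? requested : MAX_RX_AVAIL / num_queues;
        int min = disable_tpa ? MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA;

        return size > min ? size : min;
    }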
@@ -938,6 +932,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 	u16 ring_prod;
 	int i, j;
 
+	/* Allocate TPA resources */
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
@@ -945,6 +940,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
 		if (!fp->disable_tpa) {
+			/* Fill the per-aggregation pool */
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
 				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
@@ -999,13 +995,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 
 		fp->rx_bd_cons = 0;
 
-		bnx2x_set_next_page_rx_bd(fp);
-
-		/* CQ ring */
-		bnx2x_set_next_page_rx_cq(fp);
-
-		/* Allocate BDs and initialize BD ring */
-		bnx2x_alloc_rx_bd_ring(fp);
+		/* Activate BD ring */
+		/* Warning!
+		 * this will generate an interrupt (to the TSTORM)
+		 * must only be done after chip is initialized
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
 
 		if (j != 0)
 			continue;
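With BD allocation moved out of this function, bnx2x_init_rx_rings() now only publishes the initial producer values to the chip, which is why the TSTORM interrupt warning moved here. Producers in these rings do not advance by plain increment: the final descriptor of each ring page is reserved for a next-page pointer and must be skipped, which is what NEXT_RX_IDX()/NEXT_RCQ_IDX() do in the removed loop above. A simplified sketch with invented page geometry (the real macros and counts differ):

    #define DESC_PER_PAGE  512                  /* invented geometry */
    #define RESERVED_SLOT  (DESC_PER_PAGE - 1)  /* last slot: next-page pointer */
    #define NUM_PAGES      8
    #define RING_SIZE      (DESC_PER_PAGE * NUM_PAGES)

    /* Advance a producer index, hopping over the reserved next-page slot */
    static unsigned short next_rx_idx(unsigned short idx)
    {
        idx++;
        if ((idx % DESC_PER_PAGE) == RESERVED_SLOT)
            idx++;
        return idx % RING_SIZE;
    }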
@@ -1039,27 +1035,40 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
 	}
 }
 
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	int i;
+
+	/* ring wasn't allocated */
+	if (fp->rx_buf_ring == NULL)
+		return;
+
+	for (i = 0; i < NUM_RX_BD; i++) {
+		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+		struct sk_buff *skb = rx_buf->skb;
+
+		if (skb == NULL)
+			continue;
+
+		dma_unmap_single(&bp->pdev->dev,
+				 dma_unmap_addr(rx_buf, mapping),
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+
+		rx_buf->skb = NULL;
+		dev_kfree_skb(skb);
+	}
+}
+
 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 {
-	int i, j;
+	int j;
 
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
-		for (i = 0; i < NUM_RX_BD; i++) {
-			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
-			struct sk_buff *skb = rx_buf->skb;
+		bnx2x_free_rx_bds(fp);
 
-			if (skb == NULL)
-				continue;
-
-			dma_unmap_single(&bp->pdev->dev,
-					 dma_unmap_addr(rx_buf, mapping),
-					 fp->rx_buf_size, DMA_FROM_DEVICE);
-
-			rx_buf->skb = NULL;
-			dev_kfree_skb(skb);
-		}
 		if (!fp->disable_tpa)
 			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
 					    ETH_MAX_AGGREGATION_QUEUES_E1 :
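Pulling the loop out into bnx2x_free_rx_bds(), with an early return when rx_buf_ring was never allocated, makes the function safe to call on a partially set-up queue; the error path of bnx2x_alloc_fp_mem_at() added later in this patch relies on exactly that. The general shape of the pattern, sketched on a hypothetical resource (not driver code):

    #include <stdlib.h>

    struct ring {
        void **bufs;  /* NULL until allocated */
        int n;
    };

    /* Safe at any stage of a failed setup: frees what exists and no more */
    static void ring_free(struct ring *r)
    {
        int i;

        if (r->bufs == NULL)  /* ring wasn't allocated */
            return;

        for (i = 0; i < r->n; i++)
            free(r->bufs[i]);  /* free(NULL) is a harmless no-op */
        free(r->bufs);
        r->bufs = NULL;        /* a second call stays harmless */
    }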
@@ -1435,26 +1444,37 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);
 
+	/* zero fastpath structures preserving invariants like napi which are
+	 * allocated only once
+	 */
+	for_each_queue(bp, i)
+		bnx2x_bz_fp(bp, i);
+
 	/* Set the receive queues buffer size */
 	bnx2x_set_rx_buf_size(bp);
 
+	for_each_queue(bp, i)
+		bnx2x_fp(bp, i, disable_tpa) =
+			((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+	/* We don't want TPA on FCoE L2 ring */
+	bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
+
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
+	/* As long as bnx2x_alloc_mem() may possibly update
+	 * bp->num_queues, bnx2x_set_real_num_queues() should always
+	 * come after it.
+	 */
 	rc = bnx2x_set_real_num_queues(bp);
 	if (rc) {
 		BNX2X_ERR("Unable to set real_num_queues\n");
 		goto load_error0;
 	}
 
-	for_each_queue(bp, i)
-		bnx2x_fp(bp, i, disable_tpa) =
-			((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
-	/* We don't want TPA on FCoE L2 ring */
-	bnx2x_fcoe(bp, disable_tpa) = 1;
-#endif
 	bnx2x_napi_enable(bp);
 
 	/* Send LOAD_REQUEST command to MCP
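The reordering in bnx2x_nic_load() follows the data dependencies: the fastpath structures are zeroed before bnx2x_set_rx_buf_size() writes into them; the disable_tpa flags are settled before bnx2x_alloc_mem(), since the firmware-minimum ring size chosen during allocation differs between TPA and non-TPA queues; and bnx2x_set_real_num_queues() now runs after allocation because, as the new comment says, a failed allocation may shrink bp->num_queues. Continuing the sizing sketch from above (placeholder numbers, not driver values), the TPA dependency in action:

    #include <stdio.h>

    int main(void)
    {
        /* 64 queues, no explicit rx_ring_size request */
        printf("%d\n", rx_ring_size_for(0, 64, 0)); /* TPA: 4096/64 = 64, clamped up to 72 */
        printf("%d\n", rx_ring_size_for(0, 64, 1)); /* non-TPA: 64 >= 10, stays 64 */
        return 0;
    }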
@@ -2480,6 +2500,232 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
 	return 0;
 }
 
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+
+	/* Common */
+#ifdef BCM_CNIC
+	if (IS_FCOE_IDX(fp_index)) {
+		memset(sb, 0, sizeof(union host_hc_status_block));
+		fp->status_blk_mapping = 0;
+
+	} else {
+#endif
+		/* status blocks */
+		if (CHIP_IS_E2(bp))
+			BNX2X_PCI_FREE(sb->e2_sb,
+				       bnx2x_fp(bp, fp_index,
+						status_blk_mapping),
+				       sizeof(struct host_hc_status_block_e2));
+		else
+			BNX2X_PCI_FREE(sb->e1x_sb,
+				       bnx2x_fp(bp, fp_index,
+						status_blk_mapping),
+				       sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+	}
+#endif
+	/* Rx */
+	if (!skip_rx_queue(bp, fp_index)) {
+		bnx2x_free_rx_bds(fp);
+
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
+			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
+			       sizeof(struct eth_fast_path_rx_cqe) *
+			       NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
+			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+	}
+
+	/* Tx */
+	if (!skip_tx_queue(bp, fp_index)) {
+		/* fastpath tx rings: tx_buf tx_desc */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
+			       bnx2x_fp(bp, fp_index, tx_desc_mapping),
+			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+	}
+	/* end of fastpath */
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+	int i;
+	for_each_queue(bp, i)
+		bnx2x_free_fp_mem_at(bp, i);
+}
+
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+	if (CHIP_IS_E2(bp)) {
+		bnx2x_fp(bp, index, sb_index_values) =
+			(__le16 *)status_blk.e2_sb->sb.index_values;
+		bnx2x_fp(bp, index, sb_running_index) =
+			(__le16 *)status_blk.e2_sb->sb.running_index;
+	} else {
+		bnx2x_fp(bp, index, sb_index_values) =
+			(__le16 *)status_blk.e1x_sb->sb.index_values;
+		bnx2x_fp(bp, index, sb_running_index) =
+			(__le16 *)status_blk.e1x_sb->sb.running_index;
+	}
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+	union host_hc_status_block *sb;
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	int ring_size = 0;
+
+	/* if rx_ring_size specified - use it */
+	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+			   MAX_RX_AVAIL/bp->num_queues;
+
+	/* allocate at least number of buffers required by FW */
+	rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+						    MIN_RX_SIZE_TPA,
+			     rx_ring_size);
+
+	bnx2x_fp(bp, index, bp) = bp;
+	bnx2x_fp(bp, index, index) = index;
+
+	/* Common */
+	sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+	if (!IS_FCOE_IDX(index)) {
+#endif
+		/* status blocks */
+		if (CHIP_IS_E2(bp))
+			BNX2X_PCI_ALLOC(sb->e2_sb,
+				&bnx2x_fp(bp, index, status_blk_mapping),
+				sizeof(struct host_hc_status_block_e2));
+		else
+			BNX2X_PCI_ALLOC(sb->e1x_sb,
+				&bnx2x_fp(bp, index, status_blk_mapping),
+				sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+	}
+#endif
+	set_sb_shortcuts(bp, index);
+
+	/* Tx */
+	if (!skip_tx_queue(bp, index)) {
+		/* fastpath tx rings: tx_buf tx_desc */
+		BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
+			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
+				&bnx2x_fp(bp, index, tx_desc_mapping),
+				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+	}
+
+	/* Rx */
+	if (!skip_rx_queue(bp, index)) {
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+				&bnx2x_fp(bp, index, rx_desc_mapping),
+				sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+				&bnx2x_fp(bp, index, rx_comp_mapping),
+				sizeof(struct eth_fast_path_rx_cqe) *
+				NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+				&bnx2x_fp(bp, index, rx_sge_mapping),
+				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+		/* RX BD ring */
+		bnx2x_set_next_page_rx_bd(fp);
+
+		/* CQ ring */
+		bnx2x_set_next_page_rx_cq(fp);
+
+		/* BDs */
+		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+		if (ring_size < rx_ring_size)
+			goto alloc_mem_err;
+	}
+
+	return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+		  index, ring_size);
+	/* FW will drop all packets if queue is not big enough,
+	 * in these cases we disable the queue.
+	 * Min size is different for TPA and non-TPA queues.
+	 */
+	if (ring_size < (fp->disable_tpa ?
+				MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
+		/* release memory allocated for this queue */
+		bnx2x_free_fp_mem_at(bp, index);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+	int i;
+
+	/**
+	 * 1. Allocate FP for leading - fatal if error
+	 * 2. {CNIC} Allocate FCoE FP - fatal if error
+	 * 3. Allocate RSS - fix number of queues if error
+	 */
+
+	/* leading */
+	if (bnx2x_alloc_fp_mem_at(bp, 0))
+		return -ENOMEM;
+#ifdef BCM_CNIC
+	/* FCoE */
+	if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+		return -ENOMEM;
+#endif
+	/* RSS */
+	for_each_nondefault_eth_queue(bp, i)
+		if (bnx2x_alloc_fp_mem_at(bp, i))
+			break;
+
+	/* handle memory failures */
+	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+		WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+		/**
+		 * move non eth FPs next to last eth FP
+		 * must be done in that order
+		 * FCOE_IDX < FWD_IDX < OOO_IDX
+		 */
+
+		/* move FCoE fp */
+		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+		bp->num_queues -= delta;
+		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+			  bp->num_queues + delta, bp->num_queues);
+	}
+
+	return 0;
+}
 
 static int bnx2x_setup_irqs(struct bnx2x *bp)
 {
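The fallback at the end of bnx2x_alloc_fp_mem() is worth a worked example. Suppose (hypothetically) eight RSS queues are requested and bnx2x_alloc_fp_mem_at() first fails at i = 5: delta is 3, the FCoE fastpath, which sits right after the ethernet fastpaths, is slid down by three slots with bnx2x_move_fp() so it stays adjacent to the last usable queue, and bp->num_queues shrinks by 3. In sketch form:

    #include <assert.h>

    /* Worked example of the shrink-and-slide fallback (numbers illustrative) */
    static void shrink_example(void)
    {
        int num_eth = 8;                      /* eth queues requested: 0..7 */
        int first_failed = 5;                 /* allocation failed here */
        int delta = num_eth - first_failed;   /* 3 queues are given up */
        int fcoe_idx = num_eth;               /* FCoE fp sits after the eth fps */

        fcoe_idx -= delta;                    /* slide down next to queue 4 */
        assert(fcoe_idx == first_failed);
    }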