author		Michael Chan <mchan@broadcom.com>	2007-12-20 22:56:37 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 17:57:35 -0500
commit		35efa7c1f4aa868d4a948a9069f20ccef1b3b28d
tree		9369da5bfcd93139ce0ac961059d29b57c66626c /drivers/net
parent		6d866ffc69b0c3e584782f212a3f783708d31e9a

[BNX2]: Introduce new bnx2_napi structure.

Introduce a bnx2_napi structure that will hold a napi_struct and
other fields to handle NAPI polling for the napi_struct. Various tx
and rx indexes and status block pointers will be moved from the main
bnx2 structure to this bnx2_napi structure.
Most NAPI path functions are modified to be passed this bnx2_napi
struct pointer.
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
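
In outline, the change amounts to the per-NAPI context below plus a container_of() lookup in the poll handler; this is a condensed sketch pieced together from the hunks that follow, not a buildable excerpt of the driver:

struct bnx2_napi {
	struct napi_struct	napi ____cacheline_aligned;
	struct bnx2		*bp;		/* back-pointer to the device */
	struct status_block	*status_blk;	/* status block used by this context */
	u32			last_status_idx;
	u32			int_num;
};

static int bnx2_poll(struct napi_struct *napi, int budget)
{
	/* napi is embedded in struct bnx2_napi, so the per-vector state is
	 * one container_of() away instead of living in struct bnx2.
	 */
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;

	/* rx/tx/phy processing then runs off bnapi->status_blk and
	 * bnapi->last_status_idx; see bnx2_poll_work() in the diff for the
	 * full loop and interrupt re-arming, which are omitted here. */
	work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

	return work_done;
}
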
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/bnx2.c	146
-rw-r--r--	drivers/net/bnx2.h	 16
2 files changed, 100 insertions, 62 deletions
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 83cdbde5d2d6..3f754e6b48d6 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -407,12 +407,14 @@ bnx2_disable_int(struct bnx2 *bp)
 static void
 bnx2_enable_int(struct bnx2 *bp)
 {
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;
+
 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
-	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
+	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
-	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
+	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

 	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
 }
@@ -426,11 +428,23 @@ bnx2_disable_int_sync(struct bnx2 *bp)
 }

 static void
+bnx2_napi_disable(struct bnx2 *bp)
+{
+	napi_disable(&bp->bnx2_napi.napi);
+}
+
+static void
+bnx2_napi_enable(struct bnx2 *bp)
+{
+	napi_enable(&bp->bnx2_napi.napi);
+}
+
+static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
-		napi_disable(&bp->napi);
+		bnx2_napi_disable(bp);
 		netif_tx_disable(bp->dev);
 		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
 	}
@@ -442,7 +456,7 @@ bnx2_netif_start(struct bnx2 *bp)
 	if (atomic_dec_and_test(&bp->intr_sem)) {
 		if (netif_running(bp->dev)) {
 			netif_wake_queue(bp->dev);
-			napi_enable(&bp->napi);
+			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
 		}
 	}
@@ -555,6 +569,8 @@ bnx2_alloc_mem(struct bnx2 *bp)

 	memset(bp->status_blk, 0, bp->status_stats_size);

+	bp->bnx2_napi.status_blk = bp->status_blk;
+
 	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
 				  status_blk_size);

@@ -2291,9 +2307,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 }

 static int
-bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
+bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
 {
-	struct status_block *sblk = bp->status_blk;
+	struct status_block *sblk = bnapi->status_blk;
 	u32 new_link_state, old_link_state;
 	int is_set = 1;

@@ -2311,24 +2327,24 @@ bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
 }

 static void
-bnx2_phy_int(struct bnx2 *bp)
+bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
-	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
 		spin_lock(&bp->phy_lock);
 		bnx2_set_link(bp);
 		spin_unlock(&bp->phy_lock);
 	}
-	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
 		bnx2_set_remote_link(bp);

 }

 static inline u16
-bnx2_get_hw_tx_cons(struct bnx2 *bp)
+bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
 {
 	u16 cons;

-	cons = bp->status_blk->status_tx_quick_consumer_index0;
+	cons = bnapi->status_blk->status_tx_quick_consumer_index0;

 	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
 		cons++;
@@ -2336,12 +2352,12 @@ bnx2_get_hw_tx_cons(struct bnx2 *bp)
 }

 static void
-bnx2_tx_int(struct bnx2 *bp)
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	u16 hw_cons, sw_cons, sw_ring_cons;
 	int tx_free_bd = 0;

-	hw_cons = bnx2_get_hw_tx_cons(bp);
+	hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	sw_cons = bp->tx_cons;

 	while (sw_cons != hw_cons) {
@@ -2393,7 +2409,7 @@

 		dev_kfree_skb(skb);

-		hw_cons = bnx2_get_hw_tx_cons(bp);
+		hw_cons = bnx2_get_hw_tx_cons(bnapi);
 	}

 	bp->hw_tx_cons = hw_cons;
@@ -2584,9 +2600,9 @@ bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
 }

 static inline u16
-bnx2_get_hw_rx_cons(struct bnx2 *bp)
+bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
 {
-	u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
+	u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;

 	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
 		cons++;
@@ -2594,13 +2610,13 @@ bnx2_get_hw_rx_cons(struct bnx2 *bp)
 }

 static int
-bnx2_rx_int(struct bnx2 *bp, int budget)
+bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 {
 	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
 	struct l2_fhdr *rx_hdr;
 	int rx_pkt = 0, pg_ring_used = 0;

-	hw_cons = bnx2_get_hw_rx_cons(bp);
+	hw_cons = bnx2_get_hw_rx_cons(bnapi);
 	sw_cons = bp->rx_cons;
 	sw_prod = bp->rx_prod;

@@ -2717,7 +2733,7 @@ next_rx:

 		/* Refresh hw_cons to see if there is new work */
 		if (sw_cons == hw_cons) {
-			hw_cons = bnx2_get_hw_rx_cons(bp);
+			hw_cons = bnx2_get_hw_rx_cons(bnapi);
 			rmb();
 		}
 	}
@@ -2746,8 +2762,9 @@ bnx2_msi(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;

-	prefetch(bp->status_blk);
+	prefetch(bnapi->status_blk);
 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
@@ -2756,7 +2773,7 @@ bnx2_msi(int irq, void *dev_instance)
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;

-	netif_rx_schedule(dev, &bp->napi);
+	netif_rx_schedule(dev, &bnapi->napi);

 	return IRQ_HANDLED;
 }
@@ -2766,14 +2783,15 @@ bnx2_msi_1shot(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;

-	prefetch(bp->status_blk);
+	prefetch(bnapi->status_blk);

 	/* Return here if interrupt is disabled. */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;

-	netif_rx_schedule(dev, &bp->napi);
+	netif_rx_schedule(dev, &bnapi->napi);

 	return IRQ_HANDLED;
 }
@@ -2783,7 +2801,8 @@ bnx2_interrupt(int irq, void *dev_instance)
 {
 	struct net_device *dev = dev_instance;
 	struct bnx2 *bp = netdev_priv(dev);
-	struct status_block *sblk = bp->status_blk;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;
+	struct status_block *sblk = bnapi->status_blk;

 	/* When using INTx, it is possible for the interrupt to arrive
 	 * at the CPU before the status block posted prior to the
@@ -2791,7 +2810,7 @@ bnx2_interrupt(int irq, void *dev_instance)
 	 * When using MSI, the MSI message will always complete after
 	 * the status block write.
 	 */
-	if ((sblk->status_idx == bp->last_status_idx) &&
+	if ((sblk->status_idx == bnapi->last_status_idx) &&
 	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
 	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
 		return IRQ_NONE;
@@ -2809,9 +2828,9 @@ bnx2_interrupt(int irq, void *dev_instance)
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;

-	if (netif_rx_schedule_prep(dev, &bp->napi)) {
-		bp->last_status_idx = sblk->status_idx;
-		__netif_rx_schedule(dev, &bp->napi);
+	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
+		bnapi->last_status_idx = sblk->status_idx;
+		__netif_rx_schedule(dev, &bnapi->napi);
 	}

 	return IRQ_HANDLED;
@@ -2821,12 +2840,13 @@ bnx2_interrupt(int irq, void *dev_instance)
 	 STATUS_ATTN_BITS_TIMER_ABORT)

 static inline int
-bnx2_has_work(struct bnx2 *bp)
+bnx2_has_work(struct bnx2_napi *bnapi)
 {
+	struct bnx2 *bp = bnapi->bp;
 	struct status_block *sblk = bp->status_blk;

-	if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
-	    (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons))
+	if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
+	    (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons))
 		return 1;

 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
@@ -2836,16 +2856,17 @@ bnx2_has_work(struct bnx2 *bp)
 	return 0;
 }

-static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+			  int work_done, int budget)
 {
-	struct status_block *sblk = bp->status_blk;
+	struct status_block *sblk = bnapi->status_blk;
 	u32 status_attn_bits = sblk->status_attn_bits;
 	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

 	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
 	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

-		bnx2_phy_int(bp);
+		bnx2_phy_int(bp, bnapi);

 		/* This is needed to take care of transient status
 		 * during link changes.
@@ -2855,49 +2876,50 @@ static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
 		REG_RD(bp, BNX2_HC_COMMAND);
 	}

-	if (bnx2_get_hw_tx_cons(bp) != bp->hw_tx_cons)
-		bnx2_tx_int(bp);
+	if (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons)
+		bnx2_tx_int(bp, bnapi);

-	if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
-		work_done += bnx2_rx_int(bp, budget - work_done);
+	if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
+		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

 	return work_done;
 }

 static int bnx2_poll(struct napi_struct *napi, int budget)
 {
-	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
 	int work_done = 0;
-	struct status_block *sblk = bp->status_blk;
+	struct status_block *sblk = bnapi->status_blk;

 	while (1) {
-		work_done = bnx2_poll_work(bp, work_done, budget);
+		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

 		if (unlikely(work_done >= budget))
 			break;

-		/* bp->last_status_idx is used below to tell the hw how
+		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
 		 */
-		bp->last_status_idx = sblk->status_idx;
+		bnapi->last_status_idx = sblk->status_idx;
 		rmb();
-		if (likely(!bnx2_has_work(bp))) {
+		if (likely(!bnx2_has_work(bnapi))) {
 			netif_rx_complete(bp->dev, napi);
 			if (likely(bp->flags & USING_MSI_FLAG)) {
 				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
-				       bp->last_status_idx);
+				       bnapi->last_status_idx);
 				break;
 			}
 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
-			       bp->last_status_idx);
+			       bnapi->last_status_idx);

 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
-			       bp->last_status_idx);
+			       bnapi->last_status_idx);
 			break;
 		}
 	}
@@ -4247,7 +4269,7 @@ bnx2_init_chip(struct bnx2 *bp)
 		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
 	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

-	bp->last_status_idx = 0;
+	bp->bnx2_napi.last_status_idx = 0;
 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

 	/* Set up how to generate a link change interrupt. */
@@ -4887,6 +4909,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	struct sw_bd *rx_buf;
 	struct l2_fhdr *rx_hdr;
 	int ret = -ENODEV;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;

 	if (loopback_mode == BNX2_MAC_LOOPBACK) {
 		bp->loopback = MAC_LOOPBACK;
@@ -4921,7 +4944,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	REG_RD(bp, BNX2_HC_COMMAND);

 	udelay(5);
-	rx_start_idx = bnx2_get_hw_rx_cons(bp);
+	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

 	num_pkts = 0;

@@ -4951,10 +4974,10 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
 	dev_kfree_skb(skb);

-	if (bnx2_get_hw_tx_cons(bp) != bp->tx_prod)
+	if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
 		goto loopback_test_done;

-	rx_idx = bnx2_get_hw_rx_cons(bp);
+	rx_idx = bnx2_get_hw_rx_cons(bnapi);
 	if (rx_idx != rx_start_idx + num_pkts) {
 		goto loopback_test_done;
 	}
@@ -5295,11 +5318,11 @@ bnx2_open(struct net_device *dev)
 		return rc;

 	bnx2_setup_int_mode(bp, disable_msi);
-	napi_enable(&bp->napi);
+	bnx2_napi_enable(bp);
 	rc = bnx2_request_irq(bp);

 	if (rc) {
-		napi_disable(&bp->napi);
+		bnx2_napi_disable(bp);
 		bnx2_free_mem(bp);
 		return rc;
 	}
@@ -5307,7 +5330,7 @@ bnx2_open(struct net_device *dev)
 	rc = bnx2_init_nic(bp);

 	if (rc) {
-		napi_disable(&bp->napi);
+		bnx2_napi_disable(bp);
 		bnx2_free_irq(bp);
 		bnx2_free_skbs(bp);
 		bnx2_free_mem(bp);
@@ -5342,7 +5365,7 @@ bnx2_open(struct net_device *dev)
 			rc = bnx2_request_irq(bp);

 			if (rc) {
-				napi_disable(&bp->napi);
+				bnx2_napi_disable(bp);
 				bnx2_free_skbs(bp);
 				bnx2_free_mem(bp);
 				del_timer_sync(&bp->timer);
@@ -5557,7 +5580,7 @@ bnx2_close(struct net_device *dev)
 		msleep(1);

 	bnx2_disable_int_sync(bp);
-	napi_disable(&bp->napi);
+	bnx2_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	if (bp->flags & NO_WOL_FLAG)
 		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
@@ -7083,6 +7106,15 @@ bnx2_bus_string(struct bnx2 *bp, char *str)
 }

 static int __devinit
+bnx2_init_napi(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi = &bp->bnx2_napi;
+
+	bnapi->bp = bp;
+	netif_napi_add(bp->dev, &bnapi->napi, bnx2_poll, 64);
+}
+
+static int __devinit
 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int version_printed = 0;
@@ -7123,7 +7155,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->ethtool_ops = &bnx2_ethtool_ops;

 	bp = netdev_priv(dev);
-	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
+	bnx2_init_napi(bp);

 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
 	dev->poll_controller = poll_bnx2;
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 1accf0093126..345b6db9a947 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6503,6 +6503,14 @@ struct bnx2_irq {
 	char		name[16];
 };

+struct bnx2_napi {
+	struct napi_struct	napi		____cacheline_aligned;
+	struct bnx2		*bp;
+	struct status_block	*status_blk;
+	u32			last_status_idx;
+	u32			int_num;
+};
+
 struct bnx2 {
 	/* Fields used in the tx and intr/napi performance paths are grouped */
 	/* together in the beginning of the structure. */
@@ -6511,13 +6519,8 @@ struct bnx2 {
 	struct net_device	*dev;
 	struct pci_dev		*pdev;

-	struct napi_struct	napi;
-
 	atomic_t		intr_sem;

-	struct status_block	*status_blk;
-	u32			last_status_idx;
-
 	u32			flags;
 #define PCIX_FLAG		0x00000001
 #define PCI_32BIT_FLAG		0x00000002
@@ -6539,6 +6542,8 @@ struct bnx2 {
 	u16			tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
 	u16			hw_tx_cons;

+	struct bnx2_napi	bnx2_napi;
+
 #ifdef BCM_VLAN
 	struct vlan_group	*vlgrp;
 #endif
@@ -6672,6 +6677,7 @@ struct bnx2 {

 	u32			stats_ticks;

+	struct status_block	*status_blk;
 	dma_addr_t		status_blk_mapping;

 	struct statistics_block	*stats_blk;