Diffstat (limited to 'drivers/net/skge.c')
 drivers/net/skge.c | 99 ++++++++++++++++++++++--------------------------
 1 file changed, 44 insertions(+), 55 deletions(-)
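
What the patch does, in substance: the hard-IRQ transmit-completion path is removed. skge_tx_intr() is deleted, an equivalent skge_tx_done() is added, and skge_poll() now calls it, so both transmit reclaim and receive processing run in NAPI softirq context. The interrupt handler folds the transmit flags (IS_XA1_F/IS_XA2_F) into the receive-scheduling paths, and with no hard-IRQ user of tx_lock left, the IRQ-disabling lock variants and dev_kfree_skb_any() can be relaxed throughout.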
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 25e028b7ce48..1a30d5401c48 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2307,16 +2307,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
         int i;
         u32 control, len;
         u64 map;
-        unsigned long flags;
 
         skb = skb_padto(skb, ETH_ZLEN);
         if (!skb)
                 return NETDEV_TX_OK;
 
-        local_irq_save(flags);
         if (!spin_trylock(&skge->tx_lock)) {
                 /* Collision - tell upper layer to requeue */
-                local_irq_restore(flags);
                 return NETDEV_TX_LOCKED;
         }
 
@@ -2327,7 +2324,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                         printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
                                dev->name);
                 }
-                spin_unlock_irqrestore(&skge->tx_lock, flags);
+                spin_unlock(&skge->tx_lock);
                 return NETDEV_TX_BUSY;
         }
 
@@ -2403,7 +2400,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
         }
 
         dev->trans_start = jiffies;
-        spin_unlock_irqrestore(&skge->tx_lock, flags);
+        spin_unlock(&skge->tx_lock);
 
         return NETDEV_TX_OK;
 }
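
For context on the three hunks above: skge uses the 2.6-era lockless-transmit (LLTX) convention, where the core calls the transmit routine without holding a lock and the driver serializes with its own tx_lock, returning NETDEV_TX_LOCKED on a collision. The local_irq_save()/local_irq_restore() pair was only needed while transmit reclaim also ran from hard-IRQ context; once reclaim moves into the NAPI poll (the skge_tx_done() added below), a bare spin_trylock/spin_unlock suffices. A minimal sketch of the convention, with hypothetical names rather than the skge code itself:

    /* Sketch of an LLTX-style transmit handler (hypothetical driver).
     * Assumes dev->features has NETIF_F_LLTX set, as skge's does. */
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct example_priv {                /* hypothetical private state */
            spinlock_t tx_lock;          /* initialized at probe time */
    };

    static int example_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct example_priv *p = netdev_priv(dev);

            if (!spin_trylock(&p->tx_lock))
                    return NETDEV_TX_LOCKED;  /* core requeues the skb */

            /* ... map the skb and post it to the hardware ring ... */

            spin_unlock(&p->tx_lock);
            return NETDEV_TX_OK;
    }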
@@ -2416,7 +2413,7 @@ static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
                          pci_unmap_addr(e, mapaddr),
                          pci_unmap_len(e, maplen),
                          PCI_DMA_TODEVICE);
-                dev_kfree_skb_any(e->skb);
+                dev_kfree_skb(e->skb);
                 e->skb = NULL;
         } else {
                 pci_unmap_page(hw->pdev,
@@ -2430,15 +2427,14 @@ static void skge_tx_clean(struct skge_port *skge)
 {
         struct skge_ring *ring = &skge->tx_ring;
         struct skge_element *e;
-        unsigned long flags;
 
-        spin_lock_irqsave(&skge->tx_lock, flags);
+        spin_lock_bh(&skge->tx_lock);
         for (e = ring->to_clean; e != ring->to_use; e = e->next) {
                 ++skge->tx_avail;
                 skge_tx_free(skge->hw, e);
         }
         ring->to_clean = e;
-        spin_unlock_irqrestore(&skge->tx_lock, flags);
+        spin_unlock_bh(&skge->tx_lock);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
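
The two small changes above follow from the same context shift. dev_kfree_skb() must not be called from hard-IRQ context (dev_kfree_skb_any() was the IRQ-safe form), which is now fine because skge_tx_free() only runs from softirq or process context. And skge_tx_clean(), which runs in process context, switches to the _bh lock variants: it must block the softirq-time reclaim, but no longer hard interrupts. A sketch of the rule with a hypothetical lock:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_tx_lock);  /* hypothetical */

    /* Softirq context (e.g. NAPI poll): a plain spin_lock is enough,
     * since softirqs never preempt one another on the same CPU. */
    static void reclaim_from_poll(struct sk_buff *skb)
    {
            spin_lock(&example_tx_lock);
            dev_kfree_skb(skb);               /* legal outside hard IRQ */
            spin_unlock(&example_tx_lock);
    }

    /* Process context (e.g. device close): disable softirqs so the
     * poll-time reclaim cannot run on this CPU while we hold the lock. */
    static void reclaim_from_close(struct sk_buff *skb)
    {
            spin_lock_bh(&example_tx_lock);
            dev_kfree_skb(skb);
            spin_unlock_bh(&example_tx_lock);
    }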
@@ -2663,6 +2659,37 @@ resubmit:
         return NULL;
 }
 
+static void skge_tx_done(struct skge_port *skge)
+{
+        struct skge_ring *ring = &skge->tx_ring;
+        struct skge_element *e;
+
+        spin_lock(&skge->tx_lock);
+        for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
+                struct skge_tx_desc *td = e->desc;
+                u32 control;
+
+                rmb();
+                control = td->control;
+                if (control & BMU_OWN)
+                        break;
+
+                if (unlikely(netif_msg_tx_done(skge)))
+                        printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
+                               skge->netdev->name, e - ring->start, td->status);
+
+                skge_tx_free(skge->hw, e);
+                e->skb = NULL;
+                ++skge->tx_avail;
+        }
+        ring->to_clean = e;
+        skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+        if (skge->tx_avail > MAX_SKB_FRAGS + 1)
+                netif_wake_queue(skge->netdev);
+
+        spin_unlock(&skge->tx_lock);
+}
 
 static int skge_poll(struct net_device *dev, int *budget)
 {
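
The reclaim loop in the new skge_tx_done() relies on a device-ownership bit in each DMA descriptor: the walk stops at the first descriptor the hardware still owns (BMU_OWN), and the rmb() keeps each iteration's descriptor reads ordered so the CPU does not act on a stale view of a slot the device just released. The same pattern, stripped down to hypothetical descriptor fields:

    #include <linux/types.h>
    #include <asm/system.h>              /* rmb() lived here in 2.6.x */

    #define EX_RING_SIZE    256          /* hypothetical ring size */
    #define EX_OWN          0x80000000u  /* hypothetical "device owns it" bit */

    struct ex_desc {                     /* hypothetical DMA descriptor */
            u32 control;
            u32 status;
    };

    /* Reclaim completed slots between head and tail; returns the count. */
    static int ex_reclaim(struct ex_desc *ring, int head, int tail)
    {
            int n = 0;

            while (head != tail) {
                    rmb();               /* re-read descriptor memory freshly */
                    if (ring[head].control & EX_OWN)
                            break;       /* device not done with this slot */
                    /* ... unmap the buffer, free the skb for slot 'head' ... */
                    head = (head + 1) % EX_RING_SIZE;
                    n++;
            }
            return n;
    }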
@@ -2670,8 +2697,10 @@ static int skge_poll(struct net_device *dev, int *budget)
         struct skge_hw *hw = skge->hw;
         struct skge_ring *ring = &skge->rx_ring;
         struct skge_element *e;
-        unsigned int to_do = min(dev->quota, *budget);
-        unsigned int work_done = 0;
+        int to_do = min(dev->quota, *budget);
+        int work_done = 0;
+
+        skge_tx_done(skge);
 
         for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
                 struct skge_rx_desc *rd = e->desc;
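
With skge_tx_done() called at the top of skge_poll(), one softirq pass now handles both directions: reclaim transmit slots first (cheap, and it frees queue space), then receive packets up to the budget. The general shape of a poll routine under the old dev->poll(dev, budget) NAPI API, sketched with assumed helper names:

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    static void example_tx_done(struct net_device *dev);       /* assumed */
    static int example_rx(struct net_device *dev, int to_do);  /* assumed */
    static void example_irq_enable(struct net_device *dev);    /* assumed */

    /* Old-style NAPI: return 0 when done, 1 to stay on the poll list. */
    static int example_poll(struct net_device *dev, int *budget)
    {
            int to_do = min(dev->quota, *budget);
            int done;

            example_tx_done(dev);            /* tx reclaim is not budgeted */
            done = example_rx(dev, to_do);   /* rx work is */

            *budget -= done;
            dev->quota -= done;

            if (done < to_do) {
                    netif_rx_complete(dev);  /* leave poll mode */
                    example_irq_enable(dev); /* re-arm the device interrupt */
                    return 0;
            }
            return 1;
    }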
@@ -2714,40 +2743,6 @@ static int skge_poll(struct net_device *dev, int *budget)
         return 0;
 }
 
-static inline void skge_tx_intr(struct net_device *dev)
-{
-        struct skge_port *skge = netdev_priv(dev);
-        struct skge_hw *hw = skge->hw;
-        struct skge_ring *ring = &skge->tx_ring;
-        struct skge_element *e;
-
-        spin_lock(&skge->tx_lock);
-        for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
-                struct skge_tx_desc *td = e->desc;
-                u32 control;
-
-                rmb();
-                control = td->control;
-                if (control & BMU_OWN)
-                        break;
-
-                if (unlikely(netif_msg_tx_done(skge)))
-                        printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
-                               dev->name, e - ring->start, td->status);
-
-                skge_tx_free(hw, e);
-                e->skb = NULL;
-                ++skge->tx_avail;
-        }
-        ring->to_clean = e;
-        skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
-        if (skge->tx_avail > MAX_SKB_FRAGS + 1)
-                netif_wake_queue(dev);
-
-        spin_unlock(&skge->tx_lock);
-}
-
 /* Parity errors seem to happen when Genesis is connected to a switch
  * with no other ports present. Heartbeat error??
  */
@@ -2884,24 +2879,18 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
                 return IRQ_NONE;
 
         spin_lock(&hw->hw_lock);
-        if (status & IS_R1_F) {
+        if (status & (IS_R1_F|IS_XA1_F)) {
                 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-                hw->intr_mask &= ~IS_R1_F;
+                hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
                 netif_rx_schedule(hw->dev[0]);
         }
 
-        if (status & IS_R2_F) {
+        if (status & (IS_R2_F|IS_XA2_F)) {
                 skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-                hw->intr_mask &= ~IS_R2_F;
+                hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
                 netif_rx_schedule(hw->dev[1]);
         }
 
-        if (status & IS_XA1_F)
-                skge_tx_intr(hw->dev[0]);
-
-        if (status & IS_XA2_F)
-                skge_tx_intr(hw->dev[1]);
-
         if (status & IS_PA_TO_RX1) {
                 struct skge_port *skge = netdev_priv(hw->dev[0]);
                 ++skge->net_stats.rx_over_errors;
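
The interrupt-handler hunk completes the move: the transmit-complete flags no longer get a hard-IRQ branch of their own; they are masked alongside the receive flags and simply schedule the port's NAPI poll, which now cleans both rings. The resulting dispatch shape, with hypothetical registers and flag names:

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>
    #include <asm/io.h>

    #define EX_ISR          0x00             /* hypothetical status register */
    #define EX_IMR          0x04             /* hypothetical mask register */
    #define EX_RX0_DONE     0x01             /* hypothetical event bits */
    #define EX_TX0_DONE     0x02
    #define EX_PORT0_EVENTS (EX_RX0_DONE | EX_TX0_DONE)

    struct example_hw {                      /* hypothetical shared state */
            void __iomem *regs;
            u32 intr_mask;
            struct net_device *dev[2];
    };

    static irqreturn_t example_intr(int irq, void *dev_id, struct pt_regs *regs)
    {
            struct example_hw *hw = dev_id;
            u32 status = readl(hw->regs + EX_ISR);

            if (!status)
                    return IRQ_NONE;

            if (status & EX_PORT0_EVENTS) {
                    hw->intr_mask &= ~EX_PORT0_EVENTS;  /* quiet until poll ends */
                    netif_rx_schedule(hw->dev[0]);      /* defer work to softirq */
            }

            writel(hw->intr_mask, hw->regs + EX_IMR);   /* apply the new mask */
            return IRQ_HANDLED;
    }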