Diffstat (limited to 'drivers/net/skge.c')
 drivers/net/skge.c | 346 ++++++++++++++++++++++++++++----------------------
 1 file changed, 174 insertions(+), 172 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 25e028b7ce48..35dbf05c7f06 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,7 +44,7 @@
 #include "skge.h"
 
 #define DRV_NAME "skge"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.5"
 #define PFX DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE 128
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
-static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -358,7 +357,7 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev)
 	skge->net_stats.rx_bytes = data[1];
 	skge->net_stats.tx_packets = data[2] + data[4] + data[6];
 	skge->net_stats.rx_packets = data[3] + data[5] + data[7];
-	skge->net_stats.multicast = data[5] + data[7];
+	skge->net_stats.multicast = data[3] + data[5];
 	skge->net_stats.collisions = data[10];
 	skge->net_stats.tx_aborted_errors = data[12];
 
@@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = {
  * Allocate ring elements and chain them together
  * One-to-one association of board descriptors with ring elements
  */
-static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
+static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 {
 	struct skge_tx_desc *d;
 	struct skge_element *e;
 	int i;
 
-	ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL);
+	ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL);
 	if (!ring->start)
 		return -ENOMEM;
 
 	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
 		e->desc = d;
-		e->skb = NULL;
 		if (i == ring->count - 1) {
 			e->next = ring->start;
 			d->next_offset = base;
@@ -783,7 +781,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
  * Note: DMA address is not changed by chip.
  * MTU not changed while receiver active.
  */
-static void skge_rx_reuse(struct skge_element *e, unsigned int size)
+static inline void skge_rx_reuse(struct skge_element *e, unsigned int size)
 {
 	struct skge_rx_desc *rd = e->desc;
 
@@ -831,7 +829,7 @@ static int skge_rx_fill(struct skge_port *skge)
 	do {
 		struct sk_buff *skb;
 
-		skb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
+		skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
 		if (!skb)
 			return -ENOMEM;
 
@@ -849,8 +847,7 @@ static void skge_link_up(struct skge_port *skge)
 		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
 
 	netif_carrier_on(skge->netdev);
-	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
-		netif_wake_queue(skge->netdev);
+	netif_wake_queue(skge->netdev);
 
 	if (netif_msg_link(skge))
 		printk(KERN_INFO PFX
@@ -2157,7 +2154,7 @@ static int skge_up(struct net_device *dev)
 	printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 
 	if (dev->mtu > RX_BUF_SIZE)
-		skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
+		skge->rx_buf_size = dev->mtu + ETH_HLEN;
 	else
 		skge->rx_buf_size = RX_BUF_SIZE;
 
@@ -2169,27 +2166,29 @@ static int skge_up(struct net_device *dev)
 	if (!skge->mem)
 		return -ENOMEM;
 
+	BUG_ON(skge->dma & 7);
+
+	if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+		printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n");
+		err = -EINVAL;
+		goto free_pci_mem;
+	}
+
 	memset(skge->mem, 0, skge->mem_size);
 
-	if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
+	err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma);
+	if (err)
 		goto free_pci_mem;
 
 	err = skge_rx_fill(skge);
 	if (err)
 		goto free_rx_ring;
 
-	if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
-				   skge->dma + rx_size)))
+	err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
+			      skge->dma + rx_size);
+	if (err)
 		goto free_rx_ring;
 
-	skge->tx_avail = skge->tx_ring.count - 1;
-
-	/* Enable IRQ from port */
-	spin_lock_irq(&hw->hw_lock);
-	hw->intr_mask |= portirqmask[port];
-	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	spin_unlock_irq(&hw->hw_lock);
-
 	/* Initialize MAC */
 	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
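
The BUG_ON and 4 GB check added above guard the descriptor memory: skge_ring_alloc() now takes a u32 base, and the chip chains descriptors through 32-bit next_offset fields, so the pci_alloc_consistent region must be 8-byte aligned and must not straddle a 4 GB boundary. A minimal standalone C sketch of the same boundary test (illustrative only, not driver code):

	#include <assert.h>
	#include <stdint.h>

	/* A DMA region [dma, dma + size) is only usable if the upper 32 bits
	 * of its first and last addresses match; otherwise a 32-bit
	 * next_offset would wrap into the wrong 4 GB window. */
	static int crosses_4g(uint64_t dma, uint64_t size)
	{
		return (dma >> 32) != ((dma + size) >> 32);
	}

	int main(void)
	{
		assert(crosses_4g(0xfff00000ULL, 0x200000));   /* wraps past 4 GB */
		assert(!crosses_4g(0x100000000ULL, 0x200000)); /* stays in one window */
		return 0;
	}
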
@@ -2246,11 +2245,6 @@ static int skge_down(struct net_device *dev)
 	else
 		yukon_stop(skge);
 
-	spin_lock_irq(&hw->hw_lock);
-	hw->intr_mask &= ~portirqmask[skge->port];
-	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	spin_unlock_irq(&hw->hw_lock);
-
 	/* Stop transmitter */
 	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
 	skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
@@ -2297,6 +2291,12 @@ static int skge_down(struct net_device *dev)
 	return 0;
 }
 
+static inline int skge_avail(const struct skge_ring *ring)
+{
+	return ((ring->to_clean > ring->to_use) ? 0 : ring->count)
+		+ (ring->to_clean - ring->to_use) - 1;
+}
+
 static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
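
skge_avail() above replaces the driver's old tx_avail counter: rather than maintaining a separate count under the lock, the number of free descriptors is derived from the to_clean (consumer) and to_use (producer) pointers, with one slot held back so a full ring is distinguishable from an empty one. An index-based sketch of the same arithmetic (standalone C; indices stand in for the driver's element pointers):

	#include <assert.h>

	/* Free slots in a circular ring tracked by producer (to_use) and
	 * consumer (to_clean) indices; one slot is always kept unused so a
	 * full ring is distinguishable from an empty one. */
	static int ring_avail(int to_clean, int to_use, int count)
	{
		return ((to_clean > to_use) ? 0 : count)
			+ (to_clean - to_use) - 1;
	}

	int main(void)
	{
		assert(ring_avail(0, 0, 256) == 255);   /* empty: count - 1 free */
		assert(ring_avail(10, 9, 256) == 0);    /* producer just behind consumer: full */
		assert(ring_avail(100, 40, 256) == 59); /* 60 slots to the boundary, minus 1 */
		return 0;
	}
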
@@ -2307,27 +2307,24 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	int i;
 	u32 control, len;
 	u64 map;
-	unsigned long flags;
 
 	skb = skb_padto(skb, ETH_ZLEN);
 	if (!skb)
 		return NETDEV_TX_OK;
 
-	local_irq_save(flags);
 	if (!spin_trylock(&skge->tx_lock)) {
 		/* Collision - tell upper layer to requeue */
-		local_irq_restore(flags);
 		return NETDEV_TX_LOCKED;
 	}
 
-	if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
+	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 
 			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
 			       dev->name);
 		}
-		spin_unlock_irqrestore(&skge->tx_lock, flags);
+		spin_unlock(&skge->tx_lock);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -2396,49 +2393,51 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			dev->name, e - ring->start, skb->len);
 
 	ring->to_use = e->next;
-	skge->tx_avail -= skb_shinfo(skb)->nr_frags + 1;
-	if (skge->tx_avail <= MAX_SKB_FRAGS + 1) {
+	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
 	}
 
+	mmiowb();
+	spin_unlock(&skge->tx_lock);
+
 	dev->trans_start = jiffies;
-	spin_unlock_irqrestore(&skge->tx_lock, flags);
 
 	return NETDEV_TX_OK;
 }
 
-static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
+static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
 {
-	/* This ring element can be skb or fragment */
-	if (e->skb) {
-		pci_unmap_single(hw->pdev,
-			       pci_unmap_addr(e, mapaddr),
-			       pci_unmap_len(e, maplen),
-			       PCI_DMA_TODEVICE);
-		dev_kfree_skb_any(e->skb);
-		e->skb = NULL;
-	} else {
-		pci_unmap_page(hw->pdev,
-			       pci_unmap_addr(e, mapaddr),
-			       pci_unmap_len(e, maplen),
-			       PCI_DMA_TODEVICE);
+	struct pci_dev *pdev = skge->hw->pdev;
+	struct skge_element *e;
+
+	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
+		struct sk_buff *skb = e->skb;
+		int i;
+
+		e->skb = NULL;
+		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
+				 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			e = e->next;
+			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+		}
+
+		dev_kfree_skb(skb);
 	}
+	skge->tx_ring.to_clean = e;
 }
 
 static void skge_tx_clean(struct skge_port *skge)
 {
-	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e;
-	unsigned long flags;
 
-	spin_lock_irqsave(&skge->tx_lock, flags);
-	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
-		++skge->tx_avail;
-		skge_tx_free(skge->hw, e);
-	}
-	ring->to_clean = e;
-	spin_unlock_irqrestore(&skge->tx_lock, flags);
+	spin_lock_bh(&skge->tx_lock);
+	skge_tx_complete(skge, skge->tx_ring.to_use);
+	netif_wake_queue(skge->netdev);
+	spin_unlock_bh(&skge->tx_lock);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
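
skge_tx_complete() above frees each skb only after its whole descriptor chain is reclaimed: the head slot is unmapped with skb_headlen(), e then advances across one slot per page fragment, and dev_kfree_skb() runs once per packet rather than once per slot as the old skge_tx_free() did. A simplified model of that walk (standalone C; the slot struct and names are hypothetical):

	/* Each packet occupies a head slot plus one slot per fragment.
	 * Reclaim every slot of a packet, then free the packet once. */
	struct slot {
		int nfrags;	/* fragment count, recorded in the head slot */
	};

	static int reclaim(const struct slot *ring, int count, int to_clean, int last)
	{
		int e = to_clean;

		while (e != last) {
			int nfrags = ring[e].nfrags;
			int i;

			/* unmap the head slot here (skb_headlen bytes) */
			for (i = 0; i < nfrags; i++) {
				e = (e + 1) % count;	/* fragment slots follow the head */
				/* unmap the fragment slot here (frag size bytes) */
			}
			/* free the packet exactly once, after all its slots */
			e = (e + 1) % count;
		}
		return e;	/* caller stores this as the new to_clean */
	}
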
@@ -2597,7 +2596,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		skb = dev_alloc_skb(len + 2);
+		skb = alloc_skb(len + 2, GFP_ATOMIC);
 		if (!skb)
 			goto resubmit;
 
@@ -2612,10 +2611,11 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
-		nskb = dev_alloc_skb(skge->rx_buf_size + NET_IP_ALIGN);
+		nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
 		if (!nskb)
 			goto resubmit;
 
+		skb_reserve(nskb, NET_IP_ALIGN);
 		pci_unmap_single(skge->hw->pdev,
 				 pci_unmap_addr(e, mapaddr),
 				 pci_unmap_len(e, maplen),
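
The receive path above follows the usual copybreak pattern: frames shorter than RX_COPY_THRESHOLD are copied into a small freshly allocated skb so the mapped DMA buffer can be resubmitted untouched, while larger frames hand the full buffer up the stack and a replacement is mapped in (the added skb_reserve() keeps the replacement's IP header aligned). A userspace sketch of the decision (standalone C; malloc stands in for skb allocation):

	#include <stdlib.h>
	#include <string.h>

	#define RX_COPY_THRESHOLD 128	/* illustrative value */

	/* Returns the buffer to pass up the stack, or NULL to drop the
	 * frame; *slot always holds a buffer ready for the next receive. */
	static void *rx_get(void **slot, size_t buf_size, size_t len)
	{
		if (len < RX_COPY_THRESHOLD) {
			void *copy = malloc(len);	/* small: copy out */
			if (copy)
				memcpy(copy, *slot, len);
			return copy;			/* *slot is reused as-is */
		} else {
			void *fresh = malloc(buf_size);	/* large: swap buffers */
			void *full = *slot;
			if (!fresh)
				return NULL;		/* keep old buffer, drop frame */
			*slot = fresh;			/* remap fresh buffer into the ring */
			return full;			/* zero-copy hand-off */
		}
	}
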
@@ -2663,6 +2663,36 @@ resubmit:
 	return NULL;
 }
 
+static void skge_tx_done(struct skge_port *skge)
+{
+	struct skge_ring *ring = &skge->tx_ring;
+	struct skge_element *e, *last;
+
+	spin_lock(&skge->tx_lock);
+	last = ring->to_clean;
+	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+		struct skge_tx_desc *td = e->desc;
+
+		if (td->control & BMU_OWN)
+			break;
+
+		if (td->control & BMU_EOF) {
+			last = e->next;
+			if (unlikely(netif_msg_tx_done(skge)))
+				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+				       skge->netdev->name, e - ring->start);
+		}
+	}
+
+	skge_tx_complete(skge, last);
+
+	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
+	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
+		netif_wake_queue(skge->netdev);
+
+	spin_unlock(&skge->tx_lock);
+}
 
 static int skge_poll(struct net_device *dev, int *budget)
 {
@@ -2670,8 +2700,10 @@ static int skge_poll(struct net_device *dev, int *budget)
 	struct skge_hw *hw = skge->hw;
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	unsigned int to_do = min(dev->quota, *budget);
-	unsigned int work_done = 0;
+	int to_do = min(dev->quota, *budget);
+	int work_done = 0;
+
+	skge_tx_done(skge);
 
 	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
@@ -2683,15 +2715,14 @@ static int skge_poll(struct net_device *dev, int *budget)
 		if (control & BMU_OWN)
 			break;
 
 		skb = skge_rx_get(skge, e, control, rd->status,
 				  le16_to_cpu(rd->csum2));
 		if (likely(skb)) {
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
 
 			++work_done;
-		} else
-			skge_rx_reuse(e, skge->rx_buf_size);
+		}
 	}
 	ring->to_clean = e;
 
@@ -2705,49 +2736,15 @@ static int skge_poll(struct net_device *dev, int *budget)
 	if (work_done >= to_do)
 		return 1; /* not done */
 
-	spin_lock_irq(&hw->hw_lock);
-	__netif_rx_complete(dev);
-	hw->intr_mask |= portirqmask[skge->port];
+	netif_rx_complete(dev);
+	mmiowb();
+
+	hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
 }
 
-static inline void skge_tx_intr(struct net_device *dev)
-{
-	struct skge_port *skge = netdev_priv(dev);
-	struct skge_hw *hw = skge->hw;
-	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e;
-
-	spin_lock(&skge->tx_lock);
-	for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
-		struct skge_tx_desc *td = e->desc;
-		u32 control;
-
-		rmb();
-		control = td->control;
-		if (control & BMU_OWN)
-			break;
-
-		if (unlikely(netif_msg_tx_done(skge)))
-			printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n",
-			       dev->name, e - ring->start, td->status);
-
-		skge_tx_free(hw, e);
-		e->skb = NULL;
-		++skge->tx_avail;
-	}
-	ring->to_clean = e;
-	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
-	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
-		netif_wake_queue(dev);
-
-	spin_unlock(&skge->tx_lock);
-}
-
 /* Parity errors seem to happen when Genesis is connected to a switch
  * with no other ports present. Heartbeat error??
  */
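
Deleting skge_tx_intr() completes the interrupt rework: the ISR now masks a port's rx and tx sources together and schedules NAPI, and skge_poll() reclaims tx, processes rx, and re-enables exactly those sources on completion. A toy model of the mask bookkeeping (standalone C; the bit values are illustrative, not the real register layout):

	#include <assert.h>

	#define IS_R1_F		(1u << 0)	/* port 0 rx complete */
	#define IS_XA1_F	(1u << 1)	/* port 0 tx done */
	#define IS_R2_F		(1u << 2)	/* port 1 rx complete */
	#define IS_XA2_F	(1u << 3)	/* port 1 tx done */

	static unsigned int intr_mask = IS_R1_F | IS_XA1_F | IS_R2_F | IS_XA2_F;

	/* ISR: disable the port's sources so no further interrupts arrive
	 * while the scheduled poll runs. */
	static void irq_for_port(int port)
	{
		intr_mask &= ~(port == 0 ? (IS_R1_F | IS_XA1_F)
					 : (IS_R2_F | IS_XA2_F));
	}

	/* Poll completion: re-arm the same sources in one place. */
	static void poll_complete(int port)
	{
		intr_mask |= port == 0 ? (IS_R1_F | IS_XA1_F)
				       : (IS_R2_F | IS_XA2_F);
	}

	int main(void)
	{
		irq_for_port(0);
		assert(intr_mask == (IS_R2_F | IS_XA2_F));
		poll_complete(0);
		assert(intr_mask == (IS_R1_F | IS_XA1_F | IS_R2_F | IS_XA2_F));
		return 0;
	}
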
@@ -2770,17 +2767,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
 		     ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
 }
 
-static void skge_pci_clear(struct skge_hw *hw)
-{
-	u16 status;
-
-	pci_read_config_word(hw->pdev, PCI_STATUS, &status);
-	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-	pci_write_config_word(hw->pdev, PCI_STATUS,
-			      status | PCI_STATUS_ERROR_BITS);
-	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-}
-
 static void skge_mac_intr(struct skge_hw *hw, int port)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2822,23 +2808,39 @@ static void skge_error_irq(struct skge_hw *hw)
 	if (hwstatus & IS_M2_PAR_ERR)
 		skge_mac_parity(hw, 1);
 
-	if (hwstatus & IS_R1_PAR_ERR)
+	if (hwstatus & IS_R1_PAR_ERR) {
+		printk(KERN_ERR PFX "%s: receive queue parity error\n",
+		       hw->dev[0]->name);
 		skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+	}
 
-	if (hwstatus & IS_R2_PAR_ERR)
+	if (hwstatus & IS_R2_PAR_ERR) {
+		printk(KERN_ERR PFX "%s: receive queue parity error\n",
+		       hw->dev[1]->name);
 		skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+	}
 
 	if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
-		printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n",
-		       hwstatus);
+		u16 pci_status, pci_cmd;
 
-		skge_pci_clear(hw);
+		pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd);
+		pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+
+		printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n",
+		       pci_name(hw->pdev), pci_cmd, pci_status);
+
+		/* Write the error bits back to clear them. */
+		pci_status &= PCI_STATUS_ERROR_BITS;
+		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		pci_write_config_word(hw->pdev, PCI_COMMAND,
+				      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+		pci_write_config_word(hw->pdev, PCI_STATUS, pci_status);
+		skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
 		/* if error still set then just ignore it */
 		hwstatus = skge_read32(hw, B0_HWE_ISRC);
 		if (hwstatus & IS_IRQ_STAT) {
-			pr_debug("IRQ status %x: still set ignoring hardware errors\n",
-				 hwstatus);
+			printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n");
 			hw->intr_mask &= ~IS_HW_ERR;
 		}
 	}
@@ -2855,12 +2857,11 @@ static void skge_extirq(unsigned long data)
 	int port;
 
 	spin_lock(&hw->phy_lock);
-	for (port = 0; port < 2; port++) {
+	for (port = 0; port < hw->ports; port++) {
 		struct net_device *dev = hw->dev[port];
+		struct skge_port *skge = netdev_priv(dev);
 
-		if (dev && netif_running(dev)) {
-			struct skge_port *skge = netdev_priv(dev);
-
+		if (netif_running(dev)) {
 			if (hw->chip_id != CHIP_ID_GENESIS)
 				yukon_phy_intr(skge);
 			else
@@ -2869,38 +2870,39 @@ static void skge_extirq(unsigned long data)
 	}
 	spin_unlock(&hw->phy_lock);
 
-	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	spin_unlock_irq(&hw->hw_lock);
 }
 
 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct skge_hw *hw = dev_id;
-	u32 status = skge_read32(hw, B0_SP_ISRC);
+	u32 status;
 
-	if (status == 0 || status == ~0) /* hotplug or shared irq */
+	/* Reading this register masks IRQ */
+	status = skge_read32(hw, B0_SP_ISRC);
+	if (status == 0)
 		return IRQ_NONE;
 
-	spin_lock(&hw->hw_lock);
-	if (status & IS_R1_F) {
+	if (status & IS_EXT_REG) {
+		hw->intr_mask &= ~IS_EXT_REG;
+		tasklet_schedule(&hw->ext_tasklet);
+	}
+
+	if (status & (IS_R1_F|IS_XA1_F)) {
 		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~IS_R1_F;
+		hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
 		netif_rx_schedule(hw->dev[0]);
 	}
 
-	if (status & IS_R2_F) {
+	if (status & (IS_R2_F|IS_XA2_F)) {
 		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~IS_R2_F;
+		hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
 		netif_rx_schedule(hw->dev[1]);
 	}
 
-	if (status & IS_XA1_F)
-		skge_tx_intr(hw->dev[0]);
-
-	if (status & IS_XA2_F)
-		skge_tx_intr(hw->dev[1]);
+	if (likely((status & hw->intr_mask) == 0))
+		return IRQ_HANDLED;
 
 	if (status & IS_PA_TO_RX1) {
 		struct skge_port *skge = netdev_priv(hw->dev[0]);
@@ -2929,13 +2931,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	if (status & IS_HW_ERR)
 		skge_error_irq(hw);
 
-	if (status & IS_EXT_REG) {
-		hw->intr_mask &= ~IS_EXT_REG;
-		tasklet_schedule(&hw->ext_tasklet);
-	}
-
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	spin_unlock(&hw->hw_lock);
 
 	return IRQ_HANDLED;
 }
@@ -3010,7 +3006,7 @@ static const char *skge_board_name(const struct skge_hw *hw)
 static int skge_reset(struct skge_hw *hw)
 {
 	u32 reg;
-	u16 ctst;
+	u16 ctst, pci_status;
 	u8 t8, mac_cfg, pmd_type, phy_type;
 	int i;
 
@@ -3021,8 +3017,13 @@ static int skge_reset(struct skge_hw *hw)
 	skge_write8(hw, B0_CTST, CS_RST_CLR);
 
 	/* clear PCI errors, if any */
-	skge_pci_clear(hw);
+	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	skge_write8(hw, B2_TST_CTRL2, 0);
 
+	pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status);
+	pci_write_config_word(hw->pdev, PCI_STATUS,
+			      pci_status | PCI_STATUS_ERROR_BITS);
+	skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 	skge_write8(hw, B0_CTST, CS_MRST_CLR);
 
 	/* restore CLK_RUN bits (for Yukon-Lite) */
@@ -3081,7 +3082,10 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
-	hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
+	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
+	if (hw->ports > 1)
+		hw->intr_mask |= IS_PORT_2;
+
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_init(hw);
 	else {
@@ -3251,13 +3255,15 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	struct skge_hw *hw;
 	int err, using_dac = 0;
 
-	if ((err = pci_enable_device(pdev))) {
+	err = pci_enable_device(pdev);
+	if (err) {
 		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
 		       pci_name(pdev));
 		goto err_out;
 	}
 
-	if ((err = pci_request_regions(pdev, DRV_NAME))) {
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
 		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
 		       pci_name(pdev));
 		goto err_out_disable_pdev;
@@ -3265,22 +3271,18 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
-	if (sizeof(dma_addr_t) > sizeof(u32) &&
-	    !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		using_dac = 1;
 		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-		if (err < 0) {
-			printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
-			       "for consistent allocations\n", pci_name(pdev));
-			goto err_out_free_regions;
-		}
-	} else {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-		if (err) {
-			printk(KERN_ERR PFX "%s no usable DMA configuration\n",
-			       pci_name(pdev));
-			goto err_out_free_regions;
-		}
+	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
+		using_dac = 0;
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	}
+
+	if (err) {
+		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+		       pci_name(pdev));
+		goto err_out_free_regions;
 	}
 
 #ifdef __BIG_ENDIAN
@@ -3304,7 +3306,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	hw->pdev = pdev;
 	spin_lock_init(&hw->phy_lock);
-	spin_lock_init(&hw->hw_lock);
 	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
@@ -3314,7 +3315,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_hw;
 	}
 
-	if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) {
+	err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw);
+	if (err) {
 		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
 		       pci_name(pdev), pdev->irq);
 		goto err_out_iounmap;
@@ -3332,7 +3334,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
 		goto err_out_led_off;
 
-	if ((err = register_netdev(dev))) {
+	err = register_netdev(dev);
+	if (err) {
 		printk(KERN_ERR PFX "%s: cannot register net device\n",
 		       pci_name(pdev));
 		goto err_out_free_netdev;
@@ -3387,7 +3390,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 
 	skge_write32(hw, B0_IMSK, 0);
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
-	skge_pci_clear(hw);
 	skge_write8(hw, B0_CTST, CS_RST_SET);
 
 	tasklet_kill(&hw->ext_tasklet);