about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/net/skge.c
diff options
context:
space:
mode:
authorStephen Hemminger <shemminger@osdl.org>2006-03-21 13:57:00 -0500
committerJeff Garzik <jeff@garzik.org>2006-03-21 16:00:50 -0500
commitcfc3ed796eda2c41fb20986d831ed56c0474279d (patch)
tree5fbd513356c76c3fafa7ffda0c3dd6eda0d5b690 /drivers/net/skge.c
parent00a6cae288138ce0444ab6f48a81da12afe557aa (diff)
[PATCH] skge: use auto masking of irqs
Improve performance of the skge driver by not touching the IRQ mask register as much. Since the interrupt source auto-masks, the driver can just leave it disabled until the end of the soft IRQ.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--drivers/net/skge.c54
1 file changed, 21 insertions, 33 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 1a30d5401c48..4fc9333f0740 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
104static const int rxqaddr[] = { Q_R1, Q_R2 }; 104static const int rxqaddr[] = { Q_R1, Q_R2 };
105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
107static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
108 107
109static int skge_get_regs_len(struct net_device *dev) 108static int skge_get_regs_len(struct net_device *dev)
110{ 109{
@@ -2184,12 +2183,6 @@ static int skge_up(struct net_device *dev)
2184 2183
2185 skge->tx_avail = skge->tx_ring.count - 1; 2184 skge->tx_avail = skge->tx_ring.count - 1;
2186 2185
2187 /* Enable IRQ from port */
2188 spin_lock_irq(&hw->hw_lock);
2189 hw->intr_mask |= portirqmask[port];
2190 skge_write32(hw, B0_IMSK, hw->intr_mask);
2191 spin_unlock_irq(&hw->hw_lock);
2192
2193 /* Initialize MAC */ 2186 /* Initialize MAC */
2194 spin_lock_bh(&hw->phy_lock); 2187 spin_lock_bh(&hw->phy_lock);
2195 if (hw->chip_id == CHIP_ID_GENESIS) 2188 if (hw->chip_id == CHIP_ID_GENESIS)
@@ -2246,11 +2239,6 @@ static int skge_down(struct net_device *dev)
2246 else 2239 else
2247 yukon_stop(skge); 2240 yukon_stop(skge);
2248 2241
2249 spin_lock_irq(&hw->hw_lock);
2250 hw->intr_mask &= ~portirqmask[skge->port];
2251 skge_write32(hw, B0_IMSK, hw->intr_mask);
2252 spin_unlock_irq(&hw->hw_lock);
2253
2254 /* Stop transmitter */ 2242 /* Stop transmitter */
2255 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2243 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
2256 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), 2244 skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
@@ -2734,11 +2722,9 @@ static int skge_poll(struct net_device *dev, int *budget)
2734 if (work_done >= to_do) 2722 if (work_done >= to_do)
2735 return 1; /* not done */ 2723 return 1; /* not done */
2736 2724
2737 spin_lock_irq(&hw->hw_lock); 2725 netif_rx_complete(dev);
2738 __netif_rx_complete(dev); 2726 hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
2739 hw->intr_mask |= portirqmask[skge->port];
2740 skge_write32(hw, B0_IMSK, hw->intr_mask); 2727 skge_write32(hw, B0_IMSK, hw->intr_mask);
2741 spin_unlock_irq(&hw->hw_lock);
2742 2728
2743 return 0; 2729 return 0;
2744} 2730}
@@ -2850,12 +2836,11 @@ static void skge_extirq(unsigned long data)
2850 int port; 2836 int port;
2851 2837
2852 spin_lock(&hw->phy_lock); 2838 spin_lock(&hw->phy_lock);
2853 for (port = 0; port < 2; port++) { 2839 for (port = 0; port < hw->ports; port++) {
2854 struct net_device *dev = hw->dev[port]; 2840 struct net_device *dev = hw->dev[port];
2841 struct skge_port *skge = netdev_priv(dev);
2855 2842
2856 if (dev && netif_running(dev)) { 2843 if (netif_running(dev)) {
2857 struct skge_port *skge = netdev_priv(dev);
2858
2859 if (hw->chip_id != CHIP_ID_GENESIS) 2844 if (hw->chip_id != CHIP_ID_GENESIS)
2860 yukon_phy_intr(skge); 2845 yukon_phy_intr(skge);
2861 else 2846 else
@@ -2864,21 +2849,25 @@ static void skge_extirq(unsigned long data)
2864 } 2849 }
2865 spin_unlock(&hw->phy_lock); 2850 spin_unlock(&hw->phy_lock);
2866 2851
2867 spin_lock_irq(&hw->hw_lock);
2868 hw->intr_mask |= IS_EXT_REG; 2852 hw->intr_mask |= IS_EXT_REG;
2869 skge_write32(hw, B0_IMSK, hw->intr_mask); 2853 skge_write32(hw, B0_IMSK, hw->intr_mask);
2870 spin_unlock_irq(&hw->hw_lock);
2871} 2854}
2872 2855
2873static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) 2856static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2874{ 2857{
2875 struct skge_hw *hw = dev_id; 2858 struct skge_hw *hw = dev_id;
2876 u32 status = skge_read32(hw, B0_SP_ISRC); 2859 u32 status;
2877 2860
2878 if (status == 0 || status == ~0) /* hotplug or shared irq */ 2861 /* Reading this register masks IRQ */
2862 status = skge_read32(hw, B0_SP_ISRC);
2863 if (status == 0)
2879 return IRQ_NONE; 2864 return IRQ_NONE;
2880 2865
2881 spin_lock(&hw->hw_lock); 2866 if (status & IS_EXT_REG) {
2867 hw->intr_mask &= ~IS_EXT_REG;
2868 tasklet_schedule(&hw->ext_tasklet);
2869 }
2870
2882 if (status & (IS_R1_F|IS_XA1_F)) { 2871 if (status & (IS_R1_F|IS_XA1_F)) {
2883 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); 2872 skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
2884 hw->intr_mask &= ~(IS_R1_F|IS_XA1_F); 2873 hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
@@ -2891,6 +2880,9 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2891 netif_rx_schedule(hw->dev[1]); 2880 netif_rx_schedule(hw->dev[1]);
2892 } 2881 }
2893 2882
2883 if (likely((status & hw->intr_mask) == 0))
2884 return IRQ_HANDLED;
2885
2894 if (status & IS_PA_TO_RX1) { 2886 if (status & IS_PA_TO_RX1) {
2895 struct skge_port *skge = netdev_priv(hw->dev[0]); 2887 struct skge_port *skge = netdev_priv(hw->dev[0]);
2896 ++skge->net_stats.rx_over_errors; 2888 ++skge->net_stats.rx_over_errors;
@@ -2918,13 +2910,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2918 if (status & IS_HW_ERR) 2910 if (status & IS_HW_ERR)
2919 skge_error_irq(hw); 2911 skge_error_irq(hw);
2920 2912
2921 if (status & IS_EXT_REG) {
2922 hw->intr_mask &= ~IS_EXT_REG;
2923 tasklet_schedule(&hw->ext_tasklet);
2924 }
2925
2926 skge_write32(hw, B0_IMSK, hw->intr_mask); 2913 skge_write32(hw, B0_IMSK, hw->intr_mask);
2927 spin_unlock(&hw->hw_lock);
2928 2914
2929 return IRQ_HANDLED; 2915 return IRQ_HANDLED;
2930} 2916}
@@ -3070,7 +3056,10 @@ static int skge_reset(struct skge_hw *hw)
3070 else 3056 else
3071 hw->ram_size = t8 * 4096; 3057 hw->ram_size = t8 * 4096;
3072 3058
3073 hw->intr_mask = IS_HW_ERR | IS_EXT_REG; 3059 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
3060 if (hw->ports > 1)
3061 hw->intr_mask |= IS_PORT_2;
3062
3074 if (hw->chip_id == CHIP_ID_GENESIS) 3063 if (hw->chip_id == CHIP_ID_GENESIS)
3075 genesis_init(hw); 3064 genesis_init(hw);
3076 else { 3065 else {
@@ -3293,7 +3282,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3293 3282
3294 hw->pdev = pdev; 3283 hw->pdev = pdev;
3295 spin_lock_init(&hw->phy_lock); 3284 spin_lock_init(&hw->phy_lock);
3296 spin_lock_init(&hw->hw_lock);
3297 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); 3285 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
3298 3286
3299 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3287 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);