author     Jeff Garzik <jeff@garzik.org>    2006-05-26 21:39:03 -0400
committer  Jeff Garzik <jeff@garzik.org>    2006-05-26 21:39:03 -0400
commit     f3b197ac26ed0e57989856494c495818dcc7f9ac (patch)
tree       5451adb0bc6c219d0a794ea32e3c598740c82bdf /drivers/net/forcedeth.c
parent     4c0c2fd486b6598e37c77b5d81a08bc2d948aa7b (diff)
[netdrvr] trim trailing whitespace: 8139*.c, epic100, forcedeth, tulip/*
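The change is purely mechanical: every touched line loses only its trailing blanks, so the visible content is identical on both sides of each hunk. A cleanup of this kind can be reproduced with a short script along the lines of the sketch below; this snippet is illustrative only and is not the tooling actually used to produce the commit.

```python
#!/usr/bin/env python3
"""Illustrative sketch: strip trailing spaces/tabs from the named files.
Not the tool used for this commit; just one way to produce the same
kind of whitespace-only diff."""
import sys

def trim_line(line):
    # Remove trailing spaces/tabs but keep the newline, if present.
    if line.endswith("\n"):
        return line[:-1].rstrip(" \t") + "\n"
    return line.rstrip(" \t")

def trim_file(path):
    with open(path, "r") as f:
        lines = f.readlines()
    cleaned = [trim_line(l) for l in lines]
    if cleaned != lines:
        with open(path, "w") as f:
            f.writelines(cleaned)
        print("trimmed trailing whitespace: %s" % path)

if __name__ == "__main__":
    # e.g.: python3 trim_ws.py drivers/net/forcedeth.c drivers/net/epic100.c
    for name in sys.argv[1:]:
        trim_file(name)
```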
Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c | 40
1 files changed, 20 insertions, 20 deletions
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cee25fe7e19b..66ea5fc5c2e2 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -458,7 +458,7 @@ typedef union _ring_type {
 
 #define RX_RING 128
 #define TX_RING 256
 /*
  * If your nic mysteriously hangs then try to reduce the limits
  * to 1/0: It might be required to set NV_TX_LASTPACKET in the
  * last valid ring entry. But this would be impossible to
@@ -480,7 +480,7 @@ typedef union _ring_type {
 #define POLL_WAIT (1+HZ/100)
 #define LINK_TIMEOUT (3*HZ)
 
 /*
  * desc_ver values:
  * The nic supports three different descriptor types:
  * - DESC_VER_1: Original
@@ -619,7 +619,7 @@ static int max_interrupt_work = 5;
 
 /*
  * Optimization can be either throuput mode or cpu mode
  *
  * Throughput Mode: Every tx and rx packet will generate an interrupt.
  * CPU Mode: Interrupts are controlled by a timer.
  */
@@ -1119,7 +1119,7 @@ static void nv_do_rx_refill(unsigned long data)
 	}
 }
 
 static void nv_init_rx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
@@ -1183,7 +1183,7 @@ static void nv_drain_tx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	unsigned int i;
 
 	for (i = 0; i < TX_RING; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			np->tx_ring.orig[i].FlagLen = 0;
@@ -1329,7 +1329,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	} else {
 		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
 		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
 		dev->name, np->next_tx, entries, tx_flags_extra);
@@ -1404,7 +1404,7 @@ static void nv_tx_done(struct net_device *dev)
 				} else {
 					np->stats.tx_packets++;
 					np->stats.tx_bytes += skb->len;
 				}
 			}
 		}
 		nv_release_txskb(dev, i);
@@ -1450,7 +1450,7 @@ static void nv_tx_timeout(struct net_device *dev)
 	for (i=0;i<TX_RING;i+= 4) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 			printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 				i,
 				le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
 				le32_to_cpu(np->tx_ring.orig[i].FlagLen),
 				le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
@@ -1461,7 +1461,7 @@ static void nv_tx_timeout(struct net_device *dev)
 				le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
 		} else {
 			printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 				i,
 				le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
 				le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
 				le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@ -2067,7 +2067,7 @@ set_speed:
 			if (lpa_pause == LPA_PAUSE_ASYM)
 			{
 				np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 			}
 			break;
 		}
 	}
@@ -2086,7 +2086,7 @@ set_speed:
 			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
 		} else {
 			writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
 			writel(regmisc, base + NvRegMisc1);
 		}
 	}
 
@@ -2150,7 +2150,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 		spin_lock(&np->lock);
 		nv_tx_done(dev);
 		spin_unlock(&np->lock);
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
 			spin_lock(&np->lock);
@@ -2158,7 +2158,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 			spin_unlock(&np->lock);
 		}
 
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
@@ -2223,7 +2223,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		spin_lock_irq(&np->lock);
 		nv_tx_done(dev);
 		spin_unlock_irq(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
@@ -2266,7 +2266,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
 			spin_lock_irq(&np->lock);
@@ -2274,7 +2274,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 			spin_unlock_irq(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
 			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
@@ -2313,7 +2313,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock_irq(&np->lock);
 			nv_link_irq(dev);
@@ -2386,7 +2386,7 @@ static void nv_do_nic_poll(unsigned long data)
 	np->nic_poll_irq = 0;
 
 	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
 
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
@@ -3165,7 +3165,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
 	}
 
 
 	err = -ENOMEM;
 	np->base = ioremap(addr, np->register_size);
@@ -3313,7 +3313,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 			pci_name(pci_dev));
 		goto out_freering;
 	}
 
 	/* reset it */
 	phy_init(dev);
 