Diffstat (limited to 'drivers/net/ni65.c')
-rw-r--r--  drivers/net/ni65.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 810cc572f5f7..fab3c8593ac1 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -324,7 +324,7 @@ static int ni65_close(struct net_device *dev)
	struct priv *p = (struct priv *) dev->priv;

	netif_stop_queue(dev);

	outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
@@ -489,20 +489,20 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
			int dma = dmatab[i];
			if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
				continue;

			flags=claim_dma_lock();
			disable_dma(dma);
			set_dma_mode(dma,DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

			flags=claim_dma_lock();
			disable_dma(dma);
			free_dma(dma);
			release_dma_lock(flags);

			if(readreg(CSR0) & CSR0_IDON)
				break;
		}
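
Note: the hunk above covers the ISA DMA autoprobe in ni65_probe1(). Each candidate channel is put into cascade mode so the LANCE can master the bus itself, the chip is asked to initialise, and the first channel on which CSR0_IDON comes back is kept. A minimal sketch of that pattern follows; lance_init_done() is a hypothetical stand-in for the driver's ni65_init_lance()/readreg(CSR0) pair, not part of the real code.

#include <linux/kernel.h>
#include <asm/dma.h>

/* Probe 'n' candidate ISA DMA channels; return the first one that works, -1 if none. */
static int probe_cascade_dma(const int *dmatab, int n, int (*lance_init_done)(void))
{
	int i;

	for (i = 0; i < n; i++) {
		int dma = dmatab[i];
		unsigned long flags;
		int done;

		if (request_dma(dma, "ni6510"))		/* channel already claimed elsewhere */
			continue;

		flags = claim_dma_lock();
		disable_dma(dma);
		set_dma_mode(dma, DMA_MODE_CASCADE);	/* let the card drive the bus itself */
		enable_dma(dma);
		release_dma_lock(flags);

		done = lance_init_done();		/* kick the chip, poll for init-done (hypothetical) */

		flags = claim_dma_lock();
		disable_dma(dma);
		free_dma(dma);
		release_dma_lock(flags);

		if (done)
			return dma;			/* first channel that completed init wins */
	}
	return -1;
}
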
@@ -881,7 +881,7 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id, struct pt_regs * regs)
	p = (struct priv *) dev->priv;

	spin_lock(&p->ring_lock);

	while(--bcnt) {
		csr0 = inw(PORT+L_DATAREG);

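
Note: the loop shown here services CSR0 events under p->ring_lock and is bounded by bcnt, so an interrupt storm cannot monopolise the CPU. A rough sketch of that shape, with csr0_read()/csr0_ack() standing in (hypothetically) for the inw()/outw() accesses to PORT+L_DATAREG and an illustrative event mask:

#include <linux/spinlock.h>

#define EVENT_PENDING_MASK 0x7f00	/* illustrative "any event" bits, not the driver's real mask */

struct ring_state {
	spinlock_t ring_lock;
};

static void service_csr0_events(struct ring_state *p,
				unsigned int (*csr0_read)(void),
				void (*csr0_ack)(unsigned int))
{
	int bcnt = 32;			/* upper bound on events handled per interrupt */
	unsigned int csr0;

	spin_lock(&p->ring_lock);
	while (--bcnt) {
		csr0 = csr0_read();
		if (!(csr0 & EVENT_PENDING_MASK))
			break;		/* nothing left to service */
		csr0_ack(csr0);		/* acknowledge the bits we are about to handle */
		/* ... dispatch to RX/TX/error handling here ... */
	}
	spin_unlock(&p->ring_lock);
}
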
@@ -1139,7 +1139,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
/*
 * kick xmitter ..
 */

static void ni65_timeout(struct net_device *dev)
{
	int i;
@@ -1163,7 +1163,7 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
	struct priv *p = (struct priv *) dev->priv;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, (void*)&p->lock)) {
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return 1;
@@ -1209,10 +1209,10 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)

	if(p->tmdnum != p->tmdlast)
		netif_wake_queue(dev);

	p->lock = 0;
	dev->trans_start = jiffies;

	spin_unlock_irqrestore(&p->ring_lock, flags);
	}

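
Note: the last two hunks touch ni65_send_packet(), whose interlock works in three steps: stop the queue, take a one-bit "transmit in progress" lock with test_and_set_bit(), and after queuing the frame wake the queue again only if another TX descriptor is still free. A condensed sketch of that flow under the old driver conventions seen here (dev->trans_start, non-zero return on a busy queue); fill_tx_descriptor() and TX_RING_LEN are hypothetical, introduced only for illustration.

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define TX_RING_LEN 4			/* illustrative ring size, power of two */

struct tx_state {
	unsigned long lock;		/* bit 0: transmit in progress */
	spinlock_t ring_lock;
	int tmdnum;			/* next descriptor to fill */
	int tmdlast;			/* next descriptor the chip will complete */
};

static int sketch_send_packet(struct sk_buff *skb, struct net_device *dev,
			      struct tx_state *p,
			      void (*fill_tx_descriptor)(struct sk_buff *skb, int slot))
{
	unsigned long flags;

	netif_stop_queue(dev);

	if (test_and_set_bit(0, &p->lock)) {	/* another transmit already in flight? */
		printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
		return 1;
	}

	spin_lock_irqsave(&p->ring_lock, flags);
	fill_tx_descriptor(skb, p->tmdnum);	/* hand the frame to the chip (hypothetical helper) */
	p->tmdnum = (p->tmdnum + 1) & (TX_RING_LEN - 1);

	if (p->tmdnum != p->tmdlast)		/* at least one descriptor still free */
		netif_wake_queue(dev);

	p->lock = 0;				/* drop the xmit interlock */
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&p->ring_lock, flags);

	return 0;
}
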