author	Jay Vosburgh <fubar@us.ibm.com>	2005-08-18 17:04:51 -0400
committer	David S. Miller <davem@davemloft.net>	2005-08-18 17:04:51 -0400
commit	001dd250c1c68667a5c3b74979fa614e2edc9ceb (patch)
tree	95bafd6d3dbd0f7ca8bc0c41c2046a002e7c7660 /net/802/tr.c
parent	6be382ea0c767a81be0e7980400b9b18167b3261 (diff)
[TOKENRING]: Use interrupt-safe locking with rif_lock.
Change operations on rif_lock from spin_{un}lock_bh to
spin_{un}lock_irq{save,restore} equivalents.  Some of the rif_lock
critical sections are called from interrupt context via
tr_type_trans->tr_add_rif_info.  The TR NIC drivers call tr_type_trans
from their packet receive handlers.

Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
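For context, a minimal sketch of the locking problem the patch fixes. It is not taken
from net/802/tr.c; the lock, counter, and function names below are hypothetical and
only stand in for rif_lock and its callers. spin_lock_bh() disables softirqs but not
hardware interrupts, so if the lock is also taken from a hardirq-context path (as
rif_lock is, via the NIC receive handler -> tr_type_trans -> tr_add_rif_info), an
interrupt arriving while the lock is held can try to re-acquire it on the same CPU and
deadlock. The irqsave/irqrestore variants disable local interrupts and preserve the
previous interrupt state, so they are safe from any context.

/* Hypothetical illustration only, not code from this patch. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* plays the role of rif_lock */
static unsigned long example_counter;

/* May be entered in hardirq context (like tr_add_rif_info() via the
 * driver's receive handler), so the saved-flags variant is required:
 * it disables local interrupts and remembers whether they were
 * already disabled before the lock was taken. */
static void example_update_from_any_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Entered only from process context with interrupts enabled (like a
 * /proc seq_file handler).  spin_lock_bh() would leave a window where
 * a hard interrupt could fire, take example_lock in its handler, and
 * deadlock against this holder; disabling interrupts closes it. */
static void example_update_from_process_context(void)
{
	spin_lock_irq(&example_lock);
	example_counter++;
	spin_unlock_irq(&example_lock);
}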
Diffstat (limited to 'net/802/tr.c')
-rw-r--r--	net/802/tr.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/net/802/tr.c b/net/802/tr.c
index a755e880f4ba..1bb7dc1b85cd 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -251,10 +251,11 @@ void tr_source_route(struct sk_buff *skb,struct trh_hdr *trh,struct net_device *
 	unsigned int hash;
 	struct rif_cache *entry;
 	unsigned char *olddata;
+	unsigned long flags;
 	static const unsigned char mcast_func_addr[]
 		= {0xC0,0x00,0x00,0x04,0x00,0x00};
 
-	spin_lock_bh(&rif_lock);
+	spin_lock_irqsave(&rif_lock, flags);
 
 	/*
 	 * Broadcasts are single route as stated in RFC 1042
@@ -323,7 +324,7 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 	else
 		slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8);
 	olddata = skb->data;
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irqrestore(&rif_lock, flags);
 
 	skb_pull(skb, slack);
 	memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack);
@@ -337,10 +338,11 @@ printk("source routing for %02X:%02X:%02X:%02X:%02X:%02X\n",trh->daddr[0],
 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev)
 {
 	unsigned int hash, rii_p = 0;
+	unsigned long flags;
 	struct rif_cache *entry;
 
 
-	spin_lock_bh(&rif_lock);
+	spin_lock_irqsave(&rif_lock, flags);
 
 	/*
 	 * Firstly see if the entry exists
@@ -378,7 +380,7 @@ printk("adding rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 		if(!entry)
 		{
 			printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n");
-			spin_unlock_bh(&rif_lock);
+			spin_unlock_irqrestore(&rif_lock, flags);
 			return;
 		}
 
@@ -420,7 +422,7 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 		}
 		entry->last_used=jiffies;
 	}
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irqrestore(&rif_lock, flags);
 }
 
 /*
@@ -430,9 +432,9 @@ printk("updating rif_entry: addr:%02X:%02X:%02X:%02X:%02X:%02X rcf:%04X\n",
 static void rif_check_expire(unsigned long dummy)
 {
 	int i;
-	unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2;
+	unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2;
 
-	spin_lock_bh(&rif_lock);
+	spin_lock_irqsave(&rif_lock, flags);
 
 	for(i =0; i < RIF_TABLE_SIZE; i++) {
 		struct rif_cache *entry, **pentry;
@@ -454,7 +456,7 @@ static void rif_check_expire(unsigned long dummy)
 		}
 	}
 
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irqrestore(&rif_lock, flags);
 
 	mod_timer(&rif_timer, next_interval);
 
@@ -485,7 +487,7 @@ static struct rif_cache *rif_get_idx(loff_t pos)
 
 static void *rif_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	spin_lock_bh(&rif_lock);
+	spin_lock_irq(&rif_lock);
 
 	return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN;
 }
@@ -516,7 +518,7 @@ static void *rif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 static void rif_seq_stop(struct seq_file *seq, void *v)
 {
-	spin_unlock_bh(&rif_lock);
+	spin_unlock_irq(&rif_lock);
 }
 
 static int rif_seq_show(struct seq_file *seq, void *v)