author    Jeff Dike <jdike@addtoit.com>    2007-02-10 04:44:04 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-02-11 13:51:22 -0500
commit    c862fc32a3ee4319c652f4ff39462d030120c380 (patch)
tree      d8a79fe0e26d449048f310531deec2d83835b386 /arch/um/drivers
parent    f9795220521e0575dfd4ed0737d3a7848264662c (diff)
[PATCH] uml: network driver locking and code cleanup
Add some missing locking to walks of the transports and opened lists.
Delete some dead code.  Comment the lack of some locking.

Signed-off-by: Jeff Dike <jdike@addtoit.com>
Cc: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
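For illustration only (not part of the patch), a minimal sketch of the locking pattern being added: a walk of a list that other contexts may modify is bracketed by the list's spinlock, with the loop recording its result in a local and breaking out so the lock is released on a single exit path. The names my_lock, my_list, struct my_item and find_item are made up for this example.

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_list);

struct my_item {
	struct list_head list;
	int id;
};

/* Return 1 if an item with the given id is on the list, 0 otherwise. */
static int find_item(int id)
{
	struct list_head *ele;
	struct my_item *item;
	int found = 0;

	spin_lock(&my_lock);
	list_for_each(ele, &my_list){
		item = list_entry(ele, struct my_item, list);
		if(item->id != id)
			continue;
		found = 1;
		break;
	}
	spin_unlock(&my_lock);

	return found;
}

This mirrors what the patch does to eth_setup_common(): instead of returning from inside the loop, the function now sets found, breaks out, and drops transports_lock before returning.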
Diffstat (limited to 'arch/um/drivers')
-rw-r--r--    arch/um/drivers/net_kern.c    42
1 file changed, 18 insertions(+), 24 deletions(-)
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index c1838645ae28..04e31f86c10a 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -502,7 +502,7 @@ static DEFINE_SPINLOCK(transports_lock);
 static LIST_HEAD(transports);
 
 /* Filled in during early boot */
-struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
+static LIST_HEAD(eth_cmd_line);
 
 static int check_transport(struct transport *transport, char *eth, int n,
 			   void **init_out, char **mac_out)
@@ -563,7 +563,9 @@ static int eth_setup_common(char *str, int index)
 	struct transport *transport;
 	void *init;
 	char *mac = NULL;
+	int found = 0;
 
+	spin_lock(&transports_lock);
 	list_for_each(ele, &transports){
 		transport = list_entry(ele, struct transport, list);
 		if(!check_transport(transport, str, index, &init, &mac))
@@ -572,9 +574,12 @@ static int eth_setup_common(char *str, int index)
 			eth_configure(index, init, mac, transport);
 			kfree(init);
 		}
-		return 1;
+		found = 1;
+		break;
 	}
-	return 0;
+
+	spin_unlock(&transports_lock);
+	return found;
 }
 
 static int eth_setup(char *str)
@@ -610,24 +615,6 @@ __uml_help(eth_setup,
610" Configure a network device.\n\n" 615" Configure a network device.\n\n"
611); 616);
612 617
613#if 0
614static int eth_init(void)
615{
616 struct list_head *ele, *next;
617 struct eth_init *eth;
618
619 list_for_each_safe(ele, next, &eth_cmd_line){
620 eth = list_entry(ele, struct eth_init, list);
621
622 if(eth_setup_common(eth->init, eth->index))
623 list_del(&eth->list);
624 }
625
626 return(1);
627}
628__initcall(eth_init);
629#endif
630
631static int net_config(char *str, char **error_out) 618static int net_config(char *str, char **error_out)
632{ 619{
633 int n, err; 620 int n, err;
@@ -729,6 +716,7 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
 	return NOTIFY_DONE;
 }
 
+/* uml_net_init shouldn't be called twice on two CPUs at the same time */
 struct notifier_block uml_inetaddr_notifier = {
 	.notifier_call = uml_inetaddr_event,
 };
@@ -747,18 +735,21 @@ static int uml_net_init(void)
 	 * didn't get a chance to run for them. This fakes it so that
 	 * addresses which have already been set up get handled properly.
 	 */
+	spin_lock(&opened_lock);
 	list_for_each(ele, &opened){
 		lp = list_entry(ele, struct uml_net_private, list);
 		ip = lp->dev->ip_ptr;
-		if(ip == NULL) continue;
+		if(ip == NULL)
+			continue;
 		in = ip->ifa_list;
 		while(in != NULL){
 			uml_inetaddr_event(NULL, NETDEV_UP, in);
 			in = in->ifa_next;
 		}
 	}
+	spin_unlock(&opened_lock);
 
-	return(0);
+	return 0;
 }
 
 __initcall(uml_net_init);
@@ -768,13 +759,16 @@ static void close_devices(void)
 	struct list_head *ele;
 	struct uml_net_private *lp;
 
+	spin_lock(&opened_lock);
 	list_for_each(ele, &opened){
 		lp = list_entry(ele, struct uml_net_private, list);
 		free_irq(lp->dev->irq, lp->dev);
 		if((lp->close != NULL) && (lp->fd >= 0))
 			(*lp->close)(lp->fd, &lp->user);
-		if(lp->remove != NULL) (*lp->remove)(&lp->user);
+		if(lp->remove != NULL)
+			(*lp->remove)(&lp->user);
 	}
+	spin_unlock(&opened_lock);
 }
 
 __uml_exitcall(close_devices);