author	Eric Dumazet <eric.dumazet@gmail.com>	2009-10-30 03:11:27 -0400
committer	David S. Miller <davem@davemloft.net>	2009-11-02 02:55:08 -0500
commit	72c9528bab94cc052d00ce241b8e85f5d71e45f0 (patch)
tree	fcec7a40e0242e659474a4d9f501d9213225aa55 /net/core/dev.c
parent	68d8287ce1e1da3c99881385a93e74f68c454fc2 (diff)
net: Introduce dev_get_by_name_rcu()
Some workloads hit the dev_base_lock rwlock pretty hard. We can use RCU
lookups to avoid touching this rwlock (and avoid touching the netdevice
refcount).

netdevices are already freed after an RCU grace period, so this patch adds
no penalty at device dismantle time. However, it adds a synchronize_rcu()
call in dev_change_name().

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
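For illustration, a minimal caller sketch (hypothetical example code, not part
of this commit) showing how the new lookup is intended to be used: the returned
pointer carries no reference, so it may only be dereferenced inside the
rcu_read_lock()/rcu_read_unlock() section, or dev_hold() must be called before
leaving it.

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Hypothetical helper: resolve an interface name to its ifindex without
 * taking dev_base_lock or a device refcount. This is safe only because
 * dev is not dereferenced after rcu_read_unlock().
 */
static int example_name_to_ifindex(struct net *net, const char *name)
{
	struct net_device *dev;
	int ifindex = -ENODEV;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		ifindex = dev->ifindex;
	rcu_read_unlock();

	return ifindex;
}

Callers that need the device beyond the critical section should keep using
dev_get_by_name(), which after this patch performs the same RCU lookup but
takes a reference with dev_hold() before returning.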
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	49
1 files changed, 40 insertions, 9 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 94f42a15fff1..f54d8b8a434b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -213,7 +213,7 @@ static int list_netdevice(struct net_device *dev)
 
 	write_lock_bh(&dev_base_lock);
 	list_add_tail(&dev->dev_list, &net->dev_base_head);
-	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 	hlist_add_head_rcu(&dev->index_hlist,
 			   dev_index_hash(net, dev->ifindex));
 	write_unlock_bh(&dev_base_lock);
@@ -230,7 +230,7 @@ static void unlist_netdevice(struct net_device *dev)
 	/* Unlink dev from the device chain */
 	write_lock_bh(&dev_base_lock);
 	list_del(&dev->dev_list);
-	hlist_del(&dev->name_hlist);
+	hlist_del_rcu(&dev->name_hlist);
 	hlist_del_rcu(&dev->index_hlist);
 	write_unlock_bh(&dev_base_lock);
 }
@@ -599,6 +599,32 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name)
 EXPORT_SYMBOL(__dev_get_by_name);
 
 /**
+ *	dev_get_by_name_rcu - find a device by its name
+ *	@net: the applicable net namespace
+ *	@name: name to find
+ *
+ *	Find an interface by name.
+ *	If the name is found a pointer to the device is returned.
+ *	If the name is not found then %NULL is returned.
+ *	The reference counters are not incremented so the caller must be
+ *	careful with locks. The caller must hold RCU lock.
+ */
+
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
+{
+	struct hlist_node *p;
+	struct net_device *dev;
+	struct hlist_head *head = dev_name_hash(net, name);
+
+	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
+		if (!strncmp(dev->name, name, IFNAMSIZ))
+			return dev;
+
+	return NULL;
+}
+EXPORT_SYMBOL(dev_get_by_name_rcu);
+
+/**
  *	dev_get_by_name - find a device by its name
  *	@net: the applicable net namespace
  *	@name: name to find
@@ -614,11 +640,11 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_name(net, name);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, name);
 	if (dev)
 		dev_hold(dev);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 	return dev;
 }
 EXPORT_SYMBOL(dev_get_by_name);
@@ -960,7 +986,12 @@ rollback:
 
 	write_lock_bh(&dev_base_lock);
 	hlist_del(&dev->name_hlist);
-	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+	write_unlock_bh(&dev_base_lock);
+
+	synchronize_rcu();
+
+	write_lock_bh(&dev_base_lock);
+	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
 	write_unlock_bh(&dev_base_lock);
 
 	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
@@ -1062,9 +1093,9 @@ void dev_load(struct net *net, const char *name)
 {
 	struct net_device *dev;
 
-	read_lock(&dev_base_lock);
-	dev = __dev_get_by_name(net, name);
-	read_unlock(&dev_base_lock);
+	rcu_read_lock();
+	dev = dev_get_by_name_rcu(net, name);
+	rcu_read_unlock();
 
 	if (!dev && capable(CAP_NET_ADMIN))
 		request_module("%s", name);