diff options
Diffstat (limited to 'net/core/dev.c')
-rw-r--r-- | net/core/dev.c | 32 |
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index ef56c035d44e..08dec6eb922b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -81,6 +81,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/mutex.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/socket.h>
@@ -1759,8 +1760,7 @@ static void net_rx_action(struct softirq_action *h)
 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
 			netpoll_poll_unlock(have);
 			local_irq_disable();
-			list_del(&dev->poll_list);
-			list_add_tail(&dev->poll_list, &queue->poll_list);
+			list_move_tail(&dev->poll_list, &queue->poll_list);
 			if (dev->quota < 0)
 				dev->quota += dev->weight;
 			else
@@ -2174,12 +2174,20 @@ unsigned dev_get_flags(const struct net_device *dev)
 
 	flags = (dev->flags & ~(IFF_PROMISC |
 				IFF_ALLMULTI |
-				IFF_RUNNING)) |
+				IFF_RUNNING |
+				IFF_LOWER_UP |
+				IFF_DORMANT)) |
 		(dev->gflags & (IFF_PROMISC |
 				IFF_ALLMULTI));
 
-	if (netif_running(dev) && netif_carrier_ok(dev))
-		flags |= IFF_RUNNING;
+	if (netif_running(dev)) {
+		if (netif_oper_up(dev))
+			flags |= IFF_RUNNING;
+		if (netif_carrier_ok(dev))
+			flags |= IFF_LOWER_UP;
+		if (netif_dormant(dev))
+			flags |= IFF_DORMANT;
+	}
 
 	return flags;
 }
@@ -2458,9 +2466,9 @@ int dev_ioctl(unsigned int cmd, void __user *arg)
 	 */
 
 	if (cmd == SIOCGIFCONF) {
-		rtnl_shlock();
+		rtnl_lock();
 		ret = dev_ifconf((char __user *) arg);
-		rtnl_shunlock();
+		rtnl_unlock();
 		return ret;
 	}
 	if (cmd == SIOCGIFNAME)
@@ -2869,7 +2877,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 	rebroadcast_time = warning_time = jiffies;
 	while (atomic_read(&dev->refcnt) != 0) {
 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
-			rtnl_shlock();
+			rtnl_lock();
 
 			/* Rebroadcast unregister notification */
 			notifier_call_chain(&netdev_chain,
@@ -2886,7 +2894,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 				linkwatch_run_queue();
 			}
 
-			rtnl_shunlock();
+			__rtnl_unlock();
 
 			rebroadcast_time = jiffies;
 		}
@@ -2924,7 +2932,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  *    safely in order to wait for the netdev refcnt to drop to zero.
  */
-static DECLARE_MUTEX(net_todo_run_mutex);
+static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list = LIST_HEAD_INIT(list);
@@ -2932,7 +2940,7 @@ void netdev_run_todo(void)
 
 
 	/* Need to guard against multiple cpu's getting out of order. */
-	down(&net_todo_run_mutex);
+	mutex_lock(&net_todo_run_mutex);
 
 	/* Not safe to do outside the semaphore. We must not return
 	 * until all unregister events invoked by the local processor
@@ -2989,7 +2997,7 @@ void netdev_run_todo(void)
 	}
 
 out:
-	up(&net_todo_run_mutex);
+	mutex_unlock(&net_todo_run_mutex);
 }
 
 /**