-rw-r--r--   include/linux/netdevice.h    3
-rw-r--r--   net/core/dev.c                3
-rw-r--r--   net/core/link_watch.c        94
3 files changed, 62 insertions, 38 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c8fa4627de00..97873e31661c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -896,7 +896,7 @@ struct net_device {
 	/* device index hash chain */
 	struct hlist_node	index_hlist;
 
-	struct net_device	*link_watch_next;
+	struct list_head	link_watch_list;
 
 	/* register/unregister state machine */
 	enum { NETREG_UNINITIALIZED=0,
@@ -1600,6 +1600,7 @@ static inline void dev_hold(struct net_device *dev)
  */
 
 extern void linkwatch_fire_event(struct net_device *dev);
+extern void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  * netif_carrier_ok - test if carrier present
diff --git a/net/core/dev.c b/net/core/dev.c
index e25fe5d9343b..c128af708ebf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5085,6 +5085,8 @@ static void netdev_wait_allrefs(struct net_device *dev)
 {
 	unsigned long rebroadcast_time, warning_time;
 
+	linkwatch_forget_dev(dev);
+
 	rebroadcast_time = warning_time = jiffies;
 	while (atomic_read(&dev->refcnt) != 0) {
 		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
@@ -5311,6 +5313,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	INIT_LIST_HEAD(&dev->napi_list);
 	INIT_LIST_HEAD(&dev->unreg_list);
+	INIT_LIST_HEAD(&dev->link_watch_list);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
 	strcpy(dev->name, name);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index bf8f7af699d7..5910b555a54a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -35,7 +35,7 @@ static unsigned long linkwatch_nextevent;
 static void linkwatch_event(struct work_struct *dummy);
 static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
-static struct net_device *lweventlist;
+static LIST_HEAD(lweventlist);
 static DEFINE_SPINLOCK(lweventlist_lock);
 
 static unsigned char default_operstate(const struct net_device *dev)
@@ -89,8 +89,10 @@ static void linkwatch_add_event(struct net_device *dev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&lweventlist_lock, flags);
-	dev->link_watch_next = lweventlist;
-	lweventlist = dev;
+	if (list_empty(&dev->link_watch_list)) {
+		list_add_tail(&dev->link_watch_list, &lweventlist);
+		dev_hold(dev);
+	}
 	spin_unlock_irqrestore(&lweventlist_lock, flags);
 }
 
@@ -133,9 +135,35 @@ static void linkwatch_schedule_work(int urgent)
 }
 
 
+static void linkwatch_do_dev(struct net_device *dev)
+{
+	/*
+	 * Make sure the above read is complete since it can be
+	 * rewritten as soon as we clear the bit below.
+	 */
+	smp_mb__before_clear_bit();
+
+	/* We are about to handle this device,
+	 * so new events can be accepted
+	 */
+	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
+
+	rfc2863_policy(dev);
+	if (dev->flags & IFF_UP) {
+		if (netif_carrier_ok(dev))
+			dev_activate(dev);
+		else
+			dev_deactivate(dev);
+
+		netdev_state_change(dev);
+	}
+	dev_put(dev);
+}
+
 static void __linkwatch_run_queue(int urgent_only)
 {
-	struct net_device *next;
+	struct net_device *dev;
+	LIST_HEAD(wrk);
 
 	/*
 	 * Limit the number of linkwatch events to one
@@ -153,46 +181,40 @@ static void __linkwatch_run_queue(int urgent_only)
 	clear_bit(LW_URGENT, &linkwatch_flags);
 
 	spin_lock_irq(&lweventlist_lock);
-	next = lweventlist;
-	lweventlist = NULL;
-	spin_unlock_irq(&lweventlist_lock);
+	list_splice_init(&lweventlist, &wrk);
 
-	while (next) {
-		struct net_device *dev = next;
+	while (!list_empty(&wrk)) {
 
-		next = dev->link_watch_next;
+		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
+		list_del_init(&dev->link_watch_list);
 
 		if (urgent_only && !linkwatch_urgent_event(dev)) {
-			linkwatch_add_event(dev);
+			list_add_tail(&dev->link_watch_list, &lweventlist);
 			continue;
 		}
-
-		/*
-		 * Make sure the above read is complete since it can be
-		 * rewritten as soon as we clear the bit below.
-		 */
-		smp_mb__before_clear_bit();
-
-		/* We are about to handle this device,
-		 * so new events can be accepted
-		 */
-		clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
-
-		rfc2863_policy(dev);
-		if (dev->flags & IFF_UP) {
-			if (netif_carrier_ok(dev))
-				dev_activate(dev);
-			else
-				dev_deactivate(dev);
-
-			netdev_state_change(dev);
-		}
-
-		dev_put(dev);
+		spin_unlock_irq(&lweventlist_lock);
+		linkwatch_do_dev(dev);
+		spin_lock_irq(&lweventlist_lock);
 	}
 
-	if (lweventlist)
+	if (!list_empty(&lweventlist))
 		linkwatch_schedule_work(0);
+	spin_unlock_irq(&lweventlist_lock);
+}
+
+void linkwatch_forget_dev(struct net_device *dev)
+{
+	unsigned long flags;
+	int clean = 0;
+
+	spin_lock_irqsave(&lweventlist_lock, flags);
+	if (!list_empty(&dev->link_watch_list)) {
+		list_del_init(&dev->link_watch_list);
+		clean = 1;
+	}
+	spin_unlock_irqrestore(&lweventlist_lock, flags);
+	if (clean)
+		linkwatch_do_dev(dev);
 }
 
 
@@ -216,8 +238,6 @@ void linkwatch_fire_event(struct net_device *dev)
 	bool urgent = linkwatch_urgent_event(dev);
 
 	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
-		dev_hold(dev);
-
 		linkwatch_add_event(dev);
 	} else if (!urgent)
 		return;
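
The conversion above replaces a hand-rolled singly linked pending list with a per-device struct list_head: list_empty() on dev->link_watch_list doubles as the "already queued" test, the queue holds a device reference only while the entry is linked, and __linkwatch_run_queue() splices the whole list onto a private head before draining it. The user-space sketch below is illustrative only, not part of the patch: struct fake_dev, fire_event() and run_queue() are invented names, and the tiny list helpers merely stand in for the kernel's <linux/list.h>.

/*
 * Minimal sketch of the queue-once / splice-and-drain pattern, assuming a
 * userspace build with no locking.  All names here are hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

static void list_splice_init(struct list_head *src, struct list_head *dst)
{
	if (!list_empty(src)) {		/* move every entry of @src onto @dst */
		src->next->prev = dst->prev;
		dst->prev->next = src->next;
		src->prev->next = dst;
		dst->prev = src->prev;
		INIT_LIST_HEAD(src);
	}
}

struct fake_dev {			/* stand-in for struct net_device */
	const char *name;
	int refcnt;
	struct list_head link_watch_list;
};

static struct list_head lweventlist = LIST_HEAD_INIT(lweventlist);

static void fire_event(struct fake_dev *dev)
{
	/* Queue at most once: an unlinked node means "not pending". */
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev->refcnt++;		/* dev_hold() analogue */
	}
}

static void run_queue(void)
{
	struct list_head wrk = LIST_HEAD_INIT(wrk);

	/* Detach the whole pending list, then drain the private copy. */
	list_splice_init(&lweventlist, &wrk);
	while (!list_empty(&wrk)) {
		struct fake_dev *dev = (struct fake_dev *)((char *)wrk.next -
				offsetof(struct fake_dev, link_watch_list));

		list_del_init(&dev->link_watch_list);
		printf("handling %s\n", dev->name);
		dev->refcnt--;		/* dev_put() analogue */
	}
}

int main(void)
{
	struct fake_dev eth0 = { "eth0", 1 };

	INIT_LIST_HEAD(&eth0.link_watch_list);
	fire_event(&eth0);
	fire_event(&eth0);		/* no-op: already on the pending list */
	run_queue();
	printf("%s refcnt=%d\n", eth0.name, eth0.refcnt);
	return 0;
}

The same shape explains why the patch can drop the dev_hold() from linkwatch_fire_event() and why linkwatch_forget_dev() is safe: the reference is taken exactly when the entry is linked and released exactly when it is handled, so unlinking an entry early and handling it immediately keeps the count balanced.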