aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
Diffstat (limited to 'net')
-rw-r--r--net/core/link_watch.c60
1 files changed, 41 insertions, 19 deletions
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4674ae574128..a5e372b9ec4d 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -26,7 +26,7 @@
26 26
27 27
28enum lw_bits { 28enum lw_bits {
29 LW_RUNNING = 0, 29 LW_URGENT = 0,
30}; 30};
31 31
32static unsigned long linkwatch_flags; 32static unsigned long linkwatch_flags;
@@ -95,18 +95,41 @@ static void linkwatch_add_event(struct net_device *dev)
95} 95}
96 96
97 97
98static void linkwatch_schedule_work(unsigned long delay) 98static void linkwatch_schedule_work(int urgent)
99{ 99{
100 if (test_and_set_bit(LW_RUNNING, &linkwatch_flags)) 100 unsigned long delay = linkwatch_nextevent - jiffies;
101
102 if (test_bit(LW_URGENT, &linkwatch_flags))
101 return; 103 return;
102 104
103 /* If we wrap around we'll delay it by at most HZ. */ 105 /* Minimise down-time: drop delay for up event. */
104 if (delay > HZ) { 106 if (urgent) {
105 linkwatch_nextevent = jiffies; 107 if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
108 return;
106 delay = 0; 109 delay = 0;
107 } 110 }
108 111
109 schedule_delayed_work(&linkwatch_work, delay); 112 /* If we wrap around we'll delay it by at most HZ. */
113 if (delay > HZ)
114 delay = 0;
115
116 /*
117 * This is true if we've scheduled it immediately or if we don't
118 * need an immediate execution and it's already pending.
119 */
120 if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
121 return;
122
123 /* Don't bother if there is nothing urgent. */
124 if (!test_bit(LW_URGENT, &linkwatch_flags))
125 return;
126
127 /* It's already running which is good enough. */
128 if (!cancel_delayed_work(&linkwatch_work))
129 return;
130
131 /* Otherwise we reschedule it again for immediate execution. */
132 schedule_delayed_work(&linkwatch_work, 0);
110} 133}
111 134
112 135
@@ -123,7 +146,11 @@ static void __linkwatch_run_queue(int urgent_only)
123 */ 146 */
124 if (!urgent_only) 147 if (!urgent_only)
125 linkwatch_nextevent = jiffies + HZ; 148 linkwatch_nextevent = jiffies + HZ;
126 clear_bit(LW_RUNNING, &linkwatch_flags); 149 /* Limit wrap-around effect on delay. */
150 else if (time_after(linkwatch_nextevent, jiffies + HZ))
151 linkwatch_nextevent = jiffies;
152
153 clear_bit(LW_URGENT, &linkwatch_flags);
127 154
128 spin_lock_irq(&lweventlist_lock); 155 spin_lock_irq(&lweventlist_lock);
129 next = lweventlist; 156 next = lweventlist;
@@ -166,7 +193,7 @@ static void __linkwatch_run_queue(int urgent_only)
166 } 193 }
167 194
168 if (lweventlist) 195 if (lweventlist)
169 linkwatch_schedule_work(linkwatch_nextevent - jiffies); 196 linkwatch_schedule_work(0);
170} 197}
171 198
172 199
@@ -187,21 +214,16 @@ static void linkwatch_event(struct work_struct *dummy)
187 214
188void linkwatch_fire_event(struct net_device *dev) 215void linkwatch_fire_event(struct net_device *dev)
189{ 216{
190 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { 217 int urgent = linkwatch_urgent_event(dev);
191 unsigned long delay;
192 218
219 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
193 dev_hold(dev); 220 dev_hold(dev);
194 221
195 linkwatch_add_event(dev); 222 linkwatch_add_event(dev);
223 } else if (!urgent)
224 return;
196 225
197 delay = linkwatch_nextevent - jiffies; 226 linkwatch_schedule_work(urgent);
198
199 /* Minimise down-time: drop delay for up event. */
200 if (linkwatch_urgent_event(dev))
201 delay = 0;
202
203 linkwatch_schedule_work(delay);
204 }
205} 227}
206 228
207EXPORT_SYMBOL(linkwatch_fire_event); 229EXPORT_SYMBOL(linkwatch_fire_event);