about summary refs log tree commit diff stats
path: root/net/core
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2007-05-08 21:36:28 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-05-11 02:45:08 -0400
commit294cc44b7e48a6e7732499eebcf409b231460d8e (patch)
tree36c77c3ddb8f2b731ba4bba3a6d6682f6376a66b /net/core
parent572a103ded0ad880f75ce83e99f0512fbb80b5b0 (diff)
[NET]: Remove link_watch delay for up even when we're down
Currently all link carrier events are delayed by up to a second before they're processed to prevent link storms. This causes unnecessary packet loss during that interval. In fact, we can achieve the same effect in preventing storms by only delaying down events and unnecessary up events. The latter is defined as up events when we're already up. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/link_watch.c90
1 file changed, 67 insertions, 23 deletions
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 71a35da275d4..b5f45799c2f3 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -77,11 +77,52 @@ static void rfc2863_policy(struct net_device *dev)
77} 77}
78 78
79 79
80/* Must be called with the rtnl semaphore held */ 80static int linkwatch_urgent_event(struct net_device *dev)
81void linkwatch_run_queue(void) 81{
82 return netif_running(dev) && netif_carrier_ok(dev) &&
83 dev->qdisc != dev->qdisc_sleeping;
84}
85
86
87static void linkwatch_add_event(struct net_device *dev)
88{
89 unsigned long flags;
90
91 spin_lock_irqsave(&lweventlist_lock, flags);
92 dev->link_watch_next = lweventlist;
93 lweventlist = dev;
94 spin_unlock_irqrestore(&lweventlist_lock, flags);
95}
96
97
98static void linkwatch_schedule_work(unsigned long delay)
99{
100 if (test_and_set_bit(LW_RUNNING, &linkwatch_flags))
101 return;
102
103 /* If we wrap around we'll delay it by at most HZ. */
104 if (delay > HZ)
105 delay = 0;
106
107 schedule_delayed_work(&linkwatch_work, delay);
108}
109
110
111static void __linkwatch_run_queue(int urgent_only)
82{ 112{
83 struct net_device *next; 113 struct net_device *next;
84 114
115 /*
116 * Limit the number of linkwatch events to one
117 * per second so that a runaway driver does not
118 * cause a storm of messages on the netlink
119 * socket. This limit does not apply to up events
120 * while the device qdisc is down.
121 */
122 if (!urgent_only)
123 linkwatch_nextevent = jiffies + HZ;
124 clear_bit(LW_RUNNING, &linkwatch_flags);
125
85 spin_lock_irq(&lweventlist_lock); 126 spin_lock_irq(&lweventlist_lock);
86 next = lweventlist; 127 next = lweventlist;
87 lweventlist = NULL; 128 lweventlist = NULL;
@@ -92,6 +133,11 @@ void linkwatch_run_queue(void)
92 133
93 next = dev->link_watch_next; 134 next = dev->link_watch_next;
94 135
136 if (urgent_only && !linkwatch_urgent_event(dev)) {
137 linkwatch_add_event(dev);
138 continue;
139 }
140
95 /* 141 /*
96 * Make sure the above read is complete since it can be 142 * Make sure the above read is complete since it can be
97 * rewritten as soon as we clear the bit below. 143 * rewritten as soon as we clear the bit below.
@@ -116,21 +162,23 @@ void linkwatch_run_queue(void)
116 162
117 dev_put(dev); 163 dev_put(dev);
118 } 164 }
165
166 if (lweventlist)
167 linkwatch_schedule_work(linkwatch_nextevent - jiffies);
119} 168}
120 169
121 170
122static void linkwatch_event(struct work_struct *dummy) 171/* Must be called with the rtnl semaphore held */
172void linkwatch_run_queue(void)
123{ 173{
124 /* Limit the number of linkwatch events to one 174 __linkwatch_run_queue(0);
125 * per second so that a runaway driver does not 175}
126 * cause a storm of messages on the netlink 176
127 * socket
128 */
129 linkwatch_nextevent = jiffies + HZ;
130 clear_bit(LW_RUNNING, &linkwatch_flags);
131 177
178static void linkwatch_event(struct work_struct *dummy)
179{
132 rtnl_lock(); 180 rtnl_lock();
133 linkwatch_run_queue(); 181 __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
134 rtnl_unlock(); 182 rtnl_unlock();
135} 183}
136 184
@@ -138,23 +186,19 @@ static void linkwatch_event(struct work_struct *dummy)
138void linkwatch_fire_event(struct net_device *dev) 186void linkwatch_fire_event(struct net_device *dev)
139{ 187{
140 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) { 188 if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
141 unsigned long flags; 189 unsigned long delay;
142 190
143 dev_hold(dev); 191 dev_hold(dev);
144 192
145 spin_lock_irqsave(&lweventlist_lock, flags); 193 linkwatch_add_event(dev);
146 dev->link_watch_next = lweventlist;
147 lweventlist = dev;
148 spin_unlock_irqrestore(&lweventlist_lock, flags);
149 194
150 if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) { 195 delay = linkwatch_nextevent - jiffies;
151 unsigned long delay = linkwatch_nextevent - jiffies;
152 196
153 /* If we wrap around we'll delay it by at most HZ. */ 197 /* Minimise down-time: drop delay for up event. */
154 if (delay > HZ) 198 if (linkwatch_urgent_event(dev))
155 delay = 0; 199 delay = 0;
156 schedule_delayed_work(&linkwatch_work, delay); 200
157 } 201 linkwatch_schedule_work(delay);
158 } 202 }
159} 203}
160 204