author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-11 12:10:19 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-11 12:10:19 -0400
commit		ee54d2d87a8158d14434c1a3274bd7f713105836 (patch)
tree		cd3e1f6fc0a7fc920e4153c01f35ff7bd92d79da /net/core
parent		bf61f8d357e5d71d74a3ca3be3cce52bf1a2c01a (diff)
parent		da0dd231436ba7e81789e93dd933d7a275e1709d (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (31 commits)
  [NETFILTER]: xt_conntrack: add compat support
  [NETFILTER]: iptable_raw: ignore short packets sent by SOCK_RAW sockets
  [NETFILTER]: iptable_{filter,mangle}: more descriptive "happy cracking" message
  [NETFILTER]: nf_nat: Clears helper private area when NATing
  [NETFILTER]: ctnetlink: clear helper area and handle unchanged helper
  [NETFILTER]: nf_conntrack: Removes unused destroy operation of l3proto
  [NETFILTER]: nf_conntrack: Removes duplicated declarations
  [NETFILTER]: nf_nat: remove unused argument of function allocating binding
  [NETFILTER]: Clean up table initialization
  [NET_SCHED]: Avoid requeue warning on dev_deactivate
  [NET_SCHED]: Reread dev->qdisc for NETDEV_TX_OK
  [NET_SCHED]: Rationalise return value of qdisc_restart
  [NET]: Fix dev->qdisc race for NETDEV_TX_LOCKED case
  [UDP]: Fix AF-specific references in AF-agnostic code.
  [IrDA]: KingSun/DonShine USB IrDA dongle support.
  [IPV6] ROUTE: Assign rt6i_idev for ip6_{prohibit,blk_hole}_entry.
  [IPV6]: Do no rely on skb->dst before it is assigned.
  [IPV6]: Send ICMPv6 error on scope violations.
  [SCTP]: Do not include ABORT chunk header in the notification.
  [SCTP]: Correctly copy addresses in sctp_copy_laddrs
  ...
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/link_watch.c	166
1 file changed, 107 insertions(+), 59 deletions(-)
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index e3c26a9cca..a5e372b9ec 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -19,7 +19,6 @@
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
-#include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
@@ -27,8 +26,7 @@
 
 
 enum lw_bits {
-	LW_RUNNING = 0,
-	LW_SE_USED
+	LW_URGENT = 0,
 };
 
 static unsigned long linkwatch_flags;
@@ -37,17 +35,9 @@ static unsigned long linkwatch_nextevent;
 static void linkwatch_event(struct work_struct *dummy);
 static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
-static LIST_HEAD(lweventlist);
+static struct net_device *lweventlist;
 static DEFINE_SPINLOCK(lweventlist_lock);
 
-struct lw_event {
-	struct list_head list;
-	struct net_device *dev;
-};
-
-/* Avoid kmalloc() for most systems */
-static struct lw_event singleevent;
-
 static unsigned char default_operstate(const struct net_device *dev)
 {
 	if (!netif_carrier_ok(dev))
@@ -87,25 +77,102 @@ static void rfc2863_policy(struct net_device *dev)
 }
 
 
-/* Must be called with the rtnl semaphore held */
-void linkwatch_run_queue(void)
+static int linkwatch_urgent_event(struct net_device *dev)
 {
-	struct list_head head, *n, *next;
+	return netif_running(dev) && netif_carrier_ok(dev) &&
+	       dev->qdisc != dev->qdisc_sleeping;
+}
+
+
+static void linkwatch_add_event(struct net_device *dev)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lweventlist_lock, flags);
+	dev->link_watch_next = lweventlist;
+	lweventlist = dev;
+	spin_unlock_irqrestore(&lweventlist_lock, flags);
+}
+
+
+static void linkwatch_schedule_work(int urgent)
+{
+	unsigned long delay = linkwatch_nextevent - jiffies;
+
+	if (test_bit(LW_URGENT, &linkwatch_flags))
+		return;
+
+	/* Minimise down-time: drop delay for up event. */
+	if (urgent) {
+		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
+			return;
+		delay = 0;
+	}
+
+	/* If we wrap around we'll delay it by at most HZ. */
+	if (delay > HZ)
+		delay = 0;
+
+	/*
+	 * This is true if we've scheduled it immediately or if we don't
+	 * need an immediate execution and it's already pending.
+	 */
+	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
+		return;
+
+	/* Don't bother if there is nothing urgent. */
+	if (!test_bit(LW_URGENT, &linkwatch_flags))
+		return;
+
+	/* It's already running which is good enough. */
+	if (!cancel_delayed_work(&linkwatch_work))
+		return;
+
+	/* Otherwise we reschedule it again for immediate execution. */
+	schedule_delayed_work(&linkwatch_work, 0);
+}
+
+
+static void __linkwatch_run_queue(int urgent_only)
+{
+	struct net_device *next;
+
+	/*
+	 * Limit the number of linkwatch events to one
+	 * per second so that a runaway driver does not
+	 * cause a storm of messages on the netlink
+	 * socket. This limit does not apply to up events
+	 * while the device qdisc is down.
+	 */
+	if (!urgent_only)
+		linkwatch_nextevent = jiffies + HZ;
+	/* Limit wrap-around effect on delay. */
+	else if (time_after(linkwatch_nextevent, jiffies + HZ))
+		linkwatch_nextevent = jiffies;
+
+	clear_bit(LW_URGENT, &linkwatch_flags);
 
 	spin_lock_irq(&lweventlist_lock);
-	list_replace_init(&lweventlist, &head);
+	next = lweventlist;
+	lweventlist = NULL;
 	spin_unlock_irq(&lweventlist_lock);
 
-	list_for_each_safe(n, next, &head) {
-		struct lw_event *event = list_entry(n, struct lw_event, list);
-		struct net_device *dev = event->dev;
+	while (next) {
+		struct net_device *dev = next;
 
-		if (event == &singleevent) {
-			clear_bit(LW_SE_USED, &linkwatch_flags);
-		} else {
-			kfree(event);
+		next = dev->link_watch_next;
+
+		if (urgent_only && !linkwatch_urgent_event(dev)) {
+			linkwatch_add_event(dev);
+			continue;
 		}
 
+		/*
+		 * Make sure the above read is complete since it can be
+		 * rewritten as soon as we clear the bit below.
+		 */
+		smp_mb__before_clear_bit();
+
 		/* We are about to handle this device,
 		 * so new events can be accepted
 		 */
@@ -124,58 +191,39 @@ void linkwatch_run_queue(void)
 
 		dev_put(dev);
 	}
+
+	if (lweventlist)
+		linkwatch_schedule_work(0);
 }
 
 
-static void linkwatch_event(struct work_struct *dummy)
+/* Must be called with the rtnl semaphore held */
+void linkwatch_run_queue(void)
 {
-	/* Limit the number of linkwatch events to one
-	 * per second so that a runaway driver does not
-	 * cause a storm of messages on the netlink
-	 * socket
-	 */
-	linkwatch_nextevent = jiffies + HZ;
-	clear_bit(LW_RUNNING, &linkwatch_flags);
+	__linkwatch_run_queue(0);
+}
 
+
+static void linkwatch_event(struct work_struct *dummy)
+{
 	rtnl_lock();
-	linkwatch_run_queue();
+	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
 	rtnl_unlock();
 }
 
 
 void linkwatch_fire_event(struct net_device *dev)
 {
-	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
-		unsigned long flags;
-		struct lw_event *event;
-
-		if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
-			event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);
-
-			if (unlikely(event == NULL)) {
-				clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
-				return;
-			}
-		} else {
-			event = &singleevent;
-		}
+	int urgent = linkwatch_urgent_event(dev);
 
+	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
 		dev_hold(dev);
-		event->dev = dev;
-
-		spin_lock_irqsave(&lweventlist_lock, flags);
-		list_add_tail(&event->list, &lweventlist);
-		spin_unlock_irqrestore(&lweventlist_lock, flags);
 
-		if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
-			unsigned long delay = linkwatch_nextevent - jiffies;
+		linkwatch_add_event(dev);
+	} else if (!urgent)
+		return;
 
-			/* If we wrap around we'll delay it by at most HZ. */
-			if (delay > HZ)
-				delay = 0;
-			schedule_delayed_work(&linkwatch_work, delay);
-		}
-	}
+	linkwatch_schedule_work(urgent);
 }
 
 EXPORT_SYMBOL(linkwatch_fire_event);
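
The core of this diff is a recurring kernel pattern: instead of kmalloc()ing a small
tracking struct per event (the old struct lw_event, with its singleevent fallback),
the link pointer is embedded in struct net_device itself (link_watch_next), so
queueing an event needs no allocation and cannot fail under memory pressure. The
consumer then detaches the whole pending list in O(1) under the lock and walks it
with the lock dropped. Below is a minimal standalone C sketch of that pattern, not
the kernel code: struct device, event_add and event_drain are hypothetical names,
a pthread mutex stands in for lweventlist_lock, and a plain int stands in for the
atomic test_and_set_bit() the kernel performs on __LINK_STATE_LINKWATCH_PENDING.

#include <stdio.h>
#include <pthread.h>

struct device {
	const char *name;
	struct device *next;	/* intrusive link, like dev->link_watch_next */
	int pending;		/* like __LINK_STATE_LINKWATCH_PENDING */
};

static struct device *pending_list;	/* like lweventlist */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Queue a device: no allocation, so this cannot fail. */
static void event_add(struct device *dev)
{
	if (dev->pending)	/* kernel uses atomic test_and_set_bit() here */
		return;
	dev->pending = 1;

	pthread_mutex_lock(&list_lock);
	dev->next = pending_list;	/* push onto the singly-linked list */
	pending_list = dev;
	pthread_mutex_unlock(&list_lock);
}

/* Drain: detach the whole list in O(1) under the lock, then walk it unlocked. */
static void event_drain(void)
{
	struct device *next;

	pthread_mutex_lock(&list_lock);
	next = pending_list;
	pending_list = NULL;
	pthread_mutex_unlock(&list_lock);

	while (next) {
		struct device *dev = next;

		next = dev->next;	/* read the link before clearing the flag */
		dev->pending = 0;	/* the device may now be queued again */
		printf("link event: %s\n", dev->name);
	}
}

int main(void)
{
	struct device eth0 = { .name = "eth0" }, eth1 = { .name = "eth1" };

	event_add(&eth0);
	event_add(&eth1);
	event_add(&eth0);	/* ignored: still pending */
	event_drain();		/* prints eth1, then eth0 (LIFO push order) */
	return 0;
}

Two details of the real patch that the sketch deliberately omits: the kernel issues
smp_mb__before_clear_bit() so the link-pointer read cannot be reordered past the
flag clear, and __linkwatch_run_queue() in urgent_only mode re-queues non-urgent
devices via linkwatch_add_event() and reschedules the delayed work, which is how
carrier-up events bypass the one-event-per-second rate limit.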