author    Linus Torvalds <torvalds@g5.osdl.org>  2006-05-11 18:35:54 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-05-11 18:35:54 -0400
commit    6572b2064a54f1ed29fcbf6d16dfc5de71dfe495 (patch)
tree      5ab97c864956028045da27ca489d6447b2350c48
parent    f7a014af2d76a96e5af51b64f954328b700fa62f (diff)
parent    210525d65d33d17eb6bea6c965ce442d60d9aa8d (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NET_SCHED]: HFSC: fix thinko in hfsc_adjust_levels()
  [IPV6]: skb leakage in inet6_csk_xmit
  [BRIDGE]: Do sysfs registration inside rtnl.
  [NET]: Do sysfs registration as part of register_netdevice.
  [TG3]: Fix possible NULL deref in tg3_run_loopback().
  [NET] linkwatch: Handle jiffies wrap-around
  [IRDA]: Switching to a workqueue for the SIR work
  [IRDA]: smsc-ircc: Minimal hotplug support.
  [IRDA]: Removing unused EXPORT_SYMBOLs
  [IRDA]: New maintainer.
  [NET]: Make netdev_chain a raw notifier.
  [IPV4]: ip_options_fragment() has no effect on fragmentation
  [NET]: Add missing operstates documentation.
-rw-r--r--  Documentation/networking/operstates.txt  | 161
-rw-r--r--  MAINTAINERS                              |   5
-rw-r--r--  drivers/net/irda/Makefile                |   2
-rw-r--r--  drivers/net/irda/sir-dev.h               |  13
-rw-r--r--  drivers/net/irda/sir_dev.c               | 315
-rw-r--r--  drivers/net/irda/sir_kthread.c           | 508
-rw-r--r--  drivers/net/irda/smsc-ircc2.c            |  14
-rw-r--r--  drivers/net/tg3.c                        |   3
-rw-r--r--  include/linux/netdevice.h                |   3
-rw-r--r--  net/bridge/br_if.c                       |  21
-rw-r--r--  net/core/dev.c                           |  99
-rw-r--r--  net/core/link_watch.c                    |  10
-rw-r--r--  net/ipv4/ip_options.c                    |   2
-rw-r--r--  net/ipv6/inet6_connection_sock.c         |   2
-rw-r--r--  net/irda/irias_object.c                  |   3
-rw-r--r--  net/sched/sch_hfsc.c                     |   6
16 files changed, 558 insertions, 609 deletions
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt
new file mode 100644
index 000000000000..4a21d9bb836b
--- /dev/null
+++ b/Documentation/networking/operstates.txt
@@ -0,0 +1,161 @@

1. Introduction

Linux distinguishes between the administrative and operational state of
an interface. Administrative state is the result of "ip link set dev
<dev> up or down" and reflects whether the administrator wants to use
the device for traffic.

However, an interface is not usable just because the admin enabled it -
ethernet needs to be plugged into the switch and, depending on a site's
networking policy and configuration, an 802.1X authentication may have
to be performed before user data can be transferred. Operational state
shows the ability of an interface to transmit this user data.

Because of 802.1X, userspace must be granted the possibility to
influence operational state. To accommodate this, operational state is
split into two parts: two flags that can be set by the driver only, and
an RFC2863-compatible state that is derived from these flags, a policy,
and changeable from userspace under certain rules.


2. Querying from userspace

Both admin and operational state can be queried via the netlink
operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
to be notified of updates, which is important for setting from
userspace.

These values contain the interface state:

ifinfomsg::if_flags & IFF_UP:
 Interface is admin up.
ifinfomsg::if_flags & IFF_RUNNING:
 Interface is in RFC2863 operational state UP or UNKNOWN. This is for
 backward compatibility; routing daemons and DHCP clients can use this
 flag to determine whether they should use the interface.
ifinfomsg::if_flags & IFF_LOWER_UP:
 Driver has signaled netif_carrier_on().
ifinfomsg::if_flags & IFF_DORMANT:
 Driver has signaled netif_dormant_on().

These interface flags can also be queried without netlink using the
SIOCGIFFLAGS ioctl.
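
A minimal userspace sketch of that ioctl path (assuming Linux headers;
note that IFF_LOWER_UP and IFF_DORMANT live above bit 15, so they may
not survive the 16-bit ifr_flags field - netlink is the reliable way to
read those two):

/* Sketch: read admin/running flags via SIOCGIFFLAGS.
 * Error handling is minimal on purpose.
 */
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct ifreq ifr;
	int fd;

	if (argc < 2)
		return 1;
	fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket will do */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	if (fd < 0 || ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		return 1;
	printf("%s: admin %s, running %s\n", argv[1],
	       (ifr.ifr_flags & IFF_UP) ? "up" : "down",
	       (ifr.ifr_flags & IFF_RUNNING) ? "yes" : "no");
	close(fd);
	return 0;
}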

TLV IFLA_OPERSTATE

contains the RFC2863 state of the interface in numeric representation:

IF_OPER_UNKNOWN (0):
 Interface is in unknown state; neither driver nor userspace has set
 the operational state. The interface must be considered for user data,
 as setting the operational state has not been implemented in every
 driver.
IF_OPER_NOTPRESENT (1):
 Unused in the current kernel (notpresent interfaces normally
 disappear); just a numerical placeholder.
IF_OPER_DOWN (2):
 Interface is unable to transfer data on L1, e.g. ethernet is not
 plugged in or the interface is ADMIN down.
IF_OPER_LOWERLAYERDOWN (3):
 Interfaces stacked on an interface that is IF_OPER_DOWN show this
 state (e.g. VLAN).
IF_OPER_TESTING (4):
 Unused in the current kernel.
IF_OPER_DORMANT (5):
 Interface is L1 up, but waiting for an external event, e.g. for a
 protocol to establish (802.1X).
IF_OPER_UP (6):
 Interface is operationally up and can be used.

This TLV can also be queried via sysfs.

TLV IFLA_LINKMODE

contains the link policy. This is needed for the userspace interaction
described below.

This TLV can also be queried via sysfs.

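A hedged sketch of the netlink query side (assumptions: single recv()
loop, no multipart or NLMSG_ERROR handling): subscribe to RTMGRP_LINK
and print the IFLA_OPERSTATE TLV from RTM_NEWLINK notifications.

/* Sketch: watch operstate changes via RTMGRP_LINK. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	char buf[4096];
	struct sockaddr_nl snl;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_groups = RTMGRP_LINK;	/* link change notifications */
	if (fd < 0 || bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0)
		return 1;

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nh;

		if (len <= 0)
			break;
		for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
		     nh = NLMSG_NEXT(nh, len)) {
			struct ifinfomsg *ifi;
			struct rtattr *rta;
			int alen;

			if (nh->nlmsg_type != RTM_NEWLINK)
				continue;
			ifi = NLMSG_DATA(nh);
			alen = nh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
			for (rta = IFLA_RTA(ifi); RTA_OK(rta, alen);
			     rta = RTA_NEXT(rta, alen)) {
				if (rta->rta_type == IFLA_OPERSTATE)
					printf("ifindex %d: operstate %u\n",
					       ifi->ifi_index,
					       *(unsigned char *)RTA_DATA(rta));
			}
		}
	}
	close(fd);
	return 0;
}
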

3. Kernel driver API

Kernel drivers have access to two flags that map to IFF_LOWER_UP and
IFF_DORMANT. These flags can be set from everywhere, even from
interrupts. It is guaranteed that only the driver has write access;
however, if different layers of the driver manipulate the same flag,
the driver has to provide the needed synchronisation.

__LINK_STATE_NOCARRIER, maps to !IFF_LOWER_UP:

The driver uses netif_carrier_on() to clear and netif_carrier_off() to
set this flag. On netif_carrier_off(), the scheduler stops sending
packets. The name 'carrier' and the inversion are historical; think of
it as the lower layer.

netif_carrier_ok() can be used to query that bit.

__LINK_STATE_DORMANT, maps to IFF_DORMANT:

Set by the driver to express that the device cannot yet be used because
some driver-controlled protocol establishment has to complete. The
corresponding functions are netif_dormant_on() to set the flag,
netif_dormant_off() to clear it and netif_dormant() to query it.

On device allocation, the networking core sets the flags equivalent to
netif_carrier_ok() and !netif_dormant().

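A minimal driver-side sketch of how these calls combine (hypothetical
driver; foo_link_ok() and foo_needs_auth() are assumed helpers, not a
real API):

/* Sketch: drive both flags from a link-change event. */
#include <linux/netdevice.h>

static void foo_handle_link_change(struct net_device *ndev)
{
	if (!foo_link_ok(ndev)) {		/* L1 went away */
		netif_carrier_off(ndev);	/* clears IFF_LOWER_UP */
		return;
	}

	netif_carrier_on(ndev);			/* sets IFF_LOWER_UP */

	if (foo_needs_auth(ndev))		/* e.g. 802.1X still pending */
		netif_dormant_on(ndev);		/* sets IFF_DORMANT */
	else
		netif_dormant_off(ndev);	/* ready for user data */
}
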

Whenever the driver CHANGES one of these flags, a workqueue event is
scheduled to translate the flag combination to IFLA_OPERSTATE as
follows:

!netif_carrier_ok():
 IF_OPER_LOWERLAYERDOWN if the interface is stacked, IF_OPER_DOWN
 otherwise. The kernel can recognise stacked interfaces because their
 ifindex != iflink.

netif_carrier_ok() && netif_dormant():
 IF_OPER_DORMANT

netif_carrier_ok() && !netif_dormant():
 IF_OPER_UP if userspace interaction is disabled. Otherwise
 IF_OPER_DORMANT with the possibility for userspace to initiate the
 IF_OPER_UP transition afterwards.

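Condensed as an illustrative helper (editorial sketch only - the real
translation lives in the networking core, and the IF_OPER_* constants
come from <linux/if.h>):

static unsigned char operstate_from_flags(int carrier_ok, int dormant,
					  int stacked, int userspace_mode)
{
	if (!carrier_ok)
		return stacked ? IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN;
	if (dormant)
		return IF_OPER_DORMANT;
	/* userspace_mode: IFLA_LINKMODE 1 holds us in DORMANT for 802.1X */
	return userspace_mode ? IF_OPER_DORMANT : IF_OPER_UP;
}
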

4. Setting from userspace

Applications have to use the netlink interface to influence the RFC2863
operational state of an interface. Setting IFLA_LINKMODE to 1 via
RTM_SETLINK instructs the kernel that an interface should go to
IF_OPER_DORMANT instead of IF_OPER_UP when the combination
netif_carrier_ok() && !netif_dormant() is set by the driver. Afterwards,
the userspace application can set IFLA_OPERSTATE to IF_OPER_DORMANT or
IF_OPER_UP as long as the driver does not set netif_carrier_off() or
netif_dormant_on(). Changes made by userspace are multicasted on the
netlink group RTMGRP_LINK.

So basically, an 802.1X supplicant interacts with the kernel like this
(a sketch of the RTM_SETLINK step follows the list):

- subscribe to RTMGRP_LINK
- set IFLA_LINKMODE to 1 via RTM_SETLINK
- query RTM_GETLINK once to get the initial state
- if the initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait
  until a netlink multicast signals this state
- do 802.1X, eventually abort if the flags go down again
- send RTM_SETLINK to set operstate to IF_OPER_UP if authentication
  succeeds, IF_OPER_DORMANT otherwise
- see how operstate and IFF_RUNNING are echoed via netlink multicast
- set the interface back to IF_OPER_DORMANT if 802.1X reauthentication
  fails
- restart if the kernel changes the IFF_LOWER_UP or IFF_DORMANT flag

If the supplicant goes down, set IFLA_LINKMODE back to 0 and
IFLA_OPERSTATE to a sane value.
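
A hedged sketch of that RTM_SETLINK step (set_operstate() is an
illustrative name; no NLM_F_ACK processing is shown):

/* Sketch: set IFLA_OPERSTATE on one interface via RTM_SETLINK. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int set_operstate(int ifindex, unsigned char state /* IF_OPER_* */)
{
	struct {
		struct nlmsghdr  nh;
		struct ifinfomsg ifi;
		char             attr[RTA_SPACE(sizeof(unsigned char))];
	} req;
	struct rtattr *rta = (struct rtattr *)req.attr;
	int ret = -1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.ifi)) +
			     RTA_SPACE(sizeof(unsigned char));
	req.nh.nlmsg_type  = RTM_SETLINK;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	req.ifi.ifi_family = AF_UNSPEC;
	req.ifi.ifi_index  = ifindex;	/* ifi_flags/ifi_change left 0 */

	rta->rta_type = IFLA_OPERSTATE;	/* IFLA_LINKMODE works the same way */
	rta->rta_len  = RTA_LENGTH(sizeof(unsigned char));
	*(unsigned char *)RTA_DATA(rta) = state;

	if (send(fd, &req, req.nh.nlmsg_len, 0) >= 0)
		ret = 0;
	close(fd);
	return ret;
}

A supplicant would call this with IF_OPER_UP (from <linux/if.h>) after a
successful authentication, and with IF_OPER_DORMANT when it fails.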

A routing daemon or DHCP client just needs to check for IFF_RUNNING, or
wait for the operstate to go IF_OPER_UP/IF_OPER_UNKNOWN, before
considering the interface / querying a DHCP address.


For technical questions and/or comments, please e-mail Stefan Rompf
(stefan at loplof.de).
diff --git a/MAINTAINERS b/MAINTAINERS
index d6a8e5b434ec..5e3355871416 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1480,10 +1480,11 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 
 IRDA SUBSYSTEM
-P:	Jean Tourrilhes
+P:	Samuel Ortiz
+M:	samuel@sortiz.org
 L:	irda-users@lists.sourceforge.net (subscribers-only)
 W:	http://irda.sourceforge.net/
-S:	Odd Fixes
+S:	Maintained
 
 ISAPNP
 P:	Jaroslav Kysela
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 27ab75f20799..c1ce2398efea 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -46,4 +46,4 @@ obj-$(CONFIG_MA600_DONGLE)	+= ma600-sir.o
 obj-$(CONFIG_TOIM3232_DONGLE)	+= toim3232-sir.o
 
 # The SIR helper module
-sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o
+sir-dev-objs := sir_dev.o sir_dongle.o
diff --git a/drivers/net/irda/sir-dev.h b/drivers/net/irda/sir-dev.h
index f69fb4cec76f..9fa294a546d6 100644
--- a/drivers/net/irda/sir-dev.h
+++ b/drivers/net/irda/sir-dev.h
@@ -15,23 +15,14 @@
 #define IRDA_SIR_H
 
 #include <linux/netdevice.h>
+#include <linux/workqueue.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/irda_device.h>	// iobuff_t
 
-/* FIXME: unify irda_request with sir_fsm! */
-
-struct irda_request {
-	struct list_head lh_request;
-	unsigned long pending;
-	void (*func)(void *);
-	void *data;
-	struct timer_list timer;
-};
-
 struct sir_fsm {
 	struct semaphore sem;
-	struct irda_request rq;
+	struct work_struct work;
 	unsigned state, substate;
 	int param;
 	int result;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index ea7c9464d46a..3b5854d10c17 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -23,6 +23,298 @@

#include "sir-dev.h"

26
27static struct workqueue_struct *irda_sir_wq;
28
29/* STATE MACHINE */
30
31/* substate handler of the config-fsm to handle the cases where we want
32 * to wait for transmit completion before changing the port configuration
33 */
34
35static int sirdev_tx_complete_fsm(struct sir_dev *dev)
36{
37 struct sir_fsm *fsm = &dev->fsm;
38 unsigned next_state, delay;
39 unsigned bytes_left;
40
41 do {
42 next_state = fsm->substate; /* default: stay in current substate */
43 delay = 0;
44
45 switch(fsm->substate) {
46
47 case SIRDEV_STATE_WAIT_XMIT:
48 if (dev->drv->chars_in_buffer)
49 bytes_left = dev->drv->chars_in_buffer(dev);
50 else
51 bytes_left = 0;
52 if (!bytes_left) {
53 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
54 break;
55 }
56
57 if (dev->speed > 115200)
58 delay = (bytes_left*8*10000) / (dev->speed/100);
59 else if (dev->speed > 0)
60 delay = (bytes_left*10*10000) / (dev->speed/100);
61 else
62 delay = 0;
63 /* expected delay (usec) until remaining bytes are sent */
64 if (delay < 100) {
65 udelay(delay);
66 delay = 0;
67 break;
68 }
69 /* sleep some longer delay (msec) */
70 delay = (delay+999) / 1000;
71 break;
72
73 case SIRDEV_STATE_WAIT_UNTIL_SENT:
74 /* block until underlaying hardware buffer are empty */
75 if (dev->drv->wait_until_sent)
76 dev->drv->wait_until_sent(dev);
77 next_state = SIRDEV_STATE_TX_DONE;
78 break;
79
80 case SIRDEV_STATE_TX_DONE:
81 return 0;
82
83 default:
84 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
85 return -EINVAL;
86 }
87 fsm->substate = next_state;
88 } while (delay == 0);
89 return delay;
90}
91
92/*
93 * Function sirdev_config_fsm
94 *
95 * State machine to handle the configuration of the device (and attached dongle, if any).
96 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
97 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
98 * long. Instead, for longer delays we start a timer to reschedule us later.
99 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
100 * Both must be unlocked/restarted on completion - but only on final exit.
101 */
102
103static void sirdev_config_fsm(void *data)
104{
105 struct sir_dev *dev = data;
106 struct sir_fsm *fsm = &dev->fsm;
107 int next_state;
108 int ret = -1;
109 unsigned delay;
110
111 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
112
113 do {
114 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
115 __FUNCTION__, fsm->state, fsm->substate);
116
117 next_state = fsm->state;
118 delay = 0;
119
120 switch(fsm->state) {
121
122 case SIRDEV_STATE_DONGLE_OPEN:
123 if (dev->dongle_drv != NULL) {
124 ret = sirdev_put_dongle(dev);
125 if (ret) {
126 fsm->result = -EINVAL;
127 next_state = SIRDEV_STATE_ERROR;
128 break;
129 }
130 }
131
132 /* Initialize dongle */
133 ret = sirdev_get_dongle(dev, fsm->param);
134 if (ret) {
135 fsm->result = ret;
136 next_state = SIRDEV_STATE_ERROR;
137 break;
138 }
139
140 /* Dongles are powered through the modem control lines which
141 * were just set during open. Before resetting, let's wait for
142 * the power to stabilize. This is what some dongle drivers did
143 * in open before, while others didn't - should be safe anyway.
144 */
145
146 delay = 50;
147 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
148 next_state = SIRDEV_STATE_DONGLE_RESET;
149
150 fsm->param = 9600;
151
152 break;
153
154 case SIRDEV_STATE_DONGLE_CLOSE:
155 /* shouldn't we just treat this as success=? */
156 if (dev->dongle_drv == NULL) {
157 fsm->result = -EINVAL;
158 next_state = SIRDEV_STATE_ERROR;
159 break;
160 }
161
162 ret = sirdev_put_dongle(dev);
163 if (ret) {
164 fsm->result = ret;
165 next_state = SIRDEV_STATE_ERROR;
166 break;
167 }
168 next_state = SIRDEV_STATE_DONE;
169 break;
170
171 case SIRDEV_STATE_SET_DTR_RTS:
172 ret = sirdev_set_dtr_rts(dev,
173 (fsm->param&0x02) ? TRUE : FALSE,
174 (fsm->param&0x01) ? TRUE : FALSE);
175 next_state = SIRDEV_STATE_DONE;
176 break;
177
178 case SIRDEV_STATE_SET_SPEED:
179 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
180 next_state = SIRDEV_STATE_DONGLE_CHECK;
181 break;
182
183 case SIRDEV_STATE_DONGLE_CHECK:
184 ret = sirdev_tx_complete_fsm(dev);
185 if (ret < 0) {
186 fsm->result = ret;
187 next_state = SIRDEV_STATE_ERROR;
188 break;
189 }
190 if ((delay=ret) != 0)
191 break;
192
193 if (dev->dongle_drv) {
194 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
195 next_state = SIRDEV_STATE_DONGLE_RESET;
196 }
197 else {
198 dev->speed = fsm->param;
199 next_state = SIRDEV_STATE_PORT_SPEED;
200 }
201 break;
202
203 case SIRDEV_STATE_DONGLE_RESET:
204 if (dev->dongle_drv->reset) {
205 ret = dev->dongle_drv->reset(dev);
206 if (ret < 0) {
207 fsm->result = ret;
208 next_state = SIRDEV_STATE_ERROR;
209 break;
210 }
211 }
212 else
213 ret = 0;
214 if ((delay=ret) == 0) {
215 /* set serial port according to dongle default speed */
216 if (dev->drv->set_speed)
217 dev->drv->set_speed(dev, dev->speed);
218 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
219 next_state = SIRDEV_STATE_DONGLE_SPEED;
220 }
221 break;
222
223 case SIRDEV_STATE_DONGLE_SPEED:
224 if (dev->dongle_drv->reset) {
225 ret = dev->dongle_drv->set_speed(dev, fsm->param);
226 if (ret < 0) {
227 fsm->result = ret;
228 next_state = SIRDEV_STATE_ERROR;
229 break;
230 }
231 }
232 else
233 ret = 0;
234 if ((delay=ret) == 0)
235 next_state = SIRDEV_STATE_PORT_SPEED;
236 break;
237
238 case SIRDEV_STATE_PORT_SPEED:
239 /* Finally we are ready to change the serial port speed */
240 if (dev->drv->set_speed)
241 dev->drv->set_speed(dev, dev->speed);
242 dev->new_speed = 0;
243 next_state = SIRDEV_STATE_DONE;
244 break;
245
246 case SIRDEV_STATE_DONE:
247 /* Signal network layer so it can send more frames */
248 netif_wake_queue(dev->netdev);
249 next_state = SIRDEV_STATE_COMPLETE;
250 break;
251
252 default:
253 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
254 fsm->result = -EINVAL;
255 /* fall thru */
256
257 case SIRDEV_STATE_ERROR:
258 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
259
260#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
261 netif_stop_queue(dev->netdev);
262#else
263 netif_wake_queue(dev->netdev);
264#endif
265 /* fall thru */
266
267 case SIRDEV_STATE_COMPLETE:
268 /* config change finished, so we are not busy any longer */
269 sirdev_enable_rx(dev);
270 up(&fsm->sem);
271 return;
272 }
273 fsm->state = next_state;
274 } while(!delay);
275
276 queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
277}
278
279/* schedule some device configuration task for execution by kIrDAd
280 * on behalf of the above state machine.
281 * can be called from process or interrupt/tasklet context.
282 */
283
284int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
285{
286 struct sir_fsm *fsm = &dev->fsm;
287
288 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
289
290 if (down_trylock(&fsm->sem)) {
291 if (in_interrupt() || in_atomic() || irqs_disabled()) {
292 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
293 return -EWOULDBLOCK;
294 } else
295 down(&fsm->sem);
296 }
297
298 if (fsm->state == SIRDEV_STATE_DEAD) {
299 /* race with sirdev_close should never happen */
300 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
301 up(&fsm->sem);
302 return -ESTALE; /* or better EPIPE? */
303 }
304
305 netif_stop_queue(dev->netdev);
306 atomic_set(&dev->enable_rx, 0);
307
308 fsm->state = initial_state;
309 fsm->param = param;
310 fsm->result = 0;
311
312 INIT_WORK(&fsm->work, sirdev_config_fsm, dev);
313 queue_work(irda_sir_wq, &fsm->work);
314 return 0;
315}
316
317
/***************************************************************************/

void sirdev_enable_rx(struct sir_dev *dev)
@@ -619,10 +911,6 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n
 	spin_lock_init(&dev->tx_lock);
 	init_MUTEX(&dev->fsm.sem);
 
-	INIT_LIST_HEAD(&dev->fsm.rq.lh_request);
-	dev->fsm.rq.pending = 0;
-	init_timer(&dev->fsm.rq.timer);
-
 	dev->drv = drv;
 	dev->netdev = ndev;
 
@@ -682,3 +970,22 @@ int sirdev_put_instance(struct sir_dev *dev)
 }
 EXPORT_SYMBOL(sirdev_put_instance);
 
+static int __init sir_wq_init(void)
+{
+	irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
+	if (!irda_sir_wq)
+		return -ENOMEM;
+	return 0;
+}
+
+static void __exit sir_wq_exit(void)
+{
+	destroy_workqueue(irda_sir_wq);
+}
+
+module_init(sir_wq_init);
+module_exit(sir_wq_exit);
+
+MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
+MODULE_DESCRIPTION("IrDA SIR core");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sir_kthread.c b/drivers/net/irda/sir_kthread.c
deleted file mode 100644
index e3904d6bfecd..000000000000
--- a/drivers/net/irda/sir_kthread.c
+++ /dev/null
@@ -1,508 +0,0 @@
1/*********************************************************************
2 *
3 * sir_kthread.c: dedicated thread to process scheduled
4 * sir device setup requests
5 *
6 * Copyright (c) 2002 Martin Diehl
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 ********************************************************************/
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/version.h>
18#include <linux/init.h>
19#include <linux/smp_lock.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22
23#include <net/irda/irda.h>
24
25#include "sir-dev.h"
26
27/**************************************************************************
28 *
29 * kIrDAd kernel thread and config state machine
30 *
31 */
32
33struct irda_request_queue {
34 struct list_head request_list;
35 spinlock_t lock;
36 task_t *thread;
37 struct completion exit;
38 wait_queue_head_t kick, done;
39 atomic_t num_pending;
40};
41
42static struct irda_request_queue irda_rq_queue;
43
44static int irda_queue_request(struct irda_request *rq)
45{
46 int ret = 0;
47 unsigned long flags;
48
49 if (!test_and_set_bit(0, &rq->pending)) {
50 spin_lock_irqsave(&irda_rq_queue.lock, flags);
51 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
52 wake_up(&irda_rq_queue.kick);
53 atomic_inc(&irda_rq_queue.num_pending);
54 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
55 ret = 1;
56 }
57 return ret;
58}
59
60static void irda_request_timer(unsigned long data)
61{
62 struct irda_request *rq = (struct irda_request *)data;
63 unsigned long flags;
64
65 spin_lock_irqsave(&irda_rq_queue.lock, flags);
66 list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
67 wake_up(&irda_rq_queue.kick);
68 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
69}
70
71static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
72{
73 int ret = 0;
74 struct timer_list *timer = &rq->timer;
75
76 if (!test_and_set_bit(0, &rq->pending)) {
77 timer->expires = jiffies + delay;
78 timer->function = irda_request_timer;
79 timer->data = (unsigned long)rq;
80 atomic_inc(&irda_rq_queue.num_pending);
81 add_timer(timer);
82 ret = 1;
83 }
84 return ret;
85}
86
87static void run_irda_queue(void)
88{
89 unsigned long flags;
90 struct list_head *entry, *tmp;
91 struct irda_request *rq;
92
93 spin_lock_irqsave(&irda_rq_queue.lock, flags);
94 list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
95 rq = list_entry(entry, struct irda_request, lh_request);
96 list_del_init(entry);
97 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
98
99 clear_bit(0, &rq->pending);
100 rq->func(rq->data);
101
102 if (atomic_dec_and_test(&irda_rq_queue.num_pending))
103 wake_up(&irda_rq_queue.done);
104
105 spin_lock_irqsave(&irda_rq_queue.lock, flags);
106 }
107 spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
108}
109
110static int irda_thread(void *startup)
111{
112 DECLARE_WAITQUEUE(wait, current);
113
114 daemonize("kIrDAd");
115
116 irda_rq_queue.thread = current;
117
118 complete((struct completion *)startup);
119
120 while (irda_rq_queue.thread != NULL) {
121
122 /* We use TASK_INTERRUPTIBLE, rather than
123 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
124 * change ; he told me that it is safe, because "signal
125 * blocking is now handled in daemonize()", he added
126 * that the problem is that "uninterruptible sleep
127 * contributes to load average", making user worry.
128 * Jean II */
129 set_task_state(current, TASK_INTERRUPTIBLE);
130 add_wait_queue(&irda_rq_queue.kick, &wait);
131 if (list_empty(&irda_rq_queue.request_list))
132 schedule();
133 else
134 __set_task_state(current, TASK_RUNNING);
135 remove_wait_queue(&irda_rq_queue.kick, &wait);
136
137 /* make swsusp happy with our thread */
138 try_to_freeze();
139
140 run_irda_queue();
141 }
142
143#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
144 reparent_to_init();
145#endif
146 complete_and_exit(&irda_rq_queue.exit, 0);
147 /* never reached */
148 return 0;
149}
150
151
152static void flush_irda_queue(void)
153{
154 if (atomic_read(&irda_rq_queue.num_pending)) {
155
156 DECLARE_WAITQUEUE(wait, current);
157
158 if (!list_empty(&irda_rq_queue.request_list))
159 run_irda_queue();
160
161 set_task_state(current, TASK_UNINTERRUPTIBLE);
162 add_wait_queue(&irda_rq_queue.done, &wait);
163 if (atomic_read(&irda_rq_queue.num_pending))
164 schedule();
165 else
166 __set_task_state(current, TASK_RUNNING);
167 remove_wait_queue(&irda_rq_queue.done, &wait);
168 }
169}
170
171/* substate handler of the config-fsm to handle the cases where we want
172 * to wait for transmit completion before changing the port configuration
173 */
174
175static int irda_tx_complete_fsm(struct sir_dev *dev)
176{
177 struct sir_fsm *fsm = &dev->fsm;
178 unsigned next_state, delay;
179 unsigned bytes_left;
180
181 do {
182 next_state = fsm->substate; /* default: stay in current substate */
183 delay = 0;
184
185 switch(fsm->substate) {
186
187 case SIRDEV_STATE_WAIT_XMIT:
188 if (dev->drv->chars_in_buffer)
189 bytes_left = dev->drv->chars_in_buffer(dev);
190 else
191 bytes_left = 0;
192 if (!bytes_left) {
193 next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
194 break;
195 }
196
197 if (dev->speed > 115200)
198 delay = (bytes_left*8*10000) / (dev->speed/100);
199 else if (dev->speed > 0)
200 delay = (bytes_left*10*10000) / (dev->speed/100);
201 else
202 delay = 0;
203 /* expected delay (usec) until remaining bytes are sent */
204 if (delay < 100) {
205 udelay(delay);
206 delay = 0;
207 break;
208 }
209 /* sleep some longer delay (msec) */
210 delay = (delay+999) / 1000;
211 break;
212
213 case SIRDEV_STATE_WAIT_UNTIL_SENT:
214 /* block until underlaying hardware buffer are empty */
215 if (dev->drv->wait_until_sent)
216 dev->drv->wait_until_sent(dev);
217 next_state = SIRDEV_STATE_TX_DONE;
218 break;
219
220 case SIRDEV_STATE_TX_DONE:
221 return 0;
222
223 default:
224 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
225 return -EINVAL;
226 }
227 fsm->substate = next_state;
228 } while (delay == 0);
229 return delay;
230}
231
232/*
233 * Function irda_config_fsm
234 *
235 * State machine to handle the configuration of the device (and attached dongle, if any).
236 * This handler is scheduled for execution in kIrDAd context, so we can sleep.
237 * however, kIrDAd is shared by all sir_dev devices so we better don't sleep there too
238 * long. Instead, for longer delays we start a timer to reschedule us later.
239 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
240 * Both must be unlocked/restarted on completion - but only on final exit.
241 */
242
243static void irda_config_fsm(void *data)
244{
245 struct sir_dev *dev = data;
246 struct sir_fsm *fsm = &dev->fsm;
247 int next_state;
248 int ret = -1;
249 unsigned delay;
250
251 IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);
252
253 do {
254 IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
255 __FUNCTION__, fsm->state, fsm->substate);
256
257 next_state = fsm->state;
258 delay = 0;
259
260 switch(fsm->state) {
261
262 case SIRDEV_STATE_DONGLE_OPEN:
263 if (dev->dongle_drv != NULL) {
264 ret = sirdev_put_dongle(dev);
265 if (ret) {
266 fsm->result = -EINVAL;
267 next_state = SIRDEV_STATE_ERROR;
268 break;
269 }
270 }
271
272 /* Initialize dongle */
273 ret = sirdev_get_dongle(dev, fsm->param);
274 if (ret) {
275 fsm->result = ret;
276 next_state = SIRDEV_STATE_ERROR;
277 break;
278 }
279
280 /* Dongles are powered through the modem control lines which
281 * were just set during open. Before resetting, let's wait for
282 * the power to stabilize. This is what some dongle drivers did
283 * in open before, while others didn't - should be safe anyway.
284 */
285
286 delay = 50;
287 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
288 next_state = SIRDEV_STATE_DONGLE_RESET;
289
290 fsm->param = 9600;
291
292 break;
293
294 case SIRDEV_STATE_DONGLE_CLOSE:
295 /* shouldn't we just treat this as success=? */
296 if (dev->dongle_drv == NULL) {
297 fsm->result = -EINVAL;
298 next_state = SIRDEV_STATE_ERROR;
299 break;
300 }
301
302 ret = sirdev_put_dongle(dev);
303 if (ret) {
304 fsm->result = ret;
305 next_state = SIRDEV_STATE_ERROR;
306 break;
307 }
308 next_state = SIRDEV_STATE_DONE;
309 break;
310
311 case SIRDEV_STATE_SET_DTR_RTS:
312 ret = sirdev_set_dtr_rts(dev,
313 (fsm->param&0x02) ? TRUE : FALSE,
314 (fsm->param&0x01) ? TRUE : FALSE);
315 next_state = SIRDEV_STATE_DONE;
316 break;
317
318 case SIRDEV_STATE_SET_SPEED:
319 fsm->substate = SIRDEV_STATE_WAIT_XMIT;
320 next_state = SIRDEV_STATE_DONGLE_CHECK;
321 break;
322
323 case SIRDEV_STATE_DONGLE_CHECK:
324 ret = irda_tx_complete_fsm(dev);
325 if (ret < 0) {
326 fsm->result = ret;
327 next_state = SIRDEV_STATE_ERROR;
328 break;
329 }
330 if ((delay=ret) != 0)
331 break;
332
333 if (dev->dongle_drv) {
334 fsm->substate = SIRDEV_STATE_DONGLE_RESET;
335 next_state = SIRDEV_STATE_DONGLE_RESET;
336 }
337 else {
338 dev->speed = fsm->param;
339 next_state = SIRDEV_STATE_PORT_SPEED;
340 }
341 break;
342
343 case SIRDEV_STATE_DONGLE_RESET:
344 if (dev->dongle_drv->reset) {
345 ret = dev->dongle_drv->reset(dev);
346 if (ret < 0) {
347 fsm->result = ret;
348 next_state = SIRDEV_STATE_ERROR;
349 break;
350 }
351 }
352 else
353 ret = 0;
354 if ((delay=ret) == 0) {
355 /* set serial port according to dongle default speed */
356 if (dev->drv->set_speed)
357 dev->drv->set_speed(dev, dev->speed);
358 fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
359 next_state = SIRDEV_STATE_DONGLE_SPEED;
360 }
361 break;
362
363 case SIRDEV_STATE_DONGLE_SPEED:
364 if (dev->dongle_drv->reset) {
365 ret = dev->dongle_drv->set_speed(dev, fsm->param);
366 if (ret < 0) {
367 fsm->result = ret;
368 next_state = SIRDEV_STATE_ERROR;
369 break;
370 }
371 }
372 else
373 ret = 0;
374 if ((delay=ret) == 0)
375 next_state = SIRDEV_STATE_PORT_SPEED;
376 break;
377
378 case SIRDEV_STATE_PORT_SPEED:
379 /* Finally we are ready to change the serial port speed */
380 if (dev->drv->set_speed)
381 dev->drv->set_speed(dev, dev->speed);
382 dev->new_speed = 0;
383 next_state = SIRDEV_STATE_DONE;
384 break;
385
386 case SIRDEV_STATE_DONE:
387 /* Signal network layer so it can send more frames */
388 netif_wake_queue(dev->netdev);
389 next_state = SIRDEV_STATE_COMPLETE;
390 break;
391
392 default:
393 IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
394 fsm->result = -EINVAL;
395 /* fall thru */
396
397 case SIRDEV_STATE_ERROR:
398 IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);
399
400#if 0 /* don't enable this before we have netdev->tx_timeout to recover */
401 netif_stop_queue(dev->netdev);
402#else
403 netif_wake_queue(dev->netdev);
404#endif
405 /* fall thru */
406
407 case SIRDEV_STATE_COMPLETE:
408 /* config change finished, so we are not busy any longer */
409 sirdev_enable_rx(dev);
410 up(&fsm->sem);
411 return;
412 }
413 fsm->state = next_state;
414 } while(!delay);
415
416 irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
417}
418
419/* schedule some device configuration task for execution by kIrDAd
420 * on behalf of the above state machine.
421 * can be called from process or interrupt/tasklet context.
422 */
423
424int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
425{
426 struct sir_fsm *fsm = &dev->fsm;
427 int xmit_was_down;
428
429 IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);
430
431 if (down_trylock(&fsm->sem)) {
432 if (in_interrupt() || in_atomic() || irqs_disabled()) {
433 IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
434 return -EWOULDBLOCK;
435 } else
436 down(&fsm->sem);
437 }
438
439 if (fsm->state == SIRDEV_STATE_DEAD) {
440 /* race with sirdev_close should never happen */
441 IRDA_ERROR("%s(), instance staled!\n", __FUNCTION__);
442 up(&fsm->sem);
443 return -ESTALE; /* or better EPIPE? */
444 }
445
446 xmit_was_down = netif_queue_stopped(dev->netdev);
447 netif_stop_queue(dev->netdev);
448 atomic_set(&dev->enable_rx, 0);
449
450 fsm->state = initial_state;
451 fsm->param = param;
452 fsm->result = 0;
453
454 INIT_LIST_HEAD(&fsm->rq.lh_request);
455 fsm->rq.pending = 0;
456 fsm->rq.func = irda_config_fsm;
457 fsm->rq.data = dev;
458
459 if (!irda_queue_request(&fsm->rq)) { /* returns 0 on error! */
460 atomic_set(&dev->enable_rx, 1);
461 if (!xmit_was_down)
462 netif_wake_queue(dev->netdev);
463 up(&fsm->sem);
464 return -EAGAIN;
465 }
466 return 0;
467}
468
469static int __init irda_thread_create(void)
470{
471 struct completion startup;
472 int pid;
473
474 spin_lock_init(&irda_rq_queue.lock);
475 irda_rq_queue.thread = NULL;
476 INIT_LIST_HEAD(&irda_rq_queue.request_list);
477 init_waitqueue_head(&irda_rq_queue.kick);
478 init_waitqueue_head(&irda_rq_queue.done);
479 atomic_set(&irda_rq_queue.num_pending, 0);
480
481 init_completion(&startup);
482 pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
483 if (pid <= 0)
484 return -EAGAIN;
485 else
486 wait_for_completion(&startup);
487
488 return 0;
489}
490
491static void __exit irda_thread_join(void)
492{
493 if (irda_rq_queue.thread) {
494 flush_irda_queue();
495 init_completion(&irda_rq_queue.exit);
496 irda_rq_queue.thread = NULL;
497 wake_up(&irda_rq_queue.kick);
498 wait_for_completion(&irda_rq_queue.exit);
499 }
500}
501
502module_init(irda_thread_create);
503module_exit(irda_thread_join);
504
505MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
506MODULE_DESCRIPTION("IrDA SIR core");
507MODULE_LICENSE("GPL");
508
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 58f76cefbc83..a4674044bd6f 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -54,6 +54,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/serial_reg.h>
 #include <linux/dma-mapping.h>
+#include <linux/pnp.h>
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
@@ -358,6 +359,16 @@ static inline void register_bank(int iobase, int bank)
 	        iobase + IRCC_MASTER);
 }
 
+#ifdef CONFIG_PNP
+/* PNP hotplug support */
+static const struct pnp_device_id smsc_ircc_pnp_table[] = {
+	{ .id = "SMCf010", .driver_data = 0 },
+	/* and presumably others */
+	{ }
+};
+MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
+#endif
+
 
 /*******************************************************************************
  *
@@ -2072,7 +2083,8 @@ static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self)
 
 /* PROBING
  *
- *
+ * REVISIT we can be told about the device by PNP, and should use that info
+ * instead of probing hardware and creating a platform_device ...
  */
 
 static int __init smsc_ircc_look_for_chips(void)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index beeb612be98f..2bd9592b75cd 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -8454,6 +8454,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
 
 	tx_len = 1514;
 	skb = dev_alloc_skb(tx_len);
+	if (!skb)
+		return -ENOMEM;
+
 	tx_data = skb_put(skb, tx_len);
 	memcpy(tx_data, tp->dev->dev_addr, 6);
 	memset(tx_data + 6, 0x0, 8);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a461b51d6076..f4169bbb60eb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -433,8 +433,7 @@ struct net_device
 
 	/* register/unregister state machine */
 	enum { NETREG_UNINITIALIZED=0,
-	       NETREG_REGISTERING,	/* called register_netdevice */
-	       NETREG_REGISTERED,	/* completed register todo */
+	       NETREG_REGISTERED,	/* completed register_netdevice */
 	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
 	       NETREG_UNREGISTERED,	/* completed unregister todo */
 	       NETREG_RELEASED,		/* called free_netdev */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 59eef42d4a42..ad1c7af65ec8 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -308,26 +308,19 @@ int br_add_bridge(const char *name)
 	if (ret)
 		goto err2;
 
-	/* network device kobject is not setup until
-	 * after rtnl_unlock does it's hotplug magic.
-	 * so hold reference to avoid race.
-	 */
-	dev_hold(dev);
-	rtnl_unlock();
-
 	ret = br_sysfs_addbr(dev);
-	dev_put(dev);
-
-	if (ret)
-		unregister_netdev(dev);
-out:
-	return ret;
+	if (ret)
+		goto err3;
+	rtnl_unlock();
+	return 0;
 
+ err3:
+	unregister_netdev(dev);
 err2:
 	free_netdev(dev);
 err1:
 	rtnl_unlock();
-	goto out;
+	return ret;
 }
 
 int br_del_bridge(const char *name)
diff --git a/net/core/dev.c b/net/core/dev.c
index 9ab3cfa58466..2dce673a039b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -193,7 +193,7 @@ static inline struct hlist_head *dev_index_hash(int ifindex)
  *	Our notifier list
  */
 
-static BLOCKING_NOTIFIER_HEAD(netdev_chain);
+static RAW_NOTIFIER_HEAD(netdev_chain);
 
 /*
  *	Device drivers call our routines to queue packets here. We empty the
@@ -736,7 +736,7 @@ int dev_change_name(struct net_device *dev, char *newname)
 	if (!err) {
 		hlist_del(&dev->name_hlist);
 		hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
-		blocking_notifier_call_chain(&netdev_chain,
+		raw_notifier_call_chain(&netdev_chain,
 				NETDEV_CHANGENAME, dev);
 	}
 
@@ -751,7 +751,7 @@ int dev_change_name(struct net_device *dev, char *newname)
  */
 void netdev_features_change(struct net_device *dev)
 {
-	blocking_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
+	raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
 }
 EXPORT_SYMBOL(netdev_features_change);
 
@@ -766,7 +766,7 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
 	if (dev->flags & IFF_UP) {
-		blocking_notifier_call_chain(&netdev_chain,
+		raw_notifier_call_chain(&netdev_chain,
 				NETDEV_CHANGE, dev);
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
 	}
@@ -864,7 +864,7 @@ int dev_open(struct net_device *dev)
 		/*
 		 *	... and announce new interface.
 		 */
-		blocking_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+		raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
 	}
 	return ret;
 }
@@ -887,7 +887,7 @@ int dev_close(struct net_device *dev)
 	 *	Tell people we are going down, so that they can
 	 *	prepare to death, when device is still operating.
 	 */
-	blocking_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
+	raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
 
 	dev_deactivate(dev);
 
@@ -924,7 +924,7 @@ int dev_close(struct net_device *dev)
 	/*
 	 *	Tell people we are down
 	 */
-	blocking_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+	raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
 
 	return 0;
 }
@@ -955,7 +955,7 @@ int register_netdevice_notifier(struct notifier_block *nb)
 	int err;
 
 	rtnl_lock();
-	err = blocking_notifier_chain_register(&netdev_chain, nb);
+	err = raw_notifier_chain_register(&netdev_chain, nb);
 	if (!err) {
 		for (dev = dev_base; dev; dev = dev->next) {
 			nb->notifier_call(nb, NETDEV_REGISTER, dev);
@@ -983,7 +983,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
 	int err;
 
 	rtnl_lock();
-	err = blocking_notifier_chain_unregister(&netdev_chain, nb);
+	err = raw_notifier_chain_unregister(&netdev_chain, nb);
 	rtnl_unlock();
 	return err;
 }
@@ -994,12 +994,12 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
  *	@v:   pointer passed unmodified to notifier function
  *
  *	Call all network notifier blocks.  Parameters and return value
- *	are as for blocking_notifier_call_chain().
+ *	are as for raw_notifier_call_chain().
  */
 
 int call_netdevice_notifiers(unsigned long val, void *v)
 {
-	return blocking_notifier_call_chain(&netdev_chain, val, v);
+	return raw_notifier_call_chain(&netdev_chain, val, v);
 }
 
 /* When > 0 there are consumers of rx skb time stamps */
@@ -2308,7 +2308,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
 	if (dev->flags & IFF_UP &&
 	    ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
 					  IFF_VOLATILE)))
-		blocking_notifier_call_chain(&netdev_chain,
+		raw_notifier_call_chain(&netdev_chain,
 				NETDEV_CHANGE, dev);
 
 	if ((flags ^ dev->gflags) & IFF_PROMISC) {
@@ -2353,7 +2353,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
 	else
 		dev->mtu = new_mtu;
 	if (!err && dev->flags & IFF_UP)
-		blocking_notifier_call_chain(&netdev_chain,
+		raw_notifier_call_chain(&netdev_chain,
 				NETDEV_CHANGEMTU, dev);
 	return err;
 }
@@ -2370,7 +2370,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
 		return -ENODEV;
 	err = dev->set_mac_address(dev, sa);
 	if (!err)
-		blocking_notifier_call_chain(&netdev_chain,
+		raw_notifier_call_chain(&netdev_chain,
 				NETDEV_CHANGEADDR, dev);
 	return err;
 }
@@ -2427,7 +2427,7 @@ static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
 			return -EINVAL;
 		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
 		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-		blocking_notifier_call_chain(&netdev_chain,
+		raw_notifier_call_chain(&netdev_chain,
 				NETDEV_CHANGEADDR, dev);
 		return 0;
 
@@ -2777,6 +2777,8 @@ int register_netdevice(struct net_device *dev)
 	BUG_ON(dev_boot_phase);
 	ASSERT_RTNL();
 
+	might_sleep();
+
 	/* When net_device's are persistent, this will be fatal. */
 	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 
@@ -2863,6 +2865,11 @@ int register_netdevice(struct net_device *dev)
 	if (!dev->rebuild_header)
 		dev->rebuild_header = default_rebuild_header;
 
+	ret = netdev_register_sysfs(dev);
+	if (ret)
+		goto out_err;
+	dev->reg_state = NETREG_REGISTERED;
+
 	/*
 	 *	Default initial state at registry is that the
 	 *	device is present.
@@ -2878,14 +2885,11 @@ int register_netdevice(struct net_device *dev)
 	hlist_add_head(&dev->name_hlist, head);
 	hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
 	dev_hold(dev);
-	dev->reg_state = NETREG_REGISTERING;
 	write_unlock_bh(&dev_base_lock);
 
 	/* Notify protocols, that a new device appeared. */
-	blocking_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
+	raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
 
-	/* Finish registration after unlock */
-	net_set_todo(dev);
 	ret = 0;
 
 out:
@@ -2961,7 +2965,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 			rtnl_lock();
 
 			/* Rebroadcast unregister notification */
-			blocking_notifier_call_chain(&netdev_chain,
+			raw_notifier_call_chain(&netdev_chain,
 					NETDEV_UNREGISTER, dev);
 
 			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
@@ -3008,7 +3012,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
  *
  * We are invoked by rtnl_unlock() after it drops the semaphore.
  * This allows us to deal with problems:
- * 1) We can create/delete sysfs objects which invoke hotplug
+ * 1) We can delete sysfs objects which invoke hotplug
  *    without deadlocking with linkwatch via keventd.
  * 2) Since we run with the RTNL semaphore not held, we can sleep
  *    safely in order to wait for the netdev refcnt to drop to zero.
@@ -3017,8 +3021,6 @@ static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
 	struct list_head list = LIST_HEAD_INIT(list);
-	int err;
-
 
 	/* Need to guard against multiple cpu's getting out of order. */
 	mutex_lock(&net_todo_run_mutex);
@@ -3041,40 +3043,29 @@ void netdev_run_todo(void)
 			= list_entry(list.next, struct net_device, todo_list);
 		list_del(&dev->todo_list);
 
-		switch(dev->reg_state) {
-		case NETREG_REGISTERING:
-			err = netdev_register_sysfs(dev);
-			if (err)
-				printk(KERN_ERR "%s: failed sysfs registration (%d)\n",
-				       dev->name, err);
-			dev->reg_state = NETREG_REGISTERED;
-			break;
-
-		case NETREG_UNREGISTERING:
-			netdev_unregister_sysfs(dev);
-			dev->reg_state = NETREG_UNREGISTERED;
-
-			netdev_wait_allrefs(dev);
+		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
+			printk(KERN_ERR "network todo '%s' but state %d\n",
+			       dev->name, dev->reg_state);
+			dump_stack();
+			continue;
+		}
 
-			/* paranoia */
-			BUG_ON(atomic_read(&dev->refcnt));
-			BUG_TRAP(!dev->ip_ptr);
-			BUG_TRAP(!dev->ip6_ptr);
-			BUG_TRAP(!dev->dn_ptr);
+		netdev_unregister_sysfs(dev);
+		dev->reg_state = NETREG_UNREGISTERED;
 
+		netdev_wait_allrefs(dev);
 
-			/* It must be the very last action,
-			 * after this 'dev' may point to freed up memory.
-			 */
-			if (dev->destructor)
-				dev->destructor(dev);
-			break;
+		/* paranoia */
+		BUG_ON(atomic_read(&dev->refcnt));
+		BUG_TRAP(!dev->ip_ptr);
+		BUG_TRAP(!dev->ip6_ptr);
+		BUG_TRAP(!dev->dn_ptr);
 
-		default:
-			printk(KERN_ERR "network todo '%s' but state %d\n",
-			       dev->name, dev->reg_state);
-			break;
-		}
+		/* It must be the very last action,
+		 * after this 'dev' may point to freed up memory.
+		 */
+		if (dev->destructor)
+			dev->destructor(dev);
 	}
 
 out:
@@ -3216,7 +3207,7 @@ int unregister_netdevice(struct net_device *dev)
 	/* Notify protocols, that we are about to destroy
 	   this device. They should clean all the things.
 	*/
-	blocking_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
+	raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
 
 	/*
 	 * Flush the multicast chain
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 341de44c7ed1..646937cc2d84 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -170,13 +170,13 @@ void linkwatch_fire_event(struct net_device *dev)
 		spin_unlock_irqrestore(&lweventlist_lock, flags);
 
 		if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
-			unsigned long thisevent = jiffies;
+			unsigned long delay = linkwatch_nextevent - jiffies;
 
-			if (thisevent >= linkwatch_nextevent) {
+			/* If we wrap around we'll delay it by at most HZ. */
+			if (!delay || delay > HZ)
 				schedule_work(&linkwatch_work);
-			} else {
-				schedule_delayed_work(&linkwatch_work, linkwatch_nextevent - thisevent);
-			}
+			else
+				schedule_delayed_work(&linkwatch_work, delay);
 		}
 	}
 }
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 9bebad07bf2e..cbcae6544622 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -209,7 +209,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
 
 void ip_options_fragment(struct sk_buff * skb)
 {
-	unsigned char * optptr = skb->nh.raw;
+	unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr);
 	struct ip_options * opt = &(IPCB(skb)->opt);
 	int l = opt->optlen;
 	int optlen;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index f8f3a37a1494..eb2865d5ae28 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -173,6 +173,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
 
 	if (err) {
 		sk->sk_err_soft = -err;
+		kfree_skb(skb);
 		return err;
 	}
 
@@ -181,6 +182,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
 
 	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
 		sk->sk_route_caps = 0;
+		kfree_skb(skb);
 		return err;
 	}
 
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index c6d169fbdceb..82e665c79991 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -257,7 +257,6 @@ struct ias_attrib *irias_find_attrib(struct ias_object *obj, char *name)
 	/* Unsafe (locking), attrib might change */
 	return attrib;
 }
-EXPORT_SYMBOL(irias_find_attrib);
 
 /*
  * Function irias_add_attribute (obj, attrib)
@@ -484,7 +483,6 @@ struct ias_value *irias_new_string_value(char *string)
 
 	return value;
 }
-EXPORT_SYMBOL(irias_new_string_value);
 
 /*
  * Function irias_new_octseq_value (octets, len)
@@ -519,7 +517,6 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len)
 	memcpy(value->t.oct_seq, octseq , len);
 	return value;
 }
-EXPORT_SYMBOL(irias_new_octseq_value);
 
 struct ias_value *irias_new_missing_value(void)
 {
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 91132f6871d7..f1c7bd29f2cd 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -974,10 +974,10 @@ hfsc_adjust_levels(struct hfsc_class *cl)
 	do {
 		level = 0;
 		list_for_each_entry(p, &cl->children, siblings) {
-			if (p->level > level)
-				level = p->level;
+			if (p->level >= level)
+				level = p->level + 1;
 		}
-		cl->level = level + 1;
+		cl->level = level;
 	} while ((cl = cl->cl_parent) != NULL);
 }
 