author    Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>  2006-09-27 04:50:31 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>                   2006-09-27 11:26:15 -0400
commit    48af05ed54ddf8dc6eceea4f009e063d7e784b37
tree      4960c5a2ed9a3a04c4423317824aad9af6075880 /arch/um/drivers/net_kern.c
parent    06837504de7b4883e92af207dbbab4310d0db0ed
[PATCH] uml: fix proc-vs-interrupt context spinlock deadlock
This spinlock can also be taken in interrupt context, so the
spin_lock_irq[save] variants must be used.
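To make the deadlock concrete, here is a minimal sketch of the pattern
(illustrative names only - demo_lock and the demo_* functions are not from
this driver):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Runs in interrupt context (e.g. called from the device's IRQ handler). */
static void demo_irq_path(void)
{
	spin_lock(&demo_lock);	/* spins forever if this CPU already holds it */
	/* ... touch state shared with process context ... */
	spin_unlock(&demo_lock);
}

/* Buggy process-context pattern: leaves IRQs enabled while holding the
 * lock, so an interrupt on this CPU can re-enter demo_irq_path() and
 * self-deadlock. */
static void demo_proc_buggy(void)
{
	spin_lock(&demo_lock);
	/* ... critical section ... */
	spin_unlock(&demo_lock);
}

/* Fixed pattern: mask local IRQs for the duration of the critical section. */
static void demo_proc_fixed(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section, safe from local interrupt re-entry ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}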
However, Documentation/networking/netdevices.txt explains that we are called
with rtnl_lock() held, so we don't need to care about other concurrent opens.
This is also confirmed by LDD3 and by direct inspection. I also verified that
the network layer (through a state machine) guarantees that nobody will close
the interface while it is being used. Please correct me if I'm wrong.
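As a sketch of the guarantee being relied on (demo_bring_up() is an
illustrative caller, not real net/core code; rtnl_lock() and dev_open() are
the real kernel interfaces of this era):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* The core serializes all opens and closes under the RTNL lock, so by the
 * time dev->open() (uml_net_open() here) runs, no concurrent open or close
 * can race with it. */
static int demo_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* invokes uml_net_open() with RTNL held */
	rtnl_unlock();
	return err;
}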
Also, we must make sure we don't sleep with IRQs disabled! But that is
nothing new - we already can't sleep while holding a spinlock. Whether the
present code really guarantees that is another question.
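For illustration, the constraint looks like this in practice
(demo_alloc_under_lock() is a hypothetical example; GFP_KERNEL and
GFP_ATOMIC are the standard allocation flags):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void *demo_alloc_under_lock(void)
{
	unsigned long flags;
	void *buf;

	spin_lock_irqsave(&demo_lock, flags);
	/* buf = kmalloc(128, GFP_KERNEL);    WRONG: GFP_KERNEL may sleep */
	buf = kmalloc(128, GFP_ATOMIC);	/* OK: atomic allocation never sleeps */
	spin_unlock_irqrestore(&demo_lock, flags);
	return buf;
}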
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/um/drivers/net_kern.c')
-rw-r--r--  arch/um/drivers/net_kern.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 4a7966b21931..657dfacd5ba8 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -114,8 +114,6 @@ static int uml_net_open(struct net_device *dev)
 	struct uml_net_private *lp = dev->priv;
 	int err;
 
-	spin_lock(&lp->lock);
-
 	if(lp->fd >= 0){
 		err = -ENXIO;
 		goto out;
@@ -149,8 +147,6 @@ static int uml_net_open(struct net_device *dev)
 	 */
 	while((err = uml_net_rx(dev)) > 0) ;
 
-	spin_unlock(&lp->lock);
-
 	spin_lock(&opened_lock);
 	list_add(&lp->list, &opened);
 	spin_unlock(&opened_lock);
@@ -160,7 +156,6 @@ out_close:
 	if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
 	lp->fd = -1;
 out:
-	spin_unlock(&lp->lock);
 	return err;
 }
 
@@ -169,15 +164,12 @@ static int uml_net_close(struct net_device *dev)
 	struct uml_net_private *lp = dev->priv;
 
 	netif_stop_queue(dev);
-	spin_lock(&lp->lock);
 
 	free_irq(dev->irq, dev);
 	if(lp->close != NULL)
 		(*lp->close)(lp->fd, &lp->user);
 	lp->fd = -1;
 
-	spin_unlock(&lp->lock);
-
 	spin_lock(&opened_lock);
 	list_del(&lp->list);
 	spin_unlock(&opened_lock);
@@ -246,9 +238,9 @@ static int uml_net_set_mac(struct net_device *dev, void *addr)
 	struct uml_net_private *lp = dev->priv;
 	struct sockaddr *hwaddr = addr;
 
-	spin_lock(&lp->lock);
+	spin_lock_irq(&lp->lock);
 	set_ether_mac(dev, hwaddr->sa_data);
-	spin_unlock(&lp->lock);
+	spin_unlock_irq(&lp->lock);
 
 	return(0);
 }
@@ -258,7 +250,7 @@ static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
 	struct uml_net_private *lp = dev->priv;
 	int err = 0;
 
-	spin_lock(&lp->lock);
+	spin_lock_irq(&lp->lock);
 
 	new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
 	if(new_mtu < 0){
@@ -269,7 +261,7 @@ static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
 	dev->mtu = new_mtu;
 
 out:
-	spin_unlock(&lp->lock);
+	spin_unlock_irq(&lp->lock);
 	return err;
 }
 