author     Asier Llano <a.llano@ziv.es>             2009-12-08 23:29:10 -0500
committer  David S. Miller <davem@davemloft.net>    2009-12-08 23:29:10 -0500
commit     1e4e0767ecb1cf53a43343518c0e09ad7ee5e23a (patch)
tree       cccda01860fec5659a2c178284c2f57cbb755e16 /drivers/net/fec_mpc52xx.c
parent     4b860abf636fdd963731ae4ccafdd39ebcd5f962 (diff)
net/mpc5200: Fix locking on fec_mpc52xx driver
Fix the locking scheme on the fec_mpc52xx driver. This device can
receive IRQs from three sources: the FEC itself, the tx DMA, and the
rx DMA. Mutual exclusion was handled by taking a spin_lock() in the
critical regions, but because the handlers are run with IRQs enabled,
spin_lock() is insufficient and the driver can end up interrupting
a critical region anyway from another IRQ.
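
For readers unfamiliar with the idiom, here is a minimal, hypothetical
sketch of the spin_lock_irqsave() pattern this patch applies throughout
the driver (example_lock and example_irq_handler are illustrative names,
not taken from fec_mpc52xx.c):

    /* Minimal sketch (not driver code): with plain spin_lock(), local
     * IRQs stay enabled, so another IRQ can fire on the same CPU while
     * the lock is held; if its handler takes the same lock, it spins
     * forever. spin_lock_irqsave() disables local IRQs and closes that
     * window. */
    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);	/* hypothetical shared lock */

    static irqreturn_t example_irq_handler(int irq, void *dev_id)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&example_lock, flags);	/* IRQs off locally */
    	/* ... touch shared hardware/DMA state ... */
    	spin_unlock_irqrestore(&example_lock, flags);	/* IRQs restored */

    	return IRQ_HANDLED;
    }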
Asier Llano discovered that this occurs when an error IRQ is raised
in the middle of handling rx IRQs, resulting in an sk_buff memory
leak.
In addition, locking throughout the driver is spotty at best;
inspection revealed quite a few places with insufficient locking.
This patch is based on Asier's initial work, but reworks a number of
things so that locks are held for as short a time as possible, so
that spin_lock_irqsave() is used everywhere, and so the locks are
dropped when calling into the network stack (because the lock only
protects the hardware interface; not the network stack).
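
The drop-the-lock-around-the-stack idiom looks roughly like this (a
simplified sketch under the same assumptions, not the driver code
itself; example_priv, hw_buffer_done() and hw_take_buffer() are
hypothetical placeholders):

    /* Sketch: the lock covers only the hardware/DMA bookkeeping, so it
     * is released before every call into the network stack and
     * re-taken afterwards. */
    static irqreturn_t example_rx_handler(int irq, void *dev_id)
    {
    	struct example_priv *priv = dev_id;	/* hypothetical private data */
    	unsigned long flags;

    	spin_lock_irqsave(&priv->lock, flags);
    	while (hw_buffer_done(priv)) {		/* hypothetical HW query */
    		struct sk_buff *skb = hw_take_buffer(priv);	/* hypothetical */

    		/* netif_rx() may do arbitrary stack work; never hold
    		 * the hardware lock across it */
    		spin_unlock_irqrestore(&priv->lock, flags);
    		netif_rx(skb);
    		spin_lock_irqsave(&priv->lock, flags);
    	}
    	spin_unlock_irqrestore(&priv->lock, flags);

    	return IRQ_HANDLED;
    }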
Boot tested on a lite5200 with an NFS root. Has not been performance
tested.
Signed-off-by: Asier Llano <a.llano@ziv.es>
Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/fec_mpc52xx.c')
-rw-r--r--  drivers/net/fec_mpc52xx.c  121
1 file changed, 62 insertions(+), 59 deletions(-)
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 6407672b28e9..848e8407ea8f 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -85,11 +85,15 @@ MODULE_PARM_DESC(debug, "debugging messages level");
 
 static void mpc52xx_fec_tx_timeout(struct net_device *dev)
 {
+	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+
 	dev_warn(&dev->dev, "transmit timed out\n");
 
+	spin_lock_irqsave(&priv->lock, flags);
 	mpc52xx_fec_reset(dev);
-
 	dev->stats.tx_errors++;
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	netif_wake_queue(dev);
 }
@@ -135,28 +139,32 @@ static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task
 	}
 }
 
+static void
+mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
+{
+	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct bcom_fec_bd *bd;
+
+	bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
+	bd->status = FEC_RX_BUFFER_SIZE;
+	bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
+			FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+	bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+}
+
 static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
 {
-	while (!bcom_queue_full(rxtsk)) {
-		struct sk_buff *skb;
-		struct bcom_fec_bd *bd;
+	struct sk_buff *skb;
 
+	while (!bcom_queue_full(rxtsk)) {
 		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
-		if (skb == NULL)
+		if (!skb)
 			return -EAGAIN;
 
 		/* zero out the initial receive buffers to aid debugging */
 		memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
-
-		bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk);
-
-		bd->status = FEC_RX_BUFFER_SIZE;
-		bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
-				FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
-		bcom_submit_next_buffer(rxtsk, skb);
+		mpc52xx_fec_rx_submit(dev, skb);
 	}
-
 	return 0;
 }
 
@@ -328,13 +336,12 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			DMA_TO_DEVICE);
 
 	bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
@@ -359,9 +366,9 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	unsigned long flags;
 
-	spin_lock(&priv->lock);
-
+	spin_lock_irqsave(&priv->lock, flags);
 	while (bcom_buffer_done(priv->tx_dmatsk)) {
 		struct sk_buff *skb;
 		struct bcom_fec_bd *bd;
@@ -372,11 +379,10 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 
 		dev_kfree_skb_irq(skb);
 	}
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	netif_wake_queue(dev);
 
-	spin_unlock(&priv->lock);
-
 	return IRQ_HANDLED;
 }
 
@@ -384,67 +390,60 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct sk_buff *rskb; /* received sk_buff */
+	struct sk_buff *skb;  /* new sk_buff to enqueue in its place */
+	struct bcom_fec_bd *bd;
+	u32 status, physaddr;
+	int length;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
 
 	while (bcom_buffer_done(priv->rx_dmatsk)) {
-		struct sk_buff *skb;
-		struct sk_buff *rskb;
-		struct bcom_fec_bd *bd;
-		u32 status;
 
 		rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
 				(struct bcom_bd **)&bd);
-		dma_unmap_single(dev->dev.parent, bd->skb_pa, rskb->len,
-				DMA_FROM_DEVICE);
+		physaddr = bd->skb_pa;
 
 		/* Test for errors in received frame */
 		if (status & BCOM_FEC_RX_BD_ERRORS) {
 			/* Drop packet and reuse the buffer */
-			bd = (struct bcom_fec_bd *)
-				bcom_prepare_next_buffer(priv->rx_dmatsk);
-
-			bd->status = FEC_RX_BUFFER_SIZE;
-			bd->skb_pa = dma_map_single(dev->dev.parent,
-					rskb->data,
-					FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
-			bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
-
+			mpc52xx_fec_rx_submit(dev, rskb);
 			dev->stats.rx_dropped++;
-
 			continue;
 		}
 
 		/* skbs are allocated on open, so now we allocate a new one,
 		 * and remove the old (with the packet) */
 		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
-		if (skb) {
-			/* Process the received skb */
-			int length = status & BCOM_FEC_RX_BD_LEN_MASK;
-
-			skb_put(rskb, length - 4);	/* length without CRC32 */
-
-			rskb->dev = dev;
-			rskb->protocol = eth_type_trans(rskb, dev);
-
-			netif_rx(rskb);
-		} else {
+		if (!skb) {
 			/* Can't get a new one : reuse the same & drop pkt */
-			dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n");
+			dev_notice(&dev->dev, "Low memory - dropped packet.\n");
+			mpc52xx_fec_rx_submit(dev, rskb);
 			dev->stats.rx_dropped++;
-
-			skb = rskb;
+			continue;
 		}
 
-		bd = (struct bcom_fec_bd *)
-			bcom_prepare_next_buffer(priv->rx_dmatsk);
+		/* Enqueue the new sk_buff back on the hardware */
+		mpc52xx_fec_rx_submit(dev, skb);
 
-		bd->status = FEC_RX_BUFFER_SIZE;
-		bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
-				FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+		/* Process the received skb - Drop the spin lock while
+		 * calling into the network stack */
+		spin_unlock_irqrestore(&priv->lock, flags);
 
-		bcom_submit_next_buffer(priv->rx_dmatsk, skb);
+		dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
+				DMA_FROM_DEVICE);
+		length = status & BCOM_FEC_RX_BD_LEN_MASK;
+		skb_put(rskb, length - 4);	/* length without CRC32 */
+		rskb->dev = dev;
+		rskb->protocol = eth_type_trans(rskb, dev);
+		netif_rx(rskb);
+
+		spin_lock_irqsave(&priv->lock, flags);
 	}
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	return IRQ_HANDLED;
 }
 
@@ -454,6 +453,7 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct mpc52xx_fec __iomem *fec = priv->fec;
 	u32 ievent;
+	unsigned long flags;
 
 	ievent = in_be32(&fec->ievent);
 
@@ -471,9 +471,10 @@
 	if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
 		dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
 
+	spin_lock_irqsave(&priv->lock, flags);
 	mpc52xx_fec_reset(dev);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
-	netif_wake_queue(dev);
 	return IRQ_HANDLED;
 }
 
@@ -768,6 +769,8 @@ static void mpc52xx_fec_reset(struct net_device *dev)
 	bcom_enable(priv->tx_dmatsk);
 
 	mpc52xx_fec_start(dev);
+
+	netif_wake_queue(dev);
 }
 
 