Diffstat (limited to 'drivers/net/fec_mpc52xx.c')
 -rw-r--r--  drivers/net/fec_mpc52xx.c | 133
 1 file changed, 67 insertions(+), 66 deletions(-)
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 66dace6d324f..4a43e56b7394 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
+#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/crc32.h>
@@ -85,11 +86,15 @@ MODULE_PARM_DESC(debug, "debugging messages level");
 
 static void mpc52xx_fec_tx_timeout(struct net_device *dev)
 {
+	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	unsigned long flags;
+
 	dev_warn(&dev->dev, "transmit timed out\n");
 
+	spin_lock_irqsave(&priv->lock, flags);
 	mpc52xx_fec_reset(dev);
-
 	dev->stats.tx_errors++;
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	netif_wake_queue(dev);
 }
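
The hunk above establishes the locking idiom used throughout this patch: mpc52xx_fec_reset() touches hardware and descriptor state that the interrupt handlers also use, so callers now take priv->lock around it. The watchdog-driven tx_timeout runs in timer (softirq) context while the same lock is taken from hard-irq handlers, so the irqsave variant is the safe choice. A minimal sketch of the idiom, with hypothetical names (mydrv_priv, mydrv_hw_reset) that are not part of this driver:

	/* Hypothetical driver: serialize a hardware reset against IRQ
	 * handlers that take the same lock. */
	struct mydrv_priv {
		spinlock_t lock;	/* guards hardware + ring state */
	};

	static void mydrv_tx_timeout(struct net_device *dev)
	{
		struct mydrv_priv *priv = netdev_priv(dev);
		unsigned long flags;

		/* irqsave disables local interrupts, so an IRQ handler
		 * contending for priv->lock cannot deadlock this CPU. */
		spin_lock_irqsave(&priv->lock, flags);
		mydrv_hw_reset(dev);		/* assumed helper */
		dev->stats.tx_errors++;
		spin_unlock_irqrestore(&priv->lock, flags);

		netif_wake_queue(dev);
	}
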
@@ -135,28 +140,32 @@ static void mpc52xx_fec_free_rx_buffers(struct net_device *dev, struct bcom_task
 	}
 }
 
+static void
+mpc52xx_fec_rx_submit(struct net_device *dev, struct sk_buff *rskb)
+{
+	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct bcom_fec_bd *bd;
+
+	bd = (struct bcom_fec_bd *) bcom_prepare_next_buffer(priv->rx_dmatsk);
+	bd->status = FEC_RX_BUFFER_SIZE;
+	bd->skb_pa = dma_map_single(dev->dev.parent, rskb->data,
+				    FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
+	bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
+}
+
 static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task *rxtsk)
 {
-	while (!bcom_queue_full(rxtsk)) {
-		struct sk_buff *skb;
-		struct bcom_fec_bd *bd;
+	struct sk_buff *skb;
 
+	while (!bcom_queue_full(rxtsk)) {
 		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
-		if (skb == NULL)
+		if (!skb)
 			return -EAGAIN;
 
 		/* zero out the initial receive buffers to aid debugging */
 		memset(skb->data, 0, FEC_RX_BUFFER_SIZE);
-
-		bd = (struct bcom_fec_bd *)bcom_prepare_next_buffer(rxtsk);
-
-		bd->status = FEC_RX_BUFFER_SIZE;
-		bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
-				FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
-		bcom_submit_next_buffer(rxtsk, skb);
+		mpc52xx_fec_rx_submit(dev, skb);
 	}
-
 	return 0;
 }
 
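
The new mpc52xx_fec_rx_submit() gathers the three steps for handing a buffer to the BestComm RX task (prepare the next buffer descriptor, DMA-map the skb data, submit) so that the open-time allocator and the reuse paths in the RX interrupt below share one implementation. A hedged sketch of the same refactoring against an invented ring API (my_priv, my_bd, my_ring_next_free, my_ring_post and MY_RX_BUF_SIZE are all assumptions):

	/* One refill helper instead of several open-coded copies: the
	 * descriptor length, the DMA mapping and the submit can no
	 * longer drift apart between call sites. */
	static void my_rx_refill(struct net_device *dev, struct sk_buff *skb)
	{
		struct my_priv *priv = netdev_priv(dev);
		struct my_bd *bd = my_ring_next_free(priv->rx_ring);

		bd->len = MY_RX_BUF_SIZE;
		bd->addr = dma_map_single(dev->dev.parent, skb->data,
					  MY_RX_BUF_SIZE, DMA_FROM_DEVICE);
		my_ring_post(priv->rx_ring, skb);
	}
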
@@ -226,17 +235,17 @@ static int mpc52xx_fec_open(struct net_device *dev)
 		phy_start(priv->phydev);
 	}
 
-	if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED,
+	if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
 			DRIVER_NAME "_ctrl", dev)) {
 		dev_err(&dev->dev, "ctrl interrupt request failed\n");
 		goto free_phy;
 	}
-	if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0,
+	if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
 			DRIVER_NAME "_rx", dev)) {
 		dev_err(&dev->dev, "rx interrupt request failed\n");
 		goto free_ctrl_irq;
 	}
-	if (request_irq(priv->t_irq, &mpc52xx_fec_tx_interrupt, 0,
+	if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
 			DRIVER_NAME "_tx", dev)) {
 		dev_err(&dev->dev, "tx interrupt request failed\n");
 		goto free_2irqs;
@@ -328,13 +337,12 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			DMA_TO_DEVICE);
 
 	bcom_submit_next_buffer(priv->tx_dmatsk, skb);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
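
In start_xmit the unlock moves up so that only the hardware submit runs inside the critical section; the queue-full check and netif_stop_queue() now run unlocked, keeping interrupts disabled for the shortest possible time. The TX-done handler (next hunk) wakes the queue after reaping buffers. A sketch of the narrowed critical section, again with invented names (my_priv, my_hw_submit, my_ring_full):

	/* Hypothetical ndo_start_xmit with the same shape: only the
	 * hardware submit is under the lock; queue flow control runs
	 * outside it and relies on the completion IRQ's wake-up. */
	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		my_hw_submit(priv, skb);
		spin_unlock_irqrestore(&priv->lock, flags);

		if (my_ring_full(priv->tx_ring))
			netif_stop_queue(dev);

		return NETDEV_TX_OK;
	}
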
@@ -359,9 +367,9 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	unsigned long flags;
 
-	spin_lock(&priv->lock);
-
+	spin_lock_irqsave(&priv->lock, flags);
 	while (bcom_buffer_done(priv->tx_dmatsk)) {
 		struct sk_buff *skb;
 		struct bcom_fec_bd *bd;
@@ -372,11 +380,10 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
 
 		dev_kfree_skb_irq(skb);
 	}
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	netif_wake_queue(dev);
 
-	spin_unlock(&priv->lock);
-
 	return IRQ_HANDLED;
 }
 
@@ -384,67 +391,60 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+	struct sk_buff *rskb; /* received sk_buff */
+	struct sk_buff *skb;  /* new sk_buff to enqueue in its place */
+	struct bcom_fec_bd *bd;
+	u32 status, physaddr;
+	int length;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
 
 	while (bcom_buffer_done(priv->rx_dmatsk)) {
-		struct sk_buff *skb;
-		struct sk_buff *rskb;
-		struct bcom_fec_bd *bd;
-		u32 status;
 
 		rskb = bcom_retrieve_buffer(priv->rx_dmatsk, &status,
 				(struct bcom_bd **)&bd);
-		dma_unmap_single(dev->dev.parent, bd->skb_pa, rskb->len,
-				DMA_FROM_DEVICE);
+		physaddr = bd->skb_pa;
 
 		/* Test for errors in received frame */
 		if (status & BCOM_FEC_RX_BD_ERRORS) {
 			/* Drop packet and reuse the buffer */
-			bd = (struct bcom_fec_bd *)
-				bcom_prepare_next_buffer(priv->rx_dmatsk);
-
-			bd->status = FEC_RX_BUFFER_SIZE;
-			bd->skb_pa = dma_map_single(dev->dev.parent,
-					rskb->data,
-					FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
-			bcom_submit_next_buffer(priv->rx_dmatsk, rskb);
-
+			mpc52xx_fec_rx_submit(dev, rskb);
 			dev->stats.rx_dropped++;
-
 			continue;
 		}
 
 		/* skbs are allocated on open, so now we allocate a new one,
 		 * and remove the old (with the packet) */
 		skb = dev_alloc_skb(FEC_RX_BUFFER_SIZE);
-		if (skb) {
-			/* Process the received skb */
-			int length = status & BCOM_FEC_RX_BD_LEN_MASK;
-
-			skb_put(rskb, length - 4);	/* length without CRC32 */
-
-			rskb->dev = dev;
-			rskb->protocol = eth_type_trans(rskb, dev);
-
-			netif_rx(rskb);
-		} else {
+		if (!skb) {
 			/* Can't get a new one : reuse the same & drop pkt */
-			dev_notice(&dev->dev, "Memory squeeze, dropping packet.\n");
+			dev_notice(&dev->dev, "Low memory - dropped packet.\n");
+			mpc52xx_fec_rx_submit(dev, rskb);
 			dev->stats.rx_dropped++;
-
-			skb = rskb;
+			continue;
 		}
 
-		bd = (struct bcom_fec_bd *)
-			bcom_prepare_next_buffer(priv->rx_dmatsk);
-
-		bd->status = FEC_RX_BUFFER_SIZE;
-		bd->skb_pa = dma_map_single(dev->dev.parent, skb->data,
-				FEC_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
-
-		bcom_submit_next_buffer(priv->rx_dmatsk, skb);
+		/* Enqueue the new sk_buff back on the hardware */
+		mpc52xx_fec_rx_submit(dev, skb);
+
+		/* Process the received skb - Drop the spin lock while
+		 * calling into the network stack */
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
+				DMA_FROM_DEVICE);
+		length = status & BCOM_FEC_RX_BD_LEN_MASK;
+		skb_put(rskb, length - 4);	/* length without CRC32 */
+		rskb->dev = dev;
+		rskb->protocol = eth_type_trans(rskb, dev);
+		netif_rx(rskb);
+
+		spin_lock_irqsave(&priv->lock, flags);
 	}
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	return IRQ_HANDLED;
 }
 
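
The reworked RX handler illustrates the central pattern of this patch: all buffer-descriptor manipulation happens under priv->lock, but the lock (and with it interrupt disabling) is dropped for the slow part of each iteration, the dma_unmap_single() and the hand-off to the network stack, then retaken before the next ring access. Capturing bd->skb_pa into physaddr before the refill is what makes this possible: once mpc52xx_fec_rx_submit() reuses the descriptor, bd no longer describes the received packet. A condensed sketch of the loop shape, using the invented helpers from the earlier notes (my_ring_done, my_ring_pop, my_rx_refill); error checking, unmapping and length handling are elided:

	/* Invariant: the ring is only touched with priv->lock held;
	 * the stack is only entered with the lock released. */
	static irqreturn_t my_rx_irq(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		struct my_priv *priv = netdev_priv(dev);
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		while (my_ring_done(priv->rx_ring)) {
			struct sk_buff *rskb = my_ring_pop(priv->rx_ring);
			struct sk_buff *skb = dev_alloc_skb(MY_RX_BUF_SIZE);

			if (!skb) {		/* no replacement: recycle, drop */
				my_rx_refill(dev, rskb);
				dev->stats.rx_dropped++;
				continue;
			}
			my_rx_refill(dev, skb);	/* ring is whole again */

			/* Slow work runs unlocked; the ring stays consistent
			 * because the refill above already completed. */
			spin_unlock_irqrestore(&priv->lock, flags);
			rskb->protocol = eth_type_trans(rskb, dev);
			netif_rx(rskb);
			spin_lock_irqsave(&priv->lock, flags);
		}
		spin_unlock_irqrestore(&priv->lock, flags);

		return IRQ_HANDLED;
	}
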
@@ -454,6 +454,7 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct mpc52xx_fec __iomem *fec = priv->fec;
 	u32 ievent;
+	unsigned long flags;
 
 	ievent = in_be32(&fec->ievent);
 
@@ -471,9 +472,10 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
 	if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
 		dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
 
+	spin_lock_irqsave(&priv->lock, flags);
 	mpc52xx_fec_reset(dev);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
-	netif_wake_queue(dev);
 	return IRQ_HANDLED;
 }
 
@@ -574,19 +576,16 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
 		out_be32(&fec->gaddr2, 0xffffffff);
 	} else {
 		u32 crc;
-		int i;
 		struct dev_mc_list *dmi;
 		u32 gaddr1 = 0x00000000;
 		u32 gaddr2 = 0x00000000;
 
-		dmi = dev->mc_list;
-		for (i=0; i<dev->mc_count; i++) {
+		netdev_for_each_mc_addr(dmi, dev) {
 			crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
 			if (crc >= 32)
 				gaddr1 |= 1 << (crc-32);
 			else
 				gaddr2 |= 1 << crc;
-			dmi = dmi->next;
 		}
 		out_be32(&fec->gaddr1, gaddr1);
 		out_be32(&fec->gaddr2, gaddr2);
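
The multicast hunk converts an open-coded walk of dev->mc_list (manual counter, dmi = dmi->next) to the netdev_for_each_mc_addr() iterator, part of the tree-wide conversion ahead of the multicast list rework. The filter logic is unchanged: ether_crc_le() computes a CRC-32 over the 6-byte MAC address, the top six bits (>> 26) select one of 64 hash bins, and each bin is one bit across the two 32-bit group-address registers. Sketched in isolation below; dev and the fec register block are assumed in scope, and this mirrors the loop above rather than adding anything new:

	/* 64-bit multicast hash: bin = top 6 bits of CRC-32(addr).
	 * Bins 32..63 land in gaddr1, bins 0..31 in gaddr2. */
	u32 gaddr1 = 0, gaddr2 = 0;
	struct dev_mc_list *dmi;

	netdev_for_each_mc_addr(dmi, dev) {
		u32 crc = ether_crc_le(6, dmi->dmi_addr) >> 26;

		if (crc >= 32)
			gaddr1 |= 1 << (crc - 32);
		else
			gaddr2 |= 1 << crc;
	}
	out_be32(&fec->gaddr1, gaddr1);
	out_be32(&fec->gaddr2, gaddr2);
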
@@ -768,6 +767,8 @@ static void mpc52xx_fec_reset(struct net_device *dev)
 	bcom_enable(priv->tx_dmatsk);
 
 	mpc52xx_fec_start(dev);
+
+	netif_wake_queue(dev);
 }
 
 