path: root/drivers/net/ethernet/freescale
author	Claudiu Manoil <claudiu.manoil@freescale.com>	2014-02-24 05:13:45 -0500
committer	David S. Miller <davem@davemloft.net>	2014-02-24 19:38:20 -0500
commit	0851133bb5ad9d95fceccac9fc67b798041b73e2 (patch)
tree	d595058108186c8731b361f53bad30c0f75fb5ee /drivers/net/ethernet/freescale
parent	80ec396cb6b522b23c69dfff32a2d12993e4bb30 (diff)
gianfar: Fix device reset races (oops) for Tx
The device reset procedure, stop_gfar()/startup_gfar(), has concurrency issues. "Kernel access of bad area" oopses show up during Tx timeout device reset or other reset cases (like changing the MTU) that happen while the interface still has traffic. The oopses occur in start_xmit and clean_tx_ring when accessing tx_queue->tx_skbuff, which is NULL. The race comes from de-allocating tx_skbuff while transmission and napi processing are still active.

Though the Tx queues get temporarily stopped when a Tx timeout occurs, they get re-enabled as a result of Tx congestion handling inside the napi context (see clean_tx_ring()). Not disabling napi during reset is also a bug, because clean_tx_ring() will try to access tx_skbuff while it is being de-alloc'ed and re-alloc'ed.

To fix this, stop_gfar() needs to disable napi processing after stopping the Tx queues. However, to prevent clean_tx_ring() from re-enabling the Tx queue before napi gets disabled, the device state DOWN has been introduced. It prevents the Tx congestion management from re-enabling a de-congested Tx queue while the device is brought down. An additional locking state, RESETTING, has been introduced to prevent simultaneous resets and to prevent configuring the device while it is resetting. The bogus 'rxlock's (one per Rx queue) have been removed, since their purpose is not justified: they neither prevent nor are suited to prevent device reset/reconfig races (such as this one).

Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
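As an illustration of the scheme described above, here is a minimal, hypothetical sketch (not part of the patch; the function name example_reset is invented, while the state bits, helpers and struct come from the diff below) of how the GFAR_RESETTING bit lock and the GFAR_DOWN state cooperate during a reset:

/* Illustrative sketch only -- assumes the driver's "gianfar.h" for
 * struct gfar_private, GFAR_DOWN and GFAR_RESETTING.
 */
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include "gianfar.h"

static void example_reset(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);

	/* GFAR_RESETTING acts as a bit lock, serializing Tx-timeout resets,
	 * MTU changes and ethtool reconfiguration against each other.
	 */
	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		cpu_relax();

	/* stop_gfar() stops the Tx queues, sets GFAR_DOWN (with barriers),
	 * disables napi and halts DMA, so clean_tx_ring() cannot re-wake a
	 * queue while tx_skbuff is freed; startup_gfar() clears GFAR_DOWN
	 * and restarts napi and the queues.
	 */
	stop_gfar(ndev);
	startup_gfar(ndev);

	clear_bit_unlock(GFAR_RESETTING, &priv->state);
}

The MTU-change and ethtool paths in the diff below follow this same acquire/release pattern around stop_gfar()/startup_gfar().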
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	116
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.h	16
-rw-r--r--	drivers/net/ethernet/freescale/gianfar_ethtool.c	28
3 files changed, 76 insertions, 84 deletions
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 6c054b549fa1..4eac25f66605 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -480,14 +480,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
 	}
 }
 
-void lock_rx_qs(struct gfar_private *priv)
-{
-	int i;
-
-	for (i = 0; i < priv->num_rx_queues; i++)
-		spin_lock(&priv->rx_queue[i]->rxlock);
-}
-
 void lock_tx_qs(struct gfar_private *priv)
 {
 	int i;
@@ -496,14 +488,6 @@ void lock_tx_qs(struct gfar_private *priv)
 		spin_lock(&priv->tx_queue[i]->txlock);
 }
 
-void unlock_rx_qs(struct gfar_private *priv)
-{
-	int i;
-
-	for (i = 0; i < priv->num_rx_queues; i++)
-		spin_unlock(&priv->rx_queue[i]->rxlock);
-}
-
 void unlock_tx_qs(struct gfar_private *priv)
 {
 	int i;
@@ -543,7 +527,6 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
 		priv->rx_queue[i]->rx_skbuff = NULL;
 		priv->rx_queue[i]->qindex = i;
 		priv->rx_queue[i]->dev = priv->ndev;
-		spin_lock_init(&(priv->rx_queue[i]->rxlock));
 	}
 	return 0;
 }
@@ -857,18 +840,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
 	switch (config.rx_filter) {
 	case HWTSTAMP_FILTER_NONE:
 		if (priv->hwts_rx_en) {
-			stop_gfar(netdev);
 			priv->hwts_rx_en = 0;
-			startup_gfar(netdev);
+			reset_gfar(netdev);
 		}
 		break;
 	default:
 		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
 			return -ERANGE;
 		if (!priv->hwts_rx_en) {
-			stop_gfar(netdev);
 			priv->hwts_rx_en = 1;
-			startup_gfar(netdev);
+			reset_gfar(netdev);
 		}
 		config.rx_filter = HWTSTAMP_FILTER_ALL;
 		break;
@@ -1027,7 +1008,7 @@ static void gfar_detect_errata(struct gfar_private *priv)
 			 priv->errata);
 }
 
-static void gfar_mac_reset(struct gfar_private *priv)
+void gfar_mac_reset(struct gfar_private *priv)
 {
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	u32 tempval;
@@ -1290,6 +1271,8 @@ static int gfar_probe(struct platform_device *ofdev)
 	if (priv->num_tx_queues == 1)
 		priv->prio_sched_en = 1;
 
+	set_bit(GFAR_DOWN, &priv->state);
+
 	gfar_hw_init(priv);
 
 	err = register_netdev(dev);
@@ -1389,7 +1372,6 @@ static int gfar_suspend(struct device *dev)
 
 		local_irq_save(flags);
 		lock_tx_qs(priv);
-		lock_rx_qs(priv);
 
 		gfar_halt_nodisable(priv);
 
@@ -1403,7 +1385,6 @@ static int gfar_suspend(struct device *dev)
 
 		gfar_write(&regs->maccfg1, tempval);
 
-		unlock_rx_qs(priv);
 		unlock_tx_qs(priv);
 		local_irq_restore(flags);
 
@@ -1449,7 +1430,6 @@ static int gfar_resume(struct device *dev)
 	 */
 	local_irq_save(flags);
 	lock_tx_qs(priv);
-	lock_rx_qs(priv);
 
 	tempval = gfar_read(&regs->maccfg2);
 	tempval &= ~MACCFG2_MPEN;
@@ -1457,7 +1437,6 @@ static int gfar_resume(struct device *dev)
 
 	gfar_start(priv);
 
-	unlock_rx_qs(priv);
 	unlock_tx_qs(priv);
 	local_irq_restore(flags);
 
@@ -1718,21 +1697,19 @@ void gfar_halt(struct gfar_private *priv)
 void stop_gfar(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	unsigned long flags;
 
-	phy_stop(priv->phydev);
+	netif_tx_stop_all_queues(dev);
 
+	smp_mb__before_clear_bit();
+	set_bit(GFAR_DOWN, &priv->state);
+	smp_mb__after_clear_bit();
 
-	/* Lock it down */
-	local_irq_save(flags);
-	lock_tx_qs(priv);
-	lock_rx_qs(priv);
+	disable_napi(priv);
 
+	/* disable ints and gracefully shut down Rx/Tx DMA */
 	gfar_halt(priv);
 
-	unlock_rx_qs(priv);
-	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	phy_stop(priv->phydev);
 
 	free_skb_resources(priv);
 }
@@ -2009,11 +1986,19 @@ int startup_gfar(struct net_device *ndev)
 
 	gfar_init_tx_rx_base(priv);
 
-	/* Start the controller */
+	smp_mb__before_clear_bit();
+	clear_bit(GFAR_DOWN, &priv->state);
+	smp_mb__after_clear_bit();
+
+	/* Start Rx/Tx DMA and enable the interrupts */
 	gfar_start(priv);
 
 	phy_start(priv->phydev);
 
+	enable_napi(priv);
+
+	netif_tx_wake_all_queues(ndev);
+
 	return 0;
 }
 
@@ -2025,26 +2010,17 @@ static int gfar_enet_open(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	int err;
 
-	enable_napi(priv);
-
 	err = init_phy(dev);
-
-	if (err) {
-		disable_napi(priv);
+	if (err)
 		return err;
-	}
 
 	err = gfar_request_irq(priv);
 	if (err)
 		return err;
 
 	err = startup_gfar(dev);
-	if (err) {
-		disable_napi(priv);
+	if (err)
 		return err;
-	}
-
-	netif_tx_start_all_queues(dev);
 
 	device_set_wakeup_enable(&dev->dev, priv->wol_en);
 
@@ -2369,8 +2345,6 @@ static int gfar_close(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 
-	disable_napi(priv);
-
 	cancel_work_sync(&priv->reset_task);
 	stop_gfar(dev);
 
@@ -2378,8 +2352,6 @@ static int gfar_close(struct net_device *dev)
 	phy_disconnect(priv->phydev);
 	priv->phydev = NULL;
 
-	netif_tx_stop_all_queues(dev);
-
 	gfar_free_irq(priv);
 
 	return 0;
@@ -2403,6 +2375,9 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 	}
 
+	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+		cpu_relax();
+
 	if (dev->flags & IFF_UP)
 		stop_gfar(dev);
 
@@ -2411,9 +2386,24 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
 	if (dev->flags & IFF_UP)
 		startup_gfar(dev);
 
+	clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
 	return 0;
 }
 
+void reset_gfar(struct net_device *ndev)
+{
+	struct gfar_private *priv = netdev_priv(ndev);
+
+	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+		cpu_relax();
+
+	stop_gfar(ndev);
+	startup_gfar(ndev);
+
+	clear_bit_unlock(GFAR_RESETTING, &priv->state);
+}
+
 /* gfar_reset_task gets scheduled when a packet has not been
  * transmitted after a set amount of time.
  * For now, assume that clearing out all the structures, and
@@ -2423,16 +2413,7 @@ static void gfar_reset_task(struct work_struct *work)
 {
 	struct gfar_private *priv = container_of(work, struct gfar_private,
 			reset_task);
-	struct net_device *dev = priv->ndev;
-
-	if (dev->flags & IFF_UP) {
-		netif_tx_stop_all_queues(dev);
-		stop_gfar(dev);
-		startup_gfar(dev);
-		netif_tx_start_all_queues(dev);
-	}
-
-	netif_tx_schedule_all(dev);
+	reset_gfar(priv->ndev);
 }
 
 static void gfar_timeout(struct net_device *dev)
@@ -2545,8 +2526,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	}
 
 	/* If we freed a buffer, we can restart transmission, if necessary */
-	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
-		netif_wake_subqueue(dev, tqi);
+	if (tx_queue->num_txbdfree &&
+	    netif_tx_queue_stopped(txq) &&
+	    !(test_bit(GFAR_DOWN, &priv->state)))
+		netif_wake_subqueue(priv->ndev, tqi);
 
 	/* Update dirty indicators */
 	tx_queue->skb_dirtytx = skb_dirtytx;
@@ -3023,12 +3006,11 @@ static void adjust_link(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	unsigned long flags;
 	struct phy_device *phydev = priv->phydev;
 	int new_state = 0;
 
-	local_irq_save(flags);
-	lock_tx_qs(priv);
+	if (test_bit(GFAR_RESETTING, &priv->state))
+		return;
 
 	if (phydev->link) {
 		u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3100,8 +3082,6 @@ static void adjust_link(struct net_device *dev)
 
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
-	unlock_tx_qs(priv);
-	local_irq_restore(flags);
 }
 
 /* Update the hash table based on the current list of multicast
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9db95563f8aa..1e16216d4150 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -965,7 +965,6 @@ struct rx_q_stats {
 
 /**
  * struct gfar_priv_rx_q - per rx queue structure
- * @rxlock: per queue rx spin lock
  * @rx_skbuff: skb pointers
  * @skb_currx: currently use skb pointer
  * @rx_bd_base: First rx buffer descriptor
@@ -978,8 +977,7 @@ struct rx_q_stats {
  */
 
 struct gfar_priv_rx_q {
-	spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
-	struct sk_buff ** rx_skbuff;
+	struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
 	dma_addr_t rx_bd_dma_base;
 	struct rxbd8 *rx_bd_base;
 	struct rxbd8 *cur_rx;
@@ -1040,6 +1038,11 @@ enum gfar_errata {
 	GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
 };
 
+enum gfar_dev_state {
+	GFAR_DOWN = 1,
+	GFAR_RESETTING
+};
+
 /* Struct stolen almost completely (and shamelessly) from the FCC enet source
  * (Ok, that's not so true anymore, but there is a family resemblance)
  * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1068,6 +1071,7 @@ struct gfar_private {
 	struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
 	struct gfar_priv_grp gfargrp[MAXGROUPS];
 
+	unsigned long state;
 	u32 device_flags;
 
 	unsigned int mode;
@@ -1198,13 +1202,11 @@ static inline void gfar_write_isrg(struct gfar_private *priv)
 	}
 }
 
-void lock_rx_qs(struct gfar_private *priv);
-void lock_tx_qs(struct gfar_private *priv);
-void unlock_rx_qs(struct gfar_private *priv);
-void unlock_tx_qs(struct gfar_private *priv);
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
 void gfar_halt(struct gfar_private *priv);
 void gfar_start(struct gfar_private *priv);
 void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index dd7ccec506f1..45219d4d09b4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -487,6 +487,9 @@ static int gfar_sringparam(struct net_device *dev,
 		return -EINVAL;
 	}
 
+	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+		cpu_relax();
+
 	if (dev->flags & IFF_UP)
 		stop_gfar(dev);
 
@@ -498,10 +501,11 @@ static int gfar_sringparam(struct net_device *dev,
 		priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
 
 	/* Rebuild the rings with the new size */
-	if (dev->flags & IFF_UP) {
+	if (dev->flags & IFF_UP)
 		err = startup_gfar(dev);
-		netif_tx_wake_all_queues(dev);
-	}
+
+	clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
 	return err;
 }
 
@@ -580,20 +584,28 @@ static int gfar_spauseparam(struct net_device *dev,
 int gfar_set_features(struct net_device *dev, netdev_features_t features)
 {
 	netdev_features_t changed = dev->features ^ features;
+	struct gfar_private *priv = netdev_priv(dev);
 	int err = 0;
 
 	if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 			 NETIF_F_RXCSUM)))
 		return 0;
 
+	while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+		cpu_relax();
+
 	dev->features = features;
 
 	if (dev->flags & IFF_UP) {
 		/* Now we take down the rings to rebuild them */
 		stop_gfar(dev);
 		err = startup_gfar(dev);
-		netif_tx_wake_all_queues(dev);
+	} else {
+		gfar_mac_reset(priv);
 	}
 
+	clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
 	return err;
 }
 
@@ -1559,9 +1571,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
 	if (tab->index > MAX_FILER_IDX - 1)
 		return -EBUSY;
 
-	/* Avoid inconsistent filer table to be processed */
-	lock_rx_qs(priv);
-
 	/* Fill regular entries */
 	for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
 	     i++)
@@ -1574,8 +1583,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
 	 */
 	gfar_write_filer(priv, i, 0x20, 0x0);
 
-	unlock_rx_qs(priv);
-
 	return 0;
 }
 
@@ -1780,6 +1787,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 	struct gfar_private *priv = netdev_priv(dev);
 	int ret = 0;
 
+	if (test_bit(GFAR_RESETTING, &priv->state))
+		return -EBUSY;
+
 	mutex_lock(&priv->rx_queue_access);
 
 	switch (cmd->cmd) {