path: root/drivers/net/ethernet/broadcom/tg3.c
author     Matt Carlson <mcarlson@broadcom.com>  2011-11-04 05:15:03 -0400
committer  David S. Miller <davem@davemloft.net>  2011-11-04 17:31:48 -0400
commit     db21997379906fe7657d360674e1106d80b020a4 (patch)
tree       aefa69f5ad536aec711d102a238d6dc10c50fa48 /drivers/net/ethernet/broadcom/tg3.c
parent     9dc5e342703948ea7b086d063c85c0e79dac8149 (diff)
tg3: Schedule at most one tg3_reset_task run
It is possible for multiple threads in the tg3 driver to each attempt to schedule a run of tg3_reset_task(). The resulting tg3_reset_task() executions could all wind up on the same workqueue (and execute serially) or on the queues of other processors (and execute in parallel). Neither outcome is what was intended.

This patch adds a new flag, TG3_FLAG_RESET_TASK_PENDING, and uses it to decide whether or not to schedule another run of tg3_reset_task(). With the new flag come two new helper functions that handle scheduling and cancelling tg3_reset_task().

Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
Reviewed-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
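For illustration only (not part of the patch): a minimal userspace C sketch of the same schedule-at-most-once gate, using C11 atomics in place of the kernel's test_and_set_bit()/tg3_flag_clear() and a stub in place of schedule_work(). All names here (reset_pending, queue_reset_work, reset_task_schedule, reset_task_done) are hypothetical.

	/* Illustrative sketch, not kernel code. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool reset_pending = false;   /* plays the role of TG3_FLAG_RESET_TASK_PENDING */

	/* Stand-in for schedule_work(): here it just reports the request. */
	static void queue_reset_work(void)
	{
		puts("reset work queued");
	}

	/* Mirrors tg3_reset_task_schedule(): only the first caller queues work. */
	static void reset_task_schedule(void)
	{
		if (!atomic_exchange(&reset_pending, true))
			queue_reset_work();
	}

	/* Mirrors the end of the reset task: re-arm the gate once the work is done. */
	static void reset_task_done(void)
	{
		atomic_store(&reset_pending, false);
	}

	int main(void)
	{
		reset_task_schedule();	/* first request: queues the work */
		reset_task_schedule();	/* duplicate request: gate already set, nothing queued */
		reset_task_done();	/* work finished: gate re-armed */
		reset_task_schedule();	/* next request queues again */
		return 0;
	}

The point of the pattern, as in the patch below, is that the test-and-set and the clear bracket the queued work, so at most one instance of the reset task can be pending at any time.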
Diffstat (limited to 'drivers/net/ethernet/broadcom/tg3.c')
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  33
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 6973d01ae85a..d4a85b795344 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -5929,6 +5929,18 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
 	return work_done;
 }
 
+static inline void tg3_reset_task_schedule(struct tg3 *tp)
+{
+	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+		schedule_work(&tp->reset_task);
+}
+
+static inline void tg3_reset_task_cancel(struct tg3 *tp)
+{
+	cancel_work_sync(&tp->reset_task);
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
+}
+
 static int tg3_poll_msix(struct napi_struct *napi, int budget)
 {
 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5969,7 +5981,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
5969tx_recovery: 5981tx_recovery:
5970 /* work_done is guaranteed to be less than budget. */ 5982 /* work_done is guaranteed to be less than budget. */
5971 napi_complete(napi); 5983 napi_complete(napi);
5972 schedule_work(&tp->reset_task); 5984 tg3_reset_task_schedule(tp);
5973 return work_done; 5985 return work_done;
5974} 5986}
5975 5987
@@ -6004,7 +6016,7 @@ static void tg3_process_error(struct tg3 *tp)
 	tg3_dump_state(tp);
 
 	tg3_flag_set(tp, ERROR_PROCESSED);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 }
 
 static int tg3_poll(struct napi_struct *napi, int budget)
@@ -6051,7 +6063,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
tx_recovery:
 	/* work_done is guaranteed to be less than budget. */
 	napi_complete(napi);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 	return work_done;
 }
 
@@ -6345,6 +6357,7 @@ static void tg3_reset_task(struct work_struct *work)
 	tg3_full_lock(tp, 0);
 
 	if (!netif_running(tp->dev)) {
+		tg3_flag_clear(tp, RESET_TASK_PENDING);
 		tg3_full_unlock(tp);
 		return;
 	}
@@ -6382,6 +6395,8 @@ out:
 
 	if (!err)
 		tg3_phy_start(tp);
+
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
 }
 
 static void tg3_tx_timeout(struct net_device *dev)
@@ -6393,7 +6408,7 @@ static void tg3_tx_timeout(struct net_device *dev)
 		tg3_dump_state(tp);
 	}
 
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 }
 
 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
@@ -9228,7 +9243,7 @@ static void tg3_timer(unsigned long __opaque)
 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
 			tg3_flag_set(tp, RESTART_TIMER);
 			spin_unlock(&tp->lock);
-			schedule_work(&tp->reset_task);
+			tg3_reset_task_schedule(tp);
 			return;
 		}
 	}
@@ -9785,7 +9800,7 @@ static int tg3_close(struct net_device *dev)
 	struct tg3 *tp = netdev_priv(dev);
 
 	tg3_napi_disable(tp);
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 
 	netif_tx_stop_all_queues(dev);
 
@@ -15685,7 +15700,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
 	if (tp->fw)
 		release_firmware(tp->fw);
 
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 
 	if (tg3_flag(tp, USE_PHYLIB)) {
 		tg3_phy_fini(tp);
@@ -15719,7 +15734,7 @@ static int tg3_suspend(struct device *device)
 	if (!netif_running(dev))
 		return 0;
 
-	flush_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 	tg3_phy_stop(tp);
 	tg3_netif_stop(tp);
 
@@ -15835,7 +15850,7 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 	tg3_flag_clear(tp, RESTART_TIMER);
 
 	/* Want to make sure that the reset task doesn't run */
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
 	tg3_flag_clear(tp, RESTART_TIMER);
 