author		Divy Le Ray <divy@chelsio.com>	2009-04-17 08:21:17 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-20 05:07:24 -0400
commit		c80b0c28caed5cd9165caab6295ed86b4e9fc327 (patch)
tree		b0c9980129bc5464461a1f1d0c1332d51d1050a3 /drivers/net/cxgb3
parent		3851c66cf0d130ae49f99fe1dea42950d9835037 (diff)
cxgb3: fix workqueue flush issues
The fatal error task can be scheduled while processing an offload packet in NAPI context when the connection handle is bogus. This can race with the ports being brought down and the cxgb3 workqueue being flushed. Stop NAPI processing before flushing the work queue.

The ULP drivers (iSCSI, iWARP) might also schedule a task on keventd_wq while releasing a connection handle (cxgb3_offload.c::cxgb3_queue_tid_release()). The driver, however, does not flush any work on keventd_wq while being unloaded. This patch also fixes that.

Also call cancel_delayed_work_sync() in place of the deprecated cancel_rearming_delayed_workqueue().

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
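For reference, the teardown ordering the commit message describes can be summarized in a minimal, hypothetical C sketch. The structure and function names (my_dev, my_dev_down, wq, check_task) are illustrative only and are not the actual cxgb3 code; the workqueue calls themselves (napi_disable, flush_workqueue, cancel_delayed_work_sync, flush_scheduled_work) are standard kernel APIs.

#include <linux/netdevice.h>
#include <linux/workqueue.h>

/* Hypothetical example device; the field names are illustrative only. */
struct my_dev {
	struct napi_struct napi;	/* RX polling context */
	struct workqueue_struct *wq;	/* driver's private workqueue */
	struct delayed_work check_task;	/* periodic check task */
};

/*
 * Teardown ordering sketch: quiesce RX first so NAPI can no longer queue
 * new work, then flush the driver's private workqueue, cancel the periodic
 * delayed work synchronously, and finally drain work queued on keventd_wq.
 */
static void my_dev_down(struct my_dev *dev)
{
	napi_disable(&dev->napi);
	flush_workqueue(dev->wq);
	cancel_delayed_work_sync(&dev->check_task);
	flush_scheduled_work();
}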
Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--	drivers/net/cxgb3/cxgb3_main.c	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 9fdfe0bfaecb..99b5032afda6 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1117,8 +1117,8 @@ static void cxgb_down(struct adapter *adapter)
 	spin_unlock_irq(&adapter->work_lock);
 
 	free_irq_resources(adapter);
-	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
 	quiesce_rx(adapter);
+	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
 }
 
 static void schedule_chk_task(struct adapter *adap)
@@ -1187,6 +1187,9 @@ static int offload_close(struct t3cdev *tdev)
 
 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
 
+	/* Flush work scheduled while releasing TIDs */
+	flush_scheduled_work();
+
 	tdev->lldev = NULL;
 	cxgb3_set_dummy_ops(tdev);
 	t3_tp_set_offload_mode(adapter, 0);
@@ -1247,8 +1250,7 @@ static int cxgb_close(struct net_device *dev)
 	spin_unlock_irq(&adapter->work_lock);
 
 	if (!(adapter->open_device_map & PORT_MASK))
-		cancel_rearming_delayed_workqueue(cxgb3_wq,
-						  &adapter->adap_check_task);
+		cancel_delayed_work_sync(&adapter->adap_check_task);
 
 	if (!adapter->open_device_map)
 		cxgb_down(adapter);