author	Kashyap, Desai <kashyap.desai@lsi.com>	2009-09-02 02:16:33 -0400
committer	James Bottomley <James.Bottomley@suse.de>	2009-09-12 10:35:28 -0400
commit	9766096d331c82e71d3c9df61f1c88eff6ad916b (patch)
tree	ad364a6951109114117d55aa85bec4d082815847 /drivers/message
parent	fea984034b1ccdb26e8163ed5350ce7f0563b136 (diff)
[SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command
Normally, in the HBA reset path the MPT driver flushes any work still sitting on its work queue (mpt/0). From the driver's point of view this is mostly a formality: the HBA reset turns firmware events off, so queued work items simply return without doing anything. Work that is already half-way through execution, however, has to be allowed to finish, and that is where the driver could get stuck forever in a deadlock between the SCSI midlayer and the MPT driver: sd_sync_cache() waits forever because the HBA is not in the running state, and the HBA can never reach the running state because sd_sync_cache() is being called from the HBA reset context itself.

With this change the reset path no longer waits for half-finished work before returning from the HBA reset. Once the reset completes, the EH thread moves the host state from recovery back to running, and any work that was waiting for the running state can then finish. Firmware events are now turned back on from a separate work item, the rescan-topology work, instead of from the reset path.

Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
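To make the circular wait concrete, the following is a minimal user-space sketch of the same pattern using pthreads rather than the kernel workqueue or SCSI APIs. All names (fw_event_work, hba_reset_old, host_running) are illustrative, not the driver's actual symbols, and the program hangs by design to show the deadlock:

/* Illustrative only: models the cycle between the reset path and a
 * queued work item, not the actual mptsas/SCSI code paths. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int host_running = 0;	/* stands in for "host back in running state" */
static int work_done    = 0;

/* Queued work item: like sd_sync_cache(), it cannot finish until the
 * host is running again. */
static void *fw_event_work(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!host_running)
		pthread_cond_wait(&cond, &lock);	/* blocks forever, see below */
	work_done = 1;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Old reset path: waits for in-flight work (the flush_workqueue() role)
 * before the host can be marked running again. */
static void hba_reset_old(void)
{
	pthread_mutex_lock(&lock);
	while (!work_done)
		pthread_cond_wait(&cond, &lock);	/* never satisfied */
	host_running = 1;				/* never reached: deadlock */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, fw_event_work, NULL);
	sleep(1);
	printf("starting HBA reset with work in flight...\n");
	hba_reset_old();	/* this call never returns */
	pthread_join(t, NULL);
	return 0;
}

The reset path cannot return until the queued work finishes, but the queued work cannot finish until the reset has returned and the host is running. The patch breaks the cycle by not waiting in the reset path and by re-enabling firmware events later, from the rescan work.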
Diffstat (limited to 'drivers/message')
-rw-r--r--	drivers/message/fusion/mptsas.c | 8
1 file changed, 1 insertion(+), 7 deletions(-)
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 02a18dfcd52a..83873e3d0ce7 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -325,7 +325,6 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
 {
 	struct fw_event_work *fw_event, *next;
 	struct mptsas_target_reset_event *target_reset_list, *n;
-	u8	flush_q;
 	MPT_SCSI_HOST	*hd = shost_priv(ioc->sh);
 
 	/* flush the target_reset_list */
@@ -345,15 +344,10 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
 	    !ioc->fw_event_q || in_interrupt())
 		return;
 
-	flush_q = 0;
 	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
 		if (cancel_delayed_work(&fw_event->work))
 			mptsas_free_fw_event(ioc, fw_event);
-		else
-			flush_q = 1;
 	}
-	if (flush_q)
-		flush_workqueue(ioc->fw_event_q);
 }
 
 
@@ -1279,7 +1273,6 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 		}
 		mptsas_cleanup_fw_event_q(ioc);
 		mptsas_queue_rescan(ioc);
-		mptsas_fw_event_on(ioc);
 		break;
 	default:
 		break;
@@ -1599,6 +1592,7 @@ mptsas_firmware_event_work(struct work_struct *work)
 		mptsas_scan_sas_topology(ioc);
 		ioc->in_rescan = 0;
 		mptsas_free_fw_event(ioc, fw_event);
+		mptsas_fw_event_on(ioc);
 		return;
 	}
 