author     Kashyap, Desai <kashyap.desai@lsi.com>    2010-03-09 06:01:43 -0500
committer  James Bottomley <James.Bottomley@suse.de>    2010-04-11 10:23:27 -0400
commit     f1c35e6aea579d5bdb6dc02dfa99c67c7c3b3f67 (patch)
tree       29fcd494910b005edfe37771cf5f6b13142bab62    /drivers/scsi/mpt2sas/mpt2sas_scsih.c
parent     36dd288f0f930c154ec6a4d73a6a35f3079418c6 (diff)
[SCSI] mpt2sas: RESCAN Barrier work is added in case of HBA reset.
Add a cancel_pending_work flag to the fw_event_work structure. The flag is set during host reset; the work threads check it later from their own context and, if it is set, ignore those events. The rescan after host reset is also reworked: a special task, MPT2SAS_RESCAN_AFTER_HOST_RESET, is queued at the time of HBA reset and treated as a barrier. All work queued after MPT2SAS_RESCAN_AFTER_HOST_RESET is treated as new work and is serviced by the callback handler. If host recovery is still in progress when the RESCAN task runs, it waits for the shost_recovery_done completion, which is signalled from the HBA reset DONE context.

Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
Reviewed-by: Eric Moore <eric.moore@lsi.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/mpt2sas/mpt2sas_scsih.c')
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 140
1 file changed, 75 insertions(+), 65 deletions(-)
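Before the diff, here is a rough userspace sketch of the pattern the commit message describes: events that have not started yet are cancelled outright, an event that is already running is only flagged (cancel_pending_work), and a single RESCAN barrier event queued at reset time waits for the reset-done signal before rescanning. This is illustrative only, not driver code: it models the workqueue and completion with pthreads, and every name in it is hypothetical rather than taken from mpt2sas.

/* rescan_barrier_demo.c -- illustrative model only, not driver code.
 * Build: cc -pthread rescan_barrier_demo.c -o rescan_barrier_demo
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define EVENT_RESCAN_AFTER_RESET 0xFFFF	/* stands in for MPT2SAS_RESCAN_AFTER_HOST_RESET */

struct event {
	int type;
	bool cancel_pending_work;	/* set by the reset path for in-flight events */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reset_done_cv = PTHREAD_COND_INITIALIZER;
static bool host_recovery;		/* models ioc->shost_recovery */

/* models the worker callback (_firmware_event_work in the driver) */
static void *event_worker(void *arg)
{
	struct event *ev = arg;

	if (ev->cancel_pending_work)	/* queue was flushed during reset: drop it */
		return NULL;

	if (ev->type == EVENT_RESCAN_AFTER_RESET) {
		/* barrier: block until the reset handler reports completion */
		pthread_mutex_lock(&lock);
		while (host_recovery)
			pthread_cond_wait(&reset_done_cv, &lock);
		pthread_mutex_unlock(&lock);
		printf("rescan: revalidate devices, drop unresponding ones\n");
		return NULL;
	}

	printf("normal firmware event 0x%x\n", ev->type);
	return NULL;
}

/* models the "reset DONE" leg of the reset handler */
static void reset_done(void)
{
	pthread_mutex_lock(&lock);
	host_recovery = false;
	pthread_cond_broadcast(&reset_done_cv);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t worker;
	struct event rescan = { .type = EVENT_RESCAN_AFTER_RESET };

	host_recovery = true;			/* a reset is in progress */
	pthread_create(&worker, NULL, event_worker, &rescan);
	sleep(1);				/* reset handler still busy */
	reset_done();				/* barrier may now proceed */
	pthread_join(worker, NULL);
	return 0;
}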
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index aa67b757bf23..6f121a904d05 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -109,14 +109,16 @@ struct sense_info {
 };
 
 
+#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
+
 /**
  * struct fw_event_work - firmware event struct
  * @list: link list framework
  * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
  * @ioc: per adapter object
  * @VF_ID: virtual function id
  * @VP_ID: virtual port id
- * @host_reset_handling: handling events during host reset
  * @ignore: flag meaning this event has been marked to ignore
  * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h
  * @event_data: reply event data payload follows
@@ -125,11 +127,11 @@ struct sense_info {
  */
 struct fw_event_work {
 	struct list_head list;
-	struct work_struct work;
+	u8 cancel_pending_work;
+	struct delayed_work delayed_work;
 	struct MPT2SAS_ADAPTER *ioc;
 	u8 VF_ID;
 	u8 VP_ID;
-	u8 host_reset_handling;
 	u8 ignore;
 	u16 event;
 	void *event_data;
@@ -2325,8 +2327,9 @@ _scsih_fw_event_add(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
 	list_add_tail(&fw_event->list, &ioc->fw_event_list);
-	INIT_WORK(&fw_event->work, _firmware_event_work);
-	queue_work(ioc->firmware_event_thread, &fw_event->work);
+	INIT_DELAYED_WORK(&fw_event->delayed_work, _firmware_event_work);
+	queue_delayed_work(ioc->firmware_event_thread,
+	    &fw_event->delayed_work, 0);
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
@@ -2353,62 +2356,55 @@ _scsih_fw_event_free(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
+
 /**
- * _scsih_fw_event_add - requeue an event
+ * _scsih_queue_rescan - queue a topology rescan from user context
  * @ioc: per adapter object
- * @fw_event: object describing the event
- * Context: This function will acquire ioc->fw_event_lock.
  *
  * Return nothing.
  */
 static void
-_scsih_fw_event_requeue(struct MPT2SAS_ADAPTER *ioc, struct fw_event_work
-    *fw_event, unsigned long delay)
+_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
 {
-	unsigned long flags;
-	if (ioc->firmware_event_thread == NULL)
-		return;
+	struct fw_event_work *fw_event;
 
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	queue_work(ioc->firmware_event_thread, &fw_event->work);
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	if (ioc->wait_for_port_enable_to_complete)
+		return;
+	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+	if (!fw_event)
+		return;
+	fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
+	fw_event->ioc = ioc;
+	_scsih_fw_event_add(ioc, fw_event);
 }
 
 /**
- * _scsih_fw_event_off - turn flag off preventing event handling
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
  * @ioc: per adapter object
  *
- * Used to prevent handling of firmware events during adapter reset
- * driver unload.
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
  *
  * Return nothing.
  */
 static void
-_scsih_fw_event_off(struct MPT2SAS_ADAPTER *ioc)
+_scsih_fw_event_cleanup_queue(struct MPT2SAS_ADAPTER *ioc)
 {
-	unsigned long flags;
+	struct fw_event_work *fw_event, *next;
 
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	ioc->fw_events_off = 1;
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	if (list_empty(&ioc->fw_event_list) ||
+	     !ioc->firmware_event_thread || in_interrupt())
+		return;
 
+	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+		if (cancel_delayed_work(&fw_event->delayed_work)) {
+			_scsih_fw_event_free(ioc, fw_event);
+			continue;
+		}
+		fw_event->cancel_pending_work = 1;
+	}
 }
 
-/**
- * _scsih_fw_event_on - turn flag on allowing firmware event handling
- * @ioc: per adapter object
- *
- * Returns nothing.
- */
-static void
-_scsih_fw_event_on(struct MPT2SAS_ADAPTER *ioc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	ioc->fw_events_off = 0;
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
-}
 
 /**
  * _scsih_ublock_io_device - set the device state to SDEV_RUNNING
@@ -5694,13 +5690,13 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
 }
 
 /**
- * _scsih_remove_unresponding_devices - removing unresponding devices
+ * _scsih_remove_unresponding_sas_devices - removing unresponding devices
  * @ioc: per adapter object
  *
  * Return nothing.
  */
 static void
-_scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
+_scsih_remove_unresponding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
 {
 	struct _sas_device *sas_device, *sas_device_next;
 	struct _sas_node *sas_expander;
@@ -5774,31 +5770,28 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 	case MPT2_IOC_PRE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
 		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
-		_scsih_fw_event_off(ioc);
 		break;
 	case MPT2_IOC_AFTER_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
 		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+		if (ioc->scsih_cmds.status & MPT2_CMD_PENDING) {
+			ioc->scsih_cmds.status |= MPT2_CMD_RESET;
+			mpt2sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+			complete(&ioc->scsih_cmds.done);
+		}
 		if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
 			ioc->tm_cmds.status |= MPT2_CMD_RESET;
 			mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
 			complete(&ioc->tm_cmds.done);
 		}
-		_scsih_fw_event_on(ioc);
+		_scsih_fw_event_cleanup_queue(ioc);
 		_scsih_flush_running_cmds(ioc);
+		_scsih_queue_rescan(ioc);
 		break;
 	case MPT2_IOC_DONE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
 		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
 		_scsih_sas_host_refresh(ioc);
-		_scsih_search_responding_sas_devices(ioc);
-		_scsih_search_responding_raid_devices(ioc);
-		_scsih_search_responding_expanders(ioc);
-		break;
-	case MPT2_IOC_RUNNING:
-		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
-		    "MPT2_IOC_RUNNING\n", ioc->name, __func__));
-		_scsih_remove_unresponding_devices(ioc);
 		break;
 	}
 }
@@ -5815,21 +5808,31 @@ static void
 _firmware_event_work(struct work_struct *work)
 {
 	struct fw_event_work *fw_event = container_of(work,
-	    struct fw_event_work, work);
+	    struct fw_event_work, delayed_work.work);
 	unsigned long flags;
 	struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
 
 	/* the queue is being flushed so ignore this event */
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	if (ioc->fw_events_off || ioc->remove_host) {
-		spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	if (ioc->remove_host || fw_event->cancel_pending_work) {
 		_scsih_fw_event_free(ioc, fw_event);
 		return;
 	}
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 
-	if (ioc->shost_recovery) {
-		_scsih_fw_event_requeue(ioc, fw_event, 1000);
+	if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
+		_scsih_fw_event_free(ioc, fw_event);
+		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+		if (ioc->shost_recovery) {
+			init_completion(&ioc->shost_recovery_done);
+			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+			    flags);
+			wait_for_completion(&ioc->shost_recovery_done);
+		} else
+			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
+			    flags);
+		_scsih_search_responding_sas_devices(ioc);
+		_scsih_search_responding_raid_devices(ioc);
+		_scsih_search_responding_expanders(ioc);
+		_scsih_remove_unresponding_sas_devices(ioc);
 		return;
 	}
 
@@ -5891,16 +5894,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
 {
 	struct fw_event_work *fw_event;
 	Mpi2EventNotificationReply_t *mpi_reply;
-	unsigned long flags;
 	u16 event;
 
 	/* events turned off due to host reset or driver unloading */
-	spin_lock_irqsave(&ioc->fw_event_lock, flags);
-	if (ioc->fw_events_off || ioc->remove_host) {
-		spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	if (ioc->remove_host)
 		return 1;
-	}
-	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
 	event = le16_to_cpu(mpi_reply->Event);
@@ -6158,6 +6156,18 @@ _scsih_shutdown(struct pci_dev *pdev)
 {
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
+	struct workqueue_struct *wq;
+	unsigned long flags;
+
+	ioc->remove_host = 1;
+	_scsih_fw_event_cleanup_queue(ioc);
+
+	spin_lock_irqsave(&ioc->fw_event_lock, flags);
+	wq = ioc->firmware_event_thread;
+	ioc->firmware_event_thread = NULL;
+	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+	if (wq)
+		destroy_workqueue(wq);
 
 	_scsih_ir_shutdown(ioc);
 	mpt2sas_base_detach(ioc);
@@ -6184,7 +6194,7 @@ _scsih_remove(struct pci_dev *pdev)
 	unsigned long flags;
 
 	ioc->remove_host = 1;
-	_scsih_fw_event_off(ioc);
+	_scsih_fw_event_cleanup_queue(ioc);
 
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
 	wq = ioc->firmware_event_thread;