author      Scott Teel <scott.teel@microsemi.com>            2017-05-04 18:51:36 -0400
committer   Martin K. Petersen <martin.petersen@oracle.com>  2017-06-12 20:48:01 -0400
commit      3d38f00c4107cc007056db9f4ab14ecb17ed193f (patch)
tree        c5b145ec38a771b42a677027361420dd2dd4ddf5
parent      5086435e662c7b6ada6cb5f48a1215fc6f612153 (diff)
scsi: hpsa: separate monitor events from rescan worker
Create a new worker thread to monitor controller events.

 - Both the rescan and event monitor workers can cause a rescan to occur;
   however, for multipath we have found that we need to respond faster than
   the normal scheduled rescan interval for path fail-overs.
 - Getting controller events only involves reading a register, but the
   rescan worker can obtain an updated LUN list when a PTRAID device is
   present.
 - Move the common rescan code into a separate function.

Advantages:
 - Detect controller events more frequently.
 - Leave the rescan thread interval at 30 seconds.

Reviewed-by: Scott Benesh <scott.benesh@microsemi.com>
Reviewed-by: Scott Teel <scott.teel@microsemi.com>
Reviewed-by: Kevin Barnett <kevin.barnett@microsemi.com>
Signed-off-by: Don Brace <don.brace@microsemi.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--   drivers/scsi/hpsa.c   76
-rw-r--r--   drivers/scsi/hpsa.h    1
2 files changed, 59 insertions(+), 18 deletions(-)
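For context on the new worker, below is a minimal, self-contained sketch of the self-rescheduling delayed-work idiom that hpsa_event_monitor_worker follows. It is not part of this patch; the names demo_monitor, demo_lock, demo_remove_in_progress and DEMO_MONITOR_INTERVAL are illustrative stand-ins for h->event_monitor_work, h->lock, h->remove_in_progress and HPSA_EVENT_MONITOR_INTERVAL.

/* Illustrative sketch only -- not hpsa code. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define DEMO_MONITOR_INTERVAL (15 * HZ)   /* same cadence the patch chooses */

static struct delayed_work demo_monitor;
static DEFINE_SPINLOCK(demo_lock);
static bool demo_remove_in_progress;

static void demo_monitor_worker(struct work_struct *work)
{
        unsigned long flags;

        /* Bail out if teardown has started (mirrors the h->remove_in_progress check). */
        spin_lock_irqsave(&demo_lock, flags);
        if (demo_remove_in_progress) {
                spin_unlock_irqrestore(&demo_lock, flags);
                return;
        }
        spin_unlock_irqrestore(&demo_lock, flags);

        /* ... cheap hardware poll would go here (a register read in hpsa) ... */

        /* Re-arm only while the device is still live, under the same lock. */
        spin_lock_irqsave(&demo_lock, flags);
        if (!demo_remove_in_progress)
                schedule_delayed_work(&demo_monitor, DEMO_MONITOR_INTERVAL);
        spin_unlock_irqrestore(&demo_lock, flags);
}

static int __init demo_init(void)
{
        INIT_DELAYED_WORK(&demo_monitor, demo_monitor_worker);
        schedule_delayed_work(&demo_monitor, DEMO_MONITOR_INTERVAL);
        return 0;
}

static void __exit demo_exit(void)
{
        unsigned long flags;

        /* Stop future re-arms, then wait for any in-flight run to finish. */
        spin_lock_irqsave(&demo_lock, flags);
        demo_remove_in_progress = true;
        spin_unlock_irqrestore(&demo_lock, flags);
        cancel_delayed_work_sync(&demo_monitor);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Re-checking the removal flag under the lock before schedule_delayed_work() keeps the worker from re-arming itself once teardown has begun, so the cancel_delayed_work_sync() call added to hpsa_remove_one() reliably stops it.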
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 5b0cc0ebb5e0..2ec907993400 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1110,6 +1110,7 @@ static int is_firmware_flash_cmd(u8 *cdb)
  */
 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
+#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
                 struct CommandList *c)
 {
@@ -8661,15 +8662,10 @@ out:
         return rc;
 }
 
-static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+static void hpsa_perform_rescan(struct ctlr_info *h)
 {
+        struct Scsi_Host *sh = NULL;
         unsigned long flags;
-        struct ctlr_info *h = container_of(to_delayed_work(work),
-                                        struct ctlr_info, rescan_ctlr_work);
-
-
-        if (h->remove_in_progress)
-                return;
 
         /*
          * Do the scan after the reset
@@ -8682,23 +8678,63 @@ static void hpsa_rescan_ctlr_worker(struct work_struct *work)
         }
         spin_unlock_irqrestore(&h->reset_lock, flags);
 
-        if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
-                scsi_host_get(h->scsi_host);
+        sh = scsi_host_get(h->scsi_host);
+        if (sh != NULL) {
+                hpsa_scan_start(sh);
+                scsi_host_put(sh);
+                h->drv_req_rescan = 0;
+        }
+}
+
+/*
+ * watch for controller events
+ */
+static void hpsa_event_monitor_worker(struct work_struct *work)
+{
+        struct ctlr_info *h = container_of(to_delayed_work(work),
+                                        struct ctlr_info, event_monitor_work);
+        unsigned long flags;
+
+        spin_lock_irqsave(&h->lock, flags);
+        if (h->remove_in_progress) {
+                spin_unlock_irqrestore(&h->lock, flags);
+                return;
+        }
+        spin_unlock_irqrestore(&h->lock, flags);
+
+        if (hpsa_ctlr_needs_rescan(h)) {
                 hpsa_ack_ctlr_events(h);
-                hpsa_scan_start(h->scsi_host);
-                scsi_host_put(h->scsi_host);
+                hpsa_perform_rescan(h);
+        }
+
+        spin_lock_irqsave(&h->lock, flags);
+        if (!h->remove_in_progress)
+                schedule_delayed_work(&h->event_monitor_work,
+                                        HPSA_EVENT_MONITOR_INTERVAL);
+        spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static void hpsa_rescan_ctlr_worker(struct work_struct *work)
+{
+        unsigned long flags;
+        struct ctlr_info *h = container_of(to_delayed_work(work),
+                                        struct ctlr_info, rescan_ctlr_work);
+
+        spin_lock_irqsave(&h->lock, flags);
+        if (h->remove_in_progress) {
+                spin_unlock_irqrestore(&h->lock, flags);
+                return;
+        }
+        spin_unlock_irqrestore(&h->lock, flags);
+
+        if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
+                hpsa_perform_rescan(h);
         } else if (h->discovery_polling) {
                 hpsa_disable_rld_caching(h);
                 if (hpsa_luns_changed(h)) {
-                        struct Scsi_Host *sh = NULL;
-
                         dev_info(&h->pdev->dev,
                                 "driver discovery polling rescan.\n");
-                        sh = scsi_host_get(h->scsi_host);
-                        if (sh != NULL) {
-                                hpsa_scan_start(sh);
-                                scsi_host_put(sh);
-                        }
+                        hpsa_perform_rescan(h);
                 }
         }
         spin_lock_irqsave(&h->lock, flags);
@@ -8964,6 +9000,9 @@ reinit_after_soft_reset:
         INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
         queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
                                 h->heartbeat_sample_interval);
+        INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
+        schedule_delayed_work(&h->event_monitor_work,
+                                HPSA_EVENT_MONITOR_INTERVAL);
         return 0;
 
 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
@@ -9132,6 +9171,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
         spin_unlock_irqrestore(&h->lock, flags);
         cancel_delayed_work_sync(&h->monitor_ctlr_work);
         cancel_delayed_work_sync(&h->rescan_ctlr_work);
+        cancel_delayed_work_sync(&h->event_monitor_work);
         destroy_workqueue(h->rescan_ctlr_wq);
         destroy_workqueue(h->resubmit_wq);
 
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 61dd54aa4d5d..c9c4927f5d0e 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -245,6 +245,7 @@ struct ctlr_info {
         u32 __percpu *lockup_detected;
         struct delayed_work monitor_ctlr_work;
         struct delayed_work rescan_ctlr_work;
+        struct delayed_work event_monitor_work;
         int remove_in_progress;
         /* Address of h->q[x] is passed to intr handler to know which queue */
         u8 q[MAX_REPLY_QUEUES];