author		Stephen M. Cameron <scameron@beardog.cce.hp.com>	2014-05-29 11:53:18 -0400
committer	Christoph Hellwig <hch@lst.de>	2014-06-02 03:54:56 -0400
commit		094963dad88c86f8f480c78992df03d916774c18
tree		4855df6634281b06ee4f6a9f1426729a42056b4a
parent		41b3cf08cd5e7915293f3784ab649d48bb142153
hpsa: use per-cpu variable for lockup_detected
Avoid excessive locking by using per-cpu variable for lockup_detected

Signed-off-by: Stephen M. Cameron <scameron@beardog.cce.hp.com>
Reviewed-by: Scott Teel <scott.teel@hp.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
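For readers unfamiliar with the kernel's per-cpu API, the locking-free pattern this patch adopts looks roughly like the stand-alone sketch below. It is illustrative only and not code from the patch; the names flag, flag_init, flag_read, flag_set_all and flag_exit are invented here, and only the standard <linux/percpu.h> helpers that the patch itself uses are assumed.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical per-cpu flag, analogous to h->lockup_detected in the patch. */
static u32 __percpu *flag;

static int flag_init(void)
{
	flag = alloc_percpu(u32);	/* one u32 per possible CPU, zeroed */
	return flag ? 0 : -ENOMEM;
}

/* Reader (hot path): look only at this CPU's copy; no shared spinlock. */
static u32 flag_read(void)
{
	int cpu;
	u32 value;

	cpu = get_cpu();		/* disable preemption, get this CPU's id */
	value = *per_cpu_ptr(flag, cpu);
	put_cpu();
	return value;
}

/* Writer (rare path): update every online CPU's copy so all readers see it. */
static void flag_set_all(u32 value)
{
	int cpu;

	for_each_online_cpu(cpu)
		*per_cpu_ptr(flag, cpu) = value;
	wmb();				/* push the per-cpu stores out to memory */
}

static void flag_exit(void)
{
	free_percpu(flag);
}

Because the lockup flag is read on every queued command but written only once, when a lockup is actually detected, replacing the shared spinlock with a per-cpu read removes lock contention from the hot path.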
-rw-r--r--	drivers/scsi/hpsa.c	80
-rw-r--r--	drivers/scsi/hpsa.h	2
2 files changed, 54 insertions, 28 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 2b7454f6c50f..3b70e66838ad 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -48,6 +48,7 @@
 #include <linux/bitmap.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
+#include <linux/percpu.h>
 #include <asm/div64.h>
 #include "hpsa_cmd.h"
 #include "hpsa.h"
@@ -1991,20 +1992,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
 	wait_for_completion(&wait);
 }
 
+static u32 lockup_detected(struct ctlr_info *h)
+{
+	int cpu;
+	u32 rc, *lockup_detected;
+
+	cpu = get_cpu();
+	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+	rc = *lockup_detected;
+	put_cpu();
+	return rc;
+}
+
 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
 	struct CommandList *c)
 {
-	unsigned long flags;
-
 	/* If controller lockup detected, fake a hardware error. */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
 		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-	} else {
-		spin_unlock_irqrestore(&h->lock, flags);
+	else
 		hpsa_scsi_do_simple_cmd_core(h, c);
-	}
 }
 
 #define MAX_DRIVER_CMD_RETRIES 25
@@ -3971,7 +3978,6 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	struct hpsa_scsi_dev_t *dev;
 	unsigned char scsi3addr[8];
 	struct CommandList *c;
-	unsigned long flags;
 	int rc = 0;
 
 	/* Get the ptr to our adapter structure out of cmd->host. */
@@ -3984,14 +3990,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	}
 	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
 
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
 		cmd->result = DID_ERROR << 16;
 		done(cmd);
 		return 0;
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	c = cmd_alloc(h);
 	if (c == NULL) {		/* trouble... */
 		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
@@ -4103,16 +4106,13 @@ static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
 	 * we can prevent new rescan threads from piling up on a
 	 * locked up controller.
 	 */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
 		spin_lock_irqsave(&h->scan_lock, flags);
 		h->scan_finished = 1;
 		wake_up_all(&h->scan_wait_queue);
 		spin_unlock_irqrestore(&h->scan_lock, flags);
 		return 1;
 	}
-	spin_unlock_irqrestore(&h->lock, flags);
 	return 0;
 }
 
@@ -6768,16 +6768,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
 	}
 }
 
+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+	int i, cpu;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < num_online_cpus(); i++) {
+		u32 *lockup_detected;
+		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+		*lockup_detected = value;
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+	wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
 static void controller_lockup_detected(struct ctlr_info *h)
 {
 	unsigned long flags;
+	u32 lockup_detected;
 
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 	spin_lock_irqsave(&h->lock, flags);
-	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	if (!lockup_detected) {
+		/* no heartbeat, but controller gave us a zero. */
+		dev_warn(&h->pdev->dev,
+			"lockup detected but scratchpad register is zero\n");
+		lockup_detected = 0xffffffff;
+	}
+	set_lockup_detected_for_all_cpus(h, lockup_detected);
 	spin_unlock_irqrestore(&h->lock, flags);
 	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
-			h->lockup_detected);
+			lockup_detected);
 	pci_disable_device(h->pdev);
 	spin_lock_irqsave(&h->lock, flags);
 	fail_all_cmds_on_list(h, &h->cmpQ);
@@ -6912,7 +6934,7 @@ static void hpsa_monitor_ctlr_worker(struct work_struct *work)
 	struct ctlr_info *h = container_of(to_delayed_work(work),
 					struct ctlr_info, monitor_ctlr_work);
 	detect_controller_lockup(h);
-	if (h->lockup_detected)
+	if (lockup_detected(h))
 		return;
 
 	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
@@ -6976,6 +6998,13 @@ reinit_after_soft_reset:
 	spin_lock_init(&h->offline_device_lock);
 	spin_lock_init(&h->scan_lock);
 	spin_lock_init(&h->passthru_count_lock);
+
+	/* Allocate and clear per-cpu variable lockup_detected */
+	h->lockup_detected = alloc_percpu(u32);
+	if (!h->lockup_detected)
+		goto clean1;
+	set_lockup_detected_for_all_cpus(h, 0);
+
 	rc = hpsa_pci_init(h);
 	if (rc != 0)
 		goto clean1;
@@ -7099,6 +7128,8 @@ clean4:
 	free_irqs(h);
 clean2:
 clean1:
+	if (h->lockup_detected)
+		free_percpu(h->lockup_detected);
 	kfree(h);
 	return rc;
 }
@@ -7107,16 +7138,10 @@ static void hpsa_flush_cache(struct ctlr_info *h)
 {
 	char *flush_buf;
 	struct CommandList *c;
-	unsigned long flags;
 
 	/* Don't bother trying to flush the cache if locked up */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
 		return;
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-
 	flush_buf = kzalloc(4, GFP_KERNEL);
 	if (!flush_buf)
 		return;
@@ -7200,6 +7225,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
 	kfree(h->hba_inquiry_data);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
+	free_percpu(h->lockup_detected);
 	kfree(h);
 }
 
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 60d72fc3c180..6da9a9f4b749 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -192,7 +192,7 @@ struct ctlr_info {
 	u64 last_heartbeat_timestamp;
 	u32 heartbeat_sample_interval;
 	atomic_t firmware_flash_in_progress;
-	u32 lockup_detected;
+	u32 *lockup_detected;
 	struct delayed_work monitor_ctlr_work;
 	int remove_in_progress;
 	u32 fifo_recently_full;