Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/arcmsr/arcmsr.h            11
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_attr.c        2
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c       114
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c      1
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c     19
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c    64
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c          5
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c         10
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c           10
-rw-r--r--  drivers/scsi/scsi_debug.c                2
-rw-r--r--  drivers/scsi/scsi_lib.c                  2
-rw-r--r--  drivers/scsi/scsi_transport_fc.c         2
12 files changed, 134 insertions(+), 108 deletions(-)
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 475c31ae985c..77b26f5b9c33 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -2,7 +2,7 @@
 *******************************************************************************
 ** O.S : Linux
 ** FILE NAME : arcmsr.h
-** BY : Erich Chen
+** BY : Nick Cheng
 ** Description: SCSI RAID Device Driver for
 ** ARECA RAID Host adapter
 *******************************************************************************
@@ -46,8 +46,12 @@
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
 #define ARCMSR_MAX_OUTSTANDING_CMD 256
-#define ARCMSR_MAX_FREECCB_NUM 320
-#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/02/02"
+#ifdef CONFIG_XEN
+	#define ARCMSR_MAX_FREECCB_NUM 160
+#else
+	#define ARCMSR_MAX_FREECCB_NUM 320
+#endif
+#define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2010/08/05"
 #define ARCMSR_SCSI_INITIATOR_ID 255
 #define ARCMSR_MAX_XFER_SECTORS 512
 #define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -60,7 +64,6 @@ struct device_attribute;
 #define ARCMSR_MAX_HBB_POSTQUEUE 264
 #define ARCMSR_MAX_XFER_LEN 0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH 256
-#define SCSI_CMD_ARECA_SPECIFIC 0xE1
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880 0x1880
  #endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c50c436..acdae33de521 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 ** O.S : Linux
 ** FILE NAME : arcmsr_attr.c
-** BY : Erich Chen
+** BY : Nick Cheng
 ** Description: attributes exported to sysfs and device host
 *******************************************************************************
 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 1cadcd6b7da6..984bd527c6c9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 ** O.S : Linux
 ** FILE NAME : arcmsr_hba.c
-** BY : Erich Chen
+** BY : Nick Cheng
 ** Description: SCSI RAID Device Driver for
 ** ARECA RAID Host adapter
 *******************************************************************************
@@ -76,7 +76,7 @@ MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapte
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 static int sleeptime = 10;
-static int retrycount = 30;
+static int retrycount = 12;
 wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 	struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@ int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
 	if (isleep > 0) {
 		msleep(isleep*1000);
 	}
-	printk(KERN_NOTICE "wake-up\n");
 	return 0;
 }
 
@@ -921,7 +920,6 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
 }
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
-
 {
 	int id, lun;
 	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
 		, pCCB->startdone
 		, atomic_read(&acb->ccboutstandingcount));
 		return;
 	}
 	arcmsr_report_ccb_state(acb, pCCB, error);
 }
 
@@ -981,7 +979,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
 		/*clear all outbound posted Q*/
-		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
+		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
 			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
 				writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
 		arcmsr_drain_donequeue(acb, pCCB, error);
 	}
 }
-
 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t index;
@@ -2106,10 +2103,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
 	if (atomic_read(&acb->ccboutstandingcount) >=
 			ARCMSR_MAX_OUTSTANDING_CMD)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
-		printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
-		return 0;
-	}
 	ccb = arcmsr_get_freeccb(acb);
 	if (!ccb)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
 	int index, rtn;
 	bool error;
 	polling_hbb_ccb_retry:
+
 	poll_count++;
 	/* clear doorbell interrupt */
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2682,15 +2679,18 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B __iomem *reg = acb->pmuB;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
-			atomic_set(&acb->rq_map_token,16);
+			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if(atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2701,6 +2701,7 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
@@ -2897,6 +2900,8 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 	uint32_t intmask_org;
 	uint8_t rtnval = 0x00;
 	int i = 0;
+	unsigned long flags;
+
 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 			ccb = acb->pccb_pool[i];
 			if (ccb->startdone == ARCMSR_CCB_START) {
-				arcmsr_ccb_complete(ccb);
+				scsi_dma_unmap(ccb->pcmd);
+				ccb->startdone = ARCMSR_CCB_DONE;
+				ccb->ccb_flags = 0;
+				spin_lock_irqsave(&acb->ccblist_lock, flags);
+				list_add_tail(&ccb->list, &acb->ccb_free_list);
+				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 			}
 		}
 		atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@ static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 
 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
 {
-	struct AdapterControlBlock *acb =
-		(struct AdapterControlBlock *)cmd->device->host->hostdata;
+	struct AdapterControlBlock *acb;
 	uint32_t intmask_org, outbound_doorbell;
 	int retry_count = 0;
 	int rtn = FAILED;
@@ -2971,31 +2980,16 @@ sleep_again:
 			atomic_set(&acb->rq_map_token, 16);
 			atomic_set(&acb->ante_token_value, 16);
 			acb->fw_flag = FW_NORMAL;
-			init_timer(&acb->eternal_timer);
-			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-			acb->eternal_timer.data = (unsigned long) acb;
-			acb->eternal_timer.function = &arcmsr_request_device_map;
-			add_timer(&acb->eternal_timer);
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
 			rtn = SUCCESS;
 			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
 		} else {
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
-			if (atomic_read(&acb->rq_map_token) == 0) {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
-			} else {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-			}
+			atomic_set(&acb->rq_map_token, 16);
+			atomic_set(&acb->ante_token_value, 16);
+			acb->fw_flag = FW_NORMAL;
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 			rtn = SUCCESS;
 		}
 		break;
@@ -3007,21 +3001,10 @@ sleep_again:
 			rtn = FAILED;
 		} else {
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
-			if (atomic_read(&acb->rq_map_token) == 0) {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
-			} else {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-			}
+			atomic_set(&acb->rq_map_token, 16);
+			atomic_set(&acb->ante_token_value, 16);
+			acb->fw_flag = FW_NORMAL;
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			rtn = SUCCESS;
 		}
 		break;
@@ -3067,31 +3050,16 @@ sleep:
 			atomic_set(&acb->rq_map_token, 16);
 			atomic_set(&acb->ante_token_value, 16);
 			acb->fw_flag = FW_NORMAL;
-			init_timer(&acb->eternal_timer);
-			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-			acb->eternal_timer.data = (unsigned long) acb;
-			acb->eternal_timer.function = &arcmsr_request_device_map;
-			add_timer(&acb->eternal_timer);
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
 			rtn = SUCCESS;
 			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
 		} else {
 			acb->acb_flags &= ~ACB_F_BUS_RESET;
-			if (atomic_read(&acb->rq_map_token) == 0) {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
-			} else {
-				atomic_set(&acb->rq_map_token, 16);
-				atomic_set(&acb->ante_token_value, 16);
-				acb->fw_flag = FW_NORMAL;
-				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-			}
+			atomic_set(&acb->rq_map_token, 16);
+			atomic_set(&acb->ante_token_value, 16);
+			acb->fw_flag = FW_NORMAL;
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 			rtn = SUCCESS;
 		}
 		break;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 5815cbeb27a6..9a7aaf5f1311 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -646,6 +646,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+	shost->host_eh_scheduled = 0;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	SAS_DPRINTK("Enter %s\n", __func__);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b2a817055b8b..9ead0399808a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2176,9 +2176,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 		/* adjust hba_queue_depth, reply_free_queue_depth,
 		 * and queue_size
 		 */
-		ioc->hba_queue_depth -= queue_diff;
-		ioc->reply_free_queue_depth -= queue_diff;
-		queue_size -= queue_diff;
+		ioc->hba_queue_depth -= (queue_diff / 2);
+		ioc->reply_free_queue_depth -= (queue_diff / 2);
+		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
 	}
 	ioc->reply_post_queue_depth = queue_size;
 
@@ -3941,6 +3941,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
 static void
 _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 {
+	mpt2sas_scsih_reset_handler(ioc, reset_phase);
+	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 	switch (reset_phase) {
 	case MPT2_IOC_PRE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
 		break;
 	}
-	mpt2sas_scsih_reset_handler(ioc, reset_phase);
-	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 }
 
 /**
@@ -4026,6 +4026,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 {
 	int r;
 	unsigned long flags;
+	u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -4068,6 +4069,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
 	if (r)
 		goto out;
 	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+	/* If this hard reset is called while port enable is active, then
+	 * there is no reason to call make_ioc_operational
+	 */
+	if (pe_complete) {
+		r = -EFAULT;
+		goto out;
+	}
 	r = _base_make_ioc_operational(ioc, sleep_flag);
 	if (!r)
 		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index eda347c57979..5ded3db6e316 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -819,7 +819,7 @@ _scsih_is_end_device(u32 device_info)
 }
 
 /**
- * mptscsih_get_scsi_lookup - returns scmd entry
+ * _scsih_scsi_lookup_get - returns scmd entry
  * @ioc: per adapter object
  * @smid: system request message index
  *
@@ -832,6 +832,28 @@ _scsih_scsi_lookup_get(struct MPT2SAS_ADAPTER *ioc, u16 smid)
 }
 
 /**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then will derefrence the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+	unsigned long flags;
+	struct scsi_cmnd *scmd;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	scmd = ioc->scsi_lookup[smid - 1].scmd;
+	ioc->scsi_lookup[smid - 1].scmd = NULL;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+	return scmd;
+}
+
+/**
  * _scsih_scsi_lookup_find_by_scmd - scmd lookup
  * @ioc: per adapter object
  * @smid: system request message index
@@ -2981,9 +3003,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc,
 	u16 handle;
 
 	for (i = 0 ; i < event_data->NumEntries; i++) {
-		if (event_data->PHY[i].PhyStatus &
-		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
-			continue;
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
 		if (!handle)
 			continue;
@@ -3210,7 +3229,7 @@ _scsih_flush_running_cmds(struct MPT2SAS_ADAPTER *ioc)
 	u16 count = 0;
 
 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-		scmd = _scsih_scsi_lookup_get(ioc, smid);
+		scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 		if (!scmd)
 			continue;
 		count++;
@@ -3804,7 +3823,7 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
 	u32 response_code = 0;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
-	scmd = _scsih_scsi_lookup_get(ioc, smid);
+	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 	if (scmd == NULL)
 		return 1;
 
@@ -5005,6 +5024,12 @@ _scsih_sas_device_status_change_event(struct MPT2SAS_ADAPTER *ioc,
 	    event_data);
 #endif
 
+	/* In MPI Revision K (0xC), the internal device reset complete was
+	 * implemented, so avoid setting tm_busy flag for older firmware.
+	 */
+	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+		return;
+
 	if (event_data->ReasonCode !=
 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 	   event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
     struct fw_event_work *fw_event)
 {
 	struct scsi_cmnd *scmd;
+	struct scsi_device *sdev;
 	u16 smid, handle;
 	u32 lun;
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 	Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
 #endif
 	u16 ioc_status;
+	unsigned long flags;
+	int r;
+
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
 	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
 	    event_data->PortWidth));
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	ioc->broadcast_aen_busy = 0;
 	termination_count = 0;
 	query_count = 0;
 	mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 		scmd = _scsih_scsi_lookup_get(ioc, smid);
 		if (!scmd)
 			continue;
-		sas_device_priv_data = scmd->device->hostdata;
+		sdev = scmd->device;
+		sas_device_priv_data = sdev->hostdata;
 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
 			continue;
 		/* skip hidden raid components */
5128 /* skip hidden raid components */ 5160 /* skip hidden raid components */
@@ -5138,6 +5170,7 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
5138 lun = sas_device_priv_data->lun; 5170 lun = sas_device_priv_data->lun;
5139 query_count++; 5171 query_count++;
5140 5172
5173 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
5141 mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 5174 mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
5142 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL); 5175 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
5143 ioc->tm_cmds.status = MPT2_CMD_NOT_USED; 5176 ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@ _scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
 		   (mpi_reply->ResponseCode ==
 		    MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
 		    mpi_reply->ResponseCode ==
-		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+		    MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 			continue;
-
-		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
+		}
+		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+		    scmd);
+		if (r == FAILED)
+			sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+			    "scmd(%p)\n", scmd);
 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	}
-	ioc->broadcast_aen_busy = 0;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@ _scsih_remove(struct pci_dev *pdev)
 	destroy_workqueue(wq);
 
 	/* release all the volumes */
+	_scsih_ir_shutdown(ioc);
 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
 	    list) {
 		if (raid_device->starget) {
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b56ad0a..d3e58d763b43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 {
 	struct Scsi_Host *host = rport_to_shost(rport);
 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
 
 	if (!fcport)
 		return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
 	 */
-	spin_lock_irq(host->host_lock);
+	spin_lock_irqsave(host->host_lock, flags);
 	fcport->rport = fcport->drport = NULL;
 	*((fc_port_t **)rport->dd_data) = NULL;
-	spin_unlock_irq(host->host_lock);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
 		return;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a73aec..d9479c3fe5f8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	unsigned long flags;
 
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
 	rport->supported_classes = fcport->supported_classes;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23ca1fb..f27724d76cf6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-			atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 {
 	struct fc_rport *rport;
 	scsi_qla_host_t *base_vha;
+	unsigned long flags;
 
 	if (!fcport->rport)
 		return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
 	rport = fcport->rport;
 	if (defer) {
 		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irq(vha->host->host_lock);
+		spin_lock_irqsave(vha->host->host_lock, flags);
 		fcport->drport = rport;
-		spin_unlock_irq(vha->host->host_lock);
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
 	} else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
 
 	set_user_nice(current, -20);
 
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
 
 	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b310934efed..a6b2d72022fc 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
 			unsigned long long lba, unsigned int num, int write)
 {
 	int ret;
-	unsigned int block, rest = 0;
+	unsigned long long block, rest = 0;
 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..fb2bb35c62cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 					&sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
+		__blk_run_queue(sdev->request_queue, false);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be3234..5c3ccfc6b622 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 		  !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q);
+	__blk_run_queue(rport->rqst_q, false);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);