Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/NCR5380.c          5
-rw-r--r--  drivers/scsi/aacraid/rkt.c      6
-rw-r--r--  drivers/scsi/aacraid/rx.c       6
-rw-r--r--  drivers/scsi/aacraid/sa.c       6
-rw-r--r--  drivers/scsi/dpt_i2o.c         34
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c  12
-rw-r--r--  drivers/scsi/osst.c             9
7 files changed, 25 insertions, 53 deletions
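
The conversion throughout this patch is mechanical: each open-coded pair of set_current_state()/set_task_state() followed by schedule_timeout() becomes a single call to schedule_timeout_uninterruptible() or schedule_timeout_interruptible(). The lpfc and osst hunks also replace hard-coded HZ fractions (HZ/2, HZ/10) with msecs_to_jiffies(500) and msecs_to_jiffies(100), making the intended delay explicit. As a rough sketch of what the helpers do (not the exact kernel source; the real definitions live in the kernel's timer code):

    /* Sketch only: set the task state, then sleep for 'timeout' jiffies. */
    signed long schedule_timeout_interruptible(signed long timeout)
    {
            __set_current_state(TASK_INTERRUPTIBLE);
            return schedule_timeout(timeout);
    }

    signed long schedule_timeout_uninterruptible(signed long timeout)
    {
            __set_current_state(TASK_UNINTERRUPTIBLE);
            return schedule_timeout(timeout);
    }
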
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index cc9ecb35b412..cba9655d0f14 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -606,10 +606,7 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);

 	while (probe_irq == SCSI_IRQ_NONE && time_before(jiffies, timeout))
-	{
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
-	}
+		schedule_timeout_uninterruptible(1);

 	NCR5380_write(SELECT_ENABLE_REG, 0);
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index fc4c73c2a6a9..e9b775d6bec9 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -183,8 +183,7 @@ static int rkt_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Yield the processor in case we are slow
 		 */
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 	if (ok != 1) {
 		/*
@@ -452,8 +451,7 @@ int aac_rkt_init(struct aac_dev *dev)
 				dev->name, instance, status);
 			goto error_iounmap;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 	if (request_irq(dev->scsi_host_ptr->irq, aac_rkt_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
 	{
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index da99046e5393..6998bc877dd6 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -183,8 +183,7 @@ static int rx_sync_cmd(struct aac_dev *dev, u32 command,
 		/*
 		 *	Yield the processor in case we are slow
 		 */
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 	if (ok != 1) {
 		/*
@@ -452,8 +451,7 @@ int aac_rx_init(struct aac_dev *dev)
 				dev->name, instance, status);
 			goto error_iounmap;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 	if (request_irq(dev->scsi_host_ptr->irq, aac_rx_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev)<0)
 	{
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 8b9596209164..466f05cfbf0c 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -189,8 +189,7 @@ static int sa_sync_cmd(struct aac_dev *dev, u32 command,
 			ok = 1;
 			break;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}

 	if (ok != 1)
@@ -325,8 +324,7 @@ int aac_sa_init(struct aac_dev *dev)
 				name, instance, status);
 			goto error_iounmap;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}

 	if (request_irq(dev->scsi_host_ptr->irq, aac_sa_intr, SA_SHIRQ|SA_INTERRUPT, "aacraid", (void *)dev ) < 0) {
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 7235f94f1191..46d5571ec55a 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -1218,8 +1218,7 @@ static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
 			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while(m == EMPTY_QUEUE);

 	msg = pHba->msg_addr_virt + m;
@@ -1294,8 +1293,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 			printk(KERN_WARNING"Timeout waiting for message!\n");
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while (m == EMPTY_QUEUE);

 	status = (u8*)kmalloc(4, GFP_KERNEL|ADDR32);
@@ -1327,8 +1325,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 			return -ETIMEDOUT;
 		}
 		rmb();
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}

 	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
@@ -1345,8 +1342,7 @@ static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
 			printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while (m == EMPTY_QUEUE);
 	// Flush the offset
 	adpt_send_nop(pHba, m);
@@ -1917,11 +1913,8 @@ static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd,
 		return -ENXIO;
 	}

-	while((volatile u32) pHba->state & DPTI_STATE_RESET ) {
-		set_task_state(current,TASK_UNINTERRUPTIBLE);
-		schedule_timeout(2);
-
-	}
+	while((volatile u32) pHba->state & DPTI_STATE_RESET )
+		schedule_timeout_uninterruptible(2);

 	switch (cmd) {
 	// TODO: handle 3 cases
@@ -2635,8 +2628,7 @@ static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
 			return 2;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}
 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
@@ -2670,8 +2662,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while(m == EMPTY_QUEUE);

 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
@@ -2709,8 +2700,7 @@ static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while (1);

 	// If the command was successful, fill the fifo with our reply
@@ -2788,8 +2778,7 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 				pHba->name);
 			return -ETIMEDOUT;
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	} while(m==EMPTY_QUEUE);


@@ -2816,8 +2805,7 @@ static s32 adpt_i2o_status_get(adpt_hba* pHba)
 			return -ETIMEDOUT;
 		}
 		rmb();
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(1);
+		schedule_timeout_uninterruptible(1);
 	}

 	// Set up our number of outbound and inbound messages
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c34d3cf4f19c..c63275e66e2e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -825,8 +825,7 @@ __lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	while (lpfc_cmd->pCmd == cmnd)
 	{
 		spin_unlock_irq(phba->host->host_lock);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(LPFC_ABORT_WAIT*HZ);
+		schedule_timeout_uninterruptible(LPFC_ABORT_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);
 		if (++loop_count
 		    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
@@ -885,8 +884,7 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)

 	if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
 		spin_unlock_irq(phba->host->host_lock);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout( HZ/2);
+		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
 		spin_lock_irq(phba->host->host_lock);
 	}
 	if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
@@ -939,8 +937,7 @@ __lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
 					cmnd->device->id, cmnd->device->lun,
 					LPFC_CTX_LUN))) {
 		spin_unlock_irq(phba->host->host_lock);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(LPFC_RESET_WAIT*HZ);
+		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);

 		if (++loopcnt
@@ -1038,8 +1035,7 @@ __lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
 				&phba->sli.ring[phba->sli.fcp_ring],
 				0, 0, LPFC_CTX_HOST))) {
 		spin_unlock_irq(phba->host->host_lock);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(LPFC_RESET_WAIT*HZ);
+		schedule_timeout_uninterruptible(LPFC_RESET_WAIT*HZ);
 		spin_lock_irq(phba->host->host_lock);

 		if (++loopcnt
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 1cf11c3322fb..d9946bd95492 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -862,8 +862,7 @@ static int osst_recover_wait_frame(struct osst_tape * STp, struct scsi_request *
 			retval = osst_write_error_recovery(STp, aSRpnt, 0);
 			break;
 		}
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout (HZ / OSST_POLL_PER_SEC);
+		schedule_timeout_interruptible(HZ / OSST_POLL_PER_SEC);

 		STp->buffer->b_data = mybuf; STp->buffer->buffer_size = 24;
 		memset(cmd, 0, MAX_COMMAND_SIZE);
@@ -1558,8 +1557,7 @@ static int osst_reposition_and_retry(struct osst_tape * STp, struct scsi_request
 			osst_set_frame_position(STp, aSRpnt, frame + skip, 1);
 			flag = 0;
 			attempts--;
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ / 10);
+			schedule_timeout_interruptible(msecs_to_jiffies(100));
 		}
 		if (osst_get_frame_position(STp, aSRpnt) < 0) {	/* additional write error */
 #if DEBUG
@@ -1620,8 +1618,7 @@ static int osst_reposition_and_retry(struct osst_tape * STp, struct scsi_request
 			debugging = 0;
 		}
 #endif
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ / 10);
+		schedule_timeout_interruptible(msecs_to_jiffies(100));
 	}
 	printk(KERN_ERR "%s:E: Failed to find valid tape media\n", name);
 #if DEBUG