author		Hannes Reinecke <hare@suse.de>			2013-11-11 07:44:55 -0500
committer	James Bottomley <JBottomley@Parallels.com>	2013-12-19 10:39:02 -0500
commit		76ad3e5956bf0bc8871ebd19ebda03f2287c966a (patch)
tree		5a0b923d78bc2d68f860a55356a8eed8ab95f1bc /drivers/scsi/scsi_error.c
parent		e494f6a728394ab0df194342549ee20e6f0752df (diff)
[SCSI] Unlock accesses to eh_deadline
32bit accesses are guaranteed to be atomic, so we can remove
the spinlock when checking for eh_deadline. We only need to
make sure to catch any updates which might have happened during
the call to time_before(); if so, we just recheck with the
correct value.
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
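
For readers who don't want to trace the hunk, here is a sketch of how
scsi_host_eh_past_deadline() reads with this patch applied, reconstructed
from the first hunk of the diff below; only the leading comment is added
here for readability and is not part of the patch. Callers now invoke this
check without holding host_lock.

	static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
	{
		/* No deadline configured or no reset recorded: never "past". */
		if (!shost->last_reset || !shost->eh_deadline)
			return 0;

		/*
		 * 32bit accesses are guaranteed to be atomic
		 * (on all supported architectures), so instead
		 * of using a spinlock we can as well double check
		 * if eh_deadline has been unset during the
		 * time_before call.
		 */
		if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
		    shost->eh_deadline != 0)
			return 0;

		return 1;
	}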
Diffstat (limited to 'drivers/scsi/scsi_error.c')
-rw-r--r--	drivers/scsi/scsi_error.c	44
1 file changed, 9 insertions, 35 deletions
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 3dd04026d466..4e4824beefe4 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -94,8 +94,15 @@ static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
 	if (!shost->last_reset || !shost->eh_deadline)
 		return 0;
 
-	if (time_before(jiffies,
-			shost->last_reset + shost->eh_deadline))
+	/*
+	 * 32bit accesses are guaranteed to be atomic
+	 * (on all supported architectures), so instead
+	 * of using a spinlock we can as well double check
+	 * if eh_deadline has been unset during the
+	 * time_before call.
+	 */
+	if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
+	    shost->eh_deadline != 0)
 		return 0;
 
 	return 1;
@@ -111,18 +118,14 @@ scmd_eh_abort_handler(struct work_struct *work)
 	struct scsi_cmnd *scmd =
 		container_of(work, struct scsi_cmnd, abort_work.work);
 	struct scsi_device *sdev = scmd->device;
-	unsigned long flags;
 	int rtn;
 
-	spin_lock_irqsave(sdev->host->host_lock, flags);
 	if (scsi_host_eh_past_deadline(sdev->host)) {
-		spin_unlock_irqrestore(sdev->host->host_lock, flags);
 		SCSI_LOG_ERROR_RECOVERY(3,
 			scmd_printk(KERN_INFO, scmd,
 				    "scmd %p eh timeout, not aborting\n",
 				    scmd));
 	} else {
-		spin_unlock_irqrestore(sdev->host->host_lock, flags);
 		SCSI_LOG_ERROR_RECOVERY(3,
 			scmd_printk(KERN_INFO, scmd,
 				    "aborting command %p\n", scmd));
@@ -1132,7 +1135,6 @@ int scsi_eh_get_sense(struct list_head *work_q,
 	struct scsi_cmnd *scmd, *next;
 	struct Scsi_Host *shost;
 	int rtn;
-	unsigned long flags;
 
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
@@ -1140,16 +1142,13 @@ int scsi_eh_get_sense(struct list_head *work_q,
 			continue;
 
 		shost = scmd->device->host;
-		spin_lock_irqsave(shost->host_lock, flags);
 		if (scsi_host_eh_past_deadline(shost)) {
-			spin_unlock_irqrestore(shost->host_lock, flags);
 			SCSI_LOG_ERROR_RECOVERY(3,
 				shost_printk(KERN_INFO, shost,
 					    "skip %s, past eh deadline\n",
 					    __func__));
 			break;
 		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
 		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
 						  "%s: requesting sense\n",
 						  current->comm));
@@ -1235,26 +1234,21 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,
 	struct scsi_cmnd *scmd, *next;
 	struct scsi_device *sdev;
 	int finish_cmds;
-	unsigned long flags;
 
 	while (!list_empty(cmd_list)) {
 		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
 		sdev = scmd->device;
 
 		if (!try_stu) {
-			spin_lock_irqsave(sdev->host->host_lock, flags);
 			if (scsi_host_eh_past_deadline(sdev->host)) {
 				/* Push items back onto work_q */
 				list_splice_init(cmd_list, work_q);
-				spin_unlock_irqrestore(sdev->host->host_lock,
-						       flags);
 				SCSI_LOG_ERROR_RECOVERY(3,
 					shost_printk(KERN_INFO, sdev->host,
 						     "skip %s, past eh deadline",
 						     __func__));
 				break;
 			}
-			spin_unlock_irqrestore(sdev->host->host_lock, flags);
 		}
 
 		finish_cmds = !scsi_device_online(scmd->device) ||
@@ -1295,15 +1289,12 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
 	LIST_HEAD(check_list);
 	int rtn;
 	struct Scsi_Host *shost;
-	unsigned long flags;
 
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
 			continue;
 		shost = scmd->device->host;
-		spin_lock_irqsave(shost->host_lock, flags);
 		if (scsi_host_eh_past_deadline(shost)) {
-			spin_unlock_irqrestore(shost->host_lock, flags);
 			list_splice_init(&check_list, work_q);
 			SCSI_LOG_ERROR_RECOVERY(3,
 				shost_printk(KERN_INFO, shost,
@@ -1311,7 +1302,6 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,
 					    __func__));
 			return list_empty(work_q);
 		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
 		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
 						  "0x%p\n", current->comm,
 						  scmd));
@@ -1375,19 +1365,15 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
 {
 	struct scsi_cmnd *scmd, *stu_scmd, *next;
 	struct scsi_device *sdev;
-	unsigned long flags;
 
 	shost_for_each_device(sdev, shost) {
-		spin_lock_irqsave(shost->host_lock, flags);
 		if (scsi_host_eh_past_deadline(shost)) {
-			spin_unlock_irqrestore(shost->host_lock, flags);
 			SCSI_LOG_ERROR_RECOVERY(3,
 				shost_printk(KERN_INFO, shost,
 					    "skip %s, past eh deadline\n",
 					    __func__));
 			break;
 		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
 		stu_scmd = NULL;
 		list_for_each_entry(scmd, work_q, eh_entry)
 			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
@@ -1441,20 +1427,16 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
 {
 	struct scsi_cmnd *scmd, *bdr_scmd, *next;
 	struct scsi_device *sdev;
-	unsigned long flags;
 	int rtn;
 
 	shost_for_each_device(sdev, shost) {
-		spin_lock_irqsave(shost->host_lock, flags);
 		if (scsi_host_eh_past_deadline(shost)) {
-			spin_unlock_irqrestore(shost->host_lock, flags);
 			SCSI_LOG_ERROR_RECOVERY(3,
 				shost_printk(KERN_INFO, shost,
 					    "skip %s, past eh deadline\n",
 					    __func__));
 			break;
 		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
 		bdr_scmd = NULL;
 		list_for_each_entry(scmd, work_q, eh_entry)
 			if (scmd->device == sdev) {
@@ -1515,11 +1497,8 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
 	struct scsi_cmnd *next, *scmd;
 	int rtn;
 	unsigned int id;
-	unsigned long flags;
 
-	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsi_host_eh_past_deadline(shost)) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
 		/* push back on work queue for further processing */
 		list_splice_init(&check_list, work_q);
 		list_splice_init(&tmp_list, work_q);
@@ -1529,7 +1508,6 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
 				    __func__));
 		return list_empty(work_q);
 	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
 	id = scmd_id(scmd);
@@ -1574,7 +1552,6 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
 	LIST_HEAD(check_list);
 	unsigned int channel;
 	int rtn;
-	unsigned long flags;
 
 	/*
 	 * we really want to loop over the various channels, and do this on
@@ -1584,9 +1561,7 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
 	 */
 
 	for (channel = 0; channel <= shost->max_channel; channel++) {
-		spin_lock_irqsave(shost->host_lock, flags);
 		if (scsi_host_eh_past_deadline(shost)) {
-			spin_unlock_irqrestore(shost->host_lock, flags);
 			list_splice_init(&check_list, work_q);
 			SCSI_LOG_ERROR_RECOVERY(3,
 				shost_printk(KERN_INFO, shost,
@@ -1594,7 +1569,6 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
 					    __func__));
 			return list_empty(work_q);
 		}
-		spin_unlock_irqrestore(shost->host_lock, flags);
 
 		chan_scmd = NULL;
 		list_for_each_entry(scmd, work_q, eh_entry) {