author		Tejun Heo <htejun@gmail.com>	2007-09-23 00:14:12 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-10-12 14:55:41 -0400
commit		31cc23b34913bc173680bdc87af79e551bf8cc0d (patch)
tree		ec64421ead9259174f0de8b22c36449ece6d69a4
parent		fb7fd61454c8681cd2621051a710b78a00369203 (diff)
libata-pmp-prep: implement ops->qc_defer()
Controllers which support PMP have various restrictions on which
combinations of commands can be issued concurrently, and to how many
devices. This patch implements ops->qc_defer(), which determines
whether a qc can be issued at the moment or should be deferred.
If the function returns ATA_DEFER_LINK, the qc will be deferred until
a qc completes on the link; if it returns ATA_DEFER_PORT, until a qc
completes on any link. The defer conditions are advisory, and in
general ATA_DEFER_LINK can be considered a lower-priority deferral
than ATA_DEFER_PORT.
ops->qc_defer() replaces fixed ata_scmd_need_defer(). For standard
NCQ/non-NCQ exclusion, ata_std_qc_defer() is implemented. ahci and
sata_sil24 are converted to use ata_std_qc_defer().
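As an illustration (not part of this patch), a hypothetical PMP driver
with a port-wide issue restriction could chain ata_std_qc_defer() with
its own check. The function name and the use of ap->qc_active as a
port-wide "anything in flight" test are assumptions for this sketch:

static int example_pmp_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->dev->link->ap;
	int rc;

	/* standard per-link NCQ/non-NCQ exclusion first (ATA_DEFER_LINK) */
	rc = ata_std_qc_defer(qc);
	if (rc)
		return rc;

	/* hypothetical port-wide rule: hold a non-NCQ command until
	 * every command in flight on this port (any link) completes
	 */
	if (qc->tf.protocol != ATA_PROT_NCQ && ap->qc_active)
		return ATA_DEFER_PORT;

	return 0;
}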
ops->qc_defer() is heavier than the original mechanism because the
full qc is prepped before deciding whether to defer it, but various
information is needed to determine the defer conditions, and fully
translating a qc is the only way to supply such information in a
generic manner.
IMHO, this shouldn't cause any noticeable performance issues as
* in most cases deferring occurs rarely (except for NCQ-aware
cmd-switching PMP)
* translation itself isn't that expensive
* once deferred, the command won't be repeated until another command
completes, which usually is a very long time CPU-wise.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
-rw-r--r--	drivers/ata/ahci.c		 2
-rw-r--r--	drivers/ata/libata-core.c	31
-rw-r--r--	drivers/ata/libata-scsi.c	62
-rw-r--r--	drivers/ata/sata_nv.c		 1
-rw-r--r--	drivers/ata/sata_sil24.c	 1
-rw-r--r--	include/linux/libata.h		 6
6 files changed, 67 insertions, 36 deletions
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 0a6b694f0d3a..cf3404467ceb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -268,6 +268,7 @@ static const struct ata_port_operations ahci_ops = {
 
 	.tf_read = ahci_tf_read,
 
+	.qc_defer = ata_std_qc_defer,
 	.qc_prep = ahci_qc_prep,
 	.qc_issue = ahci_qc_issue,
 
@@ -298,6 +299,7 @@ static const struct ata_port_operations ahci_vt8251_ops = {
 
 	.tf_read = ahci_tf_read,
 
+	.qc_defer = ata_std_qc_defer,
 	.qc_prep = ahci_qc_prep,
 	.qc_issue = ahci_qc_issue,
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9467c2f60192..b666f51da7ed 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4346,6 +4346,36 @@ int ata_check_atapi_dma(struct ata_queued_cmd *qc)
 }
 
 /**
+ * ata_std_qc_defer - Check whether a qc needs to be deferred
+ * @qc: ATA command in question
+ *
+ * Non-NCQ commands cannot run with any other command, NCQ or
+ * not. As upper layer only knows the queue depth, we are
+ * responsible for maintaining exclusion. This function checks
+ * whether a new command @qc can be issued.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ *
+ * RETURNS:
+ * ATA_DEFER_* if deferring is needed, 0 otherwise.
+ */
+int ata_std_qc_defer(struct ata_queued_cmd *qc)
+{
+	struct ata_link *link = qc->dev->link;
+
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		if (!ata_tag_valid(link->active_tag))
+			return 0;
+	} else {
+		if (!ata_tag_valid(link->active_tag) && !link->sactive)
+			return 0;
+	}
+
+	return ATA_DEFER_LINK;
+}
+
+/**
  * ata_qc_prep - Prepare taskfile for submission
  * @qc: Metadata associated with taskfile to be prepared
  *
@@ -7111,6 +7141,7 @@ EXPORT_SYMBOL_GPL(ata_interrupt);
 EXPORT_SYMBOL_GPL(ata_do_set_mode);
 EXPORT_SYMBOL_GPL(ata_data_xfer);
 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
+EXPORT_SYMBOL_GPL(ata_std_qc_defer);
 EXPORT_SYMBOL_GPL(ata_qc_prep);
 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index dc274001ddd9..8ca2caeed017 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -749,6 +749,13 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
 {
 	sdev->use_10_for_rw = 1;
 	sdev->use_10_for_ms = 1;
+
+	/* Schedule policy is determined by ->qc_defer() callback and
+	 * it needs to see every deferred qc. Set dev_blocked to 1 to
+	 * prevent SCSI midlayer from automatically deferring
+	 * requests.
+	 */
+	sdev->max_device_blocked = 1;
 }
 
 static void ata_scsi_dev_config(struct scsi_device *sdev,
@@ -1416,37 +1423,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 }
 
 /**
- * ata_scmd_need_defer - Check whether we need to defer scmd
- * @dev: ATA device to which the command is addressed
- * @is_io: Is the command IO (and thus possibly NCQ)?
- *
- * NCQ and non-NCQ commands cannot run together. As upper layer
- * only knows the queue depth, we are responsible for maintaining
- * exclusion. This function checks whether a new command can be
- * issued to @dev.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * 1 if deferring is needed, 0 otherwise.
- */
-static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
-{
-	struct ata_link *link = dev->link;
-	int is_ncq = is_io && ata_ncq_enabled(dev);
-
-	if (is_ncq) {
-		if (!ata_tag_valid(link->active_tag))
-			return 0;
-	} else {
-		if (!ata_tag_valid(link->active_tag) && !link->sactive)
-			return 0;
-	}
-	return 1;
-}
-
-/**
  * ata_scsi_translate - Translate then issue SCSI command to ATA device
  * @dev: ATA device to which the command is addressed
  * @cmd: SCSI command to execute
@@ -1477,14 +1453,12 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 			      void (*done)(struct scsi_cmnd *),
 			      ata_xlat_func_t xlat_func)
 {
+	struct ata_port *ap = dev->link->ap;
 	struct ata_queued_cmd *qc;
-	int is_io = xlat_func == ata_scsi_rw_xlat;
+	int rc;
 
 	VPRINTK("ENTER\n");
 
-	if (unlikely(ata_scmd_need_defer(dev, is_io)))
-		goto defer;
-
 	qc = ata_scsi_qc_new(dev, cmd, done);
 	if (!qc)
 		goto err_mem;
@@ -1508,6 +1482,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	if (xlat_func(qc))
 		goto early_finish;
 
+	if (ap->ops->qc_defer) {
+		if ((rc = ap->ops->qc_defer(qc)))
+			goto defer;
+	}
+
 	/* select device, send command to hardware */
 	ata_qc_issue(qc);
 
@@ -1529,8 +1508,12 @@ err_mem:
 	return 0;
 
 defer:
+	ata_qc_free(qc);
 	DPRINTK("EXIT - defer\n");
-	return SCSI_MLQUEUE_DEVICE_BUSY;
+	if (rc == ATA_DEFER_LINK)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	else
+		return SCSI_MLQUEUE_HOST_BUSY;
 }
 
 /**
@@ -3034,6 +3017,13 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 	shost->max_channel = 1;
 	shost->max_cmd_len = 16;
 
+	/* Schedule policy is determined by ->qc_defer()
+	 * callback and it needs to see every deferred qc.
+	 * Set host_blocked to 1 to prevent SCSI midlayer from
+	 * automatically deferring requests.
+	 */
+	shost->max_host_blocked = 1;
+
 	rc = scsi_add_host(ap->scsi_host, ap->host->dev);
 	if (rc)
 		goto err_add;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index b860f99fc288..40557fe2ffdf 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -423,6 +423,7 @@ static const struct ata_port_operations nv_adma_ops = {
 	.bmdma_start = ata_bmdma_start,
 	.bmdma_stop = ata_bmdma_stop,
 	.bmdma_status = ata_bmdma_status,
+	.qc_defer = ata_std_qc_defer,
 	.qc_prep = nv_adma_qc_prep,
 	.qc_issue = nv_adma_qc_issue,
 	.freeze = nv_adma_freeze,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index d9c010ab2280..9acfce43bde4 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -393,6 +393,7 @@ static const struct ata_port_operations sil24_ops = {
 
 	.tf_read = sil24_tf_read,
 
+	.qc_defer = ata_std_qc_defer,
 	.qc_prep = sil24_qc_prep,
 	.qc_issue = sil24_qc_issue,
 
diff --git a/include/linux/libata.h b/include/linux/libata.h
index c3820f105ffa..b0d4ca0d27b4 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -272,6 +272,10 @@ enum {
 	/* ering size */
 	ATA_ERING_SIZE = 32,
 
+	/* return values for ->qc_defer */
+	ATA_DEFER_LINK = 1,
+	ATA_DEFER_PORT = 2,
+
 	/* desc_len for ata_eh_info and context */
 	ATA_EH_DESC_LEN = 80,
 
@@ -639,6 +643,7 @@ struct ata_port_operations {
 
 	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);
 
+	int (*qc_defer) (struct ata_queued_cmd *qc);
 	void (*qc_prep) (struct ata_queued_cmd *qc);
 	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);
 
@@ -824,6 +829,7 @@ extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
 			  unsigned int buflen, int write_data);
 extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
 				unsigned int buflen, int write_data);
+extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
 extern void ata_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);