diff options
author | Tejun Heo <htejun@gmail.com> | 2006-01-22 23:09:36 -0500 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2006-01-26 22:33:49 -0500 |
commit | 77853bf2b48e34449e826a9ef4df5ea0dbe947f4 (patch) | |
tree | b46a186c141c61f05352b7a1199b2940fd9a2065 /drivers/scsi/libata-core.c | |
parent | 4ba946e9d8e10fada7bbce527f6ea05842592e06 (diff) |
[PATCH] libata: make the owner of a qc responsible for freeing it
qc used to be freed automatically on command completion. However, as
a qc can carry information about its completion status, it can be
useful to its owner/issuer after command completion. This patch makes
freeing a qc the responsibility of its owner. This simplifies
ata_exec_internal() and makes the command turn-around for ATAPI request
sensing less hackish.
This change was originally suggested by Jeff Garzik.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r-- | drivers/scsi/libata-core.c | 47 |
1 file changed, 13 insertions(+), 34 deletions(-)
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 0e4932362949..15df633521d0 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -1072,24 +1072,12 @@ static unsigned int ata_pio_modes(const struct ata_device *adev) | |||
1072 | timing API will get this right anyway */ | 1072 | timing API will get this right anyway */ |
1073 | } | 1073 | } |
1074 | 1074 | ||
1075 | struct ata_exec_internal_arg { | 1075 | void ata_qc_complete_internal(struct ata_queued_cmd *qc) |
1076 | unsigned int err_mask; | ||
1077 | struct ata_taskfile *tf; | ||
1078 | struct completion *waiting; | ||
1079 | }; | ||
1080 | |||
1081 | int ata_qc_complete_internal(struct ata_queued_cmd *qc) | ||
1082 | { | 1076 | { |
1083 | struct ata_exec_internal_arg *arg = qc->private_data; | 1077 | struct completion *waiting = qc->private_data; |
1084 | struct completion *waiting = arg->waiting; | ||
1085 | 1078 | ||
1086 | if (!(qc->err_mask & ~AC_ERR_DEV)) | 1079 | qc->ap->ops->tf_read(qc->ap, &qc->tf); |
1087 | qc->ap->ops->tf_read(qc->ap, arg->tf); | ||
1088 | arg->err_mask = qc->err_mask; | ||
1089 | arg->waiting = NULL; | ||
1090 | complete(waiting); | 1080 | complete(waiting); |
1091 | |||
1092 | return 0; | ||
1093 | } | 1081 | } |
1094 | 1082 | ||
1095 | /** | 1083 | /** |
@@ -1120,7 +1108,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
1120 | struct ata_queued_cmd *qc; | 1108 | struct ata_queued_cmd *qc; |
1121 | DECLARE_COMPLETION(wait); | 1109 | DECLARE_COMPLETION(wait); |
1122 | unsigned long flags; | 1110 | unsigned long flags; |
1123 | struct ata_exec_internal_arg arg; | 1111 | unsigned int err_mask; |
1124 | 1112 | ||
1125 | spin_lock_irqsave(&ap->host_set->lock, flags); | 1113 | spin_lock_irqsave(&ap->host_set->lock, flags); |
1126 | 1114 | ||
@@ -1134,9 +1122,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
1134 | qc->nsect = buflen / ATA_SECT_SIZE; | 1122 | qc->nsect = buflen / ATA_SECT_SIZE; |
1135 | } | 1123 | } |
1136 | 1124 | ||
1137 | arg.waiting = &wait; | 1125 | qc->private_data = &wait; |
1138 | arg.tf = tf; | ||
1139 | qc->private_data = &arg; | ||
1140 | qc->complete_fn = ata_qc_complete_internal; | 1126 | qc->complete_fn = ata_qc_complete_internal; |
1141 | 1127 | ||
1142 | if (ata_qc_issue(qc)) | 1128 | if (ata_qc_issue(qc)) |
@@ -1153,7 +1139,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
1153 | * before the caller cleans up, it will result in a | 1139 | * before the caller cleans up, it will result in a |
1154 | * spurious interrupt. We can live with that. | 1140 | * spurious interrupt. We can live with that. |
1155 | */ | 1141 | */ |
1156 | if (arg.waiting) { | 1142 | if (qc->flags & ATA_QCFLAG_ACTIVE) { |
1157 | qc->err_mask = AC_ERR_OTHER; | 1143 | qc->err_mask = AC_ERR_OTHER; |
1158 | ata_qc_complete(qc); | 1144 | ata_qc_complete(qc); |
1159 | printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", | 1145 | printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n", |
@@ -1163,7 +1149,12 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
1163 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | 1149 | spin_unlock_irqrestore(&ap->host_set->lock, flags); |
1164 | } | 1150 | } |
1165 | 1151 | ||
1166 | return arg.err_mask; | 1152 | *tf = qc->tf; |
1153 | err_mask = qc->err_mask; | ||
1154 | |||
1155 | ata_qc_free(qc); | ||
1156 | |||
1157 | return err_mask; | ||
1167 | 1158 | ||
1168 | issue_fail: | 1159 | issue_fail: |
1169 | ata_qc_free(qc); | 1160 | ata_qc_free(qc); |
@@ -3633,8 +3624,6 @@ void ata_qc_free(struct ata_queued_cmd *qc) | |||
3633 | 3624 | ||
3634 | void ata_qc_complete(struct ata_queued_cmd *qc) | 3625 | void ata_qc_complete(struct ata_queued_cmd *qc) |
3635 | { | 3626 | { |
3636 | int rc; | ||
3637 | |||
3638 | assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ | 3627 | assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ |
3639 | assert(qc->flags & ATA_QCFLAG_ACTIVE); | 3628 | assert(qc->flags & ATA_QCFLAG_ACTIVE); |
3640 | 3629 | ||
@@ -3648,17 +3637,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc) | |||
3648 | qc->flags &= ~ATA_QCFLAG_ACTIVE; | 3637 | qc->flags &= ~ATA_QCFLAG_ACTIVE; |
3649 | 3638 | ||
3650 | /* call completion callback */ | 3639 | /* call completion callback */ |
3651 | rc = qc->complete_fn(qc); | 3640 | qc->complete_fn(qc); |
3652 | |||
3653 | /* if callback indicates not to complete command (non-zero), | ||
3654 | * return immediately | ||
3655 | */ | ||
3656 | if (rc != 0) | ||
3657 | return; | ||
3658 | |||
3659 | ata_qc_free(qc); | ||
3660 | |||
3661 | VPRINTK("EXIT\n"); | ||
3662 | } | 3641 | } |
3663 | 3642 | ||
3664 | static inline int ata_should_dma_map(struct ata_queued_cmd *qc) | 3643 | static inline int ata_should_dma_map(struct ata_queued_cmd *qc) |