path: root/drivers/ata/libata-scsi.c
author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/ata/libata-scsi.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'drivers/ata/libata-scsi.c')
-rw-r--r--	drivers/ata/libata-scsi.c	342
1 file changed, 191 insertions(+), 151 deletions(-)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a89172c100f5..927f968e99d9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -51,8 +51,8 @@
 #include <asm/unaligned.h>
 
 #include "libata.h"
+#include "libata-transport.h"
 
-#define SECTOR_SIZE		512
 #define ATA_SCSI_RBUF_SIZE	4096
 
 static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
@@ -64,9 +64,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
 					const struct scsi_device *scsidev);
 static struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
 					const struct scsi_device *scsidev);
-static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
-			      unsigned int id, unsigned int lun);
-
 
 #define RW_RECOVERY_MPAGE 0x1
 #define RW_RECOVERY_MPAGE_LEN 12
@@ -106,83 +103,55 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
 	0, 30	/* extended self test time, see 05-359r1 */
 };
 
-/*
- * libata transport template.  libata doesn't do real transport stuff.
- * It just needs the eh_timed_out hook.
- */
-static struct scsi_transport_template ata_scsi_transport_template = {
-	.eh_strategy_handler	= ata_scsi_error,
-	.eh_timed_out		= ata_scsi_timed_out,
-	.user_scan		= ata_scsi_user_scan,
+static const char *ata_lpm_policy_names[] = {
+	[ATA_LPM_UNKNOWN]	= "max_performance",
+	[ATA_LPM_MAX_POWER]	= "max_performance",
+	[ATA_LPM_MED_POWER]	= "medium_power",
+	[ATA_LPM_MIN_POWER]	= "min_power",
 };
 
-
-static const struct {
-	enum link_pm	value;
-	const char	*name;
-} link_pm_policy[] = {
-	{ NOT_AVAILABLE, "max_performance" },
-	{ MIN_POWER, "min_power" },
-	{ MAX_PERFORMANCE, "max_performance" },
-	{ MEDIUM_POWER, "medium_power" },
-};
-
-static const char *ata_scsi_lpm_get(enum link_pm policy)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++)
-		if (link_pm_policy[i].value == policy)
-			return link_pm_policy[i].name;
-
-	return NULL;
-}
-
-static ssize_t ata_scsi_lpm_put(struct device *dev,
-				struct device_attribute *attr,
-				const char *buf, size_t count)
+static ssize_t ata_scsi_lpm_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct ata_port *ap = ata_shost_to_port(shost);
-	enum link_pm policy = 0;
-	int i;
+	enum ata_lpm_policy policy;
+	unsigned long flags;
 
-	/*
-	 * we are skipping array location 0 on purpose - this
-	 * is because a value of NOT_AVAILABLE is displayed
-	 * to the user as max_performance, but when the user
-	 * writes "max_performance", they actually want the
-	 * value to match MAX_PERFORMANCE.
-	 */
-	for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) {
-		const int len = strlen(link_pm_policy[i].name);
-		if (strncmp(link_pm_policy[i].name, buf, len) == 0) {
-			policy = link_pm_policy[i].value;
+	/* UNKNOWN is internal state, iterate from MAX_POWER */
+	for (policy = ATA_LPM_MAX_POWER;
+	     policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
+		const char *name = ata_lpm_policy_names[policy];
+
+		if (strncmp(name, buf, strlen(name)) == 0)
 			break;
-		}
 	}
-	if (!policy)
+	if (policy == ARRAY_SIZE(ata_lpm_policy_names))
 		return -EINVAL;
 
-	ata_lpm_schedule(ap, policy);
+	spin_lock_irqsave(ap->lock, flags);
+	ap->target_lpm_policy = policy;
+	ata_port_schedule_eh(ap);
+	spin_unlock_irqrestore(ap->lock, flags);
+
 	return count;
 }
 
-static ssize_t
-ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t ata_scsi_lpm_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct ata_port *ap = ata_shost_to_port(shost);
-	const char *policy =
-		ata_scsi_lpm_get(ap->pm_policy);
 
-	if (!policy)
+	if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
 		return -EINVAL;
 
-	return snprintf(buf, 23, "%s\n", policy);
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			ata_lpm_policy_names[ap->target_lpm_policy]);
 }
 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
-	    ata_scsi_lpm_show, ata_scsi_lpm_put);
+	    ata_scsi_lpm_show, ata_scsi_lpm_store);
 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
 
 static ssize_t ata_scsi_park_show(struct device *device,
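[Editor's note: the stand-alone sketch below is not part of the patch. It only illustrates the prefix matching that the new ata_scsi_lpm_store() performs against ata_lpm_policy_names[], which is why a sysfs write such as "min_power\n" (with the trailing newline from echo) is accepted. The enum values and policy strings are copied from the hunk above; parse_lpm_policy() and main() are made up for illustration.]

    #include <stdio.h>
    #include <string.h>

    /* mirrors ata_lpm_policy_names[] from the hunk above */
    enum ata_lpm_policy {
            ATA_LPM_UNKNOWN, ATA_LPM_MAX_POWER, ATA_LPM_MED_POWER, ATA_LPM_MIN_POWER
    };

    static const char *ata_lpm_policy_names[] = {
            [ATA_LPM_UNKNOWN]   = "max_performance",
            [ATA_LPM_MAX_POWER] = "max_performance",
            [ATA_LPM_MED_POWER] = "medium_power",
            [ATA_LPM_MIN_POWER] = "min_power",
    };

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* return the matched policy, or -1 where the sysfs store returns -EINVAL */
    static int parse_lpm_policy(const char *buf)
    {
            unsigned int policy;

            /* ATA_LPM_UNKNOWN is internal state, so start from MAX_POWER */
            for (policy = ATA_LPM_MAX_POWER;
                 policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) {
                    const char *name = ata_lpm_policy_names[policy];

                    if (strncmp(name, buf, strlen(name)) == 0)
                            return policy;
            }
            return -1;
    }

    int main(void)
    {
            printf("%d\n", parse_lpm_policy("min_power\n"));       /* 3 */
            printf("%d\n", parse_lpm_policy("max_performance\n")); /* 1 */
            printf("%d\n", parse_lpm_policy("bogus\n"));           /* -1 */
            return 0;
    }

Because ATA_LPM_UNKNOWN is skipped, writing "max_performance" resolves to ATA_LPM_MAX_POWER rather than the internal UNKNOWN state.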
@@ -377,12 +346,11 @@ struct device_attribute *ata_common_sdev_attrs[] = {
 };
 EXPORT_SYMBOL_GPL(ata_common_sdev_attrs);
 
-static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
-				   void (*done)(struct scsi_cmnd *))
+static void ata_scsi_invalid_field(struct scsi_cmnd *cmd)
 {
 	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
 	/* "Invalid field in cbd" */
-	done(cmd);
+	cmd->scsi_done(cmd);
 }
 
 /**
@@ -516,7 +484,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 	memset(scsi_cmd, 0, sizeof(scsi_cmd));
 
 	if (args[3]) {
-		argsize = SECTOR_SIZE * args[3];
+		argsize = ATA_SECT_SIZE * args[3];
 		argbuf = kmalloc(argsize, GFP_KERNEL);
 		if (argbuf == NULL) {
 			rc = -ENOMEM;
@@ -750,7 +718,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
  *	ata_scsi_qc_new - acquire new ata_queued_cmd reference
  *	@dev: ATA device to which the new command is attached
  *	@cmd: SCSI command that originated this ATA command
- *	@done: SCSI command completion function
  *
  *	Obtain a reference to an unused ata_queued_cmd structure,
  *	which is the basic libata structure representing a single
@@ -767,21 +734,20 @@ EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
  *	Command allocated, or %NULL if none available.
  */
 static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
-					      struct scsi_cmnd *cmd,
-					      void (*done)(struct scsi_cmnd *))
+					      struct scsi_cmnd *cmd)
 {
 	struct ata_queued_cmd *qc;
 
 	qc = ata_qc_new_init(dev);
 	if (qc) {
 		qc->scsicmd = cmd;
-		qc->scsidone = done;
+		qc->scsidone = cmd->scsi_done;
 
 		qc->sg = scsi_sglist(cmd);
 		qc->n_elem = scsi_sg_count(cmd);
 	} else {
 		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
-		done(cmd);
+		cmd->scsi_done(cmd);
 	}
 
 	return qc;
@@ -1033,7 +999,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
  *	@qc: Command that we are erroring out
  *
  *	Generate sense block for a failed ATA command @qc.  Descriptor
- *	format is used to accomodate LBA48 block address.
+ *	format is used to accommodate LBA48 block address.
 *
  *	LOCKING:
  *	None.
@@ -1123,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
 static int ata_scsi_dev_config(struct scsi_device *sdev,
 			       struct ata_device *dev)
 {
+	struct request_queue *q = sdev->request_queue;
+
 	if (!ata_id_has_unload(dev->id))
 		dev->flags |= ATA_DFLAG_NO_UNLOAD;
 
 	/* configure max sectors */
-	blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
+	blk_queue_max_hw_sectors(q, dev->max_sectors);
 
 	if (dev->class == ATA_DEV_ATAPI) {
-		struct request_queue *q = sdev->request_queue;
 		void *buf;
 
-		/* set the min alignment and padding */
-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       ATA_DMA_PAD_SZ - 1);
-		blk_queue_update_dma_pad(sdev->request_queue,
-					 ATA_DMA_PAD_SZ - 1);
+		sdev->sector_size = ATA_SECT_SIZE;
+
+		/* set DMA padding */
+		blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
 
 		/* configure draining */
 		buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -1149,12 +1115,24 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 
 		blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
 	} else {
-		/* ATA devices must be sector aligned */
-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       ATA_SECT_SIZE - 1);
+		sdev->sector_size = ata_id_logical_sector_size(dev->id);
 		sdev->manage_start_stop = 1;
 	}
 
+	/*
+	 * ata_pio_sectors() expects buffer for each sector to not cross
+	 * page boundary.  Enforce it by requiring buffers to be sector
+	 * aligned, which works iff sector_size is not larger than
+	 * PAGE_SIZE.  ATAPI devices also need the alignment as
+	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
+	 */
+	if (sdev->sector_size > PAGE_SIZE)
+		ata_dev_printk(dev, KERN_WARNING,
+			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+			sdev->sector_size);
+
+	blk_queue_update_dma_alignment(q, sdev->sector_size - 1);
+
 	if (dev->flags & ATA_DFLAG_AN)
 		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
 
@@ -1166,6 +1144,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
 	}
 
+	blk_queue_flush_queueable(q, false);
+
+	dev->sdev = sdev;
 	return 0;
 }
 
@@ -1696,7 +1677,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		goto nothing_to_do;
 
 	qc->flags |= ATA_QCFLAG_IO;
-	qc->nbytes = n_block * ATA_SECT_SIZE;
+	qc->nbytes = n_block * scmd->device->sector_size;
 
 	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
 			     qc->tag);
@@ -1764,7 +1745,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
  *	ata_scsi_translate - Translate then issue SCSI command to ATA device
  *	@dev: ATA device to which the command is addressed
  *	@cmd: SCSI command to execute
- *	@done: SCSI command completion function
  *	@xlat_func: Actor which translates @cmd to an ATA taskfile
  *
  *	Our ->queuecommand() function has decided that the SCSI
@@ -1788,7 +1768,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
  *	needs to be deferred.
  */
 static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
-			      void (*done)(struct scsi_cmnd *),
 			      ata_xlat_func_t xlat_func)
 {
 	struct ata_port *ap = dev->link->ap;
@@ -1797,7 +1776,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 
 	VPRINTK("ENTER\n");
 
-	qc = ata_scsi_qc_new(dev, cmd, done);
+	qc = ata_scsi_qc_new(dev, cmd);
 	if (!qc)
 		goto err_mem;
 
@@ -1833,14 +1812,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
 
 early_finish:
 	ata_qc_free(qc);
-	qc->scsidone(cmd);
+	cmd->scsi_done(cmd);
 	DPRINTK("EXIT - early finish (good or error)\n");
 	return 0;
 
 err_did:
 	ata_qc_free(qc);
 	cmd->result = (DID_ERROR << 16);
-	qc->scsidone(cmd);
+	cmd->scsi_done(cmd);
 err_mem:
 	DPRINTK("EXIT - internal\n");
 	return 0;
@@ -2001,6 +1980,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
 		0x89,	/* page 0x89, ata info page */
 		0xb0,	/* page 0xb0, block limits page */
 		0xb1,	/* page 0xb1, block device characteristics page */
+		0xb2,	/* page 0xb2, thin provisioning page */
 	};
 
 	rbuf[3] = sizeof(pages);	/* number of supported VPD pages */
@@ -2077,6 +2057,17 @@ static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
 			  ATA_ID_SERNO_LEN);
 	num += ATA_ID_SERNO_LEN;
 
+	if (ata_id_has_wwn(args->id)) {
+		/* SAT defined lu world wide name */
+		/* piv=0, assoc=lu, code_set=binary, designator=NAA */
+		rbuf[num + 0] = 1;
+		rbuf[num + 1] = 3;
+		rbuf[num + 3] = ATA_ID_WWN_LEN;
+		num += 4;
+		ata_id_string(args->id, (unsigned char *) rbuf + num,
+			      ATA_ID_WWN, ATA_ID_WWN_LEN);
+		num += ATA_ID_WWN_LEN;
+	}
 	rbuf[3] = num - 4;	/* page len (assume less than 256 bytes) */
 	return 0;
 }
@@ -2123,7 +2114,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
 
 static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
 {
-	u32 min_io_sectors;
+	u16 min_io_sectors;
 
 	rbuf[1] = 0xb0;
 	rbuf[3] = 0x3c;		/* required VPD size with unmap support */
@@ -2135,10 +2126,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
 	 * logical than physical sector size we need to figure out what the
 	 * latter is.
 	 */
-	if (ata_id_has_large_logical_sectors(args->id))
-		min_io_sectors = ata_id_logical_per_physical_sectors(args->id);
-	else
-		min_io_sectors = 1;
+	min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id);
 	put_unaligned_be16(min_io_sectors, &rbuf[6]);
 
 	/*
@@ -2151,7 +2139,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
 	 * with the unmap bit set.
 	 */
 	if (ata_id_has_trim(args->id)) {
-		put_unaligned_be32(65535 * 512 / 8, &rbuf[20]);
+		put_unaligned_be64(65535 * 512 / 8, &rbuf[36]);
 		put_unaligned_be32(1, &rbuf[28]);
 	}
 
@@ -2172,6 +2160,16 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf)
 	return 0;
 }
 
+static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf)
+{
+	/* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */
+	rbuf[1] = 0xb2;
+	rbuf[3] = 0x4;
+	rbuf[5] = 1 << 6;	/* TPWS */
+
+	return 0;
+}
+
 /**
  *	ata_scsiop_noop - Command handler that simply returns success.
  *	@args: device IDENTIFY data / SCSI command of interest.
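[Editor's note: illustrative only, not part of the patch. The sketch below fills a buffer the same way the new ata_scsiop_inq_b2() above does, so the resulting Thin Provisioning VPD page bytes can be inspected; the buffer size and the printing are arbitrary choices made for this example.]

    #include <stdio.h>
    #include <stdint.h>

    /* fill the 0xb2 (Thin Provisioning) VPD page like ata_scsiop_inq_b2() */
    static void fill_inq_b2(uint8_t *rbuf)
    {
            rbuf[1] = 0xb2;      /* page code */
            rbuf[3] = 0x4;       /* additional page length */
            rbuf[5] = 1 << 6;    /* TPWS: unmap via WRITE SAME(16) */
    }

    int main(void)
    {
            uint8_t rbuf[8] = { 0 };
            int i;

            fill_inq_b2(rbuf);
            for (i = 0; i < 8; i++)
                    printf("%02x ", rbuf[i]);
            printf("\n");        /* prints: 00 b2 00 04 00 40 00 00 */
            return 0;
    }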
@@ -2397,21 +2395,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
 {
 	struct ata_device *dev = args->dev;
 	u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */
-	u8 log_per_phys = 0;
-	u16 lowest_aligned = 0;
-	u16 word_106 = dev->id[106];
-	u16 word_209 = dev->id[209];
-
-	if ((word_106 & 0xc000) == 0x4000) {
-		/* Number and offset of logical sectors per physical sector */
-		if (word_106 & (1 << 13))
-			log_per_phys = word_106 & 0xf;
-		if ((word_209 & 0xc000) == 0x4000) {
-			u16 first = dev->id[209] & 0x3fff;
-			if (first > 0)
-				lowest_aligned = (1 << log_per_phys) - first;
-		}
-	}
+	u32 sector_size; /* physical sector size in bytes */
+	u8 log2_per_phys;
+	u16 lowest_aligned;
+
+	sector_size = ata_id_logical_sector_size(dev->id);
+	log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
+	lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
 
 	VPRINTK("ENTER\n");
 
@@ -2426,8 +2416,10 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
 		rbuf[3] = last_lba;
 
 		/* sector size */
-		rbuf[6] = ATA_SECT_SIZE >> 8;
-		rbuf[7] = ATA_SECT_SIZE & 0xff;
+		rbuf[4] = sector_size >> (8 * 3);
+		rbuf[5] = sector_size >> (8 * 2);
+		rbuf[6] = sector_size >> (8 * 1);
+		rbuf[7] = sector_size;
 	} else {
 		/* sector count, 64-bit */
 		rbuf[0] = last_lba >> (8 * 7);
@@ -2440,11 +2432,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
 		rbuf[7] = last_lba;
 
 		/* sector size */
-		rbuf[10] = ATA_SECT_SIZE >> 8;
-		rbuf[11] = ATA_SECT_SIZE & 0xff;
+		rbuf[ 8] = sector_size >> (8 * 3);
+		rbuf[ 9] = sector_size >> (8 * 2);
+		rbuf[10] = sector_size >> (8 * 1);
+		rbuf[11] = sector_size;
 
 		rbuf[12] = 0;
-		rbuf[13] = log_per_phys;
+		rbuf[13] = log2_per_phys;
 		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
 		rbuf[15] = lowest_aligned;
 
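[Editor's note: illustrative only, not part of the patch. The new rbuf[8..11] stores above emit the 32-bit logical sector size big-endian, with rbuf[13] holding log2(logical sectors per physical sector) and rbuf[14..15] the lowest aligned LBA. A user-space sketch of that packing, using a made-up 4096-byte sector size:]

    #include <stdio.h>
    #include <stdint.h>

    /* pack the READ CAPACITY(16) sector-size fields the way the hunk above does */
    static void pack_sector_size(uint8_t *rbuf, uint32_t sector_size,
                                 uint8_t log2_per_phys, uint16_t lowest_aligned)
    {
            rbuf[ 8] = sector_size >> (8 * 3);
            rbuf[ 9] = sector_size >> (8 * 2);
            rbuf[10] = sector_size >> (8 * 1);
            rbuf[11] = sector_size;

            rbuf[12] = 0;
            rbuf[13] = log2_per_phys;
            rbuf[14] = (lowest_aligned >> 8) & 0x3f;
            rbuf[15] = lowest_aligned;
    }

    int main(void)
    {
            uint8_t rbuf[16] = { 0 };

            /* e.g. 4096-byte logical sectors, one logical per physical, no offset */
            pack_sector_size(rbuf, 4096, 0, 0);
            printf("%02x %02x %02x %02x\n",
                   rbuf[8], rbuf[9], rbuf[10], rbuf[11]);   /* 00 00 10 00 */
            return 0;
    }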
@@ -2577,8 +2571,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
 		 *
 		 * If door lock fails, always clear sdev->locked to
 		 * avoid this infinite loop.
+		 *
+		 * This may happen before SCSI scan is complete.  Make
+		 * sure qc->dev->sdev isn't NULL before dereferencing.
 		 */
-		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
 			qc->dev->sdev->locked = 0;
 
 		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
@@ -2888,9 +2885,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	tf->device = dev->devno ?
 		tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1;
 
-	/* READ/WRITE LONG use a non-standard sect_size */
-	qc->sect_size = ATA_SECT_SIZE;
 	switch (tf->command) {
+	/* READ/WRITE LONG use a non-standard sect_size */
 	case ATA_CMD_READ_LONG:
 	case ATA_CMD_READ_LONG_ONCE:
 	case ATA_CMD_WRITE_LONG:
@@ -2898,6 +2894,45 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 		if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1)
 			goto invalid_fld;
 		qc->sect_size = scsi_bufflen(scmd);
+		break;
+
+	/* commands using reported Logical Block size (e.g. 512 or 4K) */
+	case ATA_CMD_CFA_WRITE_NE:
+	case ATA_CMD_CFA_TRANS_SECT:
+	case ATA_CMD_CFA_WRITE_MULT_NE:
+	/* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */
+	case ATA_CMD_READ:
+	case ATA_CMD_READ_EXT:
+	case ATA_CMD_READ_QUEUED:
+	/* XXX: case ATA_CMD_READ_QUEUED_EXT: */
+	case ATA_CMD_FPDMA_READ:
+	case ATA_CMD_READ_MULTI:
+	case ATA_CMD_READ_MULTI_EXT:
+	case ATA_CMD_PIO_READ:
+	case ATA_CMD_PIO_READ_EXT:
+	case ATA_CMD_READ_STREAM_DMA_EXT:
+	case ATA_CMD_READ_STREAM_EXT:
+	case ATA_CMD_VERIFY:
+	case ATA_CMD_VERIFY_EXT:
+	case ATA_CMD_WRITE:
+	case ATA_CMD_WRITE_EXT:
+	case ATA_CMD_WRITE_FUA_EXT:
+	case ATA_CMD_WRITE_QUEUED:
+	case ATA_CMD_WRITE_QUEUED_FUA_EXT:
+	case ATA_CMD_FPDMA_WRITE:
+	case ATA_CMD_WRITE_MULTI:
+	case ATA_CMD_WRITE_MULTI_EXT:
+	case ATA_CMD_WRITE_MULTI_FUA_EXT:
+	case ATA_CMD_PIO_WRITE:
+	case ATA_CMD_PIO_WRITE_EXT:
+	case ATA_CMD_WRITE_STREAM_DMA_EXT:
+	case ATA_CMD_WRITE_STREAM_EXT:
+		qc->sect_size = scmd->device->sector_size;
+		break;
+
+	/* Everything else uses 512 byte "sectors" */
+	default:
+		qc->sect_size = ATA_SECT_SIZE;
 	}
 
 	/*
@@ -3100,7 +3135,6 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
 }
 
 static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
-				      void (*done)(struct scsi_cmnd *),
 				      struct ata_device *dev)
 {
 	u8 scsi_op = scmd->cmnd[0];
@@ -3134,9 +3168,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
 	}
 
 	if (xlat_func)
-		rc = ata_scsi_translate(dev, scmd, done, xlat_func);
+		rc = ata_scsi_translate(dev, scmd, xlat_func);
 	else
-		ata_scsi_simulate(dev, scmd, done);
+		ata_scsi_simulate(dev, scmd);
 
 	return rc;
 
@@ -3144,14 +3178,14 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
 	DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
 		scmd->cmd_len, scsi_op, dev->cdb_len);
 	scmd->result = DID_ERROR << 16;
-	done(scmd);
+	scmd->scsi_done(scmd);
 	return 0;
 }
 
 /**
  *	ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
+ *	@shost: SCSI host of command to be sent
  *	@cmd: SCSI command to be sent
- *	@done: Completion function, called when command is complete
 *
  *	In some cases, this function translates SCSI commands into
  *	ATA taskfiles, and queues the taskfiles to be sent to
@@ -3161,37 +3195,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
  *	ATA and ATAPI devices appearing as SCSI devices.
  *
  *	LOCKING:
- *	Releases scsi-layer-held lock, and obtains host lock.
+ *	ATA host lock
 *
  *	RETURNS:
  *	Return value from __ata_scsi_queuecmd() if @cmd can be queued,
  *	0 otherwise.
  */
-int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 {
 	struct ata_port *ap;
 	struct ata_device *dev;
 	struct scsi_device *scsidev = cmd->device;
-	struct Scsi_Host *shost = scsidev->host;
 	int rc = 0;
+	unsigned long irq_flags;
 
 	ap = ata_shost_to_port(shost);
 
-	spin_unlock(shost->host_lock);
-	spin_lock(ap->lock);
+	spin_lock_irqsave(ap->lock, irq_flags);
 
 	ata_scsi_dump_cdb(ap, cmd);
 
 	dev = ata_scsi_find_dev(ap, scsidev);
 	if (likely(dev))
-		rc = __ata_scsi_queuecmd(cmd, done, dev);
+		rc = __ata_scsi_queuecmd(cmd, dev);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
-		done(cmd);
+		cmd->scsi_done(cmd);
 	}
 
-	spin_unlock(ap->lock);
-	spin_lock(shost->host_lock);
+	spin_unlock_irqrestore(ap->lock, irq_flags);
+
 	return rc;
 }
 
@@ -3199,7 +3232,6 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
  *	ata_scsi_simulate - simulate SCSI command on ATA device
  *	@dev: the target device
  *	@cmd: SCSI command being sent to device.
- *	@done: SCSI command completion function.
 *
  *	Interprets and directly executes a select list of SCSI commands
  *	that can be handled internally.
@@ -3208,8 +3240,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
  *	spin_lock_irqsave(host lock)
  */
 
-void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
-		       void (*done)(struct scsi_cmnd *))
+void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
 {
 	struct ata_scsi_args args;
 	const u8 *scsicmd = cmd->cmnd;
@@ -3218,17 +3249,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	args.dev = dev;
 	args.id = dev->id;
 	args.cmd = cmd;
-	args.done = done;
+	args.done = cmd->scsi_done;
 
 	switch(scsicmd[0]) {
 	/* TODO: worth improving? */
 	case FORMAT_UNIT:
-		ata_scsi_invalid_field(cmd, done);
+		ata_scsi_invalid_field(cmd);
 		break;
 
 	case INQUIRY:
 		if (scsicmd[1] & 2)	           /* is CmdDt set?  */
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 		else if ((scsicmd[1] & 1) == 0)	   /* is EVPD clear? */
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std);
 		else switch (scsicmd[2]) {
@@ -3250,8 +3281,11 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 		case 0xb1:
 			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1);
 			break;
+		case 0xb2:
+			ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
+			break;
 		default:
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 			break;
 		}
 		break;
@@ -3263,7 +3297,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 
 	case MODE_SELECT:	/* unconditionally return */
 	case MODE_SELECT_10:	/* bad-field-in-cdb */
-		ata_scsi_invalid_field(cmd, done);
+		ata_scsi_invalid_field(cmd);
 		break;
 
 	case READ_CAPACITY:
@@ -3274,7 +3308,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 		if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16)
 			ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap);
 		else
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 		break;
 
 	case REPORT_LUNS:
@@ -3284,7 +3318,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 	case REQUEST_SENSE:
 		ata_scsi_set_sense(cmd, 0, 0, 0);
 		cmd->result = (DRIVER_SENSE << 24);
-		done(cmd);
+		cmd->scsi_done(cmd);
 		break;
 
 	/* if we reach this, then writeback caching is disabled,
@@ -3306,14 +3340,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
 		if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4]))
 			ata_scsi_rbuf_fill(&args, ata_scsiop_noop);
 		else
-			ata_scsi_invalid_field(cmd, done);
+			ata_scsi_invalid_field(cmd);
 		break;
 
 	/* all other commands */
 	default:
 		ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0);
 		/* "Invalid command operation code" */
-		done(cmd);
+		cmd->scsi_done(cmd);
 		break;
 	}
 }
@@ -3334,7 +3368,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
 		*(struct ata_port **)&shost->hostdata[0] = ap;
 		ap->scsi_host = shost;
 
-		shost->transportt = &ata_scsi_transport_template;
+		shost->transportt = ata_scsi_transport_template;
 		shost->unique_id = ap->print_id;
 		shost->max_id = 16;
 		shost->max_lun = 1;
@@ -3393,6 +3427,8 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
 			if (!IS_ERR(sdev)) {
 				dev->sdev = sdev;
 				scsi_device_put(sdev);
+			} else {
+				dev->sdev = NULL;
 			}
 		}
 	}
@@ -3616,8 +3652,8 @@ void ata_scsi_hotplug(struct work_struct *work)
  *	RETURNS:
  *	Zero.
  */
-static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
-			      unsigned int id, unsigned int lun)
+int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
+		       unsigned int id, unsigned int lun)
 {
 	struct ata_port *ap = ata_shost_to_port(shost);
 	unsigned long flags;
@@ -3735,7 +3771,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
 		return NULL;
 
 	ap->port_no = 0;
-	ap->lock = shost->host_lock;
+	ap->lock = &host->lock;
 	ap->pio_mask = port_info->pio_mask;
 	ap->mwdma_mask = port_info->mwdma_mask;
 	ap->udma_mask = port_info->udma_mask;
@@ -3761,6 +3797,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
  */
 int ata_sas_port_start(struct ata_port *ap)
 {
+	/*
+	 * the port is marked as frozen at allocation time, but if we don't
+	 * have new eh, we won't thaw it
+	 */
+	if (!ap->ops->error_handler)
+		ap->pflags &= ~ATA_PFLAG_FROZEN;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_start);
@@ -3797,7 +3839,7 @@ int ata_sas_port_init(struct ata_port *ap)
 
 	if (!rc) {
 		ap->print_id = ata_print_id++;
-		rc = ata_bus_probe(ap);
+		rc = ata_port_probe(ap);
 	}
 
 	return rc;
@@ -3838,7 +3880,6 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
 /**
  *	ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
  *	@cmd: SCSI command to be sent
- *	@done: Completion function, called when command is complete
  *	@ap: ATA port to which the command is being sent
 *
  *	RETURNS:
@@ -3846,18 +3887,17 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
  *	0 otherwise.
  */
 
-int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
-		     struct ata_port *ap)
+int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
 {
 	int rc = 0;
 
 	ata_scsi_dump_cdb(ap, cmd);
 
 	if (likely(ata_dev_enabled(ap->link.device)))
-		rc = __ata_scsi_queuecmd(cmd, done, ap->link.device);
+		rc = __ata_scsi_queuecmd(cmd, ap->link.device);
 	else {
 		cmd->result = (DID_BAD_TARGET << 16);
-		done(cmd);
+		cmd->scsi_done(cmd);
 	}
 	return rc;
 }