summaryrefslogtreecommitdiffstats
path: root/drivers/scsi
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2014-11-05 04:36:28 -0500
committerChristoph Hellwig <hch@lst.de>2014-11-27 10:40:24 -0500
commit79855d178557cc3e3ffd179fd26a64cef48dfb30 (patch)
tree316621212e058975d86cef255e9ec86f70d0deb0 /drivers/scsi
parent309e7cc433e79ba0124e7e359503e66c41b46509 (diff)
libsas: remove task_collector mode
The task_collector mode (or "latency_injector", (C) Dan Williams) is an optional I/O path in libsas that queues up scsi commands instead of directly sending them to the hardware. It generally increases latencies to, in the optimal case, slightly reduce mmio traffic to the hardware. Only the obsolete aic94xx driver and the mvsas driver allowed it to be used without recompiling the kernel, and most drivers didn't support it at all. Remove the giant blob of code to allow better optimizations for scsi-mq in the future. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Acked-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--drivers/scsi/aic94xx/aic94xx.h2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c11
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c13
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/isci/task.c147
-rw-r--r--drivers/scsi/isci/task.h1
-rw-r--r--drivers/scsi/libsas/sas_ata.c9
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_init.c21
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c176
-rw-r--r--drivers/scsi/mvsas/mv_init.c22
-rw-r--r--drivers/scsi/mvsas/mv_sas.c109
-rw-r--r--drivers/scsi/mvsas/mv_sas.h10
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c22
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
18 files changed, 92 insertions, 465 deletions
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 66cda669b417..26d4ad9ede2e 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -78,7 +78,7 @@ void asd_dev_gone(struct domain_device *dev);
78 78
79void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id); 79void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
80 80
81int asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags); 81int asd_execute_task(struct sas_task *task, gfp_t gfp_flags);
82 82
83void asd_set_dmamode(struct domain_device *dev); 83void asd_set_dmamode(struct domain_device *dev);
84 84
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 4df867e07b20..9f636a34d595 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -1200,8 +1200,7 @@ static void asd_start_scb_timers(struct list_head *list)
1200 * Case A: we can send the whole batch at once. Increment "pending" 1200 * Case A: we can send the whole batch at once. Increment "pending"
1201 * in the beginning of this function, when it is checked, in order to 1201 * in the beginning of this function, when it is checked, in order to
1202 * eliminate races when this function is called by multiple processes. 1202 * eliminate races when this function is called by multiple processes.
1203 * Case B: should never happen if the managing layer considers 1203 * Case B: should never happen.
1204 * lldd_queue_size.
1205 */ 1204 */
1206int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb, 1205int asd_post_ascb_list(struct asd_ha_struct *asd_ha, struct asd_ascb *ascb,
1207 int num) 1206 int num)
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index a64cf932d03d..14fc018436c2 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -49,14 +49,6 @@ MODULE_PARM_DESC(use_msi, "\n"
49 "\tEnable(1) or disable(0) using PCI MSI.\n" 49 "\tEnable(1) or disable(0) using PCI MSI.\n"
50 "\tDefault: 0"); 50 "\tDefault: 0");
51 51
52static int lldd_max_execute_num = 0;
53module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
54MODULE_PARM_DESC(collector, "\n"
55 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
56 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
57 "\tThe aic94xx SAS LLDD supports both modes.\n"
58 "\tDefault: 0 (Direct Mode).\n");
59
60static struct scsi_transport_template *aic94xx_transport_template; 52static struct scsi_transport_template *aic94xx_transport_template;
61static int asd_scan_finished(struct Scsi_Host *, unsigned long); 53static int asd_scan_finished(struct Scsi_Host *, unsigned long);
62static void asd_scan_start(struct Scsi_Host *); 54static void asd_scan_start(struct Scsi_Host *);
@@ -711,9 +703,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
711 asd_ha->sas_ha.sas_port= sas_ports; 703 asd_ha->sas_ha.sas_port= sas_ports;
712 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS; 704 asd_ha->sas_ha.num_phys= ASD_MAX_PHYS;
713 705
714 asd_ha->sas_ha.lldd_queue_size = asd_ha->seq.can_queue;
715 asd_ha->sas_ha.lldd_max_execute_num = lldd_max_execute_num;
716
717 return sas_register_ha(&asd_ha->sas_ha); 706 return sas_register_ha(&asd_ha->sas_ha);
718} 707}
719 708
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 59b86e260ce9..5ff1ce7ba1f4 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -543,8 +543,7 @@ static int asd_can_queue(struct asd_ha_struct *asd_ha, int num)
543 return res; 543 return res;
544} 544}
545 545
546int asd_execute_task(struct sas_task *task, const int num, 546int asd_execute_task(struct sas_task *task, gfp_t gfp_flags)
547 gfp_t gfp_flags)
548{ 547{
549 int res = 0; 548 int res = 0;
550 LIST_HEAD(alist); 549 LIST_HEAD(alist);
@@ -553,11 +552,11 @@ int asd_execute_task(struct sas_task *task, const int num,
553 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; 552 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
554 unsigned long flags; 553 unsigned long flags;
555 554
556 res = asd_can_queue(asd_ha, num); 555 res = asd_can_queue(asd_ha, 1);
557 if (res) 556 if (res)
558 return res; 557 return res;
559 558
560 res = num; 559 res = 1;
561 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); 560 ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags);
562 if (res) { 561 if (res) {
563 res = -ENOMEM; 562 res = -ENOMEM;
@@ -568,7 +567,7 @@ int asd_execute_task(struct sas_task *task, const int num,
568 list_for_each_entry(a, &alist, list) { 567 list_for_each_entry(a, &alist, list) {
569 a->uldd_task = t; 568 a->uldd_task = t;
570 t->lldd_task = a; 569 t->lldd_task = a;
571 t = list_entry(t->list.next, struct sas_task, list); 570 break;
572 } 571 }
573 list_for_each_entry(a, &alist, list) { 572 list_for_each_entry(a, &alist, list) {
574 t = a->uldd_task; 573 t = a->uldd_task;
@@ -601,7 +600,7 @@ int asd_execute_task(struct sas_task *task, const int num,
601 } 600 }
602 list_del_init(&alist); 601 list_del_init(&alist);
603 602
604 res = asd_post_ascb_list(asd_ha, ascb, num); 603 res = asd_post_ascb_list(asd_ha, ascb, 1);
605 if (unlikely(res)) { 604 if (unlikely(res)) {
606 a = NULL; 605 a = NULL;
607 __list_add(&alist, ascb->list.prev, &ascb->list); 606 __list_add(&alist, ascb->list.prev, &ascb->list);
@@ -639,6 +638,6 @@ out_err_unmap:
639out_err: 638out_err:
640 if (ascb) 639 if (ascb)
641 asd_ascb_free_list(ascb); 640 asd_ascb_free_list(ascb);
642 asd_can_dequeue(asd_ha, num); 641 asd_can_dequeue(asd_ha, 1);
643 return res; 642 return res;
644} 643}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a81e546595dd..724c6265b667 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -260,8 +260,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
260 sas_ha->sas_port = sas_ports; 260 sas_ha->sas_port = sas_ports;
261 sas_ha->num_phys = SCI_MAX_PHYS; 261 sas_ha->num_phys = SCI_MAX_PHYS;
262 262
263 sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
264 sas_ha->lldd_max_execute_num = 1;
265 sas_ha->strict_wide_ports = 1; 263 sas_ha->strict_wide_ports = 1;
266 264
267 sas_register_ha(sas_ha); 265 sas_register_ha(sas_ha);
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 5d6fda72d659..3f63c6318b0d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -117,104 +117,97 @@ static inline int isci_device_io_ready(struct isci_remote_device *idev,
117 * functions. This function is called by libsas to send a task down to 117 * functions. This function is called by libsas to send a task down to
118 * hardware. 118 * hardware.
119 * @task: This parameter specifies the SAS task to send. 119 * @task: This parameter specifies the SAS task to send.
120 * @num: This parameter specifies the number of tasks to queue.
121 * @gfp_flags: This parameter specifies the context of this call. 120 * @gfp_flags: This parameter specifies the context of this call.
122 * 121 *
123 * status, zero indicates success. 122 * status, zero indicates success.
124 */ 123 */
125int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags) 124int isci_task_execute_task(struct sas_task *task, gfp_t gfp_flags)
126{ 125{
127 struct isci_host *ihost = dev_to_ihost(task->dev); 126 struct isci_host *ihost = dev_to_ihost(task->dev);
128 struct isci_remote_device *idev; 127 struct isci_remote_device *idev;
129 unsigned long flags; 128 unsigned long flags;
129 enum sci_status status = SCI_FAILURE;
130 bool io_ready; 130 bool io_ready;
131 u16 tag; 131 u16 tag;
132 132
133 dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num); 133 spin_lock_irqsave(&ihost->scic_lock, flags);
134 idev = isci_lookup_device(task->dev);
135 io_ready = isci_device_io_ready(idev, task);
136 tag = isci_alloc_tag(ihost);
137 spin_unlock_irqrestore(&ihost->scic_lock, flags);
134 138
135 for_each_sas_task(num, task) { 139 dev_dbg(&ihost->pdev->dev,
136 enum sci_status status = SCI_FAILURE; 140 "task: %p, dev: %p idev: %p:%#lx cmd = %p\n",
141 task, task->dev, idev, idev ? idev->flags : 0,
142 task->uldd_task);
137 143
138 spin_lock_irqsave(&ihost->scic_lock, flags); 144 if (!idev) {
139 idev = isci_lookup_device(task->dev); 145 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
140 io_ready = isci_device_io_ready(idev, task); 146 SAS_DEVICE_UNKNOWN);
141 tag = isci_alloc_tag(ihost); 147 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
142 spin_unlock_irqrestore(&ihost->scic_lock, flags); 148 /* Indicate QUEUE_FULL so that the scsi midlayer
149 * retries.
150 */
151 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
152 SAS_QUEUE_FULL);
153 } else {
154 /* There is a device and it's ready for I/O. */
155 spin_lock_irqsave(&task->task_state_lock, flags);
143 156
144 dev_dbg(&ihost->pdev->dev, 157 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
145 "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n", 158 /* The I/O was aborted. */
146 task, num, task->dev, idev, idev ? idev->flags : 0, 159 spin_unlock_irqrestore(&task->task_state_lock, flags);
147 task->uldd_task); 160
148 161 isci_task_refuse(ihost, task,
149 if (!idev) { 162 SAS_TASK_UNDELIVERED,
150 isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED, 163 SAM_STAT_TASK_ABORTED);
151 SAS_DEVICE_UNKNOWN);
152 } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
153 /* Indicate QUEUE_FULL so that the scsi midlayer
154 * retries.
155 */
156 isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
157 SAS_QUEUE_FULL);
158 } else { 164 } else {
159 /* There is a device and it's ready for I/O. */ 165 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
160 spin_lock_irqsave(&task->task_state_lock, flags); 166 spin_unlock_irqrestore(&task->task_state_lock, flags);
161 167
162 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { 168 /* build and send the request. */
163 /* The I/O was aborted. */ 169 status = isci_request_execute(ihost, idev, task, tag);
164 spin_unlock_irqrestore(&task->task_state_lock, 170
165 flags); 171 if (status != SCI_SUCCESS) {
166 172 spin_lock_irqsave(&task->task_state_lock, flags);
167 isci_task_refuse(ihost, task, 173 /* Did not really start this command. */
168 SAS_TASK_UNDELIVERED, 174 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
169 SAM_STAT_TASK_ABORTED);
170 } else {
171 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
172 spin_unlock_irqrestore(&task->task_state_lock, flags); 175 spin_unlock_irqrestore(&task->task_state_lock, flags);
173 176
174 /* build and send the request. */ 177 if (test_bit(IDEV_GONE, &idev->flags)) {
175 status = isci_request_execute(ihost, idev, task, tag); 178 /* Indicate that the device
176 179 * is gone.
177 if (status != SCI_SUCCESS) { 180 */
178 181 isci_task_refuse(ihost, task,
179 spin_lock_irqsave(&task->task_state_lock, flags); 182 SAS_TASK_UNDELIVERED,
180 /* Did not really start this command. */ 183 SAS_DEVICE_UNKNOWN);
181 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; 184 } else {
182 spin_unlock_irqrestore(&task->task_state_lock, flags); 185 /* Indicate QUEUE_FULL so that
183 186 * the scsi midlayer retries.
184 if (test_bit(IDEV_GONE, &idev->flags)) { 187 * If the request failed for
185 188 * remote device reasons, it
186 /* Indicate that the device 189 * gets returned as
187 * is gone. 190 * SAS_TASK_UNDELIVERED next
188 */ 191 * time through.
189 isci_task_refuse(ihost, task, 192 */
190 SAS_TASK_UNDELIVERED, 193 isci_task_refuse(ihost, task,
191 SAS_DEVICE_UNKNOWN); 194 SAS_TASK_COMPLETE,
192 } else { 195 SAS_QUEUE_FULL);
193 /* Indicate QUEUE_FULL so that
194 * the scsi midlayer retries.
195 * If the request failed for
196 * remote device reasons, it
197 * gets returned as
198 * SAS_TASK_UNDELIVERED next
199 * time through.
200 */
201 isci_task_refuse(ihost, task,
202 SAS_TASK_COMPLETE,
203 SAS_QUEUE_FULL);
204 }
205 } 196 }
206 } 197 }
207 } 198 }
208 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
209 spin_lock_irqsave(&ihost->scic_lock, flags);
210 /* command never hit the device, so just free
211 * the tci and skip the sequence increment
212 */
213 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
214 spin_unlock_irqrestore(&ihost->scic_lock, flags);
215 }
216 isci_put_device(idev);
217 } 199 }
200
201 if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
202 spin_lock_irqsave(&ihost->scic_lock, flags);
203 /* command never hit the device, so just free
204 * the tci and skip the sequence increment
205 */
206 isci_tci_free(ihost, ISCI_TAG_TCI(tag));
207 spin_unlock_irqrestore(&ihost->scic_lock, flags);
208 }
209
210 isci_put_device(idev);
218 return 0; 211 return 0;
219} 212}
220 213
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 9c06cbad1d26..8f4531f22ac2 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -131,7 +131,6 @@ static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
131 131
132int isci_task_execute_task( 132int isci_task_execute_task(
133 struct sas_task *task, 133 struct sas_task *task,
134 int num,
135 gfp_t gfp_flags); 134 gfp_t gfp_flags);
136 135
137int isci_task_abort_task( 136int isci_task_abort_task(
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 766098af4eb7..577770fdee86 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -171,7 +171,6 @@ static void sas_ata_task_done(struct sas_task *task)
171 spin_unlock_irqrestore(ap->lock, flags); 171 spin_unlock_irqrestore(ap->lock, flags);
172 172
173qc_already_gone: 173qc_already_gone:
174 list_del_init(&task->list);
175 sas_free_task(task); 174 sas_free_task(task);
176} 175}
177 176
@@ -244,12 +243,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
244 if (qc->scsicmd) 243 if (qc->scsicmd)
245 ASSIGN_SAS_TASK(qc->scsicmd, task); 244 ASSIGN_SAS_TASK(qc->scsicmd, task);
246 245
247 if (sas_ha->lldd_max_execute_num < 2) 246 ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
248 ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
249 else
250 ret = sas_queue_up(task);
251
252 /* Examine */
253 if (ret) { 247 if (ret) {
254 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret); 248 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
255 249
@@ -485,7 +479,6 @@ static void sas_ata_internal_abort(struct sas_task *task)
485 479
486 return; 480 return;
487 out: 481 out:
488 list_del_init(&task->list);
489 sas_free_task(task); 482 sas_free_task(task);
490} 483}
491 484
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0cac7d8fd0f7..022bb6e10d98 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -96,7 +96,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; 96 task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
97 add_timer(&task->slow_task->timer); 97 add_timer(&task->slow_task->timer);
98 98
99 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL); 99 res = i->dft->lldd_execute_task(task, GFP_KERNEL);
100 100
101 if (res) { 101 if (res) {
102 del_timer(&task->slow_task->timer); 102 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index dbc8a793fd86..362da44f2948 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -45,7 +45,6 @@ struct sas_task *sas_alloc_task(gfp_t flags)
45 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); 45 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
46 46
47 if (task) { 47 if (task) {
48 INIT_LIST_HEAD(&task->list);
49 spin_lock_init(&task->task_state_lock); 48 spin_lock_init(&task->task_state_lock);
50 task->task_state_flags = SAS_TASK_STATE_PENDING; 49 task->task_state_flags = SAS_TASK_STATE_PENDING;
51 } 50 }
@@ -77,7 +76,6 @@ EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
77void sas_free_task(struct sas_task *task) 76void sas_free_task(struct sas_task *task)
78{ 77{
79 if (task) { 78 if (task) {
80 BUG_ON(!list_empty(&task->list));
81 kfree(task->slow_task); 79 kfree(task->slow_task);
82 kmem_cache_free(sas_task_cache, task); 80 kmem_cache_free(sas_task_cache, task);
83 } 81 }
@@ -127,11 +125,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
127 spin_lock_init(&sas_ha->phy_port_lock); 125 spin_lock_init(&sas_ha->phy_port_lock);
128 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); 126 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
129 127
130 if (sas_ha->lldd_queue_size == 0)
131 sas_ha->lldd_queue_size = 1;
132 else if (sas_ha->lldd_queue_size == -1)
133 sas_ha->lldd_queue_size = 128; /* Sanity */
134
135 set_bit(SAS_HA_REGISTERED, &sas_ha->state); 128 set_bit(SAS_HA_REGISTERED, &sas_ha->state);
136 spin_lock_init(&sas_ha->lock); 129 spin_lock_init(&sas_ha->lock);
137 mutex_init(&sas_ha->drain_mutex); 130 mutex_init(&sas_ha->drain_mutex);
@@ -157,15 +150,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
157 goto Undo_ports; 150 goto Undo_ports;
158 } 151 }
159 152
160 if (sas_ha->lldd_max_execute_num > 1) {
161 error = sas_init_queue(sas_ha);
162 if (error) {
163 printk(KERN_NOTICE "couldn't start queue thread:%d, "
164 "running in direct mode\n", error);
165 sas_ha->lldd_max_execute_num = 1;
166 }
167 }
168
169 INIT_LIST_HEAD(&sas_ha->eh_done_q); 153 INIT_LIST_HEAD(&sas_ha->eh_done_q);
170 INIT_LIST_HEAD(&sas_ha->eh_ata_q); 154 INIT_LIST_HEAD(&sas_ha->eh_ata_q);
171 155
@@ -201,11 +185,6 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
201 __sas_drain_work(sas_ha); 185 __sas_drain_work(sas_ha);
202 mutex_unlock(&sas_ha->drain_mutex); 186 mutex_unlock(&sas_ha->drain_mutex);
203 187
204 if (sas_ha->lldd_max_execute_num > 1) {
205 sas_shutdown_queue(sas_ha);
206 sas_ha->lldd_max_execute_num = 1;
207 }
208
209 return 0; 188 return 0;
210} 189}
211 190
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 7e7ba83f0a21..9cf0bc260b0e 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -66,9 +66,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
66 66
67enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *); 67enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
68 68
69int sas_init_queue(struct sas_ha_struct *sas_ha);
70int sas_init_events(struct sas_ha_struct *sas_ha); 69int sas_init_events(struct sas_ha_struct *sas_ha);
71void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
72void sas_disable_revalidation(struct sas_ha_struct *ha); 70void sas_disable_revalidation(struct sas_ha_struct *ha);
73void sas_enable_revalidation(struct sas_ha_struct *ha); 71void sas_enable_revalidation(struct sas_ha_struct *ha);
74void __sas_drain_work(struct sas_ha_struct *ha); 72void __sas_drain_work(struct sas_ha_struct *ha);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b492293d51f2..72918d227ead 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -112,7 +112,6 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
112 112
113 sc->result = (hs << 16) | stat; 113 sc->result = (hs << 16) | stat;
114 ASSIGN_SAS_TASK(sc, NULL); 114 ASSIGN_SAS_TASK(sc, NULL);
115 list_del_init(&task->list);
116 sas_free_task(task); 115 sas_free_task(task);
117} 116}
118 117
@@ -138,7 +137,6 @@ static void sas_scsi_task_done(struct sas_task *task)
138 137
139 if (unlikely(!sc)) { 138 if (unlikely(!sc)) {
140 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n"); 139 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
141 list_del_init(&task->list);
142 sas_free_task(task); 140 sas_free_task(task);
143 return; 141 return;
144 } 142 }
@@ -179,31 +177,10 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
179 return task; 177 return task;
180} 178}
181 179
182int sas_queue_up(struct sas_task *task)
183{
184 struct sas_ha_struct *sas_ha = task->dev->port->ha;
185 struct scsi_core *core = &sas_ha->core;
186 unsigned long flags;
187 LIST_HEAD(list);
188
189 spin_lock_irqsave(&core->task_queue_lock, flags);
190 if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
191 spin_unlock_irqrestore(&core->task_queue_lock, flags);
192 return -SAS_QUEUE_FULL;
193 }
194 list_add_tail(&task->list, &core->task_queue);
195 core->task_queue_size += 1;
196 spin_unlock_irqrestore(&core->task_queue_lock, flags);
197 wake_up_process(core->queue_thread);
198
199 return 0;
200}
201
202int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) 180int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
203{ 181{
204 struct sas_internal *i = to_sas_internal(host->transportt); 182 struct sas_internal *i = to_sas_internal(host->transportt);
205 struct domain_device *dev = cmd_to_domain_dev(cmd); 183 struct domain_device *dev = cmd_to_domain_dev(cmd);
206 struct sas_ha_struct *sas_ha = dev->port->ha;
207 struct sas_task *task; 184 struct sas_task *task;
208 int res = 0; 185 int res = 0;
209 186
@@ -224,12 +201,7 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
224 if (!task) 201 if (!task)
225 return SCSI_MLQUEUE_HOST_BUSY; 202 return SCSI_MLQUEUE_HOST_BUSY;
226 203
227 /* Queue up, Direct Mode or Task Collector Mode. */ 204 res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
228 if (sas_ha->lldd_max_execute_num < 2)
229 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
230 else
231 res = sas_queue_up(task);
232
233 if (res) 205 if (res)
234 goto out_free_task; 206 goto out_free_task;
235 return 0; 207 return 0;
@@ -323,37 +295,17 @@ enum task_disposition {
323 TASK_IS_DONE, 295 TASK_IS_DONE,
324 TASK_IS_ABORTED, 296 TASK_IS_ABORTED,
325 TASK_IS_AT_LU, 297 TASK_IS_AT_LU,
326 TASK_IS_NOT_AT_HA,
327 TASK_IS_NOT_AT_LU, 298 TASK_IS_NOT_AT_LU,
328 TASK_ABORT_FAILED, 299 TASK_ABORT_FAILED,
329}; 300};
330 301
331static enum task_disposition sas_scsi_find_task(struct sas_task *task) 302static enum task_disposition sas_scsi_find_task(struct sas_task *task)
332{ 303{
333 struct sas_ha_struct *ha = task->dev->port->ha;
334 unsigned long flags; 304 unsigned long flags;
335 int i, res; 305 int i, res;
336 struct sas_internal *si = 306 struct sas_internal *si =
337 to_sas_internal(task->dev->port->ha->core.shost->transportt); 307 to_sas_internal(task->dev->port->ha->core.shost->transportt);
338 308
339 if (ha->lldd_max_execute_num > 1) {
340 struct scsi_core *core = &ha->core;
341 struct sas_task *t, *n;
342
343 mutex_lock(&core->task_queue_flush);
344 spin_lock_irqsave(&core->task_queue_lock, flags);
345 list_for_each_entry_safe(t, n, &core->task_queue, list)
346 if (task == t) {
347 list_del_init(&t->list);
348 break;
349 }
350 spin_unlock_irqrestore(&core->task_queue_lock, flags);
351 mutex_unlock(&core->task_queue_flush);
352
353 if (task == t)
354 return TASK_IS_NOT_AT_HA;
355 }
356
357 for (i = 0; i < 5; i++) { 309 for (i = 0; i < 5; i++) {
358 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task); 310 SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
359 res = si->dft->lldd_abort_task(task); 311 res = si->dft->lldd_abort_task(task);
@@ -667,14 +619,6 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
667 cmd->eh_eflags = 0; 619 cmd->eh_eflags = 0;
668 620
669 switch (res) { 621 switch (res) {
670 case TASK_IS_NOT_AT_HA:
671 SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
672 __func__, task,
673 cmd->retries ? "retry" : "aborted");
674 if (cmd->retries)
675 cmd->retries--;
676 sas_eh_finish_cmd(cmd);
677 continue;
678 case TASK_IS_DONE: 622 case TASK_IS_DONE:
679 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 623 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
680 task); 624 task);
@@ -836,9 +780,6 @@ retry:
836 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 780 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
837 781
838out: 782out:
839 if (ha->lldd_max_execute_num > 1)
840 wake_up_process(ha->core.queue_thread);
841
842 sas_eh_handle_resets(shost); 783 sas_eh_handle_resets(shost);
843 784
844 /* now link into libata eh --- if we have any ata devices */ 785 /* now link into libata eh --- if we have any ata devices */
@@ -984,121 +925,6 @@ int sas_bios_param(struct scsi_device *scsi_dev,
984 return 0; 925 return 0;
985} 926}
986 927
987/* ---------- Task Collector Thread implementation ---------- */
988
989static void sas_queue(struct sas_ha_struct *sas_ha)
990{
991 struct scsi_core *core = &sas_ha->core;
992 unsigned long flags;
993 LIST_HEAD(q);
994 int can_queue;
995 int res;
996 struct sas_internal *i = to_sas_internal(core->shost->transportt);
997
998 mutex_lock(&core->task_queue_flush);
999 spin_lock_irqsave(&core->task_queue_lock, flags);
1000 while (!kthread_should_stop() &&
1001 !list_empty(&core->task_queue) &&
1002 !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
1003
1004 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
1005 if (can_queue >= 0) {
1006 can_queue = core->task_queue_size;
1007 list_splice_init(&core->task_queue, &q);
1008 } else {
1009 struct list_head *a, *n;
1010
1011 can_queue = sas_ha->lldd_queue_size;
1012 list_for_each_safe(a, n, &core->task_queue) {
1013 list_move_tail(a, &q);
1014 if (--can_queue == 0)
1015 break;
1016 }
1017 can_queue = sas_ha->lldd_queue_size;
1018 }
1019 core->task_queue_size -= can_queue;
1020 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1021 {
1022 struct sas_task *task = list_entry(q.next,
1023 struct sas_task,
1024 list);
1025 list_del_init(&q);
1026 res = i->dft->lldd_execute_task(task, can_queue,
1027 GFP_KERNEL);
1028 if (unlikely(res))
1029 __list_add(&q, task->list.prev, &task->list);
1030 }
1031 spin_lock_irqsave(&core->task_queue_lock, flags);
1032 if (res) {
1033 list_splice_init(&q, &core->task_queue); /*at head*/
1034 core->task_queue_size += can_queue;
1035 }
1036 }
1037 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1038 mutex_unlock(&core->task_queue_flush);
1039}
1040
1041/**
1042 * sas_queue_thread -- The Task Collector thread
1043 * @_sas_ha: pointer to struct sas_ha
1044 */
1045static int sas_queue_thread(void *_sas_ha)
1046{
1047 struct sas_ha_struct *sas_ha = _sas_ha;
1048
1049 while (1) {
1050 set_current_state(TASK_INTERRUPTIBLE);
1051 schedule();
1052 sas_queue(sas_ha);
1053 if (kthread_should_stop())
1054 break;
1055 }
1056
1057 return 0;
1058}
1059
1060int sas_init_queue(struct sas_ha_struct *sas_ha)
1061{
1062 struct scsi_core *core = &sas_ha->core;
1063
1064 spin_lock_init(&core->task_queue_lock);
1065 mutex_init(&core->task_queue_flush);
1066 core->task_queue_size = 0;
1067 INIT_LIST_HEAD(&core->task_queue);
1068
1069 core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
1070 "sas_queue_%d", core->shost->host_no);
1071 if (IS_ERR(core->queue_thread))
1072 return PTR_ERR(core->queue_thread);
1073 return 0;
1074}
1075
1076void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
1077{
1078 unsigned long flags;
1079 struct scsi_core *core = &sas_ha->core;
1080 struct sas_task *task, *n;
1081
1082 kthread_stop(core->queue_thread);
1083
1084 if (!list_empty(&core->task_queue))
1085 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
1086 SAS_ADDR(sas_ha->sas_addr));
1087
1088 spin_lock_irqsave(&core->task_queue_lock, flags);
1089 list_for_each_entry_safe(task, n, &core->task_queue, list) {
1090 struct scsi_cmnd *cmd = task->uldd_task;
1091
1092 list_del_init(&task->list);
1093
1094 ASSIGN_SAS_TASK(cmd, NULL);
1095 sas_free_task(task);
1096 cmd->result = DID_ABORT << 16;
1097 cmd->scsi_done(cmd);
1098 }
1099 spin_unlock_irqrestore(&core->task_queue_lock, flags);
1100}
1101
1102/* 928/*
1103 * Tell an upper layer that it needs to initiate an abort for a given task. 929 * Tell an upper layer that it needs to initiate an abort for a given task.
1104 * This should only ever be called by an LLDD. 930 * This should only ever be called by an LLDD.
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index ac7c03078409..f15df3de6790 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -26,18 +26,9 @@
26 26
27#include "mv_sas.h" 27#include "mv_sas.h"
28 28
29static int lldd_max_execute_num = 1;
30module_param_named(collector, lldd_max_execute_num, int, S_IRUGO);
31MODULE_PARM_DESC(collector, "\n"
32 "\tIf greater than one, tells the SAS Layer to run in Task Collector\n"
33 "\tMode. If 1 or 0, tells the SAS Layer to run in Direct Mode.\n"
34 "\tThe mvsas SAS LLDD supports both modes.\n"
35 "\tDefault: 1 (Direct Mode).\n");
36
37int interrupt_coalescing = 0x80; 29int interrupt_coalescing = 0x80;
38 30
39static struct scsi_transport_template *mvs_stt; 31static struct scsi_transport_template *mvs_stt;
40struct kmem_cache *mvs_task_list_cache;
41static const struct mvs_chip_info mvs_chips[] = { 32static const struct mvs_chip_info mvs_chips[] = {
42 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, 33 [chip_6320] = { 1, 2, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
43 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, }, 34 [chip_6440] = { 1, 4, 0x400, 17, 16, 6, 9, &mvs_64xx_dispatch, },
@@ -513,14 +504,11 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
513 504
514 sha->num_phys = nr_core * chip_info->n_phy; 505 sha->num_phys = nr_core * chip_info->n_phy;
515 506
516 sha->lldd_max_execute_num = lldd_max_execute_num;
517
518 if (mvi->flags & MVF_FLAG_SOC) 507 if (mvi->flags & MVF_FLAG_SOC)
519 can_queue = MVS_SOC_CAN_QUEUE; 508 can_queue = MVS_SOC_CAN_QUEUE;
520 else 509 else
521 can_queue = MVS_CHIP_SLOT_SZ; 510 can_queue = MVS_CHIP_SLOT_SZ;
522 511
523 sha->lldd_queue_size = can_queue;
524 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG); 512 shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
525 shost->can_queue = can_queue; 513 shost->can_queue = can_queue;
526 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE; 514 mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
@@ -833,16 +821,7 @@ static int __init mvs_init(void)
833 if (!mvs_stt) 821 if (!mvs_stt)
834 return -ENOMEM; 822 return -ENOMEM;
835 823
836 mvs_task_list_cache = kmem_cache_create("mvs_task_list", sizeof(struct mvs_task_list),
837 0, SLAB_HWCACHE_ALIGN, NULL);
838 if (!mvs_task_list_cache) {
839 rc = -ENOMEM;
840 mv_printk("%s: mvs_task_list_cache alloc failed! \n", __func__);
841 goto err_out;
842 }
843
844 rc = pci_register_driver(&mvs_pci_driver); 824 rc = pci_register_driver(&mvs_pci_driver);
845
846 if (rc) 825 if (rc)
847 goto err_out; 826 goto err_out;
848 827
@@ -857,7 +836,6 @@ static void __exit mvs_exit(void)
857{ 836{
858 pci_unregister_driver(&mvs_pci_driver); 837 pci_unregister_driver(&mvs_pci_driver);
859 sas_release_transport(mvs_stt); 838 sas_release_transport(mvs_stt);
860 kmem_cache_destroy(mvs_task_list_cache);
861} 839}
862 840
863struct device_attribute *mvst_host_attrs[] = { 841struct device_attribute *mvst_host_attrs[] = {
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index ac52f7c99513..85d86a5cdb60 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -852,43 +852,7 @@ prep_out:
852 return rc; 852 return rc;
853} 853}
854 854
855static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) 855static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
856{
857 struct mvs_task_list *first = NULL;
858
859 for (; *num > 0; --*num) {
860 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
861
862 if (!mvs_list)
863 break;
864
865 INIT_LIST_HEAD(&mvs_list->list);
866 if (!first)
867 first = mvs_list;
868 else
869 list_add_tail(&mvs_list->list, &first->list);
870
871 }
872
873 return first;
874}
875
876static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
877{
878 LIST_HEAD(list);
879 struct list_head *pos, *a;
880 struct mvs_task_list *mlist = NULL;
881
882 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
883
884 list_for_each_safe(pos, a, &list) {
885 list_del_init(pos);
886 mlist = list_entry(pos, struct mvs_task_list, list);
887 kmem_cache_free(mvs_task_list_cache, mlist);
888 }
889}
890
891static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
892 struct completion *completion, int is_tmf, 856 struct completion *completion, int is_tmf,
893 struct mvs_tmf_task *tmf) 857 struct mvs_tmf_task *tmf)
894{ 858{
@@ -912,74 +876,9 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
912 return rc; 876 return rc;
913} 877}
914 878
915static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 879int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
916 struct completion *completion, int is_tmf,
917 struct mvs_tmf_task *tmf)
918{ 880{
919 struct domain_device *dev = task->dev; 881 return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
920 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
921 struct mvs_info *mvi = NULL;
922 struct sas_task *t = task;
923 struct mvs_task_list *mvs_list = NULL, *a;
924 LIST_HEAD(q);
925 int pass[2] = {0};
926 u32 rc = 0;
927 u32 n = num;
928 unsigned long flags = 0;
929
930 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
931 if (n) {
932 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
933 rc = -ENOMEM;
934 goto free_list;
935 }
936
937 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
938
939 list_for_each_entry(a, &q, list) {
940 a->task = t;
941 t = list_entry(t->list.next, struct sas_task, list);
942 }
943
944 list_for_each_entry(a, &q , list) {
945
946 t = a->task;
947 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
948
949 spin_lock_irqsave(&mvi->lock, flags);
950 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
951 if (rc)
952 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
953 spin_unlock_irqrestore(&mvi->lock, flags);
954 }
955
956 if (likely(pass[0]))
957 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
958 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
959
960 if (likely(pass[1]))
961 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
962 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
963
964 list_del_init(&q);
965
966free_list:
967 if (mvs_list)
968 mvs_task_free_list(mvs_list);
969
970 return rc;
971}
972
973int mvs_queue_command(struct sas_task *task, const int num,
974 gfp_t gfp_flags)
975{
976 struct mvs_device *mvi_dev = task->dev->lldd_dev;
977 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
978
979 if (sas->lldd_max_execute_num < 2)
980 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
981 else
982 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
983} 882}
984 883
985static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 884static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
@@ -1411,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1411 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1310 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1412 add_timer(&task->slow_task->timer); 1311 add_timer(&task->slow_task->timer);
1413 1312
1414 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); 1313 res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
1415 1314
1416 if (res) { 1315 if (res) {
1417 del_timer(&task->slow_task->timer); 1316 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index d6b19dc80bee..dc409c04747a 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -65,7 +65,6 @@ extern struct mvs_tgt_initiator mvs_tgt;
65extern struct mvs_info *tgt_mvi; 65extern struct mvs_info *tgt_mvi;
66extern const struct mvs_dispatch mvs_64xx_dispatch; 66extern const struct mvs_dispatch mvs_64xx_dispatch;
67extern const struct mvs_dispatch mvs_94xx_dispatch; 67extern const struct mvs_dispatch mvs_94xx_dispatch;
68extern struct kmem_cache *mvs_task_list_cache;
69 68
70#define DEV_IS_EXPANDER(type) \ 69#define DEV_IS_EXPANDER(type) \
71 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) 70 ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE))
@@ -440,12 +439,6 @@ struct mvs_task_exec_info {
440 int n_elem; 439 int n_elem;
441}; 440};
442 441
443struct mvs_task_list {
444 struct sas_task *task;
445 struct list_head list;
446};
447
448
449/******************** function prototype *********************/ 442/******************** function prototype *********************/
450void mvs_get_sas_addr(void *buf, u32 buflen); 443void mvs_get_sas_addr(void *buf, u32 buflen);
451void mvs_tag_clear(struct mvs_info *mvi, u32 tag); 444void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
@@ -462,8 +455,7 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
462 u32 off_hi, u64 sas_addr); 455 u32 off_hi, u64 sas_addr);
463void mvs_scan_start(struct Scsi_Host *shost); 456void mvs_scan_start(struct Scsi_Host *shost);
464int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 457int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
465int mvs_queue_command(struct sas_task *task, const int num, 458int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags);
466 gfp_t gfp_flags);
467int mvs_abort_task(struct sas_task *task); 459int mvs_abort_task(struct sas_task *task);
468int mvs_abort_task_set(struct domain_device *dev, u8 *lun); 460int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
469int mvs_clear_aca(struct domain_device *dev, u8 *lun); 461int mvs_clear_aca(struct domain_device *dev, u8 *lun);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 19ae6cab5e44..329aba0083ab 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -601,8 +601,6 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
601 sha->lldd_module = THIS_MODULE; 601 sha->lldd_module = THIS_MODULE;
602 sha->sas_addr = &pm8001_ha->sas_addr[0]; 602 sha->sas_addr = &pm8001_ha->sas_addr[0];
603 sha->num_phys = chip_info->n_phy; 603 sha->num_phys = chip_info->n_phy;
604 sha->lldd_max_execute_num = 1;
605 sha->lldd_queue_size = PM8001_CAN_QUEUE;
606 sha->core.shost = shost; 604 sha->core.shost = shost;
607} 605}
608 606
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 76570e6a547d..b93f289b42b3 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -350,7 +350,7 @@ static int sas_find_local_port_id(struct domain_device *dev)
350 */ 350 */
351#define DEV_IS_GONE(pm8001_dev) \ 351#define DEV_IS_GONE(pm8001_dev) \
352 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) 352 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
353static int pm8001_task_exec(struct sas_task *task, const int num, 353static int pm8001_task_exec(struct sas_task *task,
354 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) 354 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
355{ 355{
356 struct domain_device *dev = task->dev; 356 struct domain_device *dev = task->dev;
@@ -360,7 +360,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
360 struct sas_task *t = task; 360 struct sas_task *t = task;
361 struct pm8001_ccb_info *ccb; 361 struct pm8001_ccb_info *ccb;
362 u32 tag = 0xdeadbeef, rc, n_elem = 0; 362 u32 tag = 0xdeadbeef, rc, n_elem = 0;
363 u32 n = num;
364 unsigned long flags = 0; 363 unsigned long flags = 0;
365 364
366 if (!dev->port) { 365 if (!dev->port) {
@@ -387,18 +386,12 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
387 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 386 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
388 t->task_done(t); 387 t->task_done(t);
389 spin_lock_irqsave(&pm8001_ha->lock, flags); 388 spin_lock_irqsave(&pm8001_ha->lock, flags);
390 if (n > 1)
391 t = list_entry(t->list.next,
392 struct sas_task, list);
393 continue; 389 continue;
394 } else { 390 } else {
395 struct task_status_struct *ts = &t->task_status; 391 struct task_status_struct *ts = &t->task_status;
396 ts->resp = SAS_TASK_UNDELIVERED; 392 ts->resp = SAS_TASK_UNDELIVERED;
397 ts->stat = SAS_PHY_DOWN; 393 ts->stat = SAS_PHY_DOWN;
398 t->task_done(t); 394 t->task_done(t);
399 if (n > 1)
400 t = list_entry(t->list.next,
401 struct sas_task, list);
402 continue; 395 continue;
403 } 396 }
404 } 397 }
@@ -460,9 +453,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
460 t->task_state_flags |= SAS_TASK_AT_INITIATOR; 453 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
461 spin_unlock(&t->task_state_lock); 454 spin_unlock(&t->task_state_lock);
462 pm8001_dev->running_req++; 455 pm8001_dev->running_req++;
463 if (n > 1) 456 } while (0);
464 t = list_entry(t->list.next, struct sas_task, list);
465 } while (--n);
466 rc = 0; 457 rc = 0;
467 goto out_done; 458 goto out_done;
468 459
@@ -483,14 +474,11 @@ out_done:
483 * pm8001_queue_command - register for upper layer used, all IO commands sent 474 * pm8001_queue_command - register for upper layer used, all IO commands sent
484 * to HBA are from this interface. 475 * to HBA are from this interface.
485 * @task: the task to be execute. 476 * @task: the task to be execute.
486 * @num: if can_queue great than 1, the task can be queued up. for SMP task,
487 * we always execute one one time
488 * @gfp_flags: gfp_flags 477 * @gfp_flags: gfp_flags
489 */ 478 */
490int pm8001_queue_command(struct sas_task *task, const int num, 479int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
491 gfp_t gfp_flags)
492{ 480{
493 return pm8001_task_exec(task, num, gfp_flags, 0, NULL); 481 return pm8001_task_exec(task, gfp_flags, 0, NULL);
494} 482}
495 483
496/** 484/**
@@ -708,7 +696,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
708 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; 696 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
709 add_timer(&task->slow_task->timer); 697 add_timer(&task->slow_task->timer);
710 698
711 res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf); 699 res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
712 700
713 if (res) { 701 if (res) {
714 del_timer(&task->slow_task->timer); 702 del_timer(&task->slow_task->timer);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index f6b2ac59dae4..8dd8b7840f04 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -623,8 +623,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
623 void *funcdata); 623 void *funcdata);
624void pm8001_scan_start(struct Scsi_Host *shost); 624void pm8001_scan_start(struct Scsi_Host *shost);
625int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); 625int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
626int pm8001_queue_command(struct sas_task *task, const int num, 626int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags);
627 gfp_t gfp_flags);
628int pm8001_abort_task(struct sas_task *task); 627int pm8001_abort_task(struct sas_task *task);
629int pm8001_abort_task_set(struct domain_device *dev, u8 *lun); 628int pm8001_abort_task_set(struct domain_device *dev, u8 *lun);
630int pm8001_clear_aca(struct domain_device *dev, u8 *lun); 629int pm8001_clear_aca(struct domain_device *dev, u8 *lun);