author		Jens Axboe <axboe@fb.com>	2014-05-09 11:42:02 -0400
committer	Jens Axboe <axboe@fb.com>	2014-05-13 21:51:22 -0400
commit		ffc771b3ca8b2c03e5e9faa6335b4862108f111f (patch)
tree		5e6f71d7de2a9a7bff3a2fe5e512b078a455c697
parent		bd6f0bba1d2705748ec94e0aa23ae0c5bd6b2287 (diff)
mtip32xx: convert to use blk-mq
This rips out timeout handling, requeueing, etc in converting
it to use blk-mq instead.

Acked-by: Asai Thambi S P <asamymuthupa@micron.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
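[Editor's note: for orientation, the core of the conversion is that the driver stops keeping its own tag bitmap, slot semaphore, and timeout timer; it registers a blk_mq_tag_set whose cmd_size makes blk-mq allocate a struct mtip_cmd behind every request. A condensed sketch of the wiring this patch adds — all names are taken from the diff below, error handling and most fields omitted:

	static struct blk_mq_ops mtip_mq_ops = {
		.queue_rq	= mtip_queue_rq,	/* issue one request to the drive */
		.map_queue	= blk_mq_map_queue,
		.init_request	= mtip_init_cmd,	/* per-tag command-table DMA setup */
		.exit_request	= mtip_free_cmd,
	};

	dd->tags.ops		= &mtip_mq_ops;
	dd->tags.nr_hw_queues	= 1;
	dd->tags.queue_depth	= MTIP_MAX_COMMAND_SLOTS;
	dd->tags.reserved_tags	= 1;			/* the internal command */
	dd->tags.cmd_size	= sizeof(struct mtip_cmd);

	rv = blk_mq_alloc_tag_set(&dd->tags);		/* replaces get_slot()/release_slot() */
	dd->queue = blk_mq_init_queue(&dd->tags);	/* replaces blk_alloc_queue_node() */

With that in place, tag-to-command lookups go through blk_mq_tag_to_rq() and blk_mq_rq_to_pdu() rather than the old port->commands[] array, and the driver-private timeout timer is ripped out entirely.]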
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.c	889
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.h	24
2 files changed, 297 insertions, 616 deletions
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index fb624469d0ee..3a0882ee1642 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
+#include <linux/blk-mq.h>
 #include <linux/bio.h>
 #include <linux/dma-mapping.h>
 #include <linux/idr.h>
@@ -173,60 +174,36 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev)
 	return false; /* device present */
 }
 
-/*
- * Obtain an empty command slot.
- *
- * This function needs to be reentrant since it could be called
- * at the same time on multiple CPUs. The allocation of the
- * command slot must be atomic.
- *
- * @port Pointer to the port data structure.
- *
- * return value
- *	>= 0	Index of command slot obtained.
- *	-1	No command slots available.
- */
-static int get_slot(struct mtip_port *port)
+static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
-	int slot, i;
-	unsigned int num_command_slots = port->dd->slot_groups * 32;
+	struct request *rq;
 
-	/*
-	 * Try 10 times, because there is a small race here.
-	 * that's ok, because it's still cheaper than a lock.
-	 *
-	 * Race: Since this section is not protected by lock, same bit
-	 * could be chosen by different process contexts running in
-	 * different processor. So instead of costly lock, we are going
-	 * with loop.
-	 */
-	for (i = 0; i < 10; i++) {
-		slot = find_next_zero_bit(port->allocated,
-					 num_command_slots, 1);
-		if ((slot < num_command_slots) &&
-		    (!test_and_set_bit(slot, port->allocated)))
-			return slot;
-	}
-	dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
+	rq = blk_mq_alloc_reserved_request(dd->queue, 0, __GFP_WAIT);
+	return blk_mq_rq_to_pdu(rq);
+}
 
-	mtip_check_surprise_removal(port->dd->pdev);
-	return -1;
+static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
+{
+	blk_put_request(blk_mq_rq_from_pdu(cmd));
 }
 
 /*
- * Release a command slot.
- *
- * @port    Pointer to the port data structure.
- * @tag     Tag of command to release
- *
- * return value
- *	None
+ * Once we add support for one hctx per mtip group, this will change a bit
  */
-static inline void release_slot(struct mtip_port *port, int tag)
+static struct request *mtip_rq_from_tag(struct driver_data *dd,
+					unsigned int tag)
+{
+	struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
+
+	return blk_mq_tag_to_rq(hctx->tags, tag);
+}
+
+static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
+					unsigned int tag)
 {
-	smp_mb__before_clear_bit();
-	clear_bit(tag, port->allocated);
-	smp_mb__after_clear_bit();
+	struct request *rq = mtip_rq_from_tag(dd, tag);
+
+	return blk_mq_rq_to_pdu(rq);
 }
 
 /*
@@ -248,93 +225,28 @@ static inline void release_slot(struct mtip_port *port, int tag)
  *	None
  */
 static void mtip_async_complete(struct mtip_port *port,
-				int tag,
-				void *data,
-				int status)
+				int tag, struct mtip_cmd *cmd, int status)
 {
-	struct mtip_cmd *cmd;
-	struct driver_data *dd = data;
-	int unaligned, cb_status = status ? -EIO : 0;
-	void (*func)(void *, int);
+	struct driver_data *dd = port->dd;
+	struct request *rq;
 
 	if (unlikely(!dd) || unlikely(!port))
 		return;
 
-	cmd = &port->commands[tag];
-
 	if (unlikely(status == PORT_IRQ_TF_ERR)) {
 		dev_warn(&port->dd->pdev->dev,
 			"Command tag %d failed due to TFE\n", tag);
 	}
 
-	/* Clear the active flag */
-	atomic_set(&port->commands[tag].active, 0);
+	/* Unmap the DMA scatter list entries */
+	dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction);
 
-	/* Upper layer callback */
-	func = cmd->async_callback;
-	if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) {
-
-		/* Unmap the DMA scatter list entries */
-		dma_unmap_sg(&dd->pdev->dev,
-			cmd->sg,
-			cmd->scatter_ents,
-			cmd->direction);
-
-		func(cmd->async_data, cb_status);
-		unaligned = cmd->unaligned;
-
-		/* Clear the allocated bit for the command */
-		release_slot(port, tag);
-
-		if (unlikely(unaligned))
-			up(&port->cmd_slot_unal);
-		else
-			up(&port->cmd_slot);
-	}
-}
-
-/*
- * This function is called for clean the pending command in the
- * command slot during the surprise removal of device and return
- * error to the upper layer.
- *
- * @dd Pointer to the DRIVER_DATA structure.
- *
- * return value
- *	None
- */
-static void mtip_command_cleanup(struct driver_data *dd)
-{
-	int tag = 0;
-	struct mtip_cmd *cmd;
-	struct mtip_port *port = dd->port;
-	unsigned int num_cmd_slots = dd->slot_groups * 32;
-
-	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
-		return;
-
-	if (!port)
-		return;
-
-	cmd = &port->commands[MTIP_TAG_INTERNAL];
-	if (atomic_read(&cmd->active))
-		if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) &
-					(1 << MTIP_TAG_INTERNAL))
-			if (cmd->comp_func)
-				cmd->comp_func(port, MTIP_TAG_INTERNAL,
-					cmd->comp_data, -ENODEV);
+	rq = mtip_rq_from_tag(dd, tag);
 
-	while (1) {
-		tag = find_next_bit(port->allocated, num_cmd_slots, tag);
-		if (tag >= num_cmd_slots)
-			break;
+	if (unlikely(cmd->unaligned))
+		up(&port->cmd_slot_unal);
 
-		cmd = &port->commands[tag];
-		if (atomic_read(&cmd->active))
-			mtip_async_complete(port, tag, dd, -ENODEV);
-	}
-
-	set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+	blk_mq_end_io(rq, status ? -EIO : 0);
 }
 
 /*
@@ -388,8 +300,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 {
 	int group = tag >> 5;
 
-	atomic_set(&port->commands[tag].active, 1);
-
 	/* guard SACT and CI registers */
 	spin_lock(&port->cmd_issue_lock[group]);
 	writel((1 << MTIP_TAG_BIT(tag)),
@@ -397,10 +307,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
 	writel((1 << MTIP_TAG_BIT(tag)),
 			port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 	spin_unlock(&port->cmd_issue_lock[group]);
-
-	/* Set the command's timeout value.*/
-	port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
-					MTIP_NCQ_COMMAND_TIMEOUT_MS);
 }
 
 /*
@@ -648,132 +554,13 @@ static void print_tags(struct driver_data *dd,
 
 	memset(tagmap, 0, sizeof(tagmap));
 	for (group = SLOTBITS_IN_LONGS; group > 0; group--)
-		tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
+		tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ",
 						tagbits[group-1]);
 	dev_warn(&dd->pdev->dev,
 			"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
 }
 
 /*
- * Called periodically to see if any read/write commands are
- * taking too long to complete.
- *
- * @data Pointer to the PORT data structure.
- *
- * return value
- *	None
- */
-static void mtip_timeout_function(unsigned long int data)
-{
-	struct mtip_port *port = (struct mtip_port *) data;
-	struct host_to_dev_fis *fis;
-	struct mtip_cmd *cmd;
-	int unaligned, tag, cmdto_cnt = 0;
-	unsigned int bit, group;
-	unsigned int num_command_slots;
-	unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
-	void (*func)(void *, int);
-
-	if (unlikely(!port))
-		return;
-
-	if (unlikely(port->dd->sr))
-		return;
-
-	if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
-		mod_timer(&port->cmd_timer,
-			jiffies + msecs_to_jiffies(30000));
-		return;
-	}
-	/* clear the tag accumulator */
-	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
-	num_command_slots = port->dd->slot_groups * 32;
-
-	for (tag = 0; tag < num_command_slots; tag++) {
-		/*
-		 * Skip internal command slot as it has
-		 * its own timeout mechanism
-		 */
-		if (tag == MTIP_TAG_INTERNAL)
-			continue;
-
-		if (atomic_read(&port->commands[tag].active) &&
-		   (time_after(jiffies, port->commands[tag].comp_time))) {
-			group = tag >> 5;
-			bit = tag & 0x1F;
-
-			cmd = &port->commands[tag];
-			fis = (struct host_to_dev_fis *) cmd->command;
-
-			set_bit(tag, tagaccum);
-			cmdto_cnt++;
-			if (cmdto_cnt == 1)
-				set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
-			/*
-			 * Clear the completed bit. This should prevent
-			 * any interrupt handlers from trying to retire
-			 * the command.
-			 */
-			writel(1 << bit, port->completed[group]);
-
-			/* Clear the active flag for the command */
-			atomic_set(&port->commands[tag].active, 0);
-
-			func = cmd->async_callback;
-			if (func &&
-			    cmpxchg(&cmd->async_callback, func, 0) == func) {
-
-				/* Unmap the DMA scatter list entries */
-				dma_unmap_sg(&port->dd->pdev->dev,
-						cmd->sg,
-						cmd->scatter_ents,
-						cmd->direction);
-
-				func(cmd->async_data, -EIO);
-				unaligned = cmd->unaligned;
-
-				/* Clear the allocated bit for the command. */
-				release_slot(port, tag);
-
-				if (unaligned)
-					up(&port->cmd_slot_unal);
-				else
-					up(&port->cmd_slot);
-			}
-		}
-	}
-
-	if (cmdto_cnt) {
-		print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
-		if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
-			mtip_device_reset(port->dd);
-			wake_up_interruptible(&port->svc_wait);
-		}
-		clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-	}
-
-	if (port->ic_pause_timer) {
-		to = port->ic_pause_timer + msecs_to_jiffies(1000);
-		if (time_after(jiffies, to)) {
-			if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
-				port->ic_pause_timer = 0;
-				clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
-				clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
-				clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
-				wake_up_interruptible(&port->svc_wait);
-			}
-
-
-		}
-	}
-
-	/* Restart the timer */
-	mod_timer(&port->cmd_timer,
-		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
-}
-
-/*
  * Internal command completion callback function.
  *
  * This function is normally called by the driver ISR when an internal
@@ -789,28 +576,19 @@ static void mtip_timeout_function(unsigned long int data)
  *	None
  */
 static void mtip_completion(struct mtip_port *port,
-				int tag,
-				void *data,
-				int status)
+				int tag, struct mtip_cmd *command, int status)
 {
-	struct mtip_cmd *command = &port->commands[tag];
-	struct completion *waiting = data;
+	struct completion *waiting = command->comp_data;
 	if (unlikely(status == PORT_IRQ_TF_ERR))
 		dev_warn(&port->dd->pdev->dev,
 			"Internal command %d completed with TFE\n", tag);
 
-	command->async_callback = NULL;
-	command->comp_func = NULL;
-
 	complete(waiting);
 }
 
 static void mtip_null_completion(struct mtip_port *port,
-				int tag,
-				void *data,
-				int status)
+				int tag, struct mtip_cmd *command, int status)
 {
-	return;
 }
 
 static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
@@ -843,18 +621,16 @@ static void mtip_handle_tfe(struct driver_data *dd)
 	port = dd->port;
 
 	/* Stop the timer to prevent command timeouts. */
-	del_timer(&port->cmd_timer);
 	set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 
 	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
 			test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
-		cmd = &port->commands[MTIP_TAG_INTERNAL];
+		cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
 		dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
 
-		atomic_inc(&cmd->active); /* active > 1 indicates error */
 		if (cmd->comp_data && cmd->comp_func) {
 			cmd->comp_func(port, MTIP_TAG_INTERNAL,
-					cmd->comp_data, PORT_IRQ_TF_ERR);
+					cmd, PORT_IRQ_TF_ERR);
 		}
 		goto handle_tfe_exit;
 	}
@@ -866,6 +642,8 @@ static void mtip_handle_tfe(struct driver_data *dd)
 	for (group = 0; group < dd->slot_groups; group++) {
 		completed = readl(port->completed[group]);
 
+		dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed);
+
 		/* clear completed status register in the hardware.*/
 		writel(completed, port->completed[group]);
 
@@ -879,15 +657,11 @@ static void mtip_handle_tfe(struct driver_data *dd)
 			if (tag == MTIP_TAG_INTERNAL)
 				continue;
 
-			cmd = &port->commands[tag];
+			cmd = mtip_cmd_from_tag(dd, tag);
 			if (likely(cmd->comp_func)) {
 				set_bit(tag, tagaccum);
 				cmd_cnt++;
-				atomic_set(&cmd->active, 0);
-				cmd->comp_func(port,
-					tag,
-					cmd->comp_data,
-					0);
+				cmd->comp_func(port, tag, cmd, 0);
 			} else {
 				dev_err(&port->dd->pdev->dev,
 					"Missing completion func for tag %d",
@@ -947,11 +721,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
 		for (bit = 0; bit < 32; bit++) {
 			reissue = 1;
 			tag = (group << 5) + bit;
-			cmd = &port->commands[tag];
-
-			/* If the active bit is set re-issue the command */
-			if (atomic_read(&cmd->active) == 0)
-				continue;
+			cmd = mtip_cmd_from_tag(dd, tag);
 
 			fis = (struct host_to_dev_fis *)cmd->command;
 
@@ -970,11 +740,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
 					tag,
 					fail_reason != NULL ?
 						fail_reason : "unknown");
-				atomic_set(&cmd->active, 0);
 				if (cmd->comp_func) {
 					cmd->comp_func(port, tag,
-						cmd->comp_data,
-						-ENODATA);
+						cmd, -ENODATA);
 				}
 				continue;
 			}
@@ -997,14 +765,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
 			/* Retire a command that will not be reissued */
 			dev_warn(&port->dd->pdev->dev,
 				"retiring tag %d\n", tag);
-			atomic_set(&cmd->active, 0);
 
 			if (cmd->comp_func)
-				cmd->comp_func(
-					port,
-					tag,
-					cmd->comp_data,
-					PORT_IRQ_TF_ERR);
+				cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
 			else
 				dev_warn(&port->dd->pdev->dev,
 					"Bad completion for tag %d\n",
@@ -1017,9 +780,6 @@ handle_tfe_exit:
 	/* clear eh_active */
 	clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
 	wake_up_interruptible(&port->svc_wait);
-
-	mod_timer(&port->cmd_timer,
-		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
 }
 
 /*
@@ -1048,15 +808,10 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
 		if (unlikely(tag == MTIP_TAG_INTERNAL))
 			continue;
 
-		command = &port->commands[tag];
-		/* make internal callback */
-		if (likely(command->comp_func)) {
-			command->comp_func(
-				port,
-				tag,
-				command->comp_data,
-				0);
-		} else {
+		command = mtip_cmd_from_tag(dd, tag);
+		if (likely(command->comp_func))
+			command->comp_func(port, tag, command, 0);
+		else {
 			dev_dbg(&dd->pdev->dev,
 				"Null completion for tag %d",
 				tag);
@@ -1081,16 +836,13 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
 static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
 {
 	struct mtip_port *port = dd->port;
-	struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
+	struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
 
 	if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
 	    (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
 		& (1 << MTIP_TAG_INTERNAL))) {
 		if (cmd->comp_func) {
-			cmd->comp_func(port,
-					MTIP_TAG_INTERNAL,
-					cmd->comp_data,
-					0);
+			cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
 			return;
 		}
 	}
@@ -1222,7 +974,6 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance)
 
 static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
 {
-	atomic_set(&port->commands[tag].active, 1);
 	writel(1 << MTIP_TAG_BIT(tag),
 		port->cmd_issue[MTIP_TAG_INDEX(tag)]);
 }
@@ -1335,10 +1086,9 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 {
 	struct mtip_cmd_sg *command_sg;
 	DECLARE_COMPLETION_ONSTACK(wait);
-	int rv = 0, ready2go = 1;
-	struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
-	unsigned long to;
+	struct mtip_cmd *int_cmd;
 	struct driver_data *dd = port->dd;
+	int rv = 0;
 
 	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
 	if (buffer & 0x00000007) {
@@ -1346,19 +1096,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 		return -EFAULT;
 	}
 
-	to = jiffies + msecs_to_jiffies(timeout);
-	do {
-		ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
-						port->allocated);
-		if (ready2go)
-			break;
-		mdelay(100);
-	} while (time_before(jiffies, to));
-	if (!ready2go) {
-		dev_warn(&dd->pdev->dev,
-			"Internal cmd active. new cmd [%02X]\n", fis->command);
-		return -EBUSY;
-	}
+	int_cmd = mtip_get_int_command(dd);
+
 	set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
 	port->ic_pause_timer = 0;
 
@@ -1371,7 +1110,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 	if (mtip_quiesce_io(port, 5000) < 0) {
 		dev_warn(&dd->pdev->dev,
 			"Failed to quiesce IO\n");
-		release_slot(port, MTIP_TAG_INTERNAL);
+		mtip_put_int_command(dd, int_cmd);
 		clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
 		wake_up_interruptible(&port->svc_wait);
 		return -EBUSY;
@@ -1497,8 +1236,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
 	}
 exec_ic_exit:
 	/* Clear the allocated and active bits for the internal command. */
-	atomic_set(&int_cmd->active, 0);
-	release_slot(port, MTIP_TAG_INTERNAL);
+	mtip_put_int_command(dd, int_cmd);
 	if (rv >= 0 && mtip_pause_ncq(port, fis)) {
 		/* NCQ paused */
 		return rv;
@@ -2610,22 +2348,21 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  * return value
  *	None
  */
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
-			      int nsect, int nents, int tag, void *callback,
-			      void *data, int dir, int unaligned)
+static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
+			      struct mtip_cmd *command, int nents,
+			      struct blk_mq_hw_ctx *hctx)
 {
 	struct host_to_dev_fis *fis;
 	struct mtip_port *port = dd->port;
-	struct mtip_cmd *command = &port->commands[tag];
-	int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	u64 start = sector;
+	int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	u64 start = blk_rq_pos(rq);
+	unsigned int nsect = blk_rq_sectors(rq);
 
 	/* Map the scatter list for DMA access */
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
 
 	command->scatter_ents = nents;
 
-	command->unaligned = unaligned;
 	/*
 	 * The number of retries for this command before it is
 	 * reported as a failure to the upper layers.
@@ -2636,8 +2373,10 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
 	fis = command->command;
 	fis->type = 0x27;
 	fis->opts = 1 << 7;
-	fis->command =
-		(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
+	if (rq_data_dir(rq) == READ)
+		fis->command = ATA_CMD_FPDMA_READ;
+	else
+		fis->command = ATA_CMD_FPDMA_WRITE;
 	fis->lba_low = start & 0xFF;
 	fis->lba_mid = (start >> 8) & 0xFF;
 	fis->lba_hi = (start >> 16) & 0xFF;
@@ -2647,14 +2386,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
 	fis->device = 1 << 6;
 	fis->features = nsect & 0xFF;
 	fis->features_ex = (nsect >> 8) & 0xFF;
-	fis->sect_count = ((tag << 3) | (tag >> 5));
+	fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5));
 	fis->sect_cnt_ex = 0;
 	fis->control = 0;
 	fis->res2 = 0;
 	fis->res3 = 0;
 	fill_command_sg(dd, command, nents);
 
-	if (unaligned)
+	if (command->unaligned)
 		fis->device |= 1 << 7;
 
 	/* Populate the command header */
@@ -2672,81 +2411,17 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
 	command->direction = dma_dir;
 
 	/*
-	 * Set the completion function and data for the command passed
-	 * from the upper layer.
-	 */
-	command->async_data = data;
-	command->async_callback = callback;
-
-	/*
 	 * To prevent this command from being issued
 	 * if an internal command is in progress or error handling is active.
 	 */
 	if (port->flags & MTIP_PF_PAUSE_IO) {
-		set_bit(tag, port->cmds_to_issue);
+		set_bit(rq->tag, port->cmds_to_issue);
 		set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
 		return;
 	}
 
 	/* Issue the command to the hardware */
-	mtip_issue_ncq_command(port, tag);
-
-	return;
-}
-
-/*
- * Release a command slot.
- *
- * @dd  Pointer to the driver data structure.
- * @tag Slot tag
- *
- * return value
- *	None
- */
-static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag,
-							int unaligned)
-{
-	struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
-							&dd->port->cmd_slot;
-	release_slot(dd->port, tag);
-	up(sem);
-}
-
-/*
- * Obtain a command slot and return its associated scatter list.
- *
- * @dd  Pointer to the driver data structure.
- * @tag Pointer to an int that will receive the allocated command
- *		slot tag.
- *
- * return value
- *	Pointer to the scatter list for the allocated command slot
- *	or NULL if no command slots are available.
- */
-static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
-						int *tag, int unaligned)
-{
-	struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal :
-							&dd->port->cmd_slot;
-
-	/*
-	 * It is possible that, even with this semaphore, a thread
-	 * may think that no command slots are available. Therefore, we
-	 * need to make an attempt to get_slot().
-	 */
-	down(sem);
-	*tag = get_slot(dd->port);
-
-	if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
-		up(sem);
-		return NULL;
-	}
-	if (unlikely(*tag < 0)) {
-		up(sem);
-		return NULL;
-	}
-
-	return dd->port->commands[*tag].sg;
+	mtip_issue_ncq_command(port, rq->tag);
 }
 
 /*
@@ -3117,6 +2792,7 @@ static int mtip_free_orphan(struct driver_data *dd)
 		if (dd->queue) {
 			dd->queue->queuedata = NULL;
 			blk_cleanup_queue(dd->queue);
+			blk_mq_free_tag_set(&dd->tags);
 			dd->queue = NULL;
 		}
 	}
@@ -3369,7 +3045,6 @@ st_out:
  */
 static void mtip_dma_free(struct driver_data *dd)
 {
-	int i;
 	struct mtip_port *port = dd->port;
 
 	if (port->block1)
@@ -3380,13 +3055,6 @@ static void mtip_dma_free(struct driver_data *dd)
 		dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ,
 				port->command_list, port->command_list_dma);
 	}
-
-	for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
-		if (port->commands[i].command)
-			dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
-				port->commands[i].command,
-				port->commands[i].command_dma);
-	}
 }
 
 /*
@@ -3400,8 +3068,6 @@ static void mtip_dma_free(struct driver_data *dd)
 static int mtip_dma_alloc(struct driver_data *dd)
 {
 	struct mtip_port *port = dd->port;
-	int i, rv = 0;
-	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
 
 	/* Allocate dma memory for RX Fis, Identify, and Sector Bufffer */
 	port->block1 =
@@ -3434,41 +3100,63 @@ static int mtip_dma_alloc(struct driver_data *dd)
 	port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET;
 	port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET;
 
-	/* Setup per command SGL DMA region */
-
-	/* Point the command headers at the command tables */
-	for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) {
-		port->commands[i].command =
-			dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
-				&port->commands[i].command_dma, GFP_KERNEL);
-		if (!port->commands[i].command) {
-			rv = -ENOMEM;
-			mtip_dma_free(dd);
-			return rv;
-		}
-		memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ);
-
-		port->commands[i].command_header = port->command_list +
-					(sizeof(struct mtip_cmd_hdr) * i);
-		port->commands[i].command_header_dma =
-					dd->port->command_list_dma +
-					(sizeof(struct mtip_cmd_hdr) * i);
+	return 0;
+}
 
-		if (host_cap_64)
-			port->commands[i].command_header->ctbau =
-				__force_bit2int cpu_to_le32(
-				(port->commands[i].command_dma >> 16) >> 16);
+static int mtip_hw_get_identify(struct driver_data *dd)
+{
+	struct smart_attr attr242;
+	unsigned char *buf;
+	int rv;
 
-		port->commands[i].command_header->ctba =
-				__force_bit2int cpu_to_le32(
-				port->commands[i].command_dma & 0xFFFFFFFF);
+	if (mtip_get_identify(dd->port, NULL) < 0)
+		return -EFAULT;
 
-		sg_init_table(port->commands[i].sg, MTIP_MAX_SG);
+	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+		MTIP_FTL_REBUILD_MAGIC) {
+		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
+		return MTIP_FTL_REBUILD_MAGIC;
+	}
+	mtip_dump_identify(dd->port);
 
-		/* Mark command as currently inactive */
-		atomic_set(&dd->port->commands[i].active, 0);
+	/* check write protect, over temp and rebuild statuses */
+	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+				dd->port->log_buf,
+				dd->port->log_buf_dma, 1);
+	if (rv) {
+		dev_warn(&dd->pdev->dev,
+			"Error in READ LOG EXT (10h) command\n");
+		/* non-critical error, don't fail the load */
+	} else {
+		buf = (unsigned char *)dd->port->log_buf;
+		if (buf[259] & 0x1) {
+			dev_info(&dd->pdev->dev,
+				"Write protect bit is set.\n");
+			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+		}
+		if (buf[288] == 0xF7) {
+			dev_info(&dd->pdev->dev,
+				"Exceeded Tmax, drive in thermal shutdown.\n");
+			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+		}
+		if (buf[288] == 0xBF) {
+			dev_info(&dd->pdev->dev,
+				"Drive indicates rebuild has failed.\n");
+			/* TODO */
+		}
 	}
-	return 0;
+
+	/* get write protect progess */
+	memset(&attr242, 0, sizeof(struct smart_attr));
+	if (mtip_get_smart_attr(dd->port, 242, &attr242))
+		dev_warn(&dd->pdev->dev,
+				"Unable to check write protect progress\n");
+	else
+		dev_info(&dd->pdev->dev,
+				"Write protect progress: %u%% (%u blocks)\n",
+				attr242.cur, le32_to_cpu(attr242.data));
+
+	return rv;
 }
 
 /*
@@ -3485,8 +3173,6 @@ static int mtip_hw_init(struct driver_data *dd)
 	int rv;
 	unsigned int num_command_slots;
 	unsigned long timeout, timetaken;
-	unsigned char *buf;
-	struct smart_attr attr242;
 
 	dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
 
@@ -3517,8 +3203,6 @@ static int mtip_hw_init(struct driver_data *dd)
 	else
 		dd->unal_qdepth = 0;
 
-	/* Counting semaphore to track command slot usage */
-	sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth);
 	sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
 
 	/* Spinlock to prevent concurrent issue */
@@ -3603,73 +3287,16 @@ static int mtip_hw_init(struct driver_data *dd)
 	writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
 					dd->mmio + HOST_CTL);
 
-	init_timer(&dd->port->cmd_timer);
 	init_waitqueue_head(&dd->port->svc_wait);
 
-	dd->port->cmd_timer.data = (unsigned long int) dd->port;
-	dd->port->cmd_timer.function = mtip_timeout_function;
-	mod_timer(&dd->port->cmd_timer,
-		jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
-
-
 	if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
 		rv = -EFAULT;
 		goto out3;
 	}
 
-	if (mtip_get_identify(dd->port, NULL) < 0) {
-		rv = -EFAULT;
-		goto out3;
-	}
-	mtip_dump_identify(dd->port);
-
-	if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
-		MTIP_FTL_REBUILD_MAGIC) {
-		set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
-		return MTIP_FTL_REBUILD_MAGIC;
-	}
-
-	/* check write protect, over temp and rebuild statuses */
-	rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
-				dd->port->log_buf,
-				dd->port->log_buf_dma, 1);
-	if (rv) {
-		dev_warn(&dd->pdev->dev,
-			"Error in READ LOG EXT (10h) command\n");
-		/* non-critical error, don't fail the load */
-	} else {
-		buf = (unsigned char *)dd->port->log_buf;
-		if (buf[259] & 0x1) {
-			dev_info(&dd->pdev->dev,
-				"Write protect bit is set.\n");
-			set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
-		}
-		if (buf[288] == 0xF7) {
-			dev_info(&dd->pdev->dev,
-				"Exceeded Tmax, drive in thermal shutdown.\n");
-			set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
-		}
-		if (buf[288] == 0xBF) {
-			dev_info(&dd->pdev->dev,
-				"Drive is in security locked state.\n");
-			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
-		}
-	}
-
-	/* get write protect progess */
-	memset(&attr242, 0, sizeof(struct smart_attr));
-	if (mtip_get_smart_attr(dd->port, 242, &attr242))
-		dev_warn(&dd->pdev->dev,
-				"Unable to check write protect progress\n");
-	else
-		dev_info(&dd->pdev->dev,
-				"Write protect progress: %u%% (%u blocks)\n",
-				attr242.cur, le32_to_cpu(attr242.data));
 	return rv;
 
 out3:
-	del_timer_sync(&dd->port->cmd_timer);
-
 	/* Disable interrupts on the HBA. */
 	writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
 			dd->mmio + HOST_CTL);
@@ -3689,6 +3316,22 @@ out1:
 	return rv;
 }
 
+static void mtip_standby_drive(struct driver_data *dd)
+{
+	if (dd->sr)
+		return;
+
+	/*
+	 * Send standby immediate (E0h) to the drive so that it
+	 * saves its state.
+	 */
+	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+		if (mtip_standby_immediate(dd->port))
+			dev_warn(&dd->pdev->dev,
+				"STANDBY IMMEDIATE failed\n");
+}
+
 /*
  * Called to deinitialize an interface.
  *
@@ -3704,12 +3347,6 @@ static int mtip_hw_exit(struct driver_data *dd)
 	 * saves its state.
 	 */
 	if (!dd->sr) {
-		if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
-		    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
-			if (mtip_standby_immediate(dd->port))
-				dev_warn(&dd->pdev->dev,
-					"STANDBY IMMEDIATE failed\n");
-
 		/* de-initialize the port. */
 		mtip_deinit_port(dd->port);
 
@@ -3718,8 +3355,6 @@ static int mtip_hw_exit(struct driver_data *dd)
 				dd->mmio + HOST_CTL);
 	}
 
-	del_timer_sync(&dd->port->cmd_timer);
-
 	/* Release the IRQ. */
 	irq_set_affinity_hint(dd->pdev->irq, NULL);
 	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
@@ -4036,100 +3671,140 @@ static const struct block_device_operations mtip_block_ops = {
  *
  * @queue	Pointer to the request queue. Unused other than to obtain
  *			the driver data structure.
- * @bio	Pointer to the BIO.
+ * @rq	Pointer to the request.
  *
  */
-static void mtip_make_request(struct request_queue *queue, struct bio *bio)
+static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
 {
-	struct driver_data *dd = queue->queuedata;
-	struct scatterlist *sg;
-	struct bio_vec bvec;
-	struct bvec_iter iter;
-	int nents = 0;
-	int tag = 0, unaligned = 0;
+	struct driver_data *dd = hctx->queue->queuedata;
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	unsigned int nents;
 
 	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
 		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
 							&dd->dd_flag))) {
-			bio_endio(bio, -ENXIO);
-			return;
+			return -ENXIO;
 		}
 		if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
-			bio_endio(bio, -ENODATA);
-			return;
+			return -ENODATA;
 		}
 		if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
 							&dd->dd_flag) &&
-				bio_data_dir(bio))) {
-			bio_endio(bio, -ENODATA);
-			return;
-		}
-		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) {
-			bio_endio(bio, -ENODATA);
-			return;
-		}
-		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
-			bio_endio(bio, -ENXIO);
-			return;
+				rq_data_dir(rq))) {
+			return -ENODATA;
 		}
+		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
+			return -ENODATA;
+		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+			return -ENXIO;
 	}
 
-	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-		bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
-						bio_sectors(bio)));
-		return;
-	}
+	if (rq->cmd_flags & REQ_DISCARD) {
+		int err;
 
-	if (unlikely(!bio_has_data(bio))) {
-		blk_queue_flush(queue, 0);
-		bio_endio(bio, 0);
-		return;
+		err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
+		blk_mq_end_io(rq, err);
+		return 0;
 	}
 
-	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
-							dd->unal_qdepth) {
-		if (bio->bi_iter.bi_sector % 8 != 0)
-			/* Unaligned on 4k boundaries */
-			unaligned = 1;
-		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
-			unaligned = 1;
+	/* Create the scatter list for this request. */
+	nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
+
+	/* Issue the read/write. */
+	mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
+	return 0;
+}
+
+static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
+				struct request *rq)
+{
+	struct driver_data *dd = hctx->queue->queuedata;
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+	if (!dd->unal_qdepth || rq_data_dir(rq) == READ)
+		return false;
+
+	/*
+	 * If unaligned depth must be limited on this controller, mark it
+	 * as unaligned if the IO isn't on a 4k boundary (start of length).
+	 */
+	if (blk_rq_sectors(rq) <= 64) {
+		if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7))
+			cmd->unaligned = 1;
 	}
 
-	sg = mtip_hw_get_scatterlist(dd, &tag, unaligned);
-	if (likely(sg != NULL)) {
-		blk_queue_bounce(queue, &bio);
+	if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
+		return true;
 
-		if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
-			dev_warn(&dd->pdev->dev,
-				"Maximum number of SGL entries exceeded\n");
-			bio_io_error(bio);
-			mtip_hw_release_scatterlist(dd, tag, unaligned);
-			return;
-		}
+	return false;
+}
 
-		/* Create the scatter list for this bio. */
-		bio_for_each_segment(bvec, bio, iter) {
-			sg_set_page(&sg[nents],
-					bvec.bv_page,
-					bvec.bv_len,
-					bvec.bv_offset);
-			nents++;
-		}
+static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+{
+	int ret;
 
-		/* Issue the read/write. */
-		mtip_hw_submit_io(dd,
-				bio->bi_iter.bi_sector,
-				bio_sectors(bio),
-				nents,
-				tag,
-				bio_endio,
-				bio,
-				bio_data_dir(bio),
-				unaligned);
-	} else
-		bio_io_error(bio);
+	if (mtip_check_unal_depth(hctx, rq))
+		return BLK_MQ_RQ_QUEUE_BUSY;
+
+	ret = mtip_submit_request(hctx, rq);
+	if (!ret)
+		return BLK_MQ_RQ_QUEUE_OK;
+
+	rq->errors = ret;
+	return BLK_MQ_RQ_QUEUE_ERROR;
 }
 
+static void mtip_free_cmd(void *data, struct request *rq,
+			unsigned int hctx_idx, unsigned int request_idx)
+{
+	struct driver_data *dd = data;
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
+	if (!cmd->command)
+		return;
+
+	dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+				cmd->command, cmd->command_dma);
+}
+
+static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
+			unsigned int request_idx, unsigned int numa_node)
+{
+	struct driver_data *dd = data;
+	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+	u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64;
+
+	cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
+			&cmd->command_dma, GFP_KERNEL);
+	if (!cmd->command)
+		return -ENOMEM;
+
+	memset(cmd->command, 0, CMD_DMA_ALLOC_SZ);
+
+	/* Point the command headers at the command tables. */
+	cmd->command_header = dd->port->command_list +
+				(sizeof(struct mtip_cmd_hdr) * request_idx);
+	cmd->command_header_dma = dd->port->command_list_dma +
+				(sizeof(struct mtip_cmd_hdr) * request_idx);
+
+	if (host_cap_64)
+		cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
+
+	cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
+
+	sg_init_table(cmd->sg, MTIP_MAX_SG);
+	return 0;
+}
+
+static struct blk_mq_ops mtip_mq_ops = {
+	.queue_rq	= mtip_queue_rq,
+	.map_queue	= blk_mq_map_queue,
+	.alloc_hctx	= blk_mq_alloc_single_hw_queue,
+	.free_hctx	= blk_mq_free_single_hw_queue,
+	.init_request	= mtip_init_cmd,
+	.exit_request	= mtip_free_cmd,
+};
+
 /*
  * Block layer initialization function.
  *
@@ -4152,11 +3827,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 	if (dd->disk)
 		goto skip_create_disk; /* hw init done, before rebuild */
 
-	/* Initialize the protocol layer. */
-	wait_for_rebuild = mtip_hw_init(dd);
-	if (wait_for_rebuild < 0) {
-		dev_err(&dd->pdev->dev,
-			"Protocol layer initialization failed\n");
+	if (mtip_hw_init(dd)) {
 		rv = -EINVAL;
 		goto protocol_init_error;
 	}
@@ -4198,16 +3869,27 @@ static int mtip_block_initialize(struct driver_data *dd)
 
 	mtip_hw_debugfs_init(dd);
 
-	/*
-	 * if rebuild pending, start the service thread, and delay the block
-	 * queue creation and add_disk()
-	 */
-	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
-		goto start_service_thread;
-
 skip_create_disk:
+	memset(&dd->tags, 0, sizeof(dd->tags));
+	dd->tags.ops = &mtip_mq_ops;
+	dd->tags.nr_hw_queues = 1;
+	dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS;
+	dd->tags.reserved_tags = 1;
+	dd->tags.cmd_size = sizeof(struct mtip_cmd);
+	dd->tags.numa_node = dd->numa_node;
+	dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
+	dd->tags.driver_data = dd;
+
+	rv = blk_mq_alloc_tag_set(&dd->tags);
+	if (rv) {
+		dev_err(&dd->pdev->dev,
+			"Unable to allocate request queue\n");
+		rv = -ENOMEM;
+		goto block_queue_alloc_init_error;
+	}
+
 	/* Allocate the request queue. */
-	dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node);
+	dd->queue = blk_mq_init_queue(&dd->tags);
 	if (dd->queue == NULL) {
 		dev_err(&dd->pdev->dev,
 			"Unable to allocate request queue\n");
@@ -4215,12 +3897,25 @@ skip_create_disk:
 		goto block_queue_alloc_init_error;
 	}
 
-	/* Attach our request function to the request queue. */
-	blk_queue_make_request(dd->queue, mtip_make_request);
-
 	dd->disk->queue = dd->queue;
 	dd->queue->queuedata = dd;
 
+	/* Initialize the protocol layer. */
+	wait_for_rebuild = mtip_hw_get_identify(dd);
+	if (wait_for_rebuild < 0) {
+		dev_err(&dd->pdev->dev,
+			"Protocol layer initialization failed\n");
+		rv = -EINVAL;
+		goto init_hw_cmds_error;
+	}
+
+	/*
+	 * if rebuild pending, start the service thread, and delay the block
+	 * queue creation and add_disk()
+	 */
+	if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+		goto start_service_thread;
+
 	/* Set device limits. */
 	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
@@ -4299,8 +3994,9 @@ kthread_run_error:
 	del_gendisk(dd->disk);
 
 read_capacity_error:
+init_hw_cmds_error:
 	blk_cleanup_queue(dd->queue);
-
+	blk_mq_free_tag_set(&dd->tags);
 block_queue_alloc_init_error:
 	mtip_hw_debugfs_exit(dd);
 disk_index_error:
@@ -4349,6 +4045,9 @@ static int mtip_block_remove(struct driver_data *dd)
 			kobject_put(kobj);
 		}
 	}
+
+	mtip_standby_drive(dd);
+
 	/*
 	 * Delete our gendisk structure. This also removes the device
 	 * from /dev
@@ -4361,6 +4060,7 @@ static int mtip_block_remove(struct driver_data *dd)
 	if (dd->disk->queue) {
 		del_gendisk(dd->disk);
 		blk_cleanup_queue(dd->queue);
+		blk_mq_free_tag_set(&dd->tags);
 		dd->queue = NULL;
 	} else
 		put_disk(dd->disk);
@@ -4395,6 +4095,8 @@ static int mtip_block_remove(struct driver_data *dd)
  */
 static int mtip_block_shutdown(struct driver_data *dd)
 {
+	mtip_hw_shutdown(dd);
+
 	/* Delete our gendisk structure, and cleanup the blk queue. */
 	if (dd->disk) {
 		dev_info(&dd->pdev->dev,
@@ -4403,6 +4105,7 @@ static int mtip_block_shutdown(struct driver_data *dd)
 		if (dd->disk->queue) {
 			del_gendisk(dd->disk);
 			blk_cleanup_queue(dd->queue);
+			blk_mq_free_tag_set(&dd->tags);
 		} else
 			put_disk(dd->disk);
 		dd->disk = NULL;
@@ -4412,8 +4115,6 @@ static int mtip_block_shutdown(struct driver_data *dd)
 	spin_lock(&rssd_index_lock);
 	ida_remove(&rssd_index_ida, dd->index);
 	spin_unlock(&rssd_index_lock);
-
-	mtip_hw_shutdown(dd);
 	return 0;
 }
 
@@ -4767,8 +4468,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
 		dev_warn(&dd->pdev->dev,
 			"Completion workers still active!\n");
 	}
-	/* Cleanup the outstanding commands */
-	mtip_command_cleanup(dd);
 
 	/* Clean up the block layer. */
 	mtip_block_remove(dd);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index ffb955e7ccb9..982a88fe1ab2 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -331,12 +331,8 @@ struct mtip_cmd {
 	 */
 	void (*comp_func)(struct mtip_port *port,
 				int tag,
-				void *data,
+				struct mtip_cmd *cmd,
 				int status);
-	/* Additional callback function that may be called by comp_func() */
-	void (*async_callback)(void *data, int status);
-
-	void *async_data; /* Addl. data passed to async_callback() */
 
 	int scatter_ents; /* Number of scatter list entries used */
 
@@ -347,10 +343,6 @@ struct mtip_cmd {
 	int retries; /* The number of retries left for this command. */
 
 	int direction; /* Data transfer direction */
-
-	unsigned long comp_time; /* command completion time, in jiffies */
-
-	atomic_t active; /* declares if this command sent to the drive. */
 };
 
 /* Structure used to describe a port. */
@@ -436,12 +428,6 @@ struct mtip_port {
 	 * or error handling is active
 	 */
 	unsigned long cmds_to_issue[SLOTBITS_IN_LONGS];
-	/*
-	 * Array of command slots. Structure includes pointers to the
-	 * command header and command table, and completion function and data
-	 * pointers.
-	 */
-	struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS];
 	/* Used by mtip_service_thread to wait for an event */
 	wait_queue_head_t svc_wait;
 	/*
@@ -452,13 +438,7 @@ struct mtip_port {
 	/*
 	 * Timer used to complete commands that have been active for too long.
 	 */
-	struct timer_list cmd_timer;
 	unsigned long ic_pause_timer;
-	/*
-	 * Semaphore used to block threads if there are no
-	 * command slots available.
-	 */
-	struct semaphore cmd_slot;
 
 	/* Semaphore to control queue depth of unaligned IOs */
 	struct semaphore cmd_slot_unal;
@@ -485,6 +465,8 @@ struct driver_data {
 
 	struct request_queue *queue; /* Our request queue. */
 
+	struct blk_mq_tag_set tags; /* blk_mq tags */
+
 	struct mtip_port *port; /* Pointer to the port data structure. */
 
 	unsigned product_type; /* magic value declaring the product type */
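
[Editor's note: a closing remark on the cmd_size field added above. Because the tag set declares cmd_size = sizeof(struct mtip_cmd), blk-mq co-allocates one mtip_cmd behind every request, which is what lets the patch delete the commands[] array from struct mtip_port. A minimal sketch of the two conversions the driver now leans on throughout the diff:

	/* Sketch only: the per-request PDU sits directly behind the request. */
	struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* request -> driver command */
	...
	blk_mq_end_io(blk_mq_rq_from_pdu(cmd), status);	/* driver command -> request */
]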