aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/mvsas/mv_sas.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2014-11-05 04:36:28 -0500
committerChristoph Hellwig <hch@lst.de>2014-11-27 10:40:24 -0500
commit79855d178557cc3e3ffd179fd26a64cef48dfb30 (patch)
tree316621212e058975d86cef255e9ec86f70d0deb0 /drivers/scsi/mvsas/mv_sas.c
parent309e7cc433e79ba0124e7e359503e66c41b46509 (diff)
libsas: remove task_collector mode
The task_collector mode (or "latency_injector", (C) Dan Williams) is an optional I/O path in libsas that queues up scsi commands instead of directly sending them to the hardware. It generally increases latencies to, in the optimal case, slightly reduce mmio traffic to the hardware. Only the obsolete aic94xx driver and the mvsas driver allowed it to be used without recompiling the kernel, and most drivers didn't support it at all. Remove the giant blob of code to allow better optimizations for scsi-mq in the future. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Hannes Reinecke <hare@suse.de> Acked-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/scsi/mvsas/mv_sas.c')
-rw-r--r--drivers/scsi/mvsas/mv_sas.c109
1 file changed, 4 insertions, 105 deletions
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index ac52f7c99513..85d86a5cdb60 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -852,43 +852,7 @@ prep_out:
852 return rc; 852 return rc;
853} 853}
854 854
855static struct mvs_task_list *mvs_task_alloc_list(int *num, gfp_t gfp_flags) 855static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
856{
857 struct mvs_task_list *first = NULL;
858
859 for (; *num > 0; --*num) {
860 struct mvs_task_list *mvs_list = kmem_cache_zalloc(mvs_task_list_cache, gfp_flags);
861
862 if (!mvs_list)
863 break;
864
865 INIT_LIST_HEAD(&mvs_list->list);
866 if (!first)
867 first = mvs_list;
868 else
869 list_add_tail(&mvs_list->list, &first->list);
870
871 }
872
873 return first;
874}
875
876static inline void mvs_task_free_list(struct mvs_task_list *mvs_list)
877{
878 LIST_HEAD(list);
879 struct list_head *pos, *a;
880 struct mvs_task_list *mlist = NULL;
881
882 __list_add(&list, mvs_list->list.prev, &mvs_list->list);
883
884 list_for_each_safe(pos, a, &list) {
885 list_del_init(pos);
886 mlist = list_entry(pos, struct mvs_task_list, list);
887 kmem_cache_free(mvs_task_list_cache, mlist);
888 }
889}
890
891static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
892 struct completion *completion, int is_tmf, 856 struct completion *completion, int is_tmf,
893 struct mvs_tmf_task *tmf) 857 struct mvs_tmf_task *tmf)
894{ 858{
@@ -912,74 +876,9 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
912 return rc; 876 return rc;
913} 877}
914 878
915static int mvs_collector_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, 879int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
916 struct completion *completion, int is_tmf,
917 struct mvs_tmf_task *tmf)
918{ 880{
919 struct domain_device *dev = task->dev; 881 return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
920 struct mvs_prv_info *mpi = dev->port->ha->lldd_ha;
921 struct mvs_info *mvi = NULL;
922 struct sas_task *t = task;
923 struct mvs_task_list *mvs_list = NULL, *a;
924 LIST_HEAD(q);
925 int pass[2] = {0};
926 u32 rc = 0;
927 u32 n = num;
928 unsigned long flags = 0;
929
930 mvs_list = mvs_task_alloc_list(&n, gfp_flags);
931 if (n) {
932 printk(KERN_ERR "%s: mvs alloc list failed.\n", __func__);
933 rc = -ENOMEM;
934 goto free_list;
935 }
936
937 __list_add(&q, mvs_list->list.prev, &mvs_list->list);
938
939 list_for_each_entry(a, &q, list) {
940 a->task = t;
941 t = list_entry(t->list.next, struct sas_task, list);
942 }
943
944 list_for_each_entry(a, &q , list) {
945
946 t = a->task;
947 mvi = ((struct mvs_device *)t->dev->lldd_dev)->mvi_info;
948
949 spin_lock_irqsave(&mvi->lock, flags);
950 rc = mvs_task_prep(t, mvi, is_tmf, tmf, &pass[mvi->id]);
951 if (rc)
952 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
953 spin_unlock_irqrestore(&mvi->lock, flags);
954 }
955
956 if (likely(pass[0]))
957 MVS_CHIP_DISP->start_delivery(mpi->mvi[0],
958 (mpi->mvi[0]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
959
960 if (likely(pass[1]))
961 MVS_CHIP_DISP->start_delivery(mpi->mvi[1],
962 (mpi->mvi[1]->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
963
964 list_del_init(&q);
965
966free_list:
967 if (mvs_list)
968 mvs_task_free_list(mvs_list);
969
970 return rc;
971}
972
973int mvs_queue_command(struct sas_task *task, const int num,
974 gfp_t gfp_flags)
975{
976 struct mvs_device *mvi_dev = task->dev->lldd_dev;
977 struct sas_ha_struct *sas = mvi_dev->mvi_info->sas;
978
979 if (sas->lldd_max_execute_num < 2)
980 return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
981 else
982 return mvs_collector_task_exec(task, num, gfp_flags, NULL, 0, NULL);
983} 882}
984 883
985static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) 884static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
@@ -1411,7 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1411 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; 1310 task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1412 add_timer(&task->slow_task->timer); 1311 add_timer(&task->slow_task->timer);
1413 1312
1414 res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); 1313 res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
1415 1314
1416 if (res) { 1315 if (res) {
1417 del_timer(&task->slow_task->timer); 1316 del_timer(&task->slow_task->timer);