-rw-r--r--	block/blk-iopoll.c	3
-rw-r--r--	drivers/scsi/be2iscsi/be_main.c	206
-rw-r--r--	drivers/scsi/ipr.c	15
-rw-r--r--	include/linux/blk-iopoll.h	2
-rw-r--r--	kernel/sysctl.c	12
5 files changed, 68 insertions, 170 deletions
diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
index 1855bf51edb0..c11d24e379e2 100644
--- a/block/blk-iopoll.c
+++ b/block/blk-iopoll.c
@@ -14,9 +14,6 @@
 
 #include "blk.h"
 
-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
 static unsigned int blk_iopoll_budget __read_mostly = 256;
 
 static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
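
The knob removed above, blk_iopoll_enabled (default 1), was the only runtime switch for blk-iopoll; with it gone, a driver that registers an iopoll instance always polls. For orientation, here is a minimal sketch of the driver contract. The my_dev, my_poll, and my_process_completions names are hypothetical; the blk_iopoll_* calls are the ones declared in include/linux/blk-iopoll.h later in this diff.

#include <linux/blk-iopoll.h>
#include <linux/interrupt.h>

struct my_dev {
	struct blk_iopoll iopoll;
};

/* Hypothetical helper: reap up to @budget completions, return the count. */
static int my_process_completions(struct my_dev *dev, int budget);

/* Poll callback, invoked from the BLOCK_IOPOLL_SOFTIRQ softirq. */
static int my_poll(struct blk_iopoll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
	int done = my_process_completions(dev, budget);

	if (done < budget)
		blk_iopoll_complete(iop);	/* drained: stop polling, re-arm the IRQ */
	return done;
}

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_dev *dev = data;

	/* sched_prep returns zero only when the instance is enabled and
	 * not already scheduled; this double call is the same idiom the
	 * be2iscsi and ipr hunks below use. */
	if (!blk_iopoll_sched_prep(&dev->iopoll))
		blk_iopoll_sched(&dev->iopoll);
	return IRQ_HANDLED;
}

/* Setup/teardown, e.g. at probe/remove:
 *	blk_iopoll_init(&dev->iopoll, 64, my_poll);
 *	blk_iopoll_enable(&dev->iopoll);
 *	...
 *	blk_iopoll_disable(&dev->iopoll);
 */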
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f375051483a..a929c3c9aedc 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -873,7 +873,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 	struct be_queue_info *cq;
 	unsigned int num_eq_processed;
 	struct be_eq_obj *pbe_eq;
-	unsigned long flags;
 
 	pbe_eq = dev_id;
 	eq = &pbe_eq->q;
@@ -882,31 +881,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 
 	phba = pbe_eq->phba;
 	num_eq_processed = 0;
-	if (blk_iopoll_enabled) {
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
-				blk_iopoll_sched(&pbe_eq->iopoll);
-
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_eq_processed++;
-		}
-	} else {
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			spin_lock_irqsave(&phba->isr_lock, flags);
-			pbe_eq->todo_cq = true;
-			spin_unlock_irqrestore(&phba->isr_lock, flags);
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_eq_processed++;
-		}
+	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+				& EQE_VALID_MASK) {
+		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+			blk_iopoll_sched(&pbe_eq->iopoll);
 
-		if (pbe_eq->todo_cq)
-			queue_work(phba->wq, &pbe_eq->work_cqs);
+		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+		queue_tail_inc(eq);
+		eqe = queue_tail_node(eq);
+		num_eq_processed++;
 	}
 
 	if (num_eq_processed)
@@ -927,7 +910,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 	struct hwi_context_memory *phwi_context;
 	struct be_eq_entry *eqe = NULL;
 	struct be_queue_info *eq;
-	struct be_queue_info *cq;
 	struct be_queue_info *mcc;
 	unsigned long flags, index;
 	unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -953,72 +935,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 
 	num_ioeq_processed = 0;
 	num_mcceq_processed = 0;
-	if (blk_iopoll_enabled) {
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			if (((eqe->dw[offsetof(struct amap_eq_entry,
-			      resource_id) / 32] &
-			      EQE_RESID_MASK) >> 16) == mcc->id) {
-				spin_lock_irqsave(&phba->isr_lock, flags);
-				pbe_eq->todo_mcc_cq = true;
-				spin_unlock_irqrestore(&phba->isr_lock, flags);
-				num_mcceq_processed++;
-			} else {
-				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
-					blk_iopoll_sched(&pbe_eq->iopoll);
-				num_ioeq_processed++;
-			}
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-		}
-		if (num_ioeq_processed || num_mcceq_processed) {
-			if (pbe_eq->todo_mcc_cq)
-				queue_work(phba->wq, &pbe_eq->work_cqs);
-
-			if ((num_mcceq_processed) && (!num_ioeq_processed))
-				hwi_ring_eq_db(phba, eq->id, 0,
-					       (num_ioeq_processed +
-						num_mcceq_processed) , 1, 1);
-			else
-				hwi_ring_eq_db(phba, eq->id, 0,
-					       (num_ioeq_processed +
-						num_mcceq_processed), 0, 1);
-
-			return IRQ_HANDLED;
-		} else
-			return IRQ_NONE;
-	} else {
-		cq = &phwi_context->be_cq[0];
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-						& EQE_VALID_MASK) {
-
-			if (((eqe->dw[offsetof(struct amap_eq_entry,
-			      resource_id) / 32] &
-			      EQE_RESID_MASK) >> 16) != cq->id) {
-				spin_lock_irqsave(&phba->isr_lock, flags);
-				pbe_eq->todo_mcc_cq = true;
-				spin_unlock_irqrestore(&phba->isr_lock, flags);
-			} else {
-				spin_lock_irqsave(&phba->isr_lock, flags);
-				pbe_eq->todo_cq = true;
-				spin_unlock_irqrestore(&phba->isr_lock, flags);
-			}
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_ioeq_processed++;
-		}
-		if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
-			queue_work(phba->wq, &pbe_eq->work_cqs);
-
-		if (num_ioeq_processed) {
-			hwi_ring_eq_db(phba, eq->id, 0,
-				       num_ioeq_processed, 1, 1);
-			return IRQ_HANDLED;
-		} else
-			return IRQ_NONE;
-	}
+	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+				& EQE_VALID_MASK) {
+		if (((eqe->dw[offsetof(struct amap_eq_entry,
+		      resource_id) / 32] &
+		      EQE_RESID_MASK) >> 16) == mcc->id) {
+			spin_lock_irqsave(&phba->isr_lock, flags);
+			pbe_eq->todo_mcc_cq = true;
+			spin_unlock_irqrestore(&phba->isr_lock, flags);
+			num_mcceq_processed++;
+		} else {
+			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+				blk_iopoll_sched(&pbe_eq->iopoll);
+			num_ioeq_processed++;
+		}
+		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+		queue_tail_inc(eq);
+		eqe = queue_tail_node(eq);
+	}
+	if (num_ioeq_processed || num_mcceq_processed) {
+		if (pbe_eq->todo_mcc_cq)
+			queue_work(phba->wq, &pbe_eq->work_cqs);
+
+		if ((num_mcceq_processed) && (!num_ioeq_processed))
+			hwi_ring_eq_db(phba, eq->id, 0,
+				       (num_ioeq_processed +
+					num_mcceq_processed) , 1, 1);
+		else
+			hwi_ring_eq_db(phba, eq->id, 0,
+				       (num_ioeq_processed +
+					num_mcceq_processed), 0, 1);
+
+		return IRQ_HANDLED;
+	} else
+		return IRQ_NONE;
 }
 
 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -5216,11 +5166,10 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
 	}
 	pci_disable_msix(phba->pcidev);
 
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_disable(&pbe_eq->iopoll);
-		}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+	}
 
 	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
 		destroy_workqueue(phba->wq);
@@ -5429,32 +5378,18 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	if (blk_iopoll_enabled) {
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
-					be_iopoll);
-			blk_iopoll_enable(&pbe_eq->iopoll);
-		}
-
-		i = (phba->msix_enabled) ? i : 0;
-		/* Work item for MCC handling */
-		pbe_eq = &phwi_context->be_eq[i];
-		INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-	} else {
-		if (phba->msix_enabled) {
-			for (i = 0; i <= phba->num_cpus; i++) {
-				pbe_eq = &phwi_context->be_eq[i];
-				INIT_WORK(&pbe_eq->work_cqs,
-					  beiscsi_process_all_cqs);
-			}
-		} else {
-			pbe_eq = &phwi_context->be_eq[0];
-			INIT_WORK(&pbe_eq->work_cqs,
-				  beiscsi_process_all_cqs);
-		}
-	}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+				be_iopoll);
+		blk_iopoll_enable(&pbe_eq->iopoll);
+	}
 
+	i = (phba->msix_enabled) ? i : 0;
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
 	ret = beiscsi_init_irqs(phba);
 	if (ret < 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5614,32 +5549,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	if (blk_iopoll_enabled) {
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
-					be_iopoll);
-			blk_iopoll_enable(&pbe_eq->iopoll);
-		}
-
-		i = (phba->msix_enabled) ? i : 0;
-		/* Work item for MCC handling */
-		pbe_eq = &phwi_context->be_eq[i];
-		INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-	} else {
-		if (phba->msix_enabled) {
-			for (i = 0; i <= phba->num_cpus; i++) {
-				pbe_eq = &phwi_context->be_eq[i];
-				INIT_WORK(&pbe_eq->work_cqs,
-					  beiscsi_process_all_cqs);
-			}
-		} else {
-			pbe_eq = &phwi_context->be_eq[0];
-			INIT_WORK(&pbe_eq->work_cqs,
-				  beiscsi_process_all_cqs);
-		}
-	}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+				be_iopoll);
+		blk_iopoll_enable(&pbe_eq->iopoll);
+	}
 
+	i = (phba->msix_enabled) ? i : 0;
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
 	ret = beiscsi_init_irqs(phba);
 	if (ret < 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5668,11 +5589,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 
 free_blkenbld:
 	destroy_workqueue(phba->wq);
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_disable(&pbe_eq->iopoll);
-		}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+	}
 free_twq:
 	beiscsi_clean_port(phba);
 	beiscsi_free_mem(phba);
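
The be2iscsi hunks delete only the !blk_iopoll_enabled fallbacks; the polling half of the driver is untouched and therefore absent from this diff. Judging from the calls that do appear (be_iopoll, be_iopoll_budget, blk_iopoll_complete, hwi_ring_eq_db), its shape is the standard one. A hedged sketch, not the driver's verbatim code; the beiscsi_process_cq() call and the exact doorbell arguments are assumptions:

/* Sketch of the poll callback registered via
 * blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll). */
static int be_iopoll(struct blk_iopoll *iop, int budget)
{
	struct be_eq_obj *pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	unsigned int ret;

	ret = beiscsi_process_cq(pbe_eq);	/* assumed completion reaper */
	if (ret < budget) {
		blk_iopoll_complete(iop);
		/* re-arm the event queue so the next EQE raises an IRQ;
		 * argument order here is an assumption */
		hwi_ring_eq_db(pbe_eq->phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}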
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3f5b56a99892..69470f5c0ac9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3630,16 +3630,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
 		return strlen(buf);
 	}
 
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
 	}
 
 	spin_lock_irqsave(shost->host_lock, lock_flags);
 	ioa_cfg->iopoll_weight = user_iopoll_weight;
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
 					ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5484,8 +5482,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 		return IRQ_NONE;
 	}
 
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 		       hrrq->toggle_bit) {
 			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -9859,8 +9856,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	ioa_cfg->host->max_channel = IPR_VSET_BUS;
 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
 					ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -9889,8 +9885,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
 	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		ioa_cfg->iopoll_weight = 0;
 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
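
The ipr.c hunks now repeat the same three-part gate in four places. A hypothetical helper, not part of this commit, that would name the condition once (all three fields appear in the hunks above):

/* Hypothetical, for illustration only: iopoll is worth using on ipr
 * when a weight is configured, the adapter is SIS-64, and more than
 * one MSI-X vector (and thus more than one HRRQ) is in use. */
static inline bool ipr_iopoll_active(struct ipr_ioa_cfg *ioa_cfg)
{
	return ioa_cfg->iopoll_weight && ioa_cfg->sis64 &&
	       ioa_cfg->nvectors > 1;
}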
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
index 308734d3d4a2..77ae77c0b704 100644
--- a/include/linux/blk-iopoll.h
+++ b/include/linux/blk-iopoll.h
@@ -43,6 +43,4 @@ extern void __blk_iopoll_complete(struct blk_iopoll *);
 extern void blk_iopoll_enable(struct blk_iopoll *);
 extern void blk_iopoll_disable(struct blk_iopoll *);
 
-extern int blk_iopoll_enabled;
-
 #endif
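
For reference, blk_iopoll_sched_prep() is a static inline in this same header. Paraphrased (check the header for the exact body): it returns zero only when it finds the instance enabled and wins the race to set the SCHED bit, which is why every caller in this diff writes if (!blk_iopoll_sched_prep(...)) blk_iopoll_sched(...).

/* Paraphrase of the helper's logic, not a verbatim copy: */
static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
{
	if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
		return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
	return 1;	/* disabled: report "do not schedule" */
}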
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 49e13e1f8fe6..ef0bf04e8649 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
 #ifndef CONFIG_MMU
 extern int sysctl_nr_trim_pages;
 #endif
-#ifdef CONFIG_BLOCK
-extern int blk_iopoll_enabled;
-#endif
 
 /* Constants used for minimum and maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -1094,15 +1091,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-#ifdef CONFIG_BLOCK
-	{
-		.procname	= "blk_iopoll",
-		.data		= &blk_iopoll_enabled,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif
 	{ }
 };
 
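
The entry deleted above lived in kern_table, so the knob was exposed as /proc/sys/kernel/blk_iopoll, with proc_dointvec copying the int in and out on reads and writes. A minimal sketch of the same pattern registered standalone; my_knob and my_table are hypothetical names, and registering via register_sysctl() is an assumption about the surrounding kernel version:

#include <linux/sysctl.h>

static int my_knob = 1;			/* hypothetical tunable */

static struct ctl_table my_table[] = {
	{
		.procname	= "my_knob",
		.data		= &my_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/* e.g. from an __init function:
 *	register_sysctl("kernel", my_table);
 * after which the value is readable and writable at
 * /proc/sys/kernel/my_knob.
 */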