author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-01 22:19:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-01 22:19:15 -0400
commit		7a48837732f87a574ee3e1855927dc250117f565 (patch)
tree		f2e975a347d6d489e9f1932f9864fc978910def0 /drivers/scsi
parent		1a0b6abaea78f73d9bc0a2f6df2d9e4c917cade1 (diff)
parent		27fbf4e87c16bb3e40730890169a643a494b7c64 (diff)
Merge branch 'for-3.15/core' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
"This is the pull request for the core block IO bits for the 3.15
kernel. It's a smaller round this time, it contains:
- Various little blk-mq fixes and additions from Christoph and
myself.
- Cleanup of the IPI usage from the block layer, and associated
helper code. From Frederic Weisbecker and Jan Kara.
- Duplicate code cleanup in bio-integrity from Gu Zheng. This will
give you a merge conflict, but that should be easy to resolve.
- blk-mq notify spinlock fix for RT from Mike Galbraith.
- A blktrace partial accounting bug fix from Roman Pen.
- Missing REQ_SYNC detection fix for blk-mq from Shaohua Li"
* 'for-3.15/core' of git://git.kernel.dk/linux-block: (25 commits)
blk-mq: add REQ_SYNC early
rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
blk-mq: support partial I/O completions
blk-mq: merge blk_mq_insert_request and blk_mq_run_request
blk-mq: remove blk_mq_alloc_rq
blk-mq: don't dump CPU -> hw queue map on driver load
blk-mq: fix wrong usage of hctx->state vs hctx->flags
blk-mq: allow blk_mq_init_commands() to return failure
block: remove old blk_iopoll_enabled variable
blktrace: fix accounting of partially completed requests
smp: Rename __smp_call_function_single() to smp_call_function_single_async()
smp: Remove wait argument from __smp_call_function_single()
watchdog: Simplify a little the IPI call
smp: Move __smp_call_function_single() below its safe version
smp: Consolidate the various smp_call_function_single() declensions
smp: Teach __smp_call_function_single() to check for offline cpus
smp: Remove unused list_head from csd
smp: Iterate functions through llist_for_each_entry_safe()
block: Stop abusing rq->csd.list in blk-softirq
block: Remove useless IPI struct initialization
...
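The smp entries above all reshape one helper: __smp_call_function_single(cpu, csd, wait) loses its wait argument and becomes smp_call_function_single_async(cpu, csd), with the csd's list_head replaced by an llist node. As a point of reference, a minimal sketch of a caller after the rename, modeled loosely on how blk-softirq completes requests on a remote CPU (my_remote_fn, kick_remote_cpu, and the static csd are illustrative placeholders, not code from this series):

#include <linux/smp.h>

/* Runs in IPI (interrupt) context on the target CPU. */
static void my_remote_fn(void *info)
{
	/* e.g. raise BLOCK_SOFTIRQ for the request passed via info */
}

/* The csd must stay valid until my_remote_fn has run. */
static struct call_single_data my_csd;

static void kick_remote_cpu(int cpu, void *data)
{
	my_csd.func = my_remote_fn;
	my_csd.info = data;
	my_csd.flags = 0;
	/*
	 * Fire-and-forget: the async variant never waits, so it is
	 * usable from IRQ context as long as this csd is not still
	 * in flight from a previous call.
	 */
	smp_call_function_single_async(cpu, &my_csd);
}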
Diffstat (limited to 'drivers/scsi')
 drivers/scsi/be2iscsi/be_main.c | 206
 drivers/scsi/ipr.c              |  15
 2 files changed, 68 insertions(+), 153 deletions(-)
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 9be818f7b26d..0d822297aa80 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -898,7 +898,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 	struct be_queue_info *cq;
 	unsigned int num_eq_processed;
 	struct be_eq_obj *pbe_eq;
-	unsigned long flags;
 
 	pbe_eq = dev_id;
 	eq = &pbe_eq->q;
@@ -907,31 +906,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 
 	phba = pbe_eq->phba;
 	num_eq_processed = 0;
-	if (blk_iopoll_enabled) {
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
-				blk_iopoll_sched(&pbe_eq->iopoll);
-
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_eq_processed++;
-		}
-	} else {
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			spin_lock_irqsave(&phba->isr_lock, flags);
-			pbe_eq->todo_cq = true;
-			spin_unlock_irqrestore(&phba->isr_lock, flags);
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_eq_processed++;
-		}
+	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+				& EQE_VALID_MASK) {
+		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+			blk_iopoll_sched(&pbe_eq->iopoll);
 
-		if (pbe_eq->todo_cq)
-			queue_work(phba->wq, &pbe_eq->work_cqs);
+		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+		queue_tail_inc(eq);
+		eqe = queue_tail_node(eq);
+		num_eq_processed++;
 	}
 
 	if (num_eq_processed)
@@ -952,7 +935,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 	struct hwi_context_memory *phwi_context;
 	struct be_eq_entry *eqe = NULL;
 	struct be_queue_info *eq;
-	struct be_queue_info *cq;
 	struct be_queue_info *mcc;
 	unsigned long flags, index;
 	unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -978,72 +960,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 
 	num_ioeq_processed = 0;
 	num_mcceq_processed = 0;
-	if (blk_iopoll_enabled) {
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-					& EQE_VALID_MASK) {
-			if (((eqe->dw[offsetof(struct amap_eq_entry,
-			     resource_id) / 32] &
-			     EQE_RESID_MASK) >> 16) == mcc->id) {
-				spin_lock_irqsave(&phba->isr_lock, flags);
-				pbe_eq->todo_mcc_cq = true;
-				spin_unlock_irqrestore(&phba->isr_lock, flags);
-				num_mcceq_processed++;
-			} else {
-				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
-					blk_iopoll_sched(&pbe_eq->iopoll);
-				num_ioeq_processed++;
-			}
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-		}
-		if (num_ioeq_processed || num_mcceq_processed) {
-			if (pbe_eq->todo_mcc_cq)
-				queue_work(phba->wq, &pbe_eq->work_cqs);
-
-			if ((num_mcceq_processed) && (!num_ioeq_processed))
-				hwi_ring_eq_db(phba, eq->id, 0,
-					      (num_ioeq_processed +
-					       num_mcceq_processed) , 1, 1);
-			else
-				hwi_ring_eq_db(phba, eq->id, 0,
-					       (num_ioeq_processed +
-						num_mcceq_processed), 0, 1);
-
-			return IRQ_HANDLED;
-		} else
-			return IRQ_NONE;
-	} else {
-		cq = &phwi_context->be_cq[0];
-		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-						& EQE_VALID_MASK) {
-
-			if (((eqe->dw[offsetof(struct amap_eq_entry,
-			     resource_id) / 32] &
-			     EQE_RESID_MASK) >> 16) != cq->id) {
-				spin_lock_irqsave(&phba->isr_lock, flags);
-				pbe_eq->todo_mcc_cq = true;
-				spin_unlock_irqrestore(&phba->isr_lock, flags);
-			} else {
-				spin_lock_irqsave(&phba->isr_lock, flags);
-				pbe_eq->todo_cq = true;
-				spin_unlock_irqrestore(&phba->isr_lock, flags);
-			}
-			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-			queue_tail_inc(eq);
-			eqe = queue_tail_node(eq);
-			num_ioeq_processed++;
-		}
-		if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
+				& EQE_VALID_MASK) {
+		if (((eqe->dw[offsetof(struct amap_eq_entry,
+		     resource_id) / 32] &
+		     EQE_RESID_MASK) >> 16) == mcc->id) {
+			spin_lock_irqsave(&phba->isr_lock, flags);
+			pbe_eq->todo_mcc_cq = true;
+			spin_unlock_irqrestore(&phba->isr_lock, flags);
+			num_mcceq_processed++;
+		} else {
+			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
+				blk_iopoll_sched(&pbe_eq->iopoll);
+			num_ioeq_processed++;
+		}
+		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
+		queue_tail_inc(eq);
+		eqe = queue_tail_node(eq);
+	}
+	if (num_ioeq_processed || num_mcceq_processed) {
+		if (pbe_eq->todo_mcc_cq)
 			queue_work(phba->wq, &pbe_eq->work_cqs);
 
-		if (num_ioeq_processed) {
-			hwi_ring_eq_db(phba, eq->id, 0,
-				       num_ioeq_processed, 1, 1);
-			return IRQ_HANDLED;
-		} else
-			return IRQ_NONE;
-	}
+		if ((num_mcceq_processed) && (!num_ioeq_processed))
+			hwi_ring_eq_db(phba, eq->id, 0,
+				      (num_ioeq_processed +
+				       num_mcceq_processed) , 1, 1);
+		else
+			hwi_ring_eq_db(phba, eq->id, 0,
+				       (num_ioeq_processed +
+					num_mcceq_processed), 0, 1);
+
+		return IRQ_HANDLED;
+	} else
+		return IRQ_NONE;
 }
 
 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -5263,11 +5213,10 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
 	}
 	pci_disable_msix(phba->pcidev);
 
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_disable(&pbe_eq->iopoll);
-		}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+	}
 
 	if (unload_state == BEISCSI_CLEAN_UNLOAD) {
 		destroy_workqueue(phba->wq);
@@ -5478,32 +5427,18 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	if (blk_iopoll_enabled) {
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
-					be_iopoll);
-			blk_iopoll_enable(&pbe_eq->iopoll);
-		}
-
-		i = (phba->msix_enabled) ? i : 0;
-		/* Work item for MCC handling */
+	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-	} else {
-		if (phba->msix_enabled) {
-			for (i = 0; i <= phba->num_cpus; i++) {
-				pbe_eq = &phwi_context->be_eq[i];
-				INIT_WORK(&pbe_eq->work_cqs,
-					  beiscsi_process_all_cqs);
-			}
-		} else {
-			pbe_eq = &phwi_context->be_eq[0];
-			INIT_WORK(&pbe_eq->work_cqs,
-				  beiscsi_process_all_cqs);
-		}
+		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+				be_iopoll);
+		blk_iopoll_enable(&pbe_eq->iopoll);
 	}
 
+	i = (phba->msix_enabled) ? i : 0;
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
 	ret = beiscsi_init_irqs(phba);
 	if (ret < 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5665,32 +5600,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	if (blk_iopoll_enabled) {
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
-					be_iopoll);
-			blk_iopoll_enable(&pbe_eq->iopoll);
-		}
-
-		i = (phba->msix_enabled) ? i : 0;
-		/* Work item for MCC handling */
+	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
-		INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-	} else {
-		if (phba->msix_enabled) {
-			for (i = 0; i <= phba->num_cpus; i++) {
-				pbe_eq = &phwi_context->be_eq[i];
-				INIT_WORK(&pbe_eq->work_cqs,
-					  beiscsi_process_all_cqs);
-			}
-		} else {
-			pbe_eq = &phwi_context->be_eq[0];
-			INIT_WORK(&pbe_eq->work_cqs,
-				  beiscsi_process_all_cqs);
-		}
+		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
+				be_iopoll);
+		blk_iopoll_enable(&pbe_eq->iopoll);
 	}
 
+	i = (phba->msix_enabled) ? i : 0;
+	/* Work item for MCC handling */
+	pbe_eq = &phwi_context->be_eq[i];
+	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+
 	ret = beiscsi_init_irqs(phba);
 	if (ret < 0) {
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5719,11 +5640,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 
 free_blkenbld:
 	destroy_workqueue(phba->wq);
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			pbe_eq = &phwi_context->be_eq[i];
-			blk_iopoll_disable(&pbe_eq->iopoll);
-		}
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+	}
 free_twq:
 	beiscsi_clean_port(phba);
 	beiscsi_free_mem(phba);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2f8dd8e4225b..924b0ba74dfe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3670,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
 		return strlen(buf);
 	}
 
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
 	}
 
 	spin_lock_irqsave(shost->host_lock, lock_flags);
 	ioa_cfg->iopoll_weight = user_iopoll_weight;
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
 					ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5525,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 		return IRQ_NONE;
 	}
 
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 		       hrrq->toggle_bit) {
 			if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -9975,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 		ioa_cfg->host->max_channel = IPR_VSET_BUS;
 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
 					ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -10005,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
 	int i;
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 		ioa_cfg->iopoll_weight = 0;
 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
 			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
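
Both drivers follow the same blk-iopoll pattern that the removal of blk_iopoll_enabled makes unconditional: the hard IRQ handler only disarms the event source and schedules the poller, and a softirq poll callback drains completions against a budget. A rough sketch of that pattern under assumed names (my_dev, drain_completions, and reenable_device_irq are hypothetical helpers, not taken from either driver):

#include <linux/kernel.h>
#include <linux/blk-iopoll.h>
#include <linux/interrupt.h>

struct my_dev {
	struct blk_iopoll iopoll;
	/* ... hardware state ... */
};

/* Hypothetical helpers standing in for real hardware access. */
static int drain_completions(struct my_dev *dev, int budget);
static void reenable_device_irq(struct my_dev *dev);

/* Softirq context: process up to 'budget' completions per call. */
static int my_poll(struct blk_iopoll *iop, int budget)
{
	struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
	int done = drain_completions(dev, budget);

	if (done < budget) {
		/* Queue drained: stop polling and rearm the interrupt. */
		blk_iopoll_complete(iop);
		reenable_device_irq(dev);
	}
	return done;
}

/* Hard IRQ: disarm and hand off to the poller, as be_isr_msix() does. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* sched_prep fails if polling is disabled or already scheduled. */
	if (!blk_iopoll_sched_prep(&dev->iopoll))
		blk_iopoll_sched(&dev->iopoll);
	return IRQ_HANDLED;
}

/* Probe-time setup, mirroring beiscsi_dev_probe()/ipr_probe() above:
 *	blk_iopoll_init(&dev->iopoll, 64, my_poll);
 *	blk_iopoll_enable(&dev->iopoll);
 */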