-rw-r--r--   drivers/scsi/qla2xxx/qla_def.h    2
-rw-r--r--   drivers/scsi/qla2xxx/qla_isr.c   89
-rw-r--r--   drivers/scsi/qla2xxx/qla_mid.c    2
3 files changed, 36 insertions, 57 deletions
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 5236e3f2a06a..d38c205ae22c 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2747,7 +2747,7 @@ struct qla_msix_entry {
 	int have_irq;
 	uint32_t vector;
 	uint16_t entry;
-	struct rsp_que *rsp;
+	void *handle;
 	struct irq_affinity_notify irq_notify;
 	int cpuid;
 };
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 19f18485a854..ad5304caf1ff 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3025,52 +3025,17 @@ static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
3025 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 3025 { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
3026}; 3026};
3027 3027
3028static void
3029qla24xx_disable_msix(struct qla_hw_data *ha)
3030{
3031 int i;
3032 struct qla_msix_entry *qentry;
3033 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3034
3035 for (i = 0; i < ha->msix_count; i++) {
3036 qentry = &ha->msix_entries[i];
3037 if (qentry->have_irq) {
3038 /* un-register irq cpu affinity notification */
3039 irq_set_affinity_notifier(qentry->vector, NULL);
3040 free_irq(qentry->vector, qentry->rsp);
3041 }
3042 }
3043 pci_disable_msix(ha->pdev);
3044 kfree(ha->msix_entries);
3045 ha->msix_entries = NULL;
3046 ha->flags.msix_enabled = 0;
3047 ql_dbg(ql_dbg_init, vha, 0x0042,
3048 "Disabled the MSI.\n");
3049}
3050
3051static int 3028static int
3052qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3029qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3053{ 3030{
3054#define MIN_MSIX_COUNT 2 3031#define MIN_MSIX_COUNT 2
3055#define ATIO_VECTOR 2 3032#define ATIO_VECTOR 2
3056 int i, ret; 3033 int i, ret;
3057 struct msix_entry *entries;
3058 struct qla_msix_entry *qentry; 3034 struct qla_msix_entry *qentry;
3059 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3035 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3060 3036
3061 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 3037 ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
3062 GFP_KERNEL); 3038 PCI_IRQ_MSIX);
3063 if (!entries) {
3064 ql_log(ql_log_warn, vha, 0x00bc,
3065 "Failed to allocate memory for msix_entry.\n");
3066 return -ENOMEM;
3067 }
3068
3069 for (i = 0; i < ha->msix_count; i++)
3070 entries[i].entry = i;
3071
3072 ret = pci_enable_msix_range(ha->pdev,
3073 entries, MIN_MSIX_COUNT, ha->msix_count);
3074 if (ret < 0) { 3039 if (ret < 0) {
3075 ql_log(ql_log_fatal, vha, 0x00c7, 3040 ql_log(ql_log_fatal, vha, 0x00c7,
3076 "MSI-X: Failed to enable support, " 3041 "MSI-X: Failed to enable support, "
@@ -3097,10 +3062,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 
 	for (i = 0; i < ha->msix_count; i++) {
 		qentry = &ha->msix_entries[i];
-		qentry->vector = entries[i].vector;
-		qentry->entry = entries[i].entry;
+		qentry->vector = pci_irq_vector(ha->pdev, i);
+		qentry->entry = i;
 		qentry->have_irq = 0;
-		qentry->rsp = NULL;
+		qentry->handle = NULL;
 		qentry->irq_notify.notify = qla_irq_affinity_notify;
 		qentry->irq_notify.release = qla_irq_affinity_release;
 		qentry->cpuid = -1;
@@ -3109,7 +3074,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	/* Enable MSI-X vectors for the base queue */
 	for (i = 0; i < 2; i++) {
 		qentry = &ha->msix_entries[i];
-		qentry->rsp = rsp;
+		qentry->handle = rsp;
 		rsp->msix = qentry;
 		if (IS_P3P_TYPE(ha))
 			ret = request_irq(qentry->vector,
@@ -3142,7 +3107,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	 */
 	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
 		qentry = &ha->msix_entries[ATIO_VECTOR];
-		qentry->rsp = rsp;
+		qentry->handle = rsp;
 		rsp->msix = qentry;
 		ret = request_irq(qentry->vector,
 			qla83xx_msix_entries[ATIO_VECTOR].handler,
@@ -3155,7 +3120,7 @@ msix_register_fail:
 		ql_log(ql_log_fatal, vha, 0x00cb,
 		    "MSI-X: unable to register handler -- %x/%d.\n",
 		    qentry->vector, ret);
-		qla24xx_disable_msix(ha);
+		qla2x00_free_irqs(vha);
 		ha->mqenable = 0;
 		goto msix_out;
 	}
@@ -3177,7 +3142,6 @@ msix_register_fail:
 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
 
 msix_out:
-	kfree(entries);
 	return ret;
 }
 
@@ -3230,7 +3194,7 @@ skip_msix:
 	    !IS_QLA27XX(ha))
 		goto skip_msi;
 
-	ret = pci_enable_msi(ha->pdev);
+	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
 	if (!ret) {
 		ql_dbg(ql_dbg_init, vha, 0x0038,
 		    "MSI: Enabled.\n");
@@ -3275,6 +3239,8 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct rsp_que *rsp;
+	struct qla_msix_entry *qentry;
+	int i;
 
 	/*
 	 * We need to check that ha->rsp_q_map is valid in case we are called
@@ -3284,13 +3250,24 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
 		return;
 	rsp = ha->rsp_q_map[0];
 
-	if (ha->flags.msix_enabled)
-		qla24xx_disable_msix(ha);
-	else if (ha->flags.msi_enabled) {
-		free_irq(ha->pdev->irq, rsp);
-		pci_disable_msi(ha->pdev);
-	} else
-		free_irq(ha->pdev->irq, rsp);
+	if (ha->flags.msix_enabled) {
+		for (i = 0; i < ha->msix_count; i++) {
+			qentry = &ha->msix_entries[i];
+			if (qentry->have_irq) {
+				irq_set_affinity_notifier(qentry->vector, NULL);
+				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
+			}
+		}
+		kfree(ha->msix_entries);
+		ha->msix_entries = NULL;
+		ha->flags.msix_enabled = 0;
+		ql_dbg(ql_dbg_init, vha, 0x0042,
+			"Disabled MSI-X.\n");
+	} else {
+		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
+	}
+
+	pci_free_irq_vectors(ha->pdev);
 }
 
 
@@ -3310,7 +3287,7 @@ int qla25xx_request_irq(struct rsp_que *rsp)
 		return ret;
 	}
 	msix->have_irq = 1;
-	msix->rsp = rsp;
+	msix->handle = rsp;
 	return ret;
 }
 
@@ -3323,11 +3300,12 @@ static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
 		container_of(notify, struct qla_msix_entry, irq_notify);
 	struct qla_hw_data *ha;
 	struct scsi_qla_host *base_vha;
+	struct rsp_que *rsp = e->handle;
 
 	/* user is recommended to set mask to just 1 cpu */
 	e->cpuid = cpumask_first(mask);
 
-	ha = e->rsp->hw;
+	ha = rsp->hw;
 	base_vha = pci_get_drvdata(ha->pdev);
 
 	ql_dbg(ql_dbg_init, base_vha, 0xffff,
@@ -3351,7 +3329,8 @@ static void qla_irq_affinity_release(struct kref *ref)
 		container_of(ref, struct irq_affinity_notify, kref);
 	struct qla_msix_entry *e =
 		container_of(notify, struct qla_msix_entry, irq_notify);
-	struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev);
+	struct rsp_que *rsp = e->handle;
+	struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
 
 	ql_dbg(ql_dbg_init, base_vha, 0xffff,
 	    "%s: host%ld: vector %d cpu %d \n", __func__,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52bae66..8e406fc35db4 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -542,7 +542,7 @@ qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 	if (rsp->msix && rsp->msix->have_irq) {
 		free_irq(rsp->msix->vector, rsp);
 		rsp->msix->have_irq = 0;
-		rsp->msix->rsp = NULL;
+		rsp->msix->handle = NULL;
 	}
 	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
 		sizeof(response_t), rsp->ring, rsp->dma);
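
For reference, a minimal sketch of the pci_alloc_irq_vectors()/pci_irq_vector()/pci_free_irq_vectors() pattern this patch converts the driver to. This is not qla2xxx code; the demo_hw struct, demo_isr handler, and function names are purely illustrative.

/* Illustrative example of the managed IRQ-vector API adopted by this patch. */
#include <linux/pci.h>
#include <linux/interrupt.h>

struct demo_hw {			/* hypothetical driver state */
	struct pci_dev *pdev;
	int nvecs;
};

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_setup_irqs(struct demo_hw *hw, int min, int max)
{
	int i, ret;

	/* One call replaces kzalloc of msix_entry[] + pci_enable_msix_range(). */
	ret = pci_alloc_irq_vectors(hw->pdev, min, max, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (ret < 0)
		return ret;
	hw->nvecs = ret;

	for (i = 0; i < hw->nvecs; i++) {
		/* pci_irq_vector() maps a vector index to the Linux IRQ number. */
		ret = request_irq(pci_irq_vector(hw->pdev, i), demo_isr, 0,
				  "demo", hw);
		if (ret)
			goto fail;
	}
	return 0;

fail:
	while (--i >= 0)
		free_irq(pci_irq_vector(hw->pdev, i), hw);
	pci_free_irq_vectors(hw->pdev);	/* replaces pci_disable_msix()/msi() */
	return ret;
}

static void demo_free_irqs(struct demo_hw *hw)
{
	int i;

	for (i = 0; i < hw->nvecs; i++)
		free_irq(pci_irq_vector(hw->pdev, i), hw);
	pci_free_irq_vectors(hw->pdev);
}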