path: root/drivers/scsi/be2iscsi/be_main.c
Diffstat (limited to 'drivers/scsi/be2iscsi/be_main.c')
-rw-r--r--   drivers/scsi/be2iscsi/be_main.c | 1019
1 file changed, 782 insertions(+), 237 deletions(-)
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 4f1aca346e38..1a557fa77888 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -39,7 +39,8 @@
39 39
40static unsigned int be_iopoll_budget = 10; 40static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64; 41static unsigned int be_max_phys_size = 64;
42static unsigned int enable_msix; 42static unsigned int enable_msix = 1;
43static unsigned int ring_mode;
43 44
44MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
45MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
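
This hunk turns MSI-X on by default and introduces the ring_mode flag that later hunks key off. Both are plain statics here; if they were meant to be tunable at module load time, the usual pattern is module_param(). A hypothetical sketch of that pattern follows; the parameter declarations are not part of this patch and the descriptions are assumptions:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical sketch only: exposing the two knobs above as load-time
 * parameters. The patch itself only changes their defaults. */
static unsigned int enable_msix = 1;
module_param(enable_msix, uint, S_IRUGO);
MODULE_PARM_DESC(enable_msix, "Use MSI-X vectors (1) instead of INTx (0)");

static unsigned int ring_mode;
module_param(ring_mode, uint, S_IRUGO);
MODULE_PARM_DESC(ring_mode, "SGL-handle ring mode (normally derived from firmware config)");
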
@@ -58,6 +59,17 @@ static int beiscsi_slave_configure(struct scsi_device *sdev)
58 return 0; 59 return 0;
59} 60}
60 61
62/*------------------- PCI Driver operations and data ----------------- */
63static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
64 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
65 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
66 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
67 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
68 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
69 { 0 }
70};
71MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
72
61static struct scsi_host_template beiscsi_sht = { 73static struct scsi_host_template beiscsi_sht = {
62 .module = THIS_MODULE, 74 .module = THIS_MODULE,
63 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver", 75 .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
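
The PCI ID table is moved ahead of the SCSI host template and grows two more OneConnect device IDs (OC_DEVICE_ID3/ID4). The table only takes effect once it is referenced from a struct pci_driver; a minimal hedged sketch of that wiring is shown below, with placeholder probe/remove bodies and a placeholder name rather than this driver's real callbacks:

#include <linux/pci.h>

/* Minimal sketch of how the ID table above is consumed; the callbacks
 * and the .name string are stand-ins, not the driver's real ones. */
static int sketch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);   /* the real probe does much more */
}

static void sketch_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver sketch_pci_driver = {
	.name     = "be2iscsi-sketch",
	.id_table = beiscsi_pci_id_table,  /* the table shown above */
	.probe    = sketch_probe,
	.remove   = sketch_remove,
};
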
@@ -76,16 +88,8 @@ static struct scsi_host_template beiscsi_sht = {
76 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 88 .cmd_per_lun = BEISCSI_CMD_PER_LUN,
77 .use_clustering = ENABLE_CLUSTERING, 89 .use_clustering = ENABLE_CLUSTERING,
78}; 90};
79static struct scsi_transport_template *beiscsi_scsi_transport;
80 91
81/*------------------- PCI Driver operations and data ----------------- */ 92static struct scsi_transport_template *beiscsi_scsi_transport;
82static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
83 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
84 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
85 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
86 { 0 }
87};
88MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
89 93
90static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 94static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
91{ 95{
@@ -104,7 +108,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
104 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 108 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
105 shost->max_lun = BEISCSI_NUM_MAX_LUN; 109 shost->max_lun = BEISCSI_NUM_MAX_LUN;
106 shost->transportt = beiscsi_scsi_transport; 110 shost->transportt = beiscsi_scsi_transport;
107
108 phba = iscsi_host_priv(shost); 111 phba = iscsi_host_priv(shost);
109 memset(phba, 0, sizeof(*phba)); 112 memset(phba, 0, sizeof(*phba));
110 phba->shost = shost; 113 phba->shost = shost;
@@ -181,6 +184,7 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
181 return ret; 184 return ret;
182 } 185 }
183 186
187 pci_set_master(pcidev);
184 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { 188 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
185 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); 189 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
186 if (ret) { 190 if (ret) {
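
pci_set_master() is added to the PCI enable path, ahead of the coherent DMA mask negotiation that prefers 64-bit and falls back to 32-bit. A condensed sketch of the resulting sequence, with the error paths trimmed to the essentials and a stand-in function name:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Condensed sketch of the enable path this hunk extends: enable the
 * function, enable bus mastering, then negotiate the coherent DMA mask. */
static int sketch_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret)
		return ret;

	pci_set_master(pcidev);          /* the line added by this hunk */

	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
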
@@ -203,7 +207,6 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
203 status = beiscsi_map_pci_bars(phba, pdev); 207 status = beiscsi_map_pci_bars(phba, pdev);
204 if (status) 208 if (status)
205 return status; 209 return status;
206
207 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 210 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
208 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 211 mbox_mem_alloc->va = pci_alloc_consistent(pdev,
209 mbox_mem_alloc->size, 212 mbox_mem_alloc->size,
@@ -219,6 +222,9 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
219 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 222 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
220 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 223 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
221 spin_lock_init(&ctrl->mbox_lock); 224 spin_lock_init(&ctrl->mbox_lock);
225 spin_lock_init(&phba->ctrl.mcc_lock);
226 spin_lock_init(&phba->ctrl.mcc_cq_lock);
227
222 return status; 228 return status;
223} 229}
224 230
@@ -268,6 +274,113 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
268} 274}
269 275
270/** 276/**
277 * be_isr_mcc - The isr routine of the driver.
278 * @irq: Not used
279 * @dev_id: Pointer to host adapter structure
280 */
281static irqreturn_t be_isr_mcc(int irq, void *dev_id)
282{
283 struct beiscsi_hba *phba;
284 struct be_eq_entry *eqe = NULL;
285 struct be_queue_info *eq;
286 struct be_queue_info *mcc;
287 unsigned int num_eq_processed;
288 struct be_eq_obj *pbe_eq;
289 unsigned long flags;
290
291 pbe_eq = dev_id;
292 eq = &pbe_eq->q;
293 phba = pbe_eq->phba;
294 mcc = &phba->ctrl.mcc_obj.cq;
295 eqe = queue_tail_node(eq);
296 if (!eqe)
297 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
298
299 num_eq_processed = 0;
300
301 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
302 & EQE_VALID_MASK) {
303 if (((eqe->dw[offsetof(struct amap_eq_entry,
304 resource_id) / 32] &
305 EQE_RESID_MASK) >> 16) == mcc->id) {
306 spin_lock_irqsave(&phba->isr_lock, flags);
307 phba->todo_mcc_cq = 1;
308 spin_unlock_irqrestore(&phba->isr_lock, flags);
309 }
310 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
311 queue_tail_inc(eq);
312 eqe = queue_tail_node(eq);
313 num_eq_processed++;
314 }
315 if (phba->todo_mcc_cq)
316 queue_work(phba->wq, &phba->work_cqs);
317 if (num_eq_processed)
318 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
319
320 return IRQ_HANDLED;
321}
322
323/**
324 * be_isr_msix - The isr routine of the driver.
325 * @irq: Not used
326 * @dev_id: Pointer to host adapter structure
327 */
328static irqreturn_t be_isr_msix(int irq, void *dev_id)
329{
330 struct beiscsi_hba *phba;
331 struct be_eq_entry *eqe = NULL;
332 struct be_queue_info *eq;
333 struct be_queue_info *cq;
334 unsigned int num_eq_processed;
335 struct be_eq_obj *pbe_eq;
336 unsigned long flags;
337
338 pbe_eq = dev_id;
339 eq = &pbe_eq->q;
340 cq = pbe_eq->cq;
341 eqe = queue_tail_node(eq);
342 if (!eqe)
343 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
344
345 phba = pbe_eq->phba;
346 num_eq_processed = 0;
347 if (blk_iopoll_enabled) {
348 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
349 & EQE_VALID_MASK) {
350 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
351 blk_iopoll_sched(&pbe_eq->iopoll);
352
353 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
354 queue_tail_inc(eq);
355 eqe = queue_tail_node(eq);
356 num_eq_processed++;
357 }
358 if (num_eq_processed)
359 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
360
361 return IRQ_HANDLED;
362 } else {
363 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
364 & EQE_VALID_MASK) {
365 spin_lock_irqsave(&phba->isr_lock, flags);
366 phba->todo_cq = 1;
367 spin_unlock_irqrestore(&phba->isr_lock, flags);
368 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
369 queue_tail_inc(eq);
370 eqe = queue_tail_node(eq);
371 num_eq_processed++;
372 }
373 if (phba->todo_cq)
374 queue_work(phba->wq, &phba->work_cqs);
375
376 if (num_eq_processed)
377 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
378
379 return IRQ_HANDLED;
380 }
381}
382
383/**
271 * be_isr - The isr routine of the driver. 384 * be_isr - The isr routine of the driver.
272 * @irq: Not used 385 * @irq: Not used
273 * @dev_id: Pointer to host adapter structure 386 * @dev_id: Pointer to host adapter structure
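
Both new handlers, be_isr_mcc() and be_isr_msix(), share the same event-queue walk: start at the tail, consume every entry whose valid bit is set, clear the bit, advance with wrap-around, and finally ring the EQ doorbell with the number of entries consumed. A self-contained userspace model of that walk, with simplified stand-in structures rather than the driver's amap-encoded entries:

/* Self-contained model of the EQ consume loop used by the new ISRs.
 * Compile with: cc -o eq_demo eq_demo.c */
#include <stdio.h>

#define EQ_LEN 8

struct eq_entry { unsigned int valid; unsigned int resource_id; };

struct eq {
	struct eq_entry ring[EQ_LEN];
	unsigned int tail;
};

static unsigned int eq_consume(struct eq *eq)
{
	unsigned int num = 0;
	struct eq_entry *eqe = &eq->ring[eq->tail];

	while (eqe->valid) {
		eqe->valid = 0;                     /* AMAP_SET_BITS(..., valid, eqe, 0) */
		eq->tail = (eq->tail + 1) % EQ_LEN; /* queue_tail_inc() */
		eqe = &eq->ring[eq->tail];
		num++;
	}
	return num;                                 /* count passed to hwi_ring_eq_db() */
}

int main(void)
{
	struct eq eq = { .tail = 0 };

	eq.ring[0].valid = eq.ring[1].valid = eq.ring[2].valid = 1;
	printf("consumed %u event entries\n", eq_consume(&eq));
	return 0;
}
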
@@ -280,48 +393,70 @@ static irqreturn_t be_isr(int irq, void *dev_id)
280 struct be_eq_entry *eqe = NULL; 393 struct be_eq_entry *eqe = NULL;
281 struct be_queue_info *eq; 394 struct be_queue_info *eq;
282 struct be_queue_info *cq; 395 struct be_queue_info *cq;
396 struct be_queue_info *mcc;
283 unsigned long flags, index; 397 unsigned long flags, index;
284 unsigned int num_eq_processed; 398 unsigned int num_mcceq_processed, num_ioeq_processed;
285 struct be_ctrl_info *ctrl; 399 struct be_ctrl_info *ctrl;
400 struct be_eq_obj *pbe_eq;
286 int isr; 401 int isr;
287 402
288 phba = dev_id; 403 phba = dev_id;
289 if (!enable_msix) { 404 ctrl = &phba->ctrl;;
290 ctrl = &phba->ctrl;; 405 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
291 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 406 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
292 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 407 if (!isr)
293 if (!isr) 408 return IRQ_NONE;
294 return IRQ_NONE;
295 }
296 409
297 phwi_ctrlr = phba->phwi_ctrlr; 410 phwi_ctrlr = phba->phwi_ctrlr;
298 phwi_context = phwi_ctrlr->phwi_ctxt; 411 phwi_context = phwi_ctrlr->phwi_ctxt;
299 eq = &phwi_context->be_eq.q; 412 pbe_eq = &phwi_context->be_eq[0];
300 cq = &phwi_context->be_cq; 413
414 eq = &phwi_context->be_eq[0].q;
415 mcc = &phba->ctrl.mcc_obj.cq;
301 index = 0; 416 index = 0;
302 eqe = queue_tail_node(eq); 417 eqe = queue_tail_node(eq);
303 if (!eqe) 418 if (!eqe)
304 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n"); 419 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
305 420
306 num_eq_processed = 0; 421 num_ioeq_processed = 0;
422 num_mcceq_processed = 0;
307 if (blk_iopoll_enabled) { 423 if (blk_iopoll_enabled) {
308 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 424 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
309 & EQE_VALID_MASK) { 425 & EQE_VALID_MASK) {
310 if (!blk_iopoll_sched_prep(&phba->iopoll)) 426 if (((eqe->dw[offsetof(struct amap_eq_entry,
311 blk_iopoll_sched(&phba->iopoll); 427 resource_id) / 32] &
312 428 EQE_RESID_MASK) >> 16) == mcc->id) {
429 spin_lock_irqsave(&phba->isr_lock, flags);
430 phba->todo_mcc_cq = 1;
431 spin_unlock_irqrestore(&phba->isr_lock, flags);
432 num_mcceq_processed++;
433 } else {
434 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
435 blk_iopoll_sched(&pbe_eq->iopoll);
436 num_ioeq_processed++;
437 }
313 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 438 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
314 queue_tail_inc(eq); 439 queue_tail_inc(eq);
315 eqe = queue_tail_node(eq); 440 eqe = queue_tail_node(eq);
316 num_eq_processed++;
317 SE_DEBUG(DBG_LVL_8, "Valid EQE\n");
318 } 441 }
319 if (num_eq_processed) { 442 if (num_ioeq_processed || num_mcceq_processed) {
320 hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 0, 1); 443 if (phba->todo_mcc_cq)
444 queue_work(phba->wq, &phba->work_cqs);
445
446 if ((num_mcceq_processed) && (!num_ioeq_processed))
447 hwi_ring_eq_db(phba, eq->id, 0,
448 (num_ioeq_processed +
449 num_mcceq_processed) , 1, 1);
450 else
451 hwi_ring_eq_db(phba, eq->id, 0,
452 (num_ioeq_processed +
453 num_mcceq_processed), 0, 1);
454
321 return IRQ_HANDLED; 455 return IRQ_HANDLED;
322 } else 456 } else
323 return IRQ_NONE; 457 return IRQ_NONE;
324 } else { 458 } else {
459 cq = &phwi_context->be_cq[0];
325 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 460 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
326 & EQE_VALID_MASK) { 461 & EQE_VALID_MASK) {
327 462
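
In the INTx case be_isr() now demultiplexes the single event stream itself: entries whose resource_id matches the MCC CQ id flag MCC work for the workqueue, everything else is I/O handed to blk_iopoll, and the doorbell re-arms immediately only when the batch contained nothing but MCC events (I/O re-arming is deferred to the iopoll path). A standalone model of that classification and re-arm decision:

/* Standalone model of the INTx demultiplexing added to be_isr().
 * Compile with: cc -o demux_demo demux_demo.c */
#include <stdio.h>

#define MCC_CQ_ID 5

struct event { unsigned int resource_id; };

int main(void)
{
	struct event evts[] = { {3}, {MCC_CQ_ID}, {3}, {MCC_CQ_ID} };
	unsigned int num_mcc = 0, num_io = 0, i;
	int todo_mcc_cq = 0;

	for (i = 0; i < sizeof(evts) / sizeof(evts[0]); i++) {
		if (evts[i].resource_id == MCC_CQ_ID) {
			todo_mcc_cq = 1;   /* workqueue item will drain the MCC CQ */
			num_mcc++;
		} else {
			num_io++;          /* blk_iopoll_sched() would run here */
		}
	}

	/* Re-arm now only if there was no I/O work; otherwise the iopoll
	 * completion path re-arms once its budget is not exhausted. */
	printf("mcc=%u io=%u rearm=%d todo_mcc=%d\n",
	       num_mcc, num_io, (num_mcc && !num_io), todo_mcc_cq);
	return 0;
}
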
@@ -339,13 +474,14 @@ static irqreturn_t be_isr(int irq, void *dev_id)
339 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 474 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
340 queue_tail_inc(eq); 475 queue_tail_inc(eq);
341 eqe = queue_tail_node(eq); 476 eqe = queue_tail_node(eq);
342 num_eq_processed++; 477 num_ioeq_processed++;
343 } 478 }
344 if (phba->todo_cq || phba->todo_mcc_cq) 479 if (phba->todo_cq || phba->todo_mcc_cq)
345 queue_work(phba->wq, &phba->work_cqs); 480 queue_work(phba->wq, &phba->work_cqs);
346 481
347 if (num_eq_processed) { 482 if (num_ioeq_processed) {
348 hwi_ring_eq_db(phba, eq->id, 0, num_eq_processed, 1, 1); 483 hwi_ring_eq_db(phba, eq->id, 0,
484 num_ioeq_processed, 1, 1);
349 return IRQ_HANDLED; 485 return IRQ_HANDLED;
350 } else 486 } else
351 return IRQ_NONE; 487 return IRQ_NONE;
@@ -355,13 +491,32 @@ static irqreturn_t be_isr(int irq, void *dev_id)
355static int beiscsi_init_irqs(struct beiscsi_hba *phba) 491static int beiscsi_init_irqs(struct beiscsi_hba *phba)
356{ 492{
357 struct pci_dev *pcidev = phba->pcidev; 493 struct pci_dev *pcidev = phba->pcidev;
358 int ret; 494 struct hwi_controller *phwi_ctrlr;
495 struct hwi_context_memory *phwi_context;
496 int ret, msix_vec, i = 0;
497 char desc[32];
359 498
360 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, "beiscsi", phba); 499 phwi_ctrlr = phba->phwi_ctrlr;
361 if (ret) { 500 phwi_context = phwi_ctrlr->phwi_ctxt;
362 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" 501
363 "Failed to register irq\\n"); 502 if (phba->msix_enabled) {
364 return ret; 503 for (i = 0; i < phba->num_cpus; i++) {
504 sprintf(desc, "beiscsi_msix_%04x", i);
505 msix_vec = phba->msix_entries[i].vector;
506 ret = request_irq(msix_vec, be_isr_msix, 0, desc,
507 &phwi_context->be_eq[i]);
508 }
509 msix_vec = phba->msix_entries[i].vector;
510 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
511 &phwi_context->be_eq[i]);
512 } else {
513 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
514 "beiscsi", phba);
515 if (ret) {
516 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
517 "Failed to register irq\\n");
518 return ret;
519 }
365 } 520 }
366 return 0; 521 return 0;
367} 522}
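
With MSI-X enabled, beiscsi_init_irqs() requests one vector per online CPU for the I/O event queues plus a final vector dedicated to MCC. Two caveats in the hunk as shown: the return values of the per-CPU request_irq() calls are not checked, and the vector name lives in an on-stack buffer even though request_irq() only stores the pointer. A hedged kernel-style sketch that keeps the structure but adds the unwind; the unwind is an assumption about intent, not what the hunk does:

/* Sketch of per-CPU MSI-X vector setup, with the error unwinding the
 * hunk itself omits. Assumes msix_entries[] was filled by
 * pci_enable_msix() earlier; field names follow the driver's. */
static int sketch_init_msix_irqs(struct beiscsi_hba *phba,
				 struct hwi_context_memory *phwi_context)
{
	char desc[32];   /* note: request_irq() keeps the name pointer, so
			  * real code needs storage that outlives the IRQ */
	int i, ret, msix_vec;

	for (i = 0; i < phba->num_cpus; i++) {
		snprintf(desc, sizeof(desc), "beiscsi_msix_%04x", i);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_msix, 0, desc,
				  &phwi_context->be_eq[i]);
		if (ret)
			goto free_vecs;
	}
	/* last vector is reserved for MCC events */
	msix_vec = phba->msix_entries[i].vector;
	ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
			  &phwi_context->be_eq[i]);
	if (!ret)
		return 0;

free_vecs:
	while (--i >= 0)
		free_irq(phba->msix_entries[i].vector,
			 &phwi_context->be_eq[i]);
	return ret;
}
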
@@ -378,15 +533,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba,
378 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 533 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
379} 534}
380 535
381/*
382 * async pdus include
383 * a. unsolicited NOP-In (target initiated NOP-In)
384 * b. Async Messages
385 * c. Reject PDU
386 * d. Login response
387 * These headers arrive unprocessed by the EP firmware and iSCSI layer
388 * process them
389 */
390static unsigned int 536static unsigned int
391beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 537beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
392 struct beiscsi_hba *phba, 538 struct beiscsi_hba *phba,
@@ -397,6 +543,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
397{ 543{
398 struct iscsi_conn *conn = beiscsi_conn->conn; 544 struct iscsi_conn *conn = beiscsi_conn->conn;
399 struct iscsi_session *session = conn->session; 545 struct iscsi_session *session = conn->session;
546 struct iscsi_task *task;
547 struct beiscsi_io_task *io_task;
548 struct iscsi_hdr *login_hdr;
400 549
401 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & 550 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
402 PDUBASE_OPCODE_MASK) { 551 PDUBASE_OPCODE_MASK) {
@@ -412,6 +561,10 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
412 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n"); 561 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
413 break; 562 break;
414 case ISCSI_OP_LOGIN_RSP: 563 case ISCSI_OP_LOGIN_RSP:
564 task = conn->login_task;
565 io_task = task->dd_data;
566 login_hdr = (struct iscsi_hdr *)ppdu;
567 login_hdr->itt = io_task->libiscsi_itt;
415 break; 568 break;
416 default: 569 default:
417 shost_printk(KERN_WARNING, phba->shost, 570 shost_printk(KERN_WARNING, phba->shost,
@@ -440,7 +593,8 @@ static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
440 io_sgl_alloc_index]; 593 io_sgl_alloc_index];
441 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 594 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
442 phba->io_sgl_hndl_avbl--; 595 phba->io_sgl_hndl_avbl--;
443 if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1)) 596 if (phba->io_sgl_alloc_index == (phba->params.
597 ios_per_ctrl - 1))
444 phba->io_sgl_alloc_index = 0; 598 phba->io_sgl_alloc_index = 0;
445 else 599 else
446 phba->io_sgl_alloc_index++; 600 phba->io_sgl_alloc_index++;
@@ -490,9 +644,18 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
490 644
491 phwi_ctrlr = phba->phwi_ctrlr; 645 phwi_ctrlr = phba->phwi_ctrlr;
492 pwrb_context = &phwi_ctrlr->wrb_context[cid]; 646 pwrb_context = &phwi_ctrlr->wrb_context[cid];
493 pwrb_handle = pwrb_context->pwrb_handle_base[index]; 647 if (pwrb_context->wrb_handles_available) {
494 pwrb_handle->wrb_index = index; 648 pwrb_handle = pwrb_context->pwrb_handle_base[
495 pwrb_handle->nxt_wrb_index = index; 649 pwrb_context->alloc_index];
650 pwrb_context->wrb_handles_available--;
651 pwrb_handle->nxt_wrb_index = pwrb_handle->wrb_index;
652 if (pwrb_context->alloc_index ==
653 (phba->params.wrbs_per_cxn - 1))
654 pwrb_context->alloc_index = 0;
655 else
656 pwrb_context->alloc_index++;
657 } else
658 pwrb_handle = NULL;
496 return pwrb_handle; 659 return pwrb_handle;
497} 660}
498 661
@@ -508,11 +671,20 @@ static void
508free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 671free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
509 struct wrb_handle *pwrb_handle) 672 struct wrb_handle *pwrb_handle)
510{ 673{
674 if (!ring_mode)
675 pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
676 pwrb_handle;
677 pwrb_context->wrb_handles_available++;
678 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
679 pwrb_context->free_index = 0;
680 else
681 pwrb_context->free_index++;
682
511 SE_DEBUG(DBG_LVL_8, 683 SE_DEBUG(DBG_LVL_8,
512 "FREE WRB: pwrb_handle=%p free_index=%d=0x%x" 684 "FREE WRB: pwrb_handle=%p free_index=0x%x"
513 "wrb_handles_available=%d \n", 685 "wrb_handles_available=%d \n",
514 pwrb_handle, pwrb_context->free_index, 686 pwrb_handle, pwrb_context->free_index,
515 pwrb_context->free_index, pwrb_context->wrb_handles_available); 687 pwrb_context->wrb_handles_available);
516} 688}
517 689
518static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 690static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
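
alloc_wrb_handle() now draws from a per-connection pool tracked by alloc_index, free_index and wrb_handles_available, all wrapping at wrbs_per_cxn, and free_wrb_handle() returns handles at free_index (skipping the array store in ring_mode, where the handle pointer is unused). A compact standalone model of that bookkeeping:

/* Standalone model of the WRB handle pool: allocation consumes at
 * alloc_index, free returns at free_index, both wrap at the pool size.
 * Compile with: cc -o wrb_demo wrb_demo.c */
#include <stdio.h>
#include <stddef.h>

#define WRBS_PER_CXN 4

struct wrb_handle { int id; };

struct wrb_context {
	struct wrb_handle *base[WRBS_PER_CXN];
	unsigned int alloc_index, free_index, available;
};

static struct wrb_handle *alloc_wrb(struct wrb_context *c)
{
	struct wrb_handle *h;

	if (!c->available)
		return NULL;                 /* pool exhausted */
	h = c->base[c->alloc_index];
	c->available--;
	c->alloc_index = (c->alloc_index + 1) % WRBS_PER_CXN;
	return h;
}

static void free_wrb(struct wrb_context *c, struct wrb_handle *h)
{
	c->base[c->free_index] = h;
	c->available++;
	c->free_index = (c->free_index + 1) % WRBS_PER_CXN;
}

int main(void)
{
	static struct wrb_handle pool[WRBS_PER_CXN] = { {0}, {1}, {2}, {3} };
	struct wrb_context c = { .available = WRBS_PER_CXN };
	struct wrb_handle *h;
	unsigned int i;

	for (i = 0; i < WRBS_PER_CXN; i++)
		c.base[i] = &pool[i];

	h = alloc_wrb(&c);
	printf("got handle %d, %u left\n", h->id, c.available);
	free_wrb(&c, h);
	printf("after free: %u available\n", c.available);
	return 0;
}
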
@@ -540,6 +712,8 @@ void
540free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 712free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
541{ 713{
542 714
715 SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
716 phba->eh_sgl_free_index);
543 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 717 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
544 /* 718 /*
545 * this can happen if clean_task is called on a task that 719 * this can happen if clean_task is called on a task that
@@ -572,10 +746,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
572 u32 resid = 0, exp_cmdsn, max_cmdsn; 746 u32 resid = 0, exp_cmdsn, max_cmdsn;
573 u8 rsp, status, flags; 747 u8 rsp, status, flags;
574 748
575 exp_cmdsn = be32_to_cpu(psol-> 749 exp_cmdsn = (psol->
576 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 750 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
577 & SOL_EXP_CMD_SN_MASK); 751 & SOL_EXP_CMD_SN_MASK);
578 max_cmdsn = be32_to_cpu((psol-> 752 max_cmdsn = ((psol->
579 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32] 753 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
580 & SOL_EXP_CMD_SN_MASK) + 754 & SOL_EXP_CMD_SN_MASK) +
581 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 755 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
@@ -610,9 +784,9 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
610 } 784 }
611 785
612 if (status == SAM_STAT_CHECK_CONDITION) { 786 if (status == SAM_STAT_CHECK_CONDITION) {
787 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
613 sense = sts_bhs->sense_info + sizeof(unsigned short); 788 sense = sts_bhs->sense_info + sizeof(unsigned short);
614 sense_len = 789 sense_len = cpu_to_be16(*slen);
615 cpu_to_be16((unsigned short)(sts_bhs->sense_info[0]));
616 memcpy(task->sc->sense_buffer, sense, 790 memcpy(task->sc->sense_buffer, sense,
617 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 791 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
618 } 792 }
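
For CHECK CONDITION completions the sense area is now interpreted as a 16-bit big-endian length prefix followed by the sense bytes, and the copy is clamped to the midlayer's sense buffer size. A standalone model of reading that prefix and clamping the copy (the buffer size constant is a local stand-in):

/* Standalone model of the sense-data handling above.
 * Compile with: cc -o sense_demo sense_demo.c */
#include <stdio.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96   /* stand-in for SCSI_SENSE_BUFFERSIZE */

int main(void)
{
	unsigned char sense_info[2 + 18] = { 0x00, 0x12 /* 18, big-endian */ };
	unsigned char sense_buffer[SENSE_BUFFERSIZE];
	unsigned int sense_len, copy_len;

	memset(sense_info + 2, 0x70, 18);   /* fake sense bytes */

	/* big-endian 16-bit length prefix (the *slen conversion in the hunk) */
	sense_len = (sense_info[0] << 8) | sense_info[1];
	copy_len = sense_len < SENSE_BUFFERSIZE ? sense_len : SENSE_BUFFERSIZE;

	memcpy(sense_buffer, sense_info + 2, copy_len);
	printf("sense_len=%u copied=%u first=0x%02x\n",
	       sense_len, copy_len, sense_buffer[0]);
	return 0;
}
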
@@ -620,8 +794,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
620 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 794 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
621 & SOL_RES_CNT_MASK) 795 & SOL_RES_CNT_MASK)
622 conn->rxdata_octets += (psol-> 796 conn->rxdata_octets += (psol->
623 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32] 797 dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
624 & SOL_RES_CNT_MASK); 798 & SOL_RES_CNT_MASK);
625 } 799 }
626unmap: 800unmap:
627 scsi_dma_unmap(io_task->scsi_cmnd); 801 scsi_dma_unmap(io_task->scsi_cmnd);
@@ -633,6 +807,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
633 struct iscsi_task *task, struct sol_cqe *psol) 807 struct iscsi_task *task, struct sol_cqe *psol)
634{ 808{
635 struct iscsi_logout_rsp *hdr; 809 struct iscsi_logout_rsp *hdr;
810 struct beiscsi_io_task *io_task = task->dd_data;
636 struct iscsi_conn *conn = beiscsi_conn->conn; 811 struct iscsi_conn *conn = beiscsi_conn->conn;
637 812
638 hdr = (struct iscsi_logout_rsp *)task->hdr; 813 hdr = (struct iscsi_logout_rsp *)task->hdr;
@@ -651,7 +826,7 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
651 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 826 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
652 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 827 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
653 hdr->hlength = 0; 828 hdr->hlength = 0;
654 829 hdr->itt = io_task->libiscsi_itt;
655 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 830 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
656} 831}
657 832
@@ -661,6 +836,7 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
661{ 836{
662 struct iscsi_tm_rsp *hdr; 837 struct iscsi_tm_rsp *hdr;
663 struct iscsi_conn *conn = beiscsi_conn->conn; 838 struct iscsi_conn *conn = beiscsi_conn->conn;
839 struct beiscsi_io_task *io_task = task->dd_data;
664 840
665 hdr = (struct iscsi_tm_rsp *)task->hdr; 841 hdr = (struct iscsi_tm_rsp *)task->hdr;
666 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 842 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -668,11 +844,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
668 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 844 hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
669 32] & SOL_RESP_MASK); 845 32] & SOL_RESP_MASK);
670 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe, 846 hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
671 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK); 847 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
672 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe, 848 hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
673 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) + 849 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
674 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 850 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
675 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 851 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
852 hdr->itt = io_task->libiscsi_itt;
676 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 853 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
677} 854}
678 855
@@ -681,18 +858,36 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
681 struct beiscsi_hba *phba, struct sol_cqe *psol) 858 struct beiscsi_hba *phba, struct sol_cqe *psol)
682{ 859{
683 struct hwi_wrb_context *pwrb_context; 860 struct hwi_wrb_context *pwrb_context;
684 struct wrb_handle *pwrb_handle; 861 struct wrb_handle *pwrb_handle = NULL;
862 struct sgl_handle *psgl_handle = NULL;
685 struct hwi_controller *phwi_ctrlr; 863 struct hwi_controller *phwi_ctrlr;
864 struct iscsi_task *task;
865 struct beiscsi_io_task *io_task;
686 struct iscsi_conn *conn = beiscsi_conn->conn; 866 struct iscsi_conn *conn = beiscsi_conn->conn;
687 struct iscsi_session *session = conn->session; 867 struct iscsi_session *session = conn->session;
688 868
689 phwi_ctrlr = phba->phwi_ctrlr; 869 phwi_ctrlr = phba->phwi_ctrlr;
690 pwrb_context = &phwi_ctrlr->wrb_context[((psol-> 870 if (ring_mode) {
871 psgl_handle = phba->sgl_hndl_array[((psol->
872 dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
873 32] & SOL_ICD_INDEX_MASK) >> 6)];
874 pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
875 task = psgl_handle->task;
876 pwrb_handle = NULL;
877 } else {
878 pwrb_context = &phwi_ctrlr->wrb_context[((psol->
691 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 879 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
692 SOL_CID_MASK) >> 6)]; 880 SOL_CID_MASK) >> 6)];
693 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 881 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
694 dw[offsetof(struct amap_sol_cqe, wrb_index) / 882 dw[offsetof(struct amap_sol_cqe, wrb_index) /
695 32] & SOL_WRB_INDEX_MASK) >> 16)]; 883 32] & SOL_WRB_INDEX_MASK) >> 16)];
884 task = pwrb_handle->pio_handle;
885 }
886
887 io_task = task->dd_data;
888 spin_lock(&phba->mgmt_sgl_lock);
889 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
890 spin_unlock(&phba->mgmt_sgl_lock);
696 spin_lock_bh(&session->lock); 891 spin_lock_bh(&session->lock);
697 free_wrb_handle(phba, pwrb_context, pwrb_handle); 892 free_wrb_handle(phba, pwrb_context, pwrb_handle);
698 spin_unlock_bh(&session->lock); 893 spin_unlock_bh(&session->lock);
@@ -704,6 +899,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
704{ 899{
705 struct iscsi_nopin *hdr; 900 struct iscsi_nopin *hdr;
706 struct iscsi_conn *conn = beiscsi_conn->conn; 901 struct iscsi_conn *conn = beiscsi_conn->conn;
902 struct beiscsi_io_task *io_task = task->dd_data;
707 903
708 hdr = (struct iscsi_nopin *)task->hdr; 904 hdr = (struct iscsi_nopin *)task->hdr;
709 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 905 hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
@@ -715,6 +911,7 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
715 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd) 911 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
716 / 32] & SOL_CMD_WND_MASK) >> 24) - 1); 912 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
717 hdr->opcode = ISCSI_OP_NOOP_IN; 913 hdr->opcode = ISCSI_OP_NOOP_IN;
914 hdr->itt = io_task->libiscsi_itt;
718 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 915 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
719} 916}
720 917
@@ -726,25 +923,33 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
726 struct iscsi_wrb *pwrb = NULL; 923 struct iscsi_wrb *pwrb = NULL;
727 struct hwi_controller *phwi_ctrlr; 924 struct hwi_controller *phwi_ctrlr;
728 struct iscsi_task *task; 925 struct iscsi_task *task;
729 struct beiscsi_io_task *io_task; 926 struct sgl_handle *psgl_handle = NULL;
927 unsigned int type;
730 struct iscsi_conn *conn = beiscsi_conn->conn; 928 struct iscsi_conn *conn = beiscsi_conn->conn;
731 struct iscsi_session *session = conn->session; 929 struct iscsi_session *session = conn->session;
732 930
733 phwi_ctrlr = phba->phwi_ctrlr; 931 phwi_ctrlr = phba->phwi_ctrlr;
734 932 if (ring_mode) {
735 pwrb_context = &phwi_ctrlr-> 933 psgl_handle = phba->sgl_hndl_array[((psol->
736 wrb_context[((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] 934 dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
737 & SOL_CID_MASK) >> 6)]; 935 32] & SOL_ICD_INDEX_MASK) >> 6)];
738 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol-> 936 task = psgl_handle->task;
937 type = psgl_handle->type;
938 } else {
939 pwrb_context = &phwi_ctrlr->
940 wrb_context[((psol->dw[offsetof
941 (struct amap_sol_cqe, cid) / 32]
942 & SOL_CID_MASK) >> 6)];
943 pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
739 dw[offsetof(struct amap_sol_cqe, wrb_index) / 944 dw[offsetof(struct amap_sol_cqe, wrb_index) /
740 32] & SOL_WRB_INDEX_MASK) >> 16)]; 945 32] & SOL_WRB_INDEX_MASK) >> 16)];
741 946 task = pwrb_handle->pio_handle;
742 task = pwrb_handle->pio_handle; 947 pwrb = pwrb_handle->pwrb;
743 io_task = task->dd_data; 948 type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
949 WRB_TYPE_MASK) >> 28;
950 }
744 spin_lock_bh(&session->lock); 951 spin_lock_bh(&session->lock);
745 pwrb = pwrb_handle->pwrb; 952 switch (type) {
746 switch ((pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
747 WRB_TYPE_MASK) >> 28) {
748 case HWH_TYPE_IO: 953 case HWH_TYPE_IO:
749 case HWH_TYPE_IO_RD: 954 case HWH_TYPE_IO_RD:
750 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 955 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
@@ -773,12 +978,21 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
773 break; 978 break;
774 979
775 default: 980 default:
776 shost_printk(KERN_WARNING, phba->shost, 981 if (ring_mode)
777 "wrb_index 0x%x CID 0x%x\n", 982 shost_printk(KERN_WARNING, phba->shost,
778 ((psol->dw[offsetof(struct amap_iscsi_wrb, type) / 983 "In hwi_complete_cmd, unknown type = %d"
779 32] & SOL_WRB_INDEX_MASK) >> 16), 984 "icd_index 0x%x CID 0x%x\n", type,
780 ((psol->dw[offsetof(struct amap_sol_cqe, cid) / 32] 985 ((psol->dw[offsetof(struct amap_sol_cqe_ring,
781 & SOL_CID_MASK) >> 6)); 986 icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
987 psgl_handle->cid);
988 else
989 shost_printk(KERN_WARNING, phba->shost,
990 "In hwi_complete_cmd, unknown type = %d"
991 "wrb_index 0x%x CID 0x%x\n", type,
992 ((psol->dw[offsetof(struct amap_iscsi_wrb,
993 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
994 ((psol->dw[offsetof(struct amap_sol_cqe,
995 cid) / 32] & SOL_CID_MASK) >> 6));
782 break; 996 break;
783 } 997 }
784 998
@@ -1208,40 +1422,55 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1208 hwi_post_async_buffers(phba, pasync_handle->is_header); 1422 hwi_post_async_buffers(phba, pasync_handle->is_header);
1209} 1423}
1210 1424
1211static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba) 1425
1426static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1212{ 1427{
1213 struct hwi_controller *phwi_ctrlr;
1214 struct hwi_context_memory *phwi_context;
1215 struct be_queue_info *cq; 1428 struct be_queue_info *cq;
1216 struct sol_cqe *sol; 1429 struct sol_cqe *sol;
1217 struct dmsg_cqe *dmsg; 1430 struct dmsg_cqe *dmsg;
1218 unsigned int num_processed = 0; 1431 unsigned int num_processed = 0;
1219 unsigned int tot_nump = 0; 1432 unsigned int tot_nump = 0;
1220 struct beiscsi_conn *beiscsi_conn; 1433 struct beiscsi_conn *beiscsi_conn;
1434 struct sgl_handle *psgl_handle = NULL;
1435 struct beiscsi_hba *phba;
1221 1436
1222 phwi_ctrlr = phba->phwi_ctrlr; 1437 cq = pbe_eq->cq;
1223 phwi_context = phwi_ctrlr->phwi_ctxt;
1224 cq = &phwi_context->be_cq;
1225 sol = queue_tail_node(cq); 1438 sol = queue_tail_node(cq);
1439 phba = pbe_eq->phba;
1226 1440
1227 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1441 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1228 CQE_VALID_MASK) { 1442 CQE_VALID_MASK) {
1229 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1443 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1230 1444
1231 beiscsi_conn = phba->conn_table[(u32) (sol-> 1445 if (ring_mode) {
1446 psgl_handle = phba->sgl_hndl_array[((sol->
1447 dw[offsetof(struct amap_sol_cqe_ring,
1448 icd_index) / 32] & SOL_ICD_INDEX_MASK)
1449 >> 6)];
1450 beiscsi_conn = phba->conn_table[psgl_handle->cid];
1451 if (!beiscsi_conn || !beiscsi_conn->ep) {
1452 shost_printk(KERN_WARNING, phba->shost,
1453 "Connection table empty for cid = %d\n",
1454 psgl_handle->cid);
1455 return 0;
1456 }
1457
1458 } else {
1459 beiscsi_conn = phba->conn_table[(u32) (sol->
1232 dw[offsetof(struct amap_sol_cqe, cid) / 32] & 1460 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1233 SOL_CID_MASK) >> 6]; 1461 SOL_CID_MASK) >> 6];
1234 1462
1235 if (!beiscsi_conn || !beiscsi_conn->ep) { 1463 if (!beiscsi_conn || !beiscsi_conn->ep) {
1236 shost_printk(KERN_WARNING, phba->shost, 1464 shost_printk(KERN_WARNING, phba->shost,
1237 "Connection table empty for cid = %d\n", 1465 "Connection table empty for cid = %d\n",
1238 (u32)(sol->dw[offsetof(struct amap_sol_cqe, 1466 (u32)(sol->dw[offsetof(struct amap_sol_cqe,
1239 cid) / 32] & SOL_CID_MASK) >> 6); 1467 cid) / 32] & SOL_CID_MASK) >> 6);
1240 return 0; 1468 return 0;
1469 }
1241 } 1470 }
1242 1471
1243 if (num_processed >= 32) { 1472 if (num_processed >= 32) {
1244 hwi_ring_cq_db(phba, phwi_context->be_cq.id, 1473 hwi_ring_cq_db(phba, cq->id,
1245 num_processed, 0, 0); 1474 num_processed, 0, 0);
1246 tot_nump += num_processed; 1475 tot_nump += num_processed;
1247 num_processed = 0; 1476 num_processed = 0;
@@ -1258,8 +1487,12 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1258 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1487 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1259 break; 1488 break;
1260 case UNSOL_HDR_NOTIFY: 1489 case UNSOL_HDR_NOTIFY:
1490 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
1491 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1492 (struct i_t_dpdu_cqe *)sol);
1493 break;
1261 case UNSOL_DATA_NOTIFY: 1494 case UNSOL_DATA_NOTIFY:
1262 SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR/DATA_NOTIFY\n"); 1495 SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1263 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1496 hwi_process_default_pdu_ring(beiscsi_conn, phba,
1264 (struct i_t_dpdu_cqe *)sol); 1497 (struct i_t_dpdu_cqe *)sol);
1265 break; 1498 break;
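
beiscsi_process_cq() resolves the owning connection either through the SGL handle (ring_mode, indexed by icd_index) or directly from the CID packed into the completion entry, where the field is extracted with a mask and a right shift by 6. A standalone model of that pack/unpack; the mask width here is an illustrative assumption, not the driver's header definition:

/* Standalone model of extracting a packed field from a completion
 * dword, as done for the CID ((dw & SOL_CID_MASK) >> 6).
 * Compile with: cc -o cid_demo cid_demo.c */
#include <stdio.h>

#define SOL_CID_SHIFT 6
#define SOL_CID_MASK  (0x3FFu << SOL_CID_SHIFT)   /* assumed field width */

int main(void)
{
	unsigned int dw = 0;
	unsigned int cid = 42;

	dw |= (cid << SOL_CID_SHIFT) & SOL_CID_MASK;               /* pack */
	printf("cid=%u\n", (dw & SOL_CID_MASK) >> SOL_CID_SHIFT);  /* unpack */
	return 0;
}
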
@@ -1278,13 +1511,21 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1278 case CMD_CXN_KILLED_ITT_INVALID: 1511 case CMD_CXN_KILLED_ITT_INVALID:
1279 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1512 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1280 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1513 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1281 SE_DEBUG(DBG_LVL_1, 1514 if (ring_mode) {
1515 SE_DEBUG(DBG_LVL_1,
1516 "CQ Error notification for cmd.. "
1517 "code %d cid 0x%x\n",
1518 sol->dw[offsetof(struct amap_sol_cqe, code) /
1519 32] & CQE_CODE_MASK, psgl_handle->cid);
1520 } else {
1521 SE_DEBUG(DBG_LVL_1,
1282 "CQ Error notification for cmd.. " 1522 "CQ Error notification for cmd.. "
1283 "code %d cid 0x%x\n", 1523 "code %d cid 0x%x\n",
1284 sol->dw[offsetof(struct amap_sol_cqe, code) / 1524 sol->dw[offsetof(struct amap_sol_cqe, code) /
1285 32] & CQE_CODE_MASK, 1525 32] & CQE_CODE_MASK,
1286 (sol->dw[offsetof(struct amap_sol_cqe, cid) / 1526 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1287 32] & SOL_CID_MASK)); 1527 32] & SOL_CID_MASK));
1528 }
1288 break; 1529 break;
1289 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1530 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1290 SE_DEBUG(DBG_LVL_1, 1531 SE_DEBUG(DBG_LVL_1,
@@ -1306,23 +1547,37 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1306 case CXN_KILLED_OVER_RUN_RESIDUAL: 1547 case CXN_KILLED_OVER_RUN_RESIDUAL:
1307 case CXN_KILLED_UNDER_RUN_RESIDUAL: 1548 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1308 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 1549 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1309 SE_DEBUG(DBG_LVL_1, "CQ Error %d, resetting CID " 1550 if (ring_mode) {
1551 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1552 "0x%x...\n",
1553 sol->dw[offsetof(struct amap_sol_cqe, code) /
1554 32] & CQE_CODE_MASK, psgl_handle->cid);
1555 } else {
1556 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1310 "0x%x...\n", 1557 "0x%x...\n",
1311 sol->dw[offsetof(struct amap_sol_cqe, code) / 1558 sol->dw[offsetof(struct amap_sol_cqe, code) /
1312 32] & CQE_CODE_MASK, 1559 32] & CQE_CODE_MASK,
1313 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1560 sol->dw[offsetof(struct amap_sol_cqe, cid) /
1314 32] & CQE_CID_MASK); 1561 32] & CQE_CID_MASK);
1562 }
1315 iscsi_conn_failure(beiscsi_conn->conn, 1563 iscsi_conn_failure(beiscsi_conn->conn,
1316 ISCSI_ERR_CONN_FAILED); 1564 ISCSI_ERR_CONN_FAILED);
1317 break; 1565 break;
1318 case CXN_KILLED_RST_SENT: 1566 case CXN_KILLED_RST_SENT:
1319 case CXN_KILLED_RST_RCVD: 1567 case CXN_KILLED_RST_RCVD:
1320 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset received/sent " 1568 if (ring_mode) {
1321 "on CID 0x%x...\n", 1569 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1570 "received/sent on CID 0x%x...\n",
1571 sol->dw[offsetof(struct amap_sol_cqe, code) /
1572 32] & CQE_CODE_MASK, psgl_handle->cid);
1573 } else {
1574 SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
1575 "received/sent on CID 0x%x...\n",
1322 sol->dw[offsetof(struct amap_sol_cqe, code) / 1576 sol->dw[offsetof(struct amap_sol_cqe, code) /
1323 32] & CQE_CODE_MASK, 1577 32] & CQE_CODE_MASK,
1324 sol->dw[offsetof(struct amap_sol_cqe, cid) / 1578 sol->dw[offsetof(struct amap_sol_cqe, cid) /
1325 32] & CQE_CID_MASK); 1579 32] & CQE_CID_MASK);
1580 }
1326 iscsi_conn_failure(beiscsi_conn->conn, 1581 iscsi_conn_failure(beiscsi_conn->conn,
1327 ISCSI_ERR_CONN_FAILED); 1582 ISCSI_ERR_CONN_FAILED);
1328 break; 1583 break;
@@ -1344,8 +1599,7 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1344 1599
1345 if (num_processed > 0) { 1600 if (num_processed > 0) {
1346 tot_nump += num_processed; 1601 tot_nump += num_processed;
1347 hwi_ring_cq_db(phba, phwi_context->be_cq.id, num_processed, 1602 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1348 1, 0);
1349 } 1603 }
1350 return tot_nump; 1604 return tot_nump;
1351} 1605}
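
The completion loop acknowledges CQ entries in batches: every 32 processed entries the CQ doorbell is rung without re-arming, and the final ring after the queue drains sets the re-arm flag. A standalone model of that batching, with the doorbell modelled by a printf:

/* Standalone model of the CQ acknowledgement batching.
 * Compile with: cc -o cq_batch_demo cq_batch_demo.c */
#include <stdio.h>

static void ring_cq_db(unsigned int num, int rearm)
{
	printf("ack %u entries, rearm=%d\n", num, rearm);
}

int main(void)
{
	unsigned int i, pending = 70;            /* pretend 70 valid CQEs */
	unsigned int num_processed = 0, tot = 0;

	for (i = 0; i < pending; i++) {
		if (num_processed >= 32) {
			ring_cq_db(num_processed, 0);   /* partial ack, no re-arm */
			tot += num_processed;
			num_processed = 0;
		}
		num_processed++;                        /* handle one completion */
	}
	if (num_processed) {
		tot += num_processed;
		ring_cq_db(num_processed, 1);           /* final ack re-arms the CQ */
	}
	printf("total processed %u\n", tot);
	return 0;
}
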
@@ -1353,21 +1607,30 @@ static unsigned int beiscsi_process_cq(struct beiscsi_hba *phba)
1353static void beiscsi_process_all_cqs(struct work_struct *work) 1607static void beiscsi_process_all_cqs(struct work_struct *work)
1354{ 1608{
1355 unsigned long flags; 1609 unsigned long flags;
1610 struct hwi_controller *phwi_ctrlr;
1611 struct hwi_context_memory *phwi_context;
1612 struct be_eq_obj *pbe_eq;
1356 struct beiscsi_hba *phba = 1613 struct beiscsi_hba *phba =
1357 container_of(work, struct beiscsi_hba, work_cqs); 1614 container_of(work, struct beiscsi_hba, work_cqs);
1358 1615
1616 phwi_ctrlr = phba->phwi_ctrlr;
1617 phwi_context = phwi_ctrlr->phwi_ctxt;
1618 if (phba->msix_enabled)
1619 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1620 else
1621 pbe_eq = &phwi_context->be_eq[0];
1622
1359 if (phba->todo_mcc_cq) { 1623 if (phba->todo_mcc_cq) {
1360 spin_lock_irqsave(&phba->isr_lock, flags); 1624 spin_lock_irqsave(&phba->isr_lock, flags);
1361 phba->todo_mcc_cq = 0; 1625 phba->todo_mcc_cq = 0;
1362 spin_unlock_irqrestore(&phba->isr_lock, flags); 1626 spin_unlock_irqrestore(&phba->isr_lock, flags);
1363 SE_DEBUG(DBG_LVL_1, "MCC Interrupt Not expected \n");
1364 } 1627 }
1365 1628
1366 if (phba->todo_cq) { 1629 if (phba->todo_cq) {
1367 spin_lock_irqsave(&phba->isr_lock, flags); 1630 spin_lock_irqsave(&phba->isr_lock, flags);
1368 phba->todo_cq = 0; 1631 phba->todo_cq = 0;
1369 spin_unlock_irqrestore(&phba->isr_lock, flags); 1632 spin_unlock_irqrestore(&phba->isr_lock, flags);
1370 beiscsi_process_cq(phba); 1633 beiscsi_process_cq(pbe_eq);
1371 } 1634 }
1372} 1635}
1373 1636
@@ -1375,19 +1638,15 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
1375{ 1638{
1376 static unsigned int ret; 1639 static unsigned int ret;
1377 struct beiscsi_hba *phba; 1640 struct beiscsi_hba *phba;
1641 struct be_eq_obj *pbe_eq;
1378 1642
1379 phba = container_of(iop, struct beiscsi_hba, iopoll); 1643 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1380 1644 ret = beiscsi_process_cq(pbe_eq);
1381 ret = beiscsi_process_cq(phba);
1382 if (ret < budget) { 1645 if (ret < budget) {
1383 struct hwi_controller *phwi_ctrlr; 1646 phba = pbe_eq->phba;
1384 struct hwi_context_memory *phwi_context;
1385
1386 phwi_ctrlr = phba->phwi_ctrlr;
1387 phwi_context = phwi_ctrlr->phwi_ctxt;
1388 blk_iopoll_complete(iop); 1647 blk_iopoll_complete(iop);
1389 hwi_ring_eq_db(phba, phwi_context->be_eq.q.id, 0, 1648 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1390 0, 1, 1); 1649 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1391 } 1650 }
1392 return ret; 1651 return ret;
1393} 1652}
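
be_iopoll() now recovers its per-EQ object with container_of() on the blk_iopoll instance and processes completions against the given budget; only when fewer than budget were handled does it complete the poll and re-arm the event queue, otherwise the block layer keeps calling it. A standalone model of that budget decision:

/* Standalone model of the budgeted poll in be_iopoll().
 * Compile with: cc -o iopoll_demo iopoll_demo.c */
#include <stdio.h>

static unsigned int process_cq(unsigned int *pending, unsigned int budget)
{
	unsigned int done = *pending < budget ? *pending : budget;

	*pending -= done;
	return done;
}

int main(void)
{
	unsigned int pending = 25, budget = 10, rounds = 0;

	for (;;) {
		unsigned int ret = process_cq(&pending, budget);

		rounds++;
		if (ret < budget) {
			/* blk_iopoll_complete() + hwi_ring_eq_db(..., rearm) */
			printf("round %u: %u done, re-arm EQ\n", rounds, ret);
			break;
		}
		printf("round %u: %u done, poll again\n", rounds, ret);
	}
	return 0;
}
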
@@ -1537,14 +1796,12 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1537 1796
1538static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 1797static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1539{ 1798{
1540 unsigned int num_cq_pages, num_eq_pages, num_async_pdu_buf_pages; 1799 unsigned int num_cq_pages, num_async_pdu_buf_pages;
1541 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 1800 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1542 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 1801 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1543 1802
1544 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 1803 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1545 sizeof(struct sol_cqe)); 1804 sizeof(struct sol_cqe));
1546 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
1547 sizeof(struct be_eq_entry));
1548 num_async_pdu_buf_pages = 1805 num_async_pdu_buf_pages =
1549 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 1806 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1550 phba->params.defpdu_hdr_sz); 1807 phba->params.defpdu_hdr_sz);
@@ -1565,8 +1822,6 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1565 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 1822 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1566 sizeof(struct hwi_context_memory); 1823 sizeof(struct hwi_context_memory);
1567 1824
1568 phba->mem_req[HWI_MEM_CQ] = num_cq_pages * PAGE_SIZE;
1569 phba->mem_req[HWI_MEM_EQ] = num_eq_pages * PAGE_SIZE;
1570 1825
1571 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 1826 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1572 * (phba->params.wrbs_per_cxn) 1827 * (phba->params.wrbs_per_cxn)
@@ -1751,8 +2006,6 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1751 2006
1752 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2007 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1753 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2008 pwrb_context = &phwi_ctrlr->wrb_context[index];
1754 SE_DEBUG(DBG_LVL_8, "cid=%d pwrb_context=%p \n", index,
1755 pwrb_context);
1756 pwrb_context->pwrb_handle_base = 2009 pwrb_context->pwrb_handle_base =
1757 kzalloc(sizeof(struct wrb_handle *) * 2010 kzalloc(sizeof(struct wrb_handle *) *
1758 phba->params.wrbs_per_cxn, GFP_KERNEL); 2011 phba->params.wrbs_per_cxn, GFP_KERNEL);
@@ -1767,6 +2020,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1767 pwrb_context->pwrb_handle_basestd[j] = 2020 pwrb_context->pwrb_handle_basestd[j] =
1768 pwrb_handle; 2021 pwrb_handle;
1769 pwrb_context->wrb_handles_available++; 2022 pwrb_context->wrb_handles_available++;
2023 pwrb_handle->wrb_index = j;
1770 pwrb_handle++; 2024 pwrb_handle++;
1771 } 2025 }
1772 pwrb_context->free_index = 0; 2026 pwrb_context->free_index = 0;
@@ -1785,6 +2039,7 @@ static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1785 pwrb_context->pwrb_handle_basestd[j] = 2039 pwrb_context->pwrb_handle_basestd[j] =
1786 pwrb_handle; 2040 pwrb_handle;
1787 pwrb_context->wrb_handles_available++; 2041 pwrb_context->wrb_handles_available++;
2042 pwrb_handle->wrb_index = j;
1788 pwrb_handle++; 2043 pwrb_handle++;
1789 } 2044 }
1790 pwrb_context->free_index = 0; 2045 pwrb_context->free_index = 0;
@@ -2042,79 +2297,126 @@ static int be_fill_queue(struct be_queue_info *q,
2042 return 0; 2297 return 0;
2043} 2298}
2044 2299
2045static int beiscsi_create_eq(struct beiscsi_hba *phba, 2300static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2046 struct hwi_context_memory *phwi_context) 2301 struct hwi_context_memory *phwi_context)
2047{ 2302{
2048 unsigned int idx; 2303 unsigned int i, num_eq_pages;
2049 int ret; 2304 int ret, eq_for_mcc;
2050 struct be_queue_info *eq; 2305 struct be_queue_info *eq;
2051 struct be_dma_mem *mem; 2306 struct be_dma_mem *mem;
2052 struct be_mem_descriptor *mem_descr;
2053 void *eq_vaddress; 2307 void *eq_vaddress;
2308 dma_addr_t paddr;
2054 2309
2055 idx = 0; 2310 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2056 eq = &phwi_context->be_eq.q; 2311 sizeof(struct be_eq_entry));
2057 mem = &eq->dma_mem;
2058 mem_descr = phba->init_mem;
2059 mem_descr += HWI_MEM_EQ;
2060 eq_vaddress = mem_descr->mem_array[idx].virtual_address;
2061
2062 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2063 sizeof(struct be_eq_entry), eq_vaddress);
2064 if (ret) {
2065 shost_printk(KERN_ERR, phba->shost,
2066 "be_fill_queue Failed for EQ \n");
2067 return ret;
2068 }
2069 2312
2070 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; 2313 if (phba->msix_enabled)
2314 eq_for_mcc = 1;
2315 else
2316 eq_for_mcc = 0;
2317 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2318 eq = &phwi_context->be_eq[i].q;
2319 mem = &eq->dma_mem;
2320 phwi_context->be_eq[i].phba = phba;
2321 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2322 num_eq_pages * PAGE_SIZE,
2323 &paddr);
2324 if (!eq_vaddress)
2325 goto create_eq_error;
2326
2327 mem->va = eq_vaddress;
2328 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2329 sizeof(struct be_eq_entry), eq_vaddress);
2330 if (ret) {
2331 shost_printk(KERN_ERR, phba->shost,
2332 "be_fill_queue Failed for EQ \n");
2333 goto create_eq_error;
2334 }
2071 2335
2072 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 2336 mem->dma = paddr;
2073 phwi_context->be_eq.cur_eqd); 2337 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2074 if (ret) { 2338 phwi_context->cur_eqd);
2075 shost_printk(KERN_ERR, phba->shost, "beiscsi_cmd_eq_create" 2339 if (ret) {
2076 "Failedfor EQ \n"); 2340 shost_printk(KERN_ERR, phba->shost,
2077 return ret; 2341 "beiscsi_cmd_eq_create"
2342 "Failedfor EQ \n");
2343 goto create_eq_error;
2344 }
2345 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2078 } 2346 }
2079 SE_DEBUG(DBG_LVL_8, "eq id is %d\n", phwi_context->be_eq.q.id);
2080 return 0; 2347 return 0;
2348create_eq_error:
2349 for (i = 0; i < (phba->num_cpus + 1); i++) {
2350 eq = &phwi_context->be_eq[i].q;
2351 mem = &eq->dma_mem;
2352 if (mem->va)
2353 pci_free_consistent(phba->pcidev, num_eq_pages
2354 * PAGE_SIZE,
2355 mem->va, mem->dma);
2356 }
2357 return ret;
2081} 2358}
2082 2359
2083static int beiscsi_create_cq(struct beiscsi_hba *phba, 2360static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2084 struct hwi_context_memory *phwi_context) 2361 struct hwi_context_memory *phwi_context)
2085{ 2362{
2086 unsigned int idx; 2363 unsigned int i, num_cq_pages;
2087 int ret; 2364 int ret;
2088 struct be_queue_info *cq, *eq; 2365 struct be_queue_info *cq, *eq;
2089 struct be_dma_mem *mem; 2366 struct be_dma_mem *mem;
2090 struct be_mem_descriptor *mem_descr; 2367 struct be_eq_obj *pbe_eq;
2091 void *cq_vaddress; 2368 void *cq_vaddress;
2369 dma_addr_t paddr;
2092 2370
2093 idx = 0; 2371 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2094 cq = &phwi_context->be_cq; 2372 sizeof(struct sol_cqe));
2095 eq = &phwi_context->be_eq.q;
2096 mem = &cq->dma_mem;
2097 mem_descr = phba->init_mem;
2098 mem_descr += HWI_MEM_CQ;
2099 cq_vaddress = mem_descr->mem_array[idx].virtual_address;
2100 ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
2101 sizeof(struct sol_cqe), cq_vaddress);
2102 if (ret) {
2103 shost_printk(KERN_ERR, phba->shost,
2104 "be_fill_queue Failed for ISCSI CQ \n");
2105 return ret;
2106 }
2107 2373
2108 mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address; 2374 for (i = 0; i < phba->num_cpus; i++) {
2109 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, false, 0); 2375 cq = &phwi_context->be_cq[i];
2110 if (ret) { 2376 eq = &phwi_context->be_eq[i].q;
2111 shost_printk(KERN_ERR, phba->shost, 2377 pbe_eq = &phwi_context->be_eq[i];
2112 "beiscsi_cmd_eq_create Failed for ISCSI CQ \n"); 2378 pbe_eq->cq = cq;
2113 return ret; 2379 pbe_eq->phba = phba;
2380 mem = &cq->dma_mem;
2381 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2382 num_cq_pages * PAGE_SIZE,
2383 &paddr);
2384 if (!cq_vaddress)
2385 goto create_cq_error;
2386 ret = be_fill_queue(cq, phba->params.icds_per_ctrl / 2,
2387 sizeof(struct sol_cqe), cq_vaddress);
2388 if (ret) {
2389 shost_printk(KERN_ERR, phba->shost,
2390 "be_fill_queue Failed for ISCSI CQ \n");
2391 goto create_cq_error;
2392 }
2393
2394 mem->dma = paddr;
2395 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2396 false, 0);
2397 if (ret) {
2398 shost_printk(KERN_ERR, phba->shost,
2399 "beiscsi_cmd_eq_create"
2400 "Failed for ISCSI CQ \n");
2401 goto create_cq_error;
2402 }
2403 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2404 cq->id, eq->id);
2405 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2114 } 2406 }
2115 SE_DEBUG(DBG_LVL_8, "iscsi cq id is %d\n", phwi_context->be_cq.id);
2116 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2117 return 0; 2407 return 0;
2408
2409create_cq_error:
2410 for (i = 0; i < phba->num_cpus; i++) {
2411 cq = &phwi_context->be_cq[i];
2412 mem = &cq->dma_mem;
2413 if (mem->va)
2414 pci_free_consistent(phba->pcidev, num_cq_pages
2415 * PAGE_SIZE,
2416 mem->va, mem->dma);
2417 }
2418 return ret;
2419
2118} 2420}
2119 2421
2120static int 2422static int
@@ -2132,7 +2434,7 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2132 2434
2133 idx = 0; 2435 idx = 0;
2134 dq = &phwi_context->be_def_hdrq; 2436 dq = &phwi_context->be_def_hdrq;
2135 cq = &phwi_context->be_cq; 2437 cq = &phwi_context->be_cq[0];
2136 mem = &dq->dma_mem; 2438 mem = &dq->dma_mem;
2137 mem_descr = phba->init_mem; 2439 mem_descr = phba->init_mem;
2138 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2440 mem_descr += HWI_MEM_ASYNC_HEADER_RING;
@@ -2176,7 +2478,7 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
2176 2478
2177 idx = 0; 2479 idx = 0;
2178 dataq = &phwi_context->be_def_dataq; 2480 dataq = &phwi_context->be_def_dataq;
2179 cq = &phwi_context->be_cq; 2481 cq = &phwi_context->be_cq[0];
2180 mem = &dataq->dma_mem; 2482 mem = &dataq->dma_mem;
2181 mem_descr = phba->init_mem; 2483 mem_descr = phba->init_mem;
2182 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2484 mem_descr += HWI_MEM_ASYNC_DATA_RING;
@@ -2239,6 +2541,30 @@ beiscsi_post_pages(struct beiscsi_hba *phba)
2239 return 0; 2541 return 0;
2240} 2542}
2241 2543
2544static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2545{
2546 struct be_dma_mem *mem = &q->dma_mem;
2547 if (mem->va)
2548 pci_free_consistent(phba->pcidev, mem->size,
2549 mem->va, mem->dma);
2550}
2551
2552static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2553 u16 len, u16 entry_size)
2554{
2555 struct be_dma_mem *mem = &q->dma_mem;
2556
2557 memset(q, 0, sizeof(*q));
2558 q->len = len;
2559 q->entry_size = entry_size;
2560 mem->size = len * entry_size;
2561 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2562 if (!mem->va)
2563 return -1;
2564 memset(mem->va, 0, mem->size);
2565 return 0;
2566}
2567
2242static int 2568static int
2243beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 2569beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2244 struct hwi_context_memory *phwi_context, 2570 struct hwi_context_memory *phwi_context,
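
be_queue_alloc()/be_queue_free() back the new MCC rings with coherent DMA memory from pci_alloc_consistent() rather than carving them out of the driver's preallocated memory regions. A hedged kernel-style usage sketch mirroring how be_mcc_queues_create() later calls them; the function names below are stand-ins:

/* Hedged usage sketch for the helpers above: allocate a coherent ring
 * for the MCC work queue, then free it on teardown. */
static int sketch_alloc_mcc_ring(struct beiscsi_hba *phba)
{
	struct be_queue_info *q = &phba->ctrl.mcc_obj.q;

	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		return -ENOMEM;   /* coherent DMA allocation failed */
	/* beiscsi_cmd_mccq_create(phba, q, cq) would be issued here */
	return 0;
}

static void sketch_free_mcc_ring(struct beiscsi_hba *phba)
{
	be_queue_free(phba, &phba->ctrl.mcc_obj.q);
}
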
@@ -2328,13 +2654,29 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
2328 } 2654 }
2329} 2655}
2330 2656
2657static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2658{
2659 struct be_queue_info *q;
2660 struct be_ctrl_info *ctrl = &phba->ctrl;
2661
2662 q = &phba->ctrl.mcc_obj.q;
2663 if (q->created)
2664 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2665 be_queue_free(phba, q);
2666
2667 q = &phba->ctrl.mcc_obj.cq;
2668 if (q->created)
2669 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2670 be_queue_free(phba, q);
2671}
2672
2331static void hwi_cleanup(struct beiscsi_hba *phba) 2673static void hwi_cleanup(struct beiscsi_hba *phba)
2332{ 2674{
2333 struct be_queue_info *q; 2675 struct be_queue_info *q;
2334 struct be_ctrl_info *ctrl = &phba->ctrl; 2676 struct be_ctrl_info *ctrl = &phba->ctrl;
2335 struct hwi_controller *phwi_ctrlr; 2677 struct hwi_controller *phwi_ctrlr;
2336 struct hwi_context_memory *phwi_context; 2678 struct hwi_context_memory *phwi_context;
2337 int i; 2679 int i, eq_num;
2338 2680
2339 phwi_ctrlr = phba->phwi_ctrlr; 2681 phwi_ctrlr = phba->phwi_ctrlr;
2340 phwi_context = phwi_ctrlr->phwi_ctxt; 2682 phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -2343,7 +2685,6 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
2343 if (q->created) 2685 if (q->created)
2344 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 2686 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2345 } 2687 }
2346
2347 free_wrb_handles(phba); 2688 free_wrb_handles(phba);
2348 2689
2349 q = &phwi_context->be_def_hdrq; 2690 q = &phwi_context->be_def_hdrq;
@@ -2356,13 +2697,76 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
2356 2697
2357 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 2698 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2358 2699
2359 q = &phwi_context->be_cq; 2700 for (i = 0; i < (phba->num_cpus); i++) {
2360 if (q->created) 2701 q = &phwi_context->be_cq[i];
2361 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 2702 if (q->created)
2703 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2704 }
2705 if (phba->msix_enabled)
2706 eq_num = 1;
2707 else
2708 eq_num = 0;
2709 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2710 q = &phwi_context->be_eq[i].q;
2711 if (q->created)
2712 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2713 }
2714 be_mcc_queues_destroy(phba);
2715}
2362 2716
2363 q = &phwi_context->be_eq.q; 2717static int be_mcc_queues_create(struct beiscsi_hba *phba,
2364 if (q->created) 2718 struct hwi_context_memory *phwi_context)
2365 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 2719{
2720 struct be_queue_info *q, *cq;
2721 struct be_ctrl_info *ctrl = &phba->ctrl;
2722
2723 /* Alloc MCC compl queue */
2724 cq = &phba->ctrl.mcc_obj.cq;
2725 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2726 sizeof(struct be_mcc_compl)))
2727 goto err;
2728 /* Ask BE to create MCC compl queue; */
2729 if (phba->msix_enabled) {
2730 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2731 [phba->num_cpus].q, false, true, 0))
2732 goto mcc_cq_free;
2733 } else {
2734 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2735 false, true, 0))
2736 goto mcc_cq_free;
2737 }
2738
2739 /* Alloc MCC queue */
2740 q = &phba->ctrl.mcc_obj.q;
2741 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2742 goto mcc_cq_destroy;
2743
2744 /* Ask BE to create MCC queue */
2745 if (beiscsi_cmd_mccq_create(phba, q, cq))
2746 goto mcc_q_free;
2747
2748 return 0;
2749
2750mcc_q_free:
2751 be_queue_free(phba, q);
2752mcc_cq_destroy:
2753 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2754mcc_cq_free:
2755 be_queue_free(phba, cq);
2756err:
2757 return -1;
2758}
2759
2760static int find_num_cpus(void)
2761{
2762 int num_cpus = 0;
2763
2764 num_cpus = num_online_cpus();
2765 if (num_cpus >= MAX_CPUS)
2766 num_cpus = MAX_CPUS - 1;
2767
2768 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2769 return num_cpus;
2366} 2770}
2367 2771
2368static int hwi_init_port(struct beiscsi_hba *phba) 2772static int hwi_init_port(struct beiscsi_hba *phba)
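
find_num_cpus() caps the number of I/O event queues at MAX_CPUS - 1, leaving room for the extra MCC event queue, and be_mcc_queues_create() uses the usual goto-based unwind so a failure at any step releases exactly what was already set up. A standalone model of the clamp and the unwind ordering, with stand-in constants and step functions:

/* Standalone model of the CPU clamp and the goto-style unwind.
 * Compile with: cc -o mcc_demo mcc_demo.c */
#include <stdio.h>

#define MAX_CPUS 31   /* stand-in value */

static int clamp_cpus(int online)
{
	return online >= MAX_CPUS ? MAX_CPUS - 1 : online;
}

static int alloc_cq(void)    { puts("alloc cq");   return 0; }
static int create_cq(void)   { puts("create cq");  return 0; }
static int alloc_q(void)     { puts("alloc q");    return -1; } /* fail here */
static void destroy_cq(void) { puts("destroy cq"); }
static void free_cq(void)    { puts("free cq"); }

static int mcc_queues_create(void)
{
	if (alloc_cq())
		goto err;
	if (create_cq())
		goto cq_free;
	if (alloc_q())
		goto cq_destroy;
	return 0;

cq_destroy:
	destroy_cq();   /* undo create_cq() */
cq_free:
	free_cq();      /* undo alloc_cq() */
err:
	return -1;
}

int main(void)
{
	printf("64 online cpus -> %d io event queues\n", clamp_cpus(64));
	printf("mcc create: %d\n", mcc_queues_create());
	return 0;
}
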
@@ -2376,26 +2780,33 @@ static int hwi_init_port(struct beiscsi_hba *phba)
2376 def_pdu_ring_sz = 2780 def_pdu_ring_sz =
2377 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); 2781 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2378 phwi_ctrlr = phba->phwi_ctrlr; 2782 phwi_ctrlr = phba->phwi_ctrlr;
2379
2380 phwi_context = phwi_ctrlr->phwi_ctxt; 2783 phwi_context = phwi_ctrlr->phwi_ctxt;
2381 phwi_context->be_eq.max_eqd = 0; 2784 phwi_context->max_eqd = 0;
2382 phwi_context->be_eq.min_eqd = 0; 2785 phwi_context->min_eqd = 0;
2383 phwi_context->be_eq.cur_eqd = 64; 2786 phwi_context->cur_eqd = 64;
2384 phwi_context->be_eq.enable_aic = false;
2385 be_cmd_fw_initialize(&phba->ctrl); 2787 be_cmd_fw_initialize(&phba->ctrl);
2386 status = beiscsi_create_eq(phba, phwi_context); 2788
2789 status = beiscsi_create_eqs(phba, phwi_context);
2387 if (status != 0) { 2790 if (status != 0) {
2388 shost_printk(KERN_ERR, phba->shost, "EQ not created \n"); 2791 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2389 goto error; 2792 goto error;
2390 } 2793 }
2391 2794
2392 status = mgmt_check_supported_fw(ctrl); 2795 status = be_mcc_queues_create(phba, phwi_context);
2796 if (status != 0)
2797 goto error;
2798
2799 status = mgmt_check_supported_fw(ctrl, phba);
2393 if (status != 0) { 2800 if (status != 0) {
2394 shost_printk(KERN_ERR, phba->shost, 2801 shost_printk(KERN_ERR, phba->shost,
2395 "Unsupported fw version \n"); 2802 "Unsupported fw version \n");
2396 goto error; 2803 goto error;
2397 } 2804 }
2398 2805
2806 if (phba->fw_config.iscsi_features == 0x1)
2807 ring_mode = 1;
2808 else
2809 ring_mode = 0;
2399 status = mgmt_get_fw_config(ctrl, phba); 2810 status = mgmt_get_fw_config(ctrl, phba);
2400 if (status != 0) { 2811 if (status != 0) {
2401 shost_printk(KERN_ERR, phba->shost, 2812 shost_printk(KERN_ERR, phba->shost,
@@ -2403,7 +2814,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
2403 goto error; 2814 goto error;
2404 } 2815 }
2405 2816
2406 status = beiscsi_create_cq(phba, phwi_context); 2817 status = beiscsi_create_cqs(phba, phwi_context);
2407 if (status != 0) { 2818 if (status != 0) {
2408 shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); 2819 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2409 goto error; 2820 goto error;
@@ -2447,7 +2858,6 @@ error:
2447 return -ENOMEM; 2858 return -ENOMEM;
2448} 2859}
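
Within hwi_init_port() the global ring_mode flag is now derived from the firmware's iscsi_features word: a value of 0x1 turns ring_mode on (the per-connection WRB index is then omitted from doorbells and SGL handles are tracked in a flat array), anything else leaves it off. A trivial standalone sketch of that selection:

#include <stdio.h>

static unsigned int ring_mode;

/* Mirror of the feature test in hwi_init_port(); 0x1 is the value checked,
 * any other feature word falls back to the legacy WRB-index path. */
static void pick_ring_mode(unsigned int iscsi_features)
{
	ring_mode = (iscsi_features == 0x1) ? 1 : 0;
}

int main(void)
{
	pick_ring_mode(0x1);
	printf("features 0x1 -> ring_mode %u\n", ring_mode);
	pick_ring_mode(0x0);
	printf("features 0x0 -> ring_mode %u\n", ring_mode);
	return 0;
}
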
2449 2860
2450
2451static int hwi_init_controller(struct beiscsi_hba *phba) 2861static int hwi_init_controller(struct beiscsi_hba *phba)
2452{ 2862{
2453 struct hwi_controller *phwi_ctrlr; 2863 struct hwi_controller *phwi_ctrlr;
@@ -2530,6 +2940,18 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2530 2940
2531 phba->io_sgl_hndl_avbl = 0; 2941 phba->io_sgl_hndl_avbl = 0;
2532 phba->eh_sgl_hndl_avbl = 0; 2942 phba->eh_sgl_hndl_avbl = 0;
2943
2944 if (ring_mode) {
2945 phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2946 phba->params.icds_per_ctrl,
2947 GFP_KERNEL);
2948 if (!phba->sgl_hndl_array) {
2949 shost_printk(KERN_ERR, phba->shost,
2950 "Mem Alloc Failed. Failing to load\n");
2951 return -ENOMEM;
2952 }
2953 }
2954
2533 mem_descr_sglh = phba->init_mem; 2955 mem_descr_sglh = phba->init_mem;
2534 mem_descr_sglh += HWI_MEM_SGLH; 2956 mem_descr_sglh += HWI_MEM_SGLH;
2535 if (1 == mem_descr_sglh->num_elements) { 2957 if (1 == mem_descr_sglh->num_elements) {
@@ -2537,6 +2959,8 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2537 phba->params.ios_per_ctrl, 2959 phba->params.ios_per_ctrl,
2538 GFP_KERNEL); 2960 GFP_KERNEL);
2539 if (!phba->io_sgl_hndl_base) { 2961 if (!phba->io_sgl_hndl_base) {
2962 if (ring_mode)
2963 kfree(phba->sgl_hndl_array);
2540 shost_printk(KERN_ERR, phba->shost, 2964 shost_printk(KERN_ERR, phba->shost,
2541 "Mem Alloc Failed. Failing to load\n"); 2965 "Mem Alloc Failed. Failing to load\n");
2542 return -ENOMEM; 2966 return -ENOMEM;
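
When ring_mode is on, beiscsi_init_sgl_handle() additionally allocates sgl_hndl_array, a flat table of icds_per_ctrl SGL-handle pointers; later in this patch beiscsi_alloc_pdu() files each handle under (sgl_index - iscsi_cid_start) so a completion can be mapped straight back to its handle. A userspace sketch of the sizing and index arithmetic, with illustrative numbers in place of the firmware-reported values:

#include <stdio.h>
#include <stdlib.h>

struct sgl_handle_sketch { int sgl_index; };

int main(void)
{
	unsigned int icds_per_ctrl = 2048;	/* illustrative */
	unsigned int iscsi_cid_start = 64;	/* illustrative */
	struct sgl_handle_sketch **tbl;
	struct sgl_handle_sketch h = { .sgl_index = 100 };

	tbl = calloc(icds_per_ctrl, sizeof(*tbl));
	if (!tbl)
		return EXIT_FAILURE;

	/* store and look up by the offset the ring_mode path uses */
	tbl[h.sgl_index - iscsi_cid_start] = &h;
	printf("slot %u -> sgl_index %d\n",
	       h.sgl_index - iscsi_cid_start,
	       tbl[h.sgl_index - iscsi_cid_start]->sgl_index);

	free(tbl);
	return EXIT_SUCCESS;
}
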
@@ -2656,13 +3080,12 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
2656 struct hwi_context_memory *phwi_context; 3080 struct hwi_context_memory *phwi_context;
2657 struct be_queue_info *eq; 3081 struct be_queue_info *eq;
2658 u8 __iomem *addr; 3082 u8 __iomem *addr;
2659 u32 reg; 3083 u32 reg, i;
2660 u32 enabled; 3084 u32 enabled;
2661 3085
2662 phwi_ctrlr = phba->phwi_ctrlr; 3086 phwi_ctrlr = phba->phwi_ctrlr;
2663 phwi_context = phwi_ctrlr->phwi_ctxt; 3087 phwi_context = phwi_ctrlr->phwi_ctxt;
2664 3088
2665 eq = &phwi_context->be_eq.q;
2666 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3089 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
2667 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3090 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
2668 reg = ioread32(addr); 3091 reg = ioread32(addr);
@@ -2673,9 +3096,11 @@ static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
2673 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3096 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
2674 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr); 3097 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
2675 iowrite32(reg, addr); 3098 iowrite32(reg, addr);
2676 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id); 3099 for (i = 0; i <= phba->num_cpus; i++) {
2677 3100 eq = &phwi_context->be_eq[i].q;
2678 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3101 SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3102 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3103 }
2679 } else 3104 } else
2680 shost_printk(KERN_WARNING, phba->shost, 3105 shost_printk(KERN_WARNING, phba->shost,
2681 "In hwi_enable_intr, Not Enabled \n"); 3106 "In hwi_enable_intr, Not Enabled \n");
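
hwi_enable_intr() now rearms every event queue once the host-interrupt bit is set in the PCICFG interrupt-control register: the loop runs i = 0..phba->num_cpus, covering the per-CPU I/O EQs and the extra MCC EQ, and rings each EQ doorbell. A sketch of that unmask-then-rearm order; the register helpers and the bit position are illustrative, not the driver's definitions.

#include <stdio.h>
#include <stdint.h>

#define HOSTINTR_MASK (1u << 29)	/* illustrative bit position */

static uint32_t fake_reg;		/* stands in for the mapped control register */
static uint32_t readreg(void)    { return fake_reg; }
static void writereg(uint32_t v) { fake_reg = v; }
static void ring_eq_db(int eq_id){ printf("rearm eq %d\n", eq_id); }

int main(void)
{
	int num_cpus = 4, i;
	uint32_t reg = readreg();

	if (!(reg & HOSTINTR_MASK)) {
		writereg(reg | HOSTINTR_MASK);		/* unmask host interrupts */
		for (i = 0; i <= num_cpus; i++)		/* I/O EQs plus the MCC EQ */
			ring_eq_db(i);
	}
	return 0;
}
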
@@ -2720,6 +3145,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
2720 if (hba_setup_cid_tbls(phba)) { 3145 if (hba_setup_cid_tbls(phba)) {
2721 shost_printk(KERN_ERR, phba->shost, 3146 shost_printk(KERN_ERR, phba->shost,
2722 "Failed in hba_setup_cid_tbls\n"); 3147 "Failed in hba_setup_cid_tbls\n");
3148 if (ring_mode)
3149 kfree(phba->sgl_hndl_array);
2723 kfree(phba->io_sgl_hndl_base); 3150 kfree(phba->io_sgl_hndl_base);
2724 kfree(phba->eh_sgl_hndl_base); 3151 kfree(phba->eh_sgl_hndl_base);
2725 goto do_cleanup_ctrlr; 3152 goto do_cleanup_ctrlr;
@@ -2738,17 +3165,25 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
2738 struct hwi_context_memory *phwi_context; 3165 struct hwi_context_memory *phwi_context;
2739 struct be_queue_info *eq; 3166 struct be_queue_info *eq;
2740 struct be_eq_entry *eqe = NULL; 3167 struct be_eq_entry *eqe = NULL;
3168 int i, eq_msix;
2741 3169
2742 phwi_ctrlr = phba->phwi_ctrlr; 3170 phwi_ctrlr = phba->phwi_ctrlr;
2743 phwi_context = phwi_ctrlr->phwi_ctxt; 3171 phwi_context = phwi_ctrlr->phwi_ctxt;
2744 eq = &phwi_context->be_eq.q; 3172 if (phba->msix_enabled)
2745 eqe = queue_tail_node(eq); 3173 eq_msix = 1;
3174 else
3175 eq_msix = 0;
2746 3176
2747 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3177 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
2748 & EQE_VALID_MASK) { 3178 eq = &phwi_context->be_eq[i].q;
2749 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2750 queue_tail_inc(eq);
2751 eqe = queue_tail_node(eq); 3179 eqe = queue_tail_node(eq);
3180
3181 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3182 & EQE_VALID_MASK) {
3183 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3184 queue_tail_inc(eq);
3185 eqe = queue_tail_node(eq);
3186 }
2752 } 3187 }
2753} 3188}
2754 3189
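
hwi_purge_eq() now drains every EQ rather than a single one: for each ring it clears the valid bit of any pending entry, advances the tail, and stops at the first invalid entry. The same drain loop in self-contained form, with the EQ entry reduced to a single flag and the tail wraparound made explicit:

#include <stdio.h>

#define RING_LEN 8

struct eqe_sketch { int valid; };

int main(void)
{
	struct eqe_sketch ring[RING_LEN] = { {1}, {1}, {1}, {0} };
	unsigned int tail = 0, drained = 0;

	while (ring[tail].valid) {
		ring[tail].valid = 0;		/* AMAP_SET_BITS(..., valid, eqe, 0) */
		tail = (tail + 1) % RING_LEN;	/* queue_tail_inc() with wraparound */
		drained++;
	}
	printf("drained %u stale entries, tail now %u\n", drained, tail);
	return 0;
}
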
@@ -2762,6 +3197,8 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
2762 "mgmt_epfw_cleanup FAILED \n"); 3197 "mgmt_epfw_cleanup FAILED \n");
2763 hwi_cleanup(phba); 3198 hwi_cleanup(phba);
2764 hwi_purge_eq(phba); 3199 hwi_purge_eq(phba);
3200 if (ring_mode)
3201 kfree(phba->sgl_hndl_array);
2765 kfree(phba->io_sgl_hndl_base); 3202 kfree(phba->io_sgl_hndl_base);
2766 kfree(phba->eh_sgl_hndl_base); 3203 kfree(phba->eh_sgl_hndl_base);
2767 kfree(phba->cid_array); 3204 kfree(phba->cid_array);
@@ -2846,8 +3283,9 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
2846 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb)); 3283 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
2847 3284
2848 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 3285 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
2849 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) << 3286 if (!ring_mode)
2850 DB_DEF_PDU_WRB_INDEX_SHIFT; 3287 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3288 << DB_DEF_PDU_WRB_INDEX_SHIFT;
2851 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 3289 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
2852 3290
2853 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 3291 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
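
The doorbell word posted to DB_TXULP0_OFFSET carries the connection id in its low bits, the WRB index in its own field (omitted entirely when ring_mode is on), and a number-posted count of one. A standalone sketch of that packing; the mask and shift values are illustrative stand-ins for the DB_* constants, not copied from be_main.h:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the DB_* constants. */
#define CID_MASK		0xFFFF
#define WRB_INDEX_MASK		0x00FF
#define WRB_INDEX_SHIFT		16
#define NUM_POSTED_SHIFT	24

static uint32_t wrb_post_doorbell(uint16_t cid, uint8_t wrb_index, int ring_mode)
{
	uint32_t doorbell = 0;

	doorbell |= cid & CID_MASK;
	if (!ring_mode)
		doorbell |= (uint32_t)(wrb_index & WRB_INDEX_MASK) << WRB_INDEX_SHIFT;
	doorbell |= 1u << NUM_POSTED_SHIFT;	/* one WRB posted */
	return doorbell;
}

int main(void)
{
	printf("legacy: 0x%08x  ring_mode: 0x%08x\n",
	       wrb_post_doorbell(5, 3, 0), wrb_post_doorbell(5, 3, 1));
	return 0;
}
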
@@ -2856,7 +3294,7 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
2856static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 3294static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
2857 int *index, int *age) 3295 int *index, int *age)
2858{ 3296{
2859 *index = be32_to_cpu(itt) >> 16; 3297 *index = (int)itt;
2860 if (age) 3298 if (age)
2861 *age = conn->session->age; 3299 *age = conn->session->age;
2862} 3300}
@@ -2885,15 +3323,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2885 3323
2886 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 3324 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
2887 GFP_KERNEL, &paddr); 3325 GFP_KERNEL, &paddr);
2888
2889 if (!io_task->cmd_bhs) 3326 if (!io_task->cmd_bhs)
2890 return -ENOMEM; 3327 return -ENOMEM;
2891
2892 io_task->bhs_pa.u.a64.address = paddr; 3328 io_task->bhs_pa.u.a64.address = paddr;
3329 io_task->libiscsi_itt = (itt_t)task->itt;
2893 io_task->pwrb_handle = alloc_wrb_handle(phba, 3330 io_task->pwrb_handle = alloc_wrb_handle(phba,
2894 beiscsi_conn->beiscsi_conn_cid, 3331 beiscsi_conn->beiscsi_conn_cid,
2895 task->itt); 3332 task->itt);
2896 io_task->pwrb_handle->pio_handle = task;
2897 io_task->conn = beiscsi_conn; 3333 io_task->conn = beiscsi_conn;
2898 3334
2899 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 3335 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
@@ -2905,7 +3341,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2905 spin_unlock(&phba->io_sgl_lock); 3341 spin_unlock(&phba->io_sgl_lock);
2906 if (!io_task->psgl_handle) 3342 if (!io_task->psgl_handle)
2907 goto free_hndls; 3343 goto free_hndls;
2908
2909 } else { 3344 } else {
2910 io_task->scsi_cmnd = NULL; 3345 io_task->scsi_cmnd = NULL;
2911 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 3346 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
@@ -2932,8 +3367,18 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
2932 goto free_hndls; 3367 goto free_hndls;
2933 } 3368 }
2934 } 3369 }
2935 itt = (itt_t) cpu_to_be32(((unsigned int)task->itt << 16) | 3370 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
2936 (unsigned int)(io_task->psgl_handle->sgl_index)); 3371 wrb_index << 16) | (unsigned int)
3372 (io_task->psgl_handle->sgl_index));
3373 if (ring_mode) {
3374 phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
3375 phba->fw_config.iscsi_cid_start] =
3376 io_task->psgl_handle;
3377 io_task->psgl_handle->task = task;
3378 io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid;
3379 } else
3380 io_task->pwrb_handle->pio_handle = task;
3381
2937 io_task->cmd_bhs->iscsi_hdr.itt = itt; 3382 io_task->cmd_bhs->iscsi_hdr.itt = itt;
2938 return 0; 3383 return 0;
2939 3384
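
beiscsi_alloc_pdu() now builds the wire itt from (pwrb_handle->wrb_index << 16) | sgl_index instead of from task->itt, and beiscsi_parse_pdu() accordingly hands libiscsi back the raw task->itt. The TMF case later in this patch relies on the same convention, recovering the aborted task's WRB index from the upper 16 bits of the byte-swapped rtt. A small sketch of the pack/unpack arithmetic (the cpu_to_be32()/be32_to_cpu() swaps the driver applies are left out for clarity):

#include <stdio.h>
#include <stdint.h>

/* Pack the handle indices the way beiscsi_alloc_pdu() builds the wire itt. */
static uint32_t pack_itt(uint16_t wrb_index, uint16_t sgl_index)
{
	return ((uint32_t)wrb_index << 16) | sgl_index;
}

int main(void)
{
	uint32_t itt = pack_itt(7, 300);

	/* what the TMF path recovers from the (byte-swapped) rtt */
	printf("wrb_index = %u\n", itt >> 16);
	/* the SGL handle index sits in the low 16 bits */
	printf("sgl_index = %u\n", itt & 0xFFFF);
	return 0;
}
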
@@ -3006,7 +3451,6 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3006 io_task->bhs_len = sizeof(struct be_cmd_bhs); 3451 io_task->bhs_len = sizeof(struct be_cmd_bhs);
3007 3452
3008 if (writedir) { 3453 if (writedir) {
3009 SE_DEBUG(DBG_LVL_4, " WRITE Command \t");
3010 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48); 3454 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3011 AMAP_SET_BITS(struct amap_pdu_data_out, itt, 3455 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3012 &io_task->cmd_bhs->iscsi_data_pdu, 3456 &io_task->cmd_bhs->iscsi_data_pdu,
@@ -3016,11 +3460,18 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3016 ISCSI_OPCODE_SCSI_DATA_OUT); 3460 ISCSI_OPCODE_SCSI_DATA_OUT);
3017 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit, 3461 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3018 &io_task->cmd_bhs->iscsi_data_pdu, 1); 3462 &io_task->cmd_bhs->iscsi_data_pdu, 1);
3019 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); 3463 if (ring_mode)
3464 io_task->psgl_handle->type = INI_WR_CMD;
3465 else
3466 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3467 INI_WR_CMD);
3020 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 3468 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3021 } else { 3469 } else {
3022 SE_DEBUG(DBG_LVL_4, "READ Command \t"); 3470 if (ring_mode)
3023 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); 3471 io_task->psgl_handle->type = INI_RD_CMD;
3472 else
3473 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3474 INI_RD_CMD);
3024 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 3475 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3025 } 3476 }
3026 memcpy(&io_task->cmd_bhs->iscsi_data_pdu. 3477 memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
@@ -3045,7 +3496,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3045 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 3496 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3046 3497
3047 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 3498 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3048 doorbell |= (io_task->pwrb_handle->wrb_index & 3499 if (!ring_mode)
3500 doorbell |= (io_task->pwrb_handle->wrb_index &
3049 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 3501 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3050 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 3502 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3051 3503
@@ -3059,10 +3511,16 @@ static int beiscsi_mtask(struct iscsi_task *task)
3059 struct iscsi_conn *conn = task->conn; 3511 struct iscsi_conn *conn = task->conn;
3060 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 3512 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3061 struct beiscsi_hba *phba = beiscsi_conn->phba; 3513 struct beiscsi_hba *phba = beiscsi_conn->phba;
3514 struct iscsi_session *session;
3062 struct iscsi_wrb *pwrb = NULL; 3515 struct iscsi_wrb *pwrb = NULL;
3516 struct hwi_controller *phwi_ctrlr;
3517 struct hwi_wrb_context *pwrb_context;
3518 struct wrb_handle *pwrb_handle;
3063 unsigned int doorbell = 0; 3519 unsigned int doorbell = 0;
3520 unsigned int i, cid;
3064 struct iscsi_task *aborted_task; 3521 struct iscsi_task *aborted_task;
3065 3522
3523 cid = beiscsi_conn->beiscsi_conn_cid;
3066 pwrb = io_task->pwrb_handle->pwrb; 3524 pwrb = io_task->pwrb_handle->pwrb;
3067 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 3525 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3068 be32_to_cpu(task->cmdsn)); 3526 be32_to_cpu(task->cmdsn));
@@ -3073,38 +3531,63 @@ static int beiscsi_mtask(struct iscsi_task *task)
3073 3531
3074 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 3532 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3075 case ISCSI_OP_LOGIN: 3533 case ISCSI_OP_LOGIN:
3076 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, TGT_DM_CMD); 3534 if (ring_mode)
3535 io_task->psgl_handle->type = TGT_DM_CMD;
3536 else
3537 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3538 TGT_DM_CMD);
3077 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3539 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3078 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 3540 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3079 hwi_write_buffer(pwrb, task); 3541 hwi_write_buffer(pwrb, task);
3080 break; 3542 break;
3081 case ISCSI_OP_NOOP_OUT: 3543 case ISCSI_OP_NOOP_OUT:
3082 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); 3544 if (ring_mode)
3545 io_task->psgl_handle->type = INI_RD_CMD;
3546 else
3547 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3548 INI_RD_CMD);
3083 hwi_write_buffer(pwrb, task); 3549 hwi_write_buffer(pwrb, task);
3084 break; 3550 break;
3085 case ISCSI_OP_TEXT: 3551 case ISCSI_OP_TEXT:
3086 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_WR_CMD); 3552 if (ring_mode)
3553 io_task->psgl_handle->type = INI_WR_CMD;
3554 else
3555 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3556 INI_WR_CMD);
3087 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 3557 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3088 hwi_write_buffer(pwrb, task); 3558 hwi_write_buffer(pwrb, task);
3089 break; 3559 break;
3090 case ISCSI_OP_SCSI_TMFUNC: 3560 case ISCSI_OP_SCSI_TMFUNC:
3091 aborted_task = iscsi_itt_to_task(conn, 3561 session = conn->session;
3092 ((struct iscsi_tm *)task->hdr)->rtt); 3562 i = ((struct iscsi_tm *)task->hdr)->rtt;
3563 phwi_ctrlr = phba->phwi_ctrlr;
3564 pwrb_context = &phwi_ctrlr->wrb_context[cid];
3565 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3566 >> 16];
3567 aborted_task = pwrb_handle->pio_handle;
3093 if (!aborted_task) 3568 if (!aborted_task)
3094 return 0; 3569 return 0;
3570
3095 aborted_io_task = aborted_task->dd_data; 3571 aborted_io_task = aborted_task->dd_data;
3096 if (!aborted_io_task->scsi_cmnd) 3572 if (!aborted_io_task->scsi_cmnd)
3097 return 0; 3573 return 0;
3098 3574
3099 mgmt_invalidate_icds(phba, 3575 mgmt_invalidate_icds(phba,
3100 aborted_io_task->psgl_handle->sgl_index, 3576 aborted_io_task->psgl_handle->sgl_index,
3101 beiscsi_conn->beiscsi_conn_cid); 3577 cid);
3102 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_TMF_CMD); 3578 if (ring_mode)
3579 io_task->psgl_handle->type = INI_TMF_CMD;
3580 else
3581 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3582 INI_TMF_CMD);
3103 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3583 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3104 hwi_write_buffer(pwrb, task); 3584 hwi_write_buffer(pwrb, task);
3105 break; 3585 break;
3106 case ISCSI_OP_LOGOUT: 3586 case ISCSI_OP_LOGOUT:
3107 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 3587 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3588 if (ring_mode)
3589 io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
3590 else
3108 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 3591 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3109 HWH_TYPE_LOGOUT); 3592 HWH_TYPE_LOGOUT);
3110 hwi_write_buffer(pwrb, task); 3593 hwi_write_buffer(pwrb, task);
@@ -3122,8 +3605,9 @@ static int beiscsi_mtask(struct iscsi_task *task)
3122 io_task->pwrb_handle->nxt_wrb_index); 3605 io_task->pwrb_handle->nxt_wrb_index);
3123 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 3606 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3124 3607
3125 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 3608 doorbell |= cid & DB_WRB_POST_CID_MASK;
3126 doorbell |= (io_task->pwrb_handle->wrb_index & 3609 if (!ring_mode)
3610 doorbell |= (io_task->pwrb_handle->wrb_index &
3127 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 3611 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3128 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 3612 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3129 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 3613 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
@@ -3165,9 +3649,14 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
3165 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); 3649 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3166} 3650}
3167 3651
3652
3168static void beiscsi_remove(struct pci_dev *pcidev) 3653static void beiscsi_remove(struct pci_dev *pcidev)
3169{ 3654{
3170 struct beiscsi_hba *phba = NULL; 3655 struct beiscsi_hba *phba = NULL;
3656 struct hwi_controller *phwi_ctrlr;
3657 struct hwi_context_memory *phwi_context;
3658 struct be_eq_obj *pbe_eq;
3659 unsigned int i, msix_vec;
3171 3660
3172 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 3661 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3173 if (!phba) { 3662 if (!phba) {
@@ -3175,12 +3664,24 @@ static void beiscsi_remove(struct pci_dev *pcidev)
3175 return; 3664 return;
3176 } 3665 }
3177 3666
3667 phwi_ctrlr = phba->phwi_ctrlr;
3668 phwi_context = phwi_ctrlr->phwi_ctxt;
3178 hwi_disable_intr(phba); 3669 hwi_disable_intr(phba);
3179 if (phba->pcidev->irq) 3670 if (phba->msix_enabled) {
3180 free_irq(phba->pcidev->irq, phba); 3671 for (i = 0; i <= phba->num_cpus; i++) {
3672 msix_vec = phba->msix_entries[i].vector;
3673 free_irq(msix_vec, &phwi_context->be_eq[i]);
3674 }
3675 } else
3676 if (phba->pcidev->irq)
3677 free_irq(phba->pcidev->irq, phba);
3678 pci_disable_msix(phba->pcidev);
3181 destroy_workqueue(phba->wq); 3679 destroy_workqueue(phba->wq);
3182 if (blk_iopoll_enabled) 3680 if (blk_iopoll_enabled)
3183 blk_iopoll_disable(&phba->iopoll); 3681 for (i = 0; i < phba->num_cpus; i++) {
3682 pbe_eq = &phwi_context->be_eq[i];
3683 blk_iopoll_disable(&pbe_eq->iopoll);
3684 }
3184 3685
3185 beiscsi_clean_port(phba); 3686 beiscsi_clean_port(phba);
3186 beiscsi_free_mem(phba); 3687 beiscsi_free_mem(phba);
@@ -3194,11 +3695,29 @@ static void beiscsi_remove(struct pci_dev *pcidev)
3194 iscsi_host_free(phba->shost); 3695 iscsi_host_free(phba->shost);
3195} 3696}
3196 3697
3698static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3699{
3700 int i, status;
3701
3702 for (i = 0; i <= phba->num_cpus; i++)
3703 phba->msix_entries[i].entry = i;
3704
3705 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3706 (phba->num_cpus + 1));
3707 if (!status)
3708 phba->msix_enabled = true;
3709
3710 return;
3711}
3712
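
beiscsi_msix_enable() requests num_cpus + 1 vectors, numbering the entries 0..num_cpus: the first num_cpus vectors serve the per-CPU I/O event queues and the last one serves the MCC event queue; msix_enabled is set only when pci_enable_msix() grants the full set. A sketch of the entry numbering and the resulting vector-to-EQ mapping, with the entry structure reduced to its index:

#include <stdio.h>

#define NVEC_MAX 32			/* illustrative upper bound */

struct msix_entry_sketch { int entry; };

int main(void)
{
	int num_cpus = 4, i;
	struct msix_entry_sketch entries[NVEC_MAX];

	for (i = 0; i <= num_cpus; i++)
		entries[i].entry = i;

	for (i = 0; i < num_cpus; i++)
		printf("vector %d -> I/O EQ %d\n", entries[i].entry, i);
	printf("vector %d -> MCC EQ\n", entries[num_cpus].entry);
	return 0;
}
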
3197static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, 3713static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3198 const struct pci_device_id *id) 3714 const struct pci_device_id *id)
3199{ 3715{
3200 struct beiscsi_hba *phba = NULL; 3716 struct beiscsi_hba *phba = NULL;
3201 int ret; 3717 struct hwi_controller *phwi_ctrlr;
3718 struct hwi_context_memory *phwi_context;
3719 struct be_eq_obj *pbe_eq;
3720 int ret, msix_vec, num_cpus, i;
3202 3721
3203 ret = beiscsi_enable_pci(pcidev); 3722 ret = beiscsi_enable_pci(pcidev);
3204 if (ret < 0) { 3723 if (ret < 0) {
@@ -3213,8 +3732,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3213 " Failed in beiscsi_hba_alloc \n"); 3732 " Failed in beiscsi_hba_alloc \n");
3214 goto disable_pci; 3733 goto disable_pci;
3215 } 3734 }
3735 SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3216 3736
3217 pci_set_drvdata(pcidev, phba); 3737 pci_set_drvdata(pcidev, phba);
3738 if (enable_msix)
3739 num_cpus = find_num_cpus();
3740 else
3741 num_cpus = 1;
3742 phba->num_cpus = num_cpus;
3743 SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3744
3745 if (enable_msix)
3746 beiscsi_msix_enable(phba);
3218 ret = be_ctrl_init(phba, pcidev); 3747 ret = be_ctrl_init(phba, pcidev);
3219 if (ret) { 3748 if (ret) {
3220 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3749 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3235,7 +3764,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3235 3764
3236 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", 3765 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3237 phba->shost->host_no); 3766 phba->shost->host_no);
3238 phba->wq = create_singlethread_workqueue(phba->wq_name); 3767 phba->wq = create_workqueue(phba->wq_name);
3239 if (!phba->wq) { 3768 if (!phba->wq) {
3240 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3769 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3241 "Failed to allocate work queue\n"); 3770 "Failed to allocate work queue\n");
@@ -3244,11 +3773,16 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3244 3773
3245 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); 3774 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3246 3775
3776 phwi_ctrlr = phba->phwi_ctrlr;
3777 phwi_context = phwi_ctrlr->phwi_ctxt;
3247 if (blk_iopoll_enabled) { 3778 if (blk_iopoll_enabled) {
3248 blk_iopoll_init(&phba->iopoll, be_iopoll_budget, be_iopoll); 3779 for (i = 0; i < phba->num_cpus; i++) {
3249 blk_iopoll_enable(&phba->iopoll); 3780 pbe_eq = &phwi_context->be_eq[i];
3781 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3782 be_iopoll);
3783 blk_iopoll_enable(&pbe_eq->iopoll);
3784 }
3250 } 3785 }
3251
3252 ret = beiscsi_init_irqs(phba); 3786 ret = beiscsi_init_irqs(phba);
3253 if (ret < 0) { 3787 if (ret < 0) {
3254 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 3788 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
@@ -3261,17 +3795,26 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3261 "Failed to hwi_enable_intr\n"); 3795 "Failed to hwi_enable_intr\n");
3262 goto free_ctrlr; 3796 goto free_ctrlr;
3263 } 3797 }
3264
3265 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n"); 3798 SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3266 return 0; 3799 return 0;
3267 3800
3268free_ctrlr: 3801free_ctrlr:
3269 if (phba->pcidev->irq) 3802 if (phba->msix_enabled) {
3270 free_irq(phba->pcidev->irq, phba); 3803 for (i = 0; i <= phba->num_cpus; i++) {
3804 msix_vec = phba->msix_entries[i].vector;
3805 free_irq(msix_vec, &phwi_context->be_eq[i]);
3806 }
3807 } else
3808 if (phba->pcidev->irq)
3809 free_irq(phba->pcidev->irq, phba);
3810 pci_disable_msix(phba->pcidev);
3271free_blkenbld: 3811free_blkenbld:
3272 destroy_workqueue(phba->wq); 3812 destroy_workqueue(phba->wq);
3273 if (blk_iopoll_enabled) 3813 if (blk_iopoll_enabled)
3274 blk_iopoll_disable(&phba->iopoll); 3814 for (i = 0; i < phba->num_cpus; i++) {
3815 pbe_eq = &phwi_context->be_eq[i];
3816 blk_iopoll_disable(&pbe_eq->iopoll);
3817 }
3275free_twq: 3818free_twq:
3276 beiscsi_clean_port(phba); 3819 beiscsi_clean_port(phba);
3277 beiscsi_free_mem(phba); 3820 beiscsi_free_mem(phba);
@@ -3316,7 +3859,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
3316 ISCSI_USERNAME | ISCSI_PASSWORD | 3859 ISCSI_USERNAME | ISCSI_PASSWORD |
3317 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | 3860 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3318 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | 3861 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3319 ISCSI_LU_RESET_TMO | 3862 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
3320 ISCSI_PING_TMO | ISCSI_RECV_TMO | 3863 ISCSI_PING_TMO | ISCSI_RECV_TMO |
3321 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, 3864 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3322 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | 3865 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
@@ -3351,6 +3894,7 @@ static struct pci_driver beiscsi_pci_driver = {
3351 .id_table = beiscsi_pci_id_table 3894 .id_table = beiscsi_pci_id_table
3352}; 3895};
3353 3896
3897
3354static int __init beiscsi_module_init(void) 3898static int __init beiscsi_module_init(void)
3355{ 3899{
3356 int ret; 3900 int ret;
@@ -3373,6 +3917,7 @@ static int __init beiscsi_module_init(void)
3373 "beiscsi pci driver.\n"); 3917 "beiscsi pci driver.\n");
3374 goto unregister_iscsi_transport; 3918 goto unregister_iscsi_transport;
3375 } 3919 }
3920 ring_mode = 0;
3376 return 0; 3921 return 0;
3377 3922
3378unregister_iscsi_transport: 3923unregister_iscsi_transport: