author     James Smart <james.smart@emulex.com>  2010-03-15 11:25:20 -0400
committer  James Bottomley <James.Bottomley@suse.de>  2010-04-11 10:23:50 -0400
commit     7a4702774381103e936cae09ec12301090c6c212 (patch)
tree       537fcd43fb911d9841d2d3ba3790b135bc6aa907 /drivers/scsi/lpfc/lpfc_bsg.c
parent     cb5172eafd9ffdab6bb7b1eec628ea706d5817c8 (diff)
[SCSI] lpfc 8.3.11: Driver management improvements via BSG
- Add BSG support for PCI loopback testing.
- Add BSG support for extended mailbox commands.

Signed-off-by: Alex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 257
1 file changed, 202 insertions(+), 55 deletions(-)
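For orientation before the diff: the new extended-mailbox path expects the BSG vendor request payload to carry the raw mailbox image first, followed by any inbound extended data words, with the word counts and offset described by a dfc_mbox_req header (its mbOffset, inExtWLen and outWxtWLen fields appear in the hunks below). The sketch that follows shows how an application might pack such a payload; every name not taken from the patch is an invented stand-in, not the actual Emulex management library.

/* Hypothetical userspace sketch: pack the request payload consumed by
 * lpfc_bsg_issue_mbox() below.  Layout assumed from the patch: a raw
 * mailbox image, then inExtWLen words of extended data.  app_mbox_req,
 * MBOX_IMAGE_BYTES and pack_mbox_payload are illustrative assumptions.
 */
#include <stdint.h>
#include <string.h>

#define MBOX_IMAGE_BYTES 256	/* placeholder for sizeof(MAILBOX_t) */

struct app_mbox_req {		/* stands in for struct dfc_mbox_req */
	uint32_t command;	/* vendor sub-command */
	uint32_t mbOffset;	/* offset handed to the adapter */
	uint32_t inExtWLen;	/* extended words sent to the device */
	uint32_t outWxtWLen;	/* extended words expected back */
};

/* Returns the number of payload bytes written into buf. */
static size_t pack_mbox_payload(uint8_t *buf, const uint8_t *mbox_image,
				const uint32_t *ext_in,
				const struct app_mbox_req *req)
{
	size_t len = MBOX_IMAGE_BYTES;

	memcpy(buf, mbox_image, MBOX_IMAGE_BYTES);	/* mailbox first */
	if (req->inExtWLen) {				/* then inbound extended words */
		memcpy(buf + len, ext_in,
		       req->inExtWLen * sizeof(uint32_t));
		len += req->inExtWLen * sizeof(uint32_t);
	}
	return len;
}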
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index d62b3e467926..92ad202a9380 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -79,6 +79,12 @@ struct lpfc_bsg_iocb {
 struct lpfc_bsg_mbox {
 	LPFC_MBOXQ_t *pmboxq;
 	MAILBOX_t *mb;
+	struct lpfc_dmabuf *rxbmp; /* for BIU diags */
+	struct lpfc_dmabufext *dmp; /* for BIU diags */
+	uint8_t *ext; /* extended mailbox data */
+	uint32_t mbOffset; /* from app */
+	uint32_t inExtWLen; /* from app */
+	uint32_t outWxtWLen; /* from app */
 
 	/* job waiting for this mbox command to finish */
 	struct fc_bsg_job *set_job;
@@ -2377,35 +2383,68 @@ void
 lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct bsg_job_data *dd_data;
-	MAILBOX_t *pmb;
-	MAILBOX_t *mb;
 	struct fc_bsg_job *job;
 	uint32_t size;
 	unsigned long flags;
+	uint8_t *to;
+	uint8_t *from;
 
 	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	dd_data = pmboxq->context1;
+	/* job already timed out? */
 	if (!dd_data) {
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		return;
 	}
 
-	pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
-	mb = dd_data->context_un.mbox.mb;
+	/* build the outgoing buffer to do an sg copy
+	 * the format is the response mailbox followed by any extended
+	 * mailbox data
+	 */
+	from = (uint8_t *)&pmboxq->u.mb;
+	to = (uint8_t *)dd_data->context_un.mbox.mb;
+	memcpy(to, from, sizeof(MAILBOX_t));
+	/* copy the extended data if any, count is in words */
+	if (dd_data->context_un.mbox.outWxtWLen) {
+		from = (uint8_t *)dd_data->context_un.mbox.ext;
+		to += sizeof(MAILBOX_t);
+		memcpy(to, from,
+			dd_data->context_un.mbox.outWxtWLen * sizeof(uint32_t));
+	}
+
+	from = (uint8_t *)dd_data->context_un.mbox.mb;
 	job = dd_data->context_un.mbox.set_job;
-	memcpy(mb, pmb, sizeof(*pmb));
-	size = job->request_payload.payload_len;
+	size = job->reply_payload.payload_len;
 	job->reply->reply_payload_rcv_len =
 		sg_copy_from_buffer(job->reply_payload.sg_list,
 				job->reply_payload.sg_cnt,
-				mb, size);
+				from, size);
 	job->reply->result = 0;
+
 	dd_data->context_un.mbox.set_job = NULL;
 	job->dd_data = NULL;
 	job->job_done(job);
+	/* need to hold the lock until we call job done to hold off
+	 * the timeout handler returning to the midlayer while
+	 * we are still processing the job
+	 */
 	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+
+	kfree(dd_data->context_un.mbox.mb);
 	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
-	kfree(mb);
+	kfree(dd_data->context_un.mbox.ext);
+	if (dd_data->context_un.mbox.dmp) {
+		dma_free_coherent(&phba->pcidev->dev,
+			dd_data->context_un.mbox.dmp->size,
+			dd_data->context_un.mbox.dmp->dma.virt,
+			dd_data->context_un.mbox.dmp->dma.phys);
+		kfree(dd_data->context_un.mbox.dmp);
+	}
+	if (dd_data->context_un.mbox.rxbmp) {
+		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
+			dd_data->context_un.mbox.rxbmp->phys);
+		kfree(dd_data->context_un.mbox.rxbmp);
+	}
 	kfree(dd_data);
 	return;
 }
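The completion handler above lays the reply out as the raw response mailbox followed by outWxtWLen words of extended data before sg-copying it back to the job. A consumer of that reply would split the buffer the same way; a minimal sketch under the same layout assumption (MBOX_IMAGE_BYTES again stands in for sizeof(MAILBOX_t)):

/* Sketch: unpack the reply laid out by lpfc_bsg_wake_mbox_wait() above.
 * Layout taken from the handler: mailbox image, then extended words.
 * MBOX_IMAGE_BYTES is an assumed stand-in for sizeof(MAILBOX_t).
 */
#include <stdint.h>
#include <string.h>

#define MBOX_IMAGE_BYTES 256

static void unpack_mbox_reply(const uint8_t *reply, uint32_t out_wlen,
			      uint8_t *mbox_image, uint32_t *ext_out)
{
	memcpy(mbox_image, reply, MBOX_IMAGE_BYTES);
	if (out_wlen)
		memcpy(ext_out, reply + MBOX_IMAGE_BYTES,
		       out_wlen * sizeof(uint32_t));
}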
@@ -2468,6 +2507,7 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 	case MBX_WRITE_EVENT_LOG:
 	case MBX_PORT_CAPABILITIES:
 	case MBX_PORT_IOV_CONTROL:
+	case MBX_RUN_BIU_DIAG64:
 		break;
 	case MBX_SET_VARIABLE:
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -2482,7 +2522,6 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
 			phba->fc_topology = TOPOLOGY_PT_PT;
 		}
 		break;
-	case MBX_RUN_BIU_DIAG64:
 	case MBX_READ_EVENT_LOG:
 	case MBX_READ_SPARM64:
 	case MBX_READ_LA:
@@ -2518,97 +2557,199 @@ static uint32_t
 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	struct lpfc_vport *vport)
 {
-	LPFC_MBOXQ_t *pmboxq;
-	MAILBOX_t *pmb;
-	MAILBOX_t *mb;
-	struct bsg_job_data *dd_data;
+	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
+	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
+	/* a 4k buffer to hold the mb and extended data from/to the bsg */
+	MAILBOX_t *mb = NULL;
+	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
 	uint32_t size;
+	struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
+	struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
+	struct ulp_bde64 *rxbpl = NULL;
+	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+	uint8_t *ext = NULL;
 	int rc = 0;
+	uint8_t *from;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	/* check if requested extended data lengths are valid */
+	if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
+	    (mbox_req->outWxtWLen > MAILBOX_EXT_SIZE)) {
+		rc = -ERANGE;
+		goto job_done;
+	}
 
 	/* allocate our bsg tracking structure */
 	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 	if (!dd_data) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
 				"2727 Failed allocation of dd_data\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto job_done;
 	}
 
 	mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!mb) {
-		kfree(dd_data);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto job_done;
 	}
 
 	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmboxq) {
-		kfree(dd_data);
-		kfree(mb);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto job_done;
 	}
+	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 
 	size = job->request_payload.payload_len;
-	job->reply->reply_payload_rcv_len =
-		sg_copy_to_buffer(job->request_payload.sg_list,
-				job->request_payload.sg_cnt,
-				mb, size);
+	sg_copy_to_buffer(job->request_payload.sg_list,
+			job->request_payload.sg_cnt,
+			mb, size);
 
 	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
-	if (rc != 0) {
-		kfree(dd_data);
-		kfree(mb);
-		mempool_free(pmboxq, phba->mbox_mem_pool);
-		return rc; /* must be negative */
-	}
+	if (rc != 0)
+		goto job_done; /* must be negative */
 
-	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 	pmb = &pmboxq->u.mb;
 	memcpy(pmb, mb, sizeof(*pmb));
 	pmb->mbxOwner = OWN_HOST;
-	pmboxq->context1 = NULL;
 	pmboxq->vport = vport;
 
+	/* extended mailbox commands will need an extended buffer */
+	if (mbox_req->inExtWLen || mbox_req->outWxtWLen) {
+		ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
+		if (!ext) {
+			rc = -ENOMEM;
+			goto job_done;
+		}
+
+		/* any data for the device? */
+		if (mbox_req->inExtWLen) {
+			from = (uint8_t *)mb;
+			from += sizeof(MAILBOX_t);
+			memcpy((uint8_t *)ext, from,
+				mbox_req->inExtWLen * sizeof(uint32_t));
+		}
+
+		pmboxq->context2 = ext;
+		pmboxq->in_ext_byte_len =
+			mbox_req->inExtWLen *
+			sizeof(uint32_t);
+		pmboxq->out_ext_byte_len =
+			mbox_req->outWxtWLen *
+			sizeof(uint32_t);
+		pmboxq->mbox_offset_word =
+			mbox_req->mbOffset;
+		pmboxq->context2 = ext;
+		pmboxq->in_ext_byte_len =
+			mbox_req->inExtWLen * sizeof(uint32_t);
+		pmboxq->out_ext_byte_len =
+			mbox_req->outWxtWLen * sizeof(uint32_t);
+		pmboxq->mbox_offset_word = mbox_req->mbOffset;
+	}
+
+	/* biu diag will need a kernel buffer to transfer the data
+	 * allocate our own buffer and setup the mailbox command to
+	 * use ours
+	 */
+	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
+		rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+		if (!rxbmp) {
+			rc = -ENOMEM;
+			goto job_done;
+		}
+
+		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
+		INIT_LIST_HEAD(&rxbmp->list);
+		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
+		dmp = diag_cmd_data_alloc(phba, rxbpl, PAGE_SIZE, 0);
+		if (!dmp) {
+			rc = -ENOMEM;
+			goto job_done;
+		}
+
+		dmp->size = PAGE_SIZE;
+		INIT_LIST_HEAD(&dmp->dma.list);
+		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
+			putPaddrHigh(dmp->dma.phys);
+		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
+			putPaddrLow(dmp->dma.phys);
+
+		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
+			putPaddrHigh(dmp->dma.phys +
+				pmb->un.varBIUdiag.un.s2.
+					xmit_bde64.tus.f.bdeSize);
+		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
+			putPaddrLow(dmp->dma.phys +
+				pmb->un.varBIUdiag.un.s2.
+					xmit_bde64.tus.f.bdeSize);
+		dd_data->context_un.mbox.rxbmp = rxbmp;
+		dd_data->context_un.mbox.dmp = dmp;
+	} else {
+		dd_data->context_un.mbox.rxbmp = NULL;
+		dd_data->context_un.mbox.dmp = NULL;
+	}
+
+	/* setup wake call as IOCB callback */
+	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
+
+	/* setup context field to pass wait_queue pointer to wake function */
+	pmboxq->context1 = dd_data;
+	dd_data->type = TYPE_MBOX;
+	dd_data->context_un.mbox.pmboxq = pmboxq;
+	dd_data->context_un.mbox.mb = mb;
+	dd_data->context_un.mbox.set_job = job;
+	dd_data->context_un.mbox.ext = ext;
+	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
+	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
+	dd_data->context_un.mbox.outWxtWLen = mbox_req->outWxtWLen;
+	job->dd_data = dd_data;
+
 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
 	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 		if (rc != MBX_SUCCESS) {
-			if (rc != MBX_TIMEOUT) {
-				kfree(dd_data);
-				kfree(mb);
-				mempool_free(pmboxq, phba->mbox_mem_pool);
-			}
-			return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
+			goto job_done;
 		}
 
+		/* job finished, copy the data */
 		memcpy(mb, pmb, sizeof(*pmb));
 		job->reply->reply_payload_rcv_len =
 			sg_copy_from_buffer(job->reply_payload.sg_list,
 					job->reply_payload.sg_cnt,
 					mb, size);
-		kfree(dd_data);
-		kfree(mb);
-		mempool_free(pmboxq, phba->mbox_mem_pool);
 		/* not waiting mbox already done */
-		return 0;
+		rc = 0;
+		goto job_done;
 	}
 
-	/* setup wake call as IOCB callback */
-	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
-	/* setup context field to pass wait_queue pointer to wake function */
-	pmboxq->context1 = dd_data;
-	dd_data->type = TYPE_MBOX;
-	dd_data->context_un.mbox.pmboxq = pmboxq;
-	dd_data->context_un.mbox.mb = mb;
-	dd_data->context_un.mbox.set_job = job;
-	job->dd_data = dd_data;
 	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
-	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
-		kfree(dd_data);
-		kfree(mb);
-		mempool_free(pmboxq, phba->mbox_mem_pool);
-		return -EIO;
-	}
+	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
+		return 1; /* job started */
+
+job_done:
+	/* common exit for error or job completed inline */
+	kfree(mb);
+	if (pmboxq)
+		mempool_free(pmboxq, phba->mbox_mem_pool);
+	kfree(ext);
+	if (dmp) {
+		dma_free_coherent(&phba->pcidev->dev,
+			dmp->size, dmp->dma.virt,
+			dmp->dma.phys);
+		kfree(dmp);
+	}
+	if (rxbmp) {
+		lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
+		kfree(rxbmp);
+	}
+	kfree(dd_data);
 
-	return 1;
+	return rc;
 }
 
 /**
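Worth noting in the hunk above: every allocation failure and the polled (inline) completion path now funnel through the single job_done label, which frees only what was actually set up (the NULL initializers and the conditional checks make the unused frees harmless), while the asynchronous success path returns 1 and leaves ownership of the buffers to the completion handler. A stripped-down userspace illustration of that single-exit pattern follows; all names in it are invented.

/* Illustration of the single-exit cleanup idiom used by
 * lpfc_bsg_issue_mbox(): errors and inline completion fall through to one
 * label; asynchronous success returns early and keeps the buffers.
 * free(NULL) is a no-op, like kfree(NULL).
 */
#include <stdlib.h>

static int issue_request(size_t mbox_len, size_t ext_len, int async)
{
	unsigned char *mbox = NULL, *ext = NULL;
	int rc = 0;

	mbox = calloc(1, mbox_len);
	if (!mbox) {
		rc = -1;
		goto job_done;
	}

	if (ext_len) {
		ext = calloc(1, ext_len);
		if (!ext) {
			rc = -1;
			goto job_done;
		}
	}

	if (async)
		return 1;	/* started: the completion path owns the buffers */

	/* inline completion: fall through and clean up */
job_done:
	free(ext);
	free(mbox);
	return rc;
}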
@@ -2638,6 +2779,11 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
 		goto job_error;
 	}
 
+	if (job->reply_payload.payload_len != PAGE_SIZE) {
+		rc = -EINVAL;
+		goto job_error;
+	}
+
 	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
 		rc = -EAGAIN;
 		goto job_error;
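The new length check above ties the interface to a one-page reply buffer, matching the PAGE_SIZE bounce buffer that lpfc_bsg_issue_mbox() kzallocs for the mailbox plus extended data. A caller would therefore size its reply payload to exactly one page; a small hedged sketch (the BSG submission plumbing itself is omitted):

/* Sketch: size the reply buffer to one page so it passes the
 * payload_len != PAGE_SIZE check added above.  How the buffer is then
 * handed to the BSG request is not shown here.
 */
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);	/* typically 4096 */
	void *reply = NULL;

	if (page <= 0 || posix_memalign(&reply, (size_t)page, (size_t)page))
		return 1;

	/* ... build the request, point the reply payload at 'reply' ... */

	free(reply);
	return 0;
}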
@@ -3094,6 +3240,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job)
 		job->dd_data = NULL;
 		job->reply->reply_payload_rcv_len = 0;
 		job->reply->result = -EAGAIN;
+		/* the mbox completion handler can now be run */
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		job->job_done(job);
 		break;