| | | |
|---|---|---|
| author | James Smart <james.smart@emulex.com> | 2010-01-26 23:08:55 -0500 |
| committer | James Bottomley <James.Bottomley@suse.de> | 2010-02-08 19:39:02 -0500 |
| commit | 4fede78f7552479c4bb3bab221133ec5244e4154 (patch) | |
| tree | e328de984674850a139b48730a52cde271f4a509 /drivers/scsi/lpfc/lpfc_bsg.c | |
| parent | 65467b6bdffd3efde111444663bc9de35b59b22a (diff) | |
[SCSI] lpfc 8.3.8: (BSG1) Update BSG infrastructure
Update BSG infrastructure to handle new vendor specific BSG commands.
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_bsg.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_bsg.c | 52 |
1 file changed, 29 insertions(+), 23 deletions(-)
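
The recurring change in the diff below is a locking conversion: the CT event waiter list moves from a mutex (`ct_event_mutex`) to a spinlock (`ct_ev_lock`) taken with `spin_lock_irqsave()`, and `lpfc_bsg_ct_unsol_event()` now returns `int` instead of `void`. The sketch that follows is not part of the patch; it is a minimal illustration of that conversion pattern, and the structure and field names are simplified stand-ins for the real lpfc definitions, not the driver's actual layout.

```c
/*
 * Illustrative sketch only (not from the patch): a waiter list that was
 * guarded by a mutex is now guarded by a spinlock taken with
 * spin_lock_irqsave(), so it can also be used from a non-sleeping
 * completion path.  demo_hba/demo_event are invented for the example.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

struct demo_hba {
	spinlock_t       ct_ev_lock;     /* replaces the old mutex */
	struct list_head ct_ev_waiters;  /* registered event waiters */
};

struct demo_event {
	struct list_head node;
	u32              reg_id;
};

/* Look up a waiter by registration id under the new spinlock. */
static struct demo_event *demo_find_waiter(struct demo_hba *phba, u32 reg_id)
{
	struct demo_event *evt, *found = NULL;
	unsigned long flags;

	/* was: mutex_lock(&phba->ct_event_mutex); */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == reg_id) {
			found = evt;
			break;
		}
	}
	/* was: mutex_unlock(&phba->ct_event_mutex); */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	return found;
}
```
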
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index a5d9048235d9..dfb1f73252a1 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for *
  * Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009 Emulex. All rights reserved. *
+ * Copyright (C) 2009-2010 Emulex. All rights reserved. *
  * EMULEX and SLI are trademarks of Emulex. *
  * www.emulex.com *
  * *
@@ -33,6 +33,7 @@
 #include "lpfc_sli.h"
 #include "lpfc_sli4.h"
 #include "lpfc_nl.h"
+#include "lpfc_bsg.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
@@ -476,7 +477,7 @@ enum ELX_LOOPBACK_CMD {
  * This function is called when an unsolicited CT command is received. It
  * forwards the event to any processes registerd to receive CT events.
  */
-void
+int
 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			struct lpfc_iocbq *piocbq)
 {
@@ -496,6 +497,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
 	struct lpfc_hbq_entry *hbqe;
 	struct lpfc_sli_ct_request *ct_req;
+	unsigned long flags;

 	INIT_LIST_HEAD(&head);
 	list_add_tail(&head, &piocbq->list);
@@ -519,7 +521,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
 		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->req_id != evt_req_id)
 			continue;
@@ -535,7 +537,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			break;
 		}

-		mutex_unlock(&phba->ct_event_mutex);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
 			/* take accumulated byte count from the last iocbq */
@@ -556,9 +558,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 					"CT event data, size %d\n",
 					evt_dat->len);
 			kfree(evt_dat);
-			mutex_lock(&phba->ct_event_mutex);
+			spin_lock_irqsave(&phba->ct_ev_lock, flags);
 			lpfc_ct_event_unref(evt);
-			mutex_unlock(&phba->ct_event_mutex);
+			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 			goto error_ct_unsol_exit;
 		}

@@ -601,9 +603,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 						iocbq);
 				kfree(evt_dat->data);
 				kfree(evt_dat);
-				mutex_lock(&phba->ct_event_mutex);
+				spin_lock_irqsave(&phba->ct_ev_lock,
+						  flags);
 				lpfc_ct_event_unref(evt);
-				mutex_unlock(&phba->ct_event_mutex);
+				spin_unlock_irqrestore(
+					&phba->ct_ev_lock, flags);
 				goto error_ct_unsol_exit;
 			}
 			memcpy((char *)(evt_dat->data) + offset,
@@ -638,7 +642,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			}
 		}

-		mutex_lock(&phba->ct_event_mutex);
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		if (phba->sli_rev == LPFC_SLI_REV4) {
 			evt_dat->immed_dat = phba->ctx_idx;
 			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
@@ -656,13 +660,13 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
 			break;
 	}
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

 error_ct_unsol_exit:
 	if (!list_empty(&head))
 		list_del(&head);

-	return;
+	return 1;
 }

 /**
@@ -676,6 +680,7 @@ lpfc_bsg_set_event(struct fc_bsg_job *job)
 	struct lpfc_hba *phba = vport->phba;
 	struct set_ct_event *event_req;
 	struct lpfc_ct_event *evt;
+	unsigned long flags;
 	int rc = 0;

 	if (job->request_len <
@@ -689,7 +694,7 @@ lpfc_bsg_set_event(struct fc_bsg_job *job)
 	event_req = (struct set_ct_event *)
 		job->request->rqst_data.h_vendor.vendor_cmd;

-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			lpfc_ct_event_ref(evt);
@@ -697,7 +702,7 @@ lpfc_bsg_set_event(struct fc_bsg_job *job)
 			break;
 		}
 	}
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

 	if (&evt->node == &phba->ct_ev_waiters) {
 		/* no event waiting struct yet - first call */
@@ -710,19 +715,19 @@ lpfc_bsg_set_event(struct fc_bsg_job *job)
 			return -ENOMEM;
 		}

-		mutex_lock(&phba->ct_event_mutex);
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		list_add(&evt->node, &phba->ct_ev_waiters);
 		lpfc_ct_event_ref(evt);
-		mutex_unlock(&phba->ct_event_mutex);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 	}

 	evt->waiting = 1;
 	if (wait_event_interruptible(evt->wq,
 				!list_empty(&evt->events_to_see))) {
-		mutex_lock(&phba->ct_event_mutex);
+		spin_lock_irqsave(&phba->ct_ev_lock, flags);
 		lpfc_ct_event_unref(evt); /* release ref */
 		lpfc_ct_event_unref(evt); /* delete */
-		mutex_unlock(&phba->ct_event_mutex);
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		rc = -EINTR;
 		goto set_event_out;
 	}
@@ -730,10 +735,10 @@ lpfc_bsg_set_event(struct fc_bsg_job *job)
 	evt->wait_time_stamp = jiffies;
 	evt->waiting = 0;

-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_move(evt->events_to_see.prev, &evt->events_to_get);
 	lpfc_ct_event_unref(evt); /* release ref */
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

 set_event_out:
 	/* set_event carries no reply payload */
@@ -759,6 +764,7 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
 	struct get_ct_event_reply *event_reply;
 	struct lpfc_ct_event *evt;
 	struct event_data *evt_dat = NULL;
+	unsigned long flags;
 	int rc = 0;

 	if (job->request_len <
@@ -775,7 +781,7 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
 	event_reply = (struct get_ct_event_reply *)
 		job->reply->reply_data.vendor_reply.vendor_rsp;

-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
 		if (evt->reg_id == event_req->ev_reg_id) {
 			if (list_empty(&evt->events_to_get))
@@ -788,7 +794,7 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
 			break;
 		}
 	}
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

 	if (!evt_dat) {
 		job->reply->reply_payload_rcv_len = 0;
@@ -818,9 +824,9 @@ lpfc_bsg_get_event(struct fc_bsg_job *job)
 	if (evt_dat)
 		kfree(evt_dat->data);
 	kfree(evt_dat);
-	mutex_lock(&phba->ct_event_mutex);
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 	lpfc_ct_event_unref(evt);
-	mutex_unlock(&phba->ct_event_mutex);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

 error_get_event_exit:
 	/* make error code available to userspace */
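
A brief note on why `spin_lock_irqsave()` rather than a plain `spin_lock()` appears throughout the hunks above: once `ct_ev_lock` can be taken from the unsolicited-event completion path, which may run in a context that cannot sleep, the process-context users (the BSG set/get event paths) have to disable and later restore local interrupts while holding the lock so the two sides cannot deadlock against each other. The fragment below is a generic, self-contained illustration of that pattern; nothing in it is lpfc-specific, and all names are invented for the example.

```c
/*
 * Generic illustration of the spin_lock_irqsave() pattern used in the
 * patch: one lock shared between process context (e.g. a BSG vendor
 * command) and a completion/interrupt-driven path.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ev_lock);
static int pending_events;

/* Called from process context. */
static int consume_event(void)
{
	unsigned long flags;
	int got = 0;

	spin_lock_irqsave(&ev_lock, flags);      /* IRQs off, state saved */
	if (pending_events) {
		pending_events--;
		got = 1;
	}
	spin_unlock_irqrestore(&ev_lock, flags); /* restore prior state */
	return got;
}

/* Called from the completion path, possibly with interrupts disabled. */
static void post_event(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ev_lock, flags);
	pending_events++;
	spin_unlock_irqrestore(&ev_lock, flags);
}
```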