author     David Dillow <dillowda@ornl.gov>    2010-10-08 14:48:14 -0400
committer  Roland Dreier <rolandd@cisco.com>   2010-10-25 01:14:08 -0400
commit     05a1d7504f836ee67e27f2488cb5b8126b51dbd4 (patch)
tree       029c9e682a7070e1323bf71e0ca6977e54f5b64b /drivers/infiniband
parent     bb12588a38e6db85e01dceadff7bc161fc92e7d2 (diff)
IB/srp: Eliminate two forward declarations
Signed-off-by: David Dillow <dillowda@ornl.gov>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  142
1 file changed, 69 insertions, 73 deletions
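The patch moves the definitions of __srp_get_tx_iu() and __srp_post_send() from below srp_send_completion() to just after srp_map_data(), ahead of their first callers, so the two forward declarations at the top of ib_srp.c can be dropped. The sketch below illustrates the pattern in isolation; the names are hypothetical and not taken from the driver.

/* Minimal sketch of the pattern this patch applies (hypothetical names,
 * not the driver's code): defining a static function before its first
 * caller lets the forward declaration at the top of the file be removed. */

#include <stdio.h>

/* Before the move, a forward declaration of helper() was needed because
 * it was defined after its caller; once the definition comes first, the
 * definition itself serves as the declaration. */
static int helper(int x)
{
        return x * 2;           /* stands in for the real work */
}

static int caller(int x)
{
        return helper(x) + 1;   /* first use; helper() is already visible */
}

int main(void)
{
        printf("%d\n", caller(20));     /* prints 41 */
        return 0;
}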
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a54eee9324b6..d4c08d648137 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -83,10 +83,6 @@ static void srp_remove_one(struct ib_device *device);
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-                                      enum srp_iu_type iu_type);
-static int __srp_post_send(struct srp_target_port *target,
-                           struct srp_iu *iu, int len);
 
 static struct scsi_transport_template *ib_srp_transport_template;
 
@@ -815,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
         return len;
 }
 
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head. Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+                                      enum srp_iu_type iu_type)
+{
+        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+        struct srp_iu *iu;
+
+        srp_send_completion(target->send_cq, target);
+
+        if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+                return NULL;
+
+        /* Initiator responses to target requests do not consume credits */
+        if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+                ++target->zero_req_lim;
+                return NULL;
+        }
+
+        iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+        iu->type = iu_type;
+        return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+                           struct srp_iu *iu, int len)
+{
+        struct ib_sge list;
+        struct ib_send_wr wr, *bad_wr;
+        int ret = 0;
+
+        list.addr = iu->dma;
+        list.length = len;
+        list.lkey = target->srp_host->srp_dev->mr->lkey;
+
+        wr.next = NULL;
+        wr.wr_id = target->tx_head & SRP_SQ_MASK;
+        wr.sg_list = &list;
+        wr.num_sge = 1;
+        wr.opcode = IB_WR_SEND;
+        wr.send_flags = IB_SEND_SIGNALED;
+
+        ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+        if (!ret) {
+                ++target->tx_head;
+                if (iu->type != SRP_IU_RSP)
+                        --target->req_lim;
+        }
+
+        return ret;
+}
+
 static int srp_post_recv(struct srp_target_port *target)
 {
         unsigned long flags;
@@ -1058,75 +1123,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
         }
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head. Lock cannot be dropped between call here and
- * call to __srp_post_send().
- *
- * Note:
- * An upper limit for the number of allocated information units for each
- * request type is:
- * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
- *   more than Scsi_Host.can_queue requests.
- * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
- * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
- *   one unanswered SRP request to an initiator.
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-                                      enum srp_iu_type iu_type)
-{
-        s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
-        struct srp_iu *iu;
-
-        srp_send_completion(target->send_cq, target);
-
-        if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
-                return NULL;
-
-        /* Initiator responses to target requests do not consume credits */
-        if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-                ++target->zero_req_lim;
-                return NULL;
-        }
-
-        iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-        iu->type = iu_type;
-        return iu;
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-                           struct srp_iu *iu, int len)
-{
-        struct ib_sge list;
-        struct ib_send_wr wr, *bad_wr;
-        int ret = 0;
-
-        list.addr = iu->dma;
-        list.length = len;
-        list.lkey = target->srp_host->srp_dev->mr->lkey;
-
-        wr.next = NULL;
-        wr.wr_id = target->tx_head & SRP_SQ_MASK;
-        wr.sg_list = &list;
-        wr.num_sge = 1;
-        wr.opcode = IB_WR_SEND;
-        wr.send_flags = IB_SEND_SIGNALED;
-
-        ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-        if (!ret) {
-                ++target->tx_head;
-                if (iu->type != SRP_IU_RSP)
-                        --target->req_lim;
-        }
-
-        return ret;
-}
-
 static int srp_queuecommand(struct scsi_cmnd *scmnd,
                             void (*done)(struct scsi_cmnd *))
 {
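For readers following the credit accounting the moved helpers implement: __srp_get_tx_iu() treats tx_head and tx_tail as free-running counters, so tx_head - tx_tail is the number of sends in flight and tx_head & SRP_SQ_MASK selects the ring slot (SRP_SQ_SIZE is a power of two). After draining completed sends via srp_send_completion(), it refuses to hand out an IU for an ordinary command once req_lim would drop into the SRP_TSK_MGMT_SQ_SIZE credits held back for task management, while responses (SRP_IU_RSP) bypass the credit check entirely; __srp_post_send() then decrements req_lim for everything except responses. The standalone model below reproduces just those two gates; the constants and struct are hypothetical stand-ins, not the driver's types.

/* Standalone model of the credit/ring accounting shown above (hypothetical
 * constants and types; the real driver works on struct srp_target_port).
 * It reproduces the two gates in __srp_get_tx_iu(): the send ring must
 * have a free slot, and non-response IUs must leave a reserve of credits
 * for task management unless the request is itself a task-management IU. */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE          64             /* power of two, like SRP_SQ_SIZE */
#define SQ_MASK          (SQ_SIZE - 1)
#define TSK_MGMT_RESERVE 1              /* stands in for SRP_TSK_MGMT_SQ_SIZE */

enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

struct model {
        unsigned int tx_head, tx_tail;  /* free-running ring indices */
        int32_t req_lim;                /* credits granted by the target */
};

static bool can_send(struct model *m, enum iu_type type)
{
        /* Reserve credits for task management unless this IU is one. */
        int32_t rsv = (type == IU_TSK_MGMT) ? 0 : TSK_MGMT_RESERVE;

        if (m->tx_head - m->tx_tail >= SQ_SIZE)
                return false;           /* send ring is full */
        if (m->req_lim <= rsv && type != IU_RSP)
                return false;           /* out of credits; responses are free */
        return true;
}

int main(void)
{
        struct model m = { .tx_head = 3, .tx_tail = 1, .req_lim = 1 };

        /* One credit left: a command must wait, but task management and
         * responses may still go out; slot is the masked head index. */
        printf("cmd: %d  tsk_mgmt: %d  rsp: %d  slot: %u\n",
               can_send(&m, IU_CMD), can_send(&m, IU_TSK_MGMT),
               can_send(&m, IU_RSP), m.tx_head & SQ_MASK);
        return 0;
}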