author		Roland Dreier <roland@eddore.topspincom.com>	2005-08-18 15:23:08 -0400
committer	Roland Dreier <rolandd@cisco.com>	2005-08-26 23:37:36 -0400
commit		d41fcc6705eddd04f7218c985b6da35435ed73cc (patch)
tree		9c560f65a731ef79309e07598d63ab57fdfedc46 /drivers
parent		d1887ec2125988adccbd8bf0de638c41440bf80e (diff)
[PATCH] IB: Add SRQ support to midlayer
Make the required core API additions and changes for
shared receive queues (SRQs).
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/core/verbs.c		60
-rw-r--r--	drivers/infiniband/include/ib_verbs.h	103
2 files changed, 158 insertions, 5 deletions
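
Not part of the patch: a minimal consumer-side sketch of the verbs this commit adds (ib_create_srq(), ib_modify_srq(), ib_post_srq_recv(), ib_destroy_srq()). The function and variable names, the queue sizes, and the already-prepared scatter/gather entry `sge` are assumptions for illustration only; the error handling follows the return conventions visible in verbs.c below (ERR_PTR(-ENOSYS) when a device has no SRQ support, -EBUSY from ib_destroy_srq() while QPs still reference the SRQ).

#include <linux/err.h>
#include <ib_verbs.h>	/* drivers/infiniband/include in this tree */

/* Stub event handler; a fuller sketch follows the verbs.c diff below. */
static void example_srq_event(struct ib_event *event, void *context)
{
}

/* Hypothetical example: create an SRQ on pd, arm its limit, post one receive. */
static int example_srq_usage(struct ib_pd *pd, struct ib_sge *sge)
{
	struct ib_srq_init_attr init_attr = {
		.event_handler = example_srq_event,
		.srq_context   = NULL,
		.attr = {
			.max_wr  = 128,	/* may be rounded up by the device */
			.max_sge = 1,
		},
	};
	struct ib_srq_attr limit_attr = { .srq_limit = 16 };
	struct ib_recv_wr wr, *bad_wr;
	struct ib_srq *srq;
	int ret;

	srq = ib_create_srq(pd, &init_attr);
	if (IS_ERR(srq))
		return PTR_ERR(srq);	/* -ENOSYS if the device lacks create_srq */

	/* Request an IB_EVENT_SRQ_LIMIT_REACHED event once fewer than
	 * 16 receives remain queued. */
	ret = ib_modify_srq(srq, &limit_attr, IB_SRQ_LIMIT);
	if (ret)
		goto out;

	wr.next    = NULL;
	wr.wr_id   = 1;
	wr.sg_list = sge;
	wr.num_sge = 1;
	ret = ib_post_srq_recv(srq, &wr, &bad_wr);

out:
	ib_destroy_srq(srq);	/* would return -EBUSY if QPs still used the SRQ */
	return ret;
}
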
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index c301a2c41f34..c035510c5a36 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -154,6 +154,66 @@ int ib_destroy_ah(struct ib_ah *ah)
 }
 EXPORT_SYMBOL(ib_destroy_ah);
 
+/* Shared receive queues */
+
+struct ib_srq *ib_create_srq(struct ib_pd *pd,
+			     struct ib_srq_init_attr *srq_init_attr)
+{
+	struct ib_srq *srq;
+
+	if (!pd->device->create_srq)
+		return ERR_PTR(-ENOSYS);
+
+	srq = pd->device->create_srq(pd, srq_init_attr, NULL);
+
+	if (!IS_ERR(srq)) {
+		srq->device        = pd->device;
+		srq->pd            = pd;
+		srq->uobject       = NULL;
+		srq->event_handler = srq_init_attr->event_handler;
+		srq->srq_context   = srq_init_attr->srq_context;
+		atomic_inc(&pd->usecnt);
+		atomic_set(&srq->usecnt, 0);
+	}
+
+	return srq;
+}
+EXPORT_SYMBOL(ib_create_srq);
+
+int ib_modify_srq(struct ib_srq *srq,
+		  struct ib_srq_attr *srq_attr,
+		  enum ib_srq_attr_mask srq_attr_mask)
+{
+	return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
+}
+EXPORT_SYMBOL(ib_modify_srq);
+
+int ib_query_srq(struct ib_srq *srq,
+		 struct ib_srq_attr *srq_attr)
+{
+	return srq->device->query_srq ?
+		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
+}
+EXPORT_SYMBOL(ib_query_srq);
+
+int ib_destroy_srq(struct ib_srq *srq)
+{
+	struct ib_pd *pd;
+	int ret;
+
+	if (atomic_read(&srq->usecnt))
+		return -EBUSY;
+
+	pd = srq->pd;
+
+	ret = srq->device->destroy_srq(srq);
+	if (!ret)
+		atomic_dec(&pd->usecnt);
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_destroy_srq);
+
 /* Queue pairs */
 
 struct ib_qp *ib_create_qp(struct ib_pd *pd,
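
As an aside, also not part of the patch: the event_handler stored by ib_create_srq() above is invoked by the low-level driver with the new SRQ event types added to ib_verbs.h below, and event->element.srq identifies the affected queue. A hedged sketch of such a handler follows; struct my_dev and my_refill_srq() are hypothetical names, and real recovery logic is device-specific.

#include <ib_verbs.h>

/* Hypothetical per-device context and refill helper, both assumptions. */
struct my_dev {
	struct ib_srq *srq;
};

static void my_refill_srq(struct my_dev *dev, struct ib_srq *srq)
{
	/* Repost receive buffers with ib_post_srq_recv() here. */
}

/* Handler registered via ib_srq_init_attr.event_handler at ib_create_srq() time. */
static void my_srq_event(struct ib_event *event, void *context)
{
	struct my_dev *dev = context;	/* the srq_context we registered */

	switch (event->event) {
	case IB_EVENT_SRQ_LIMIT_REACHED:
		/* Fewer than srq_limit receives remain queued; top the SRQ up. */
		my_refill_srq(dev, event->element.srq);
		break;
	case IB_EVENT_SRQ_ERR:
		/* The SRQ has entered the error state; recovery is device-specific. */
		break;
	default:
		break;
	}
}
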
diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h
index 042a7d11fbcc..e16cf94870f2 100644
--- a/drivers/infiniband/include/ib_verbs.h
+++ b/drivers/infiniband/include/ib_verbs.h
@@ -256,7 +256,10 @@ enum ib_event_type {
 	IB_EVENT_PORT_ERR,
 	IB_EVENT_LID_CHANGE,
 	IB_EVENT_PKEY_CHANGE,
-	IB_EVENT_SM_CHANGE
+	IB_EVENT_SM_CHANGE,
+	IB_EVENT_SRQ_ERR,
+	IB_EVENT_SRQ_LIMIT_REACHED,
+	IB_EVENT_QP_LAST_WQE_REACHED
 };
 
 struct ib_event {
@@ -264,6 +267,7 @@ struct ib_event {
 	union {
 		struct ib_cq	*cq;
 		struct ib_qp	*qp;
+		struct ib_srq	*srq;
 		u8		port_num;
 	} element;
 	enum ib_event_type	event;
@@ -386,6 +390,23 @@ enum ib_cq_notify {
 	IB_CQ_NEXT_COMP
 };
 
+enum ib_srq_attr_mask {
+	IB_SRQ_MAX_WR	= 1 << 0,
+	IB_SRQ_LIMIT	= 1 << 1,
+};
+
+struct ib_srq_attr {
+	u32	max_wr;
+	u32	max_sge;
+	u32	srq_limit;
+};
+
+struct ib_srq_init_attr {
+	void		      (*event_handler)(struct ib_event *, void *);
+	void		       *srq_context;
+	struct ib_srq_attr	attr;
+};
+
 struct ib_qp_cap {
 	u32	max_send_wr;
 	u32	max_recv_wr;
@@ -713,10 +734,11 @@ struct ib_cq {
 };
 
 struct ib_srq {
 	struct ib_device       *device;
-	struct ib_uobject      *uobject;
 	struct ib_pd	       *pd;
+	struct ib_uobject      *uobject;
+	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *srq_context;
 	atomic_t		usecnt;
 };
 
@@ -830,6 +852,18 @@ struct ib_device {
 	int		           (*query_ah)(struct ib_ah *ah,
 						struct ib_ah_attr *ah_attr);
 	int		           (*destroy_ah)(struct ib_ah *ah);
+	struct ib_srq *		   (*create_srq)(struct ib_pd *pd,
+						 struct ib_srq_init_attr *srq_init_attr,
+						 struct ib_udata *udata);
+	int			   (*modify_srq)(struct ib_srq *srq,
+						 struct ib_srq_attr *srq_attr,
+						 enum ib_srq_attr_mask srq_attr_mask);
+	int			   (*query_srq)(struct ib_srq *srq,
+						struct ib_srq_attr *srq_attr);
+	int			   (*destroy_srq)(struct ib_srq *srq);
+	int			   (*post_srq_recv)(struct ib_srq *srq,
+						    struct ib_recv_wr *recv_wr,
+						    struct ib_recv_wr **bad_recv_wr);
 	struct ib_qp *		   (*create_qp)(struct ib_pd *pd,
 						struct ib_qp_init_attr *qp_init_attr,
 						struct ib_udata *udata);
@@ -1042,6 +1076,65 @@ int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
 int ib_destroy_ah(struct ib_ah *ah);
 
 /**
+ * ib_create_srq - Creates a SRQ associated with the specified protection
+ *   domain.
+ * @pd: The protection domain associated with the SRQ.
+ * @srq_init_attr: A list of initial attributes required to create the SRQ.
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ib_create_srq(struct ib_pd *pd,
+			     struct ib_srq_init_attr *srq_init_attr);
+
+/**
+ * ib_modify_srq - Modifies the attributes for the specified SRQ.
+ * @srq: The SRQ to modify.
+ * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
+ *   the current values of selected SRQ attributes are returned.
+ * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
+ *   are being modified.
+ *
+ * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
+ * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
+ * the number of receives queued drops below the limit.
+ */
+int ib_modify_srq(struct ib_srq *srq,
+		  struct ib_srq_attr *srq_attr,
+		  enum ib_srq_attr_mask srq_attr_mask);
+
+/**
+ * ib_query_srq - Returns the attribute list and current values for the
+ *   specified SRQ.
+ * @srq: The SRQ to query.
+ * @srq_attr: The attributes of the specified SRQ.
+ */
+int ib_query_srq(struct ib_srq *srq,
+		 struct ib_srq_attr *srq_attr);
+
+/**
+ * ib_destroy_srq - Destroys the specified SRQ.
+ * @srq: The SRQ to destroy.
+ */
+int ib_destroy_srq(struct ib_srq *srq);
+
+/**
+ * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
+ * @srq: The SRQ to post the work request on.
+ * @recv_wr: A list of work requests to post on the receive queue.
+ * @bad_recv_wr: On an immediate failure, this parameter will reference
+ *   the work request that failed to be posted on the SRQ.
+ */
+static inline int ib_post_srq_recv(struct ib_srq *srq,
+				   struct ib_recv_wr *recv_wr,
+				   struct ib_recv_wr **bad_recv_wr)
+{
+	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
+}
+
+/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
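
For context only, a hedged sketch of the provider side: a low-level driver would fill in the new ib_device methods declared above. The mydrv_* names are hypothetical; their bodies would live in the hardware driver. SRQ support is optional as a group, since the verbs.c wrappers return ERR_PTR(-ENOSYS) from ib_create_srq() and -ENOSYS from ib_query_srq() when the corresponding pointers are left NULL, but a driver that sets create_srq must also set modify_srq, destroy_srq and post_srq_recv, which verbs.c calls without a NULL check.

#include <ib_verbs.h>

/* Hypothetical provider hooks, defined elsewhere in the driver. */
struct ib_srq *mydrv_create_srq(struct ib_pd *pd,
				struct ib_srq_init_attr *init_attr,
				struct ib_udata *udata);
int mydrv_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask);
int mydrv_query_srq(struct ib_srq *srq, struct ib_srq_attr *attr);
int mydrv_destroy_srq(struct ib_srq *srq);
int mydrv_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *recv_wr,
			struct ib_recv_wr **bad_recv_wr);

/* Wire the optional SRQ methods into the device the driver registers. */
static void mydrv_register_srq_ops(struct ib_device *ibdev)
{
	ibdev->create_srq    = mydrv_create_srq;
	ibdev->modify_srq    = mydrv_modify_srq;
	ibdev->query_srq     = mydrv_query_srq;
	ibdev->destroy_srq   = mydrv_destroy_srq;
	ibdev->post_srq_recv = mydrv_post_srq_recv;
}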