author     Joachim Fenkes <fenkes@de.ibm.com>	2007-08-31 10:03:37 -0400
committer  Roland Dreier <rolandd@cisco.com>	2007-08-31 16:58:04 -0400
commit     5ff70cac3e98af64f9a1eaec9e762ff4927c26d1
tree       4a0b840f6cef113b175bfe63c75c0420611fca82 /drivers
parent     fecea0ab3415bfab9a1964690e53b10c5d8f2e46
IB/ehca: SRQ fixes to enable IPoIB CM
Fix ehca SRQ support so that IPoIB connected mode works:
- Report max_srq > 0 if SRQ is supported
- Report "last wqe reached" asynchronous event when base QP dies;
this is required by the IB spec and IPoIB CM relies on receiving it
when cleaning up.
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c | 10
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 48
2 files changed, 38 insertions, 20 deletions
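
The first hunk is what lets an SRQ consumer discover the feature at all: IPoIB connected mode only switches to an SRQ receive path when the device advertises max_srq > 0. Below is a rough, illustrative sketch of that consumer-side check; the ulp_* names and the work-request limits are assumptions for illustration, not code from IPoIB or from this patch.

#include <linux/kernel.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical ULP helper (illustrative only): create an SRQ if the
 * device advertises one, otherwise return NULL so the caller falls
 * back to per-QP receive queues.  ib_create_srq() itself returns an
 * ERR_PTR on failure.
 */
static struct ib_srq *ulp_try_create_srq(struct ib_device *device,
					 struct ib_pd *pd)
{
	struct ib_device_attr attr;
	struct ib_srq_init_attr init_attr;

	/* After this patch, ehca reports max_srq > 0 iff HCA_CAP_SRQ is set. */
	if (ib_query_device(device, &attr) || !attr.max_srq)
		return NULL;

	memset(&init_attr, 0, sizeof(init_attr));
	/* Stay inside the limits advertised by ehca_query_device(). */
	init_attr.attr.max_wr  = min_t(u32, 256, attr.max_srq_wr);
	init_attr.attr.max_sge = min_t(u32, 1, attr.max_srq_sge);

	return ib_create_srq(pd, &init_attr);
}

Since ehca advertises max_srq_sge = 3, a consumer should clamp its SGE count against the queried attributes rather than assuming a larger value.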
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index fc19ef9fd963..cf22472d9414 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -93,9 +93,13 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
 	props->max_pd = min_t(int, rblock->max_pd, INT_MAX);
 	props->max_ah = min_t(int, rblock->max_ah, INT_MAX);
 	props->max_fmr = min_t(int, rblock->max_mr, INT_MAX);
-	props->max_srq = 0;
-	props->max_srq_wr = 0;
-	props->max_srq_sge = 0;
+
+	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+		props->max_srq = props->max_qp;
+		props->max_srq_wr = props->max_qp_wr;
+		props->max_srq_sge = 3;
+	}
+
 	props->max_pkeys = 16;
 	props->local_ca_ack_delay
 		= rblock->local_ca_ack_delay;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ee06d8bd7396..a925ea52443f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -175,41 +175,55 @@ error_data1:
 
 }
 
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
-			      enum ib_event_type event_type, int fatal)
+static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
+			      enum ib_event_type event_type)
 {
 	struct ib_event event;
-	struct ehca_qp *qp;
-	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
-	read_lock(&ehca_qp_idr_lock);
-	qp = idr_find(&ehca_qp_idr, token);
-	read_unlock(&ehca_qp_idr_lock);
-
-
-	if (!qp)
-		return;
-
-	if (fatal)
-		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
 
 	event.device = &shca->ib_device;
+	event.event = event_type;
 
 	if (qp->ext_type == EQPT_SRQ) {
 		if (!qp->ib_srq.event_handler)
 			return;
 
-		event.event = fatal ? IB_EVENT_SRQ_ERR : event_type;
 		event.element.srq = &qp->ib_srq;
 		qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
 	} else {
 		if (!qp->ib_qp.event_handler)
 			return;
 
-		event.event = event_type;
 		event.element.qp = &qp->ib_qp;
 		qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
 	}
+}
+
+static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
+			      enum ib_event_type event_type, int fatal)
+{
+	struct ehca_qp *qp;
+	u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
+
+	read_lock(&ehca_qp_idr_lock);
+	qp = idr_find(&ehca_qp_idr, token);
+	read_unlock(&ehca_qp_idr_lock);
+
+	if (!qp)
+		return;
+
+	if (fatal)
+		ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
+
+	dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
+			  IB_EVENT_SRQ_ERR : event_type);
+
+	/*
+	 * eHCA only processes one WQE at a time for SRQ base QPs,
+	 * so the last WQE has been processed as soon as the QP enters
+	 * error state.
+	 */
+	if (fatal && qp->ext_type == EQPT_SRQBASE)
+		dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
 
 	return;
 }
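
The second hunk is what the IPoIB CM cleanup path actually waits for: per the IB spec, a QP attached to an SRQ reports IB_EVENT_QP_LAST_WQE_REACHED once it is in the error state and no further WQEs will be taken from the SRQ, and only then may the consumer reclaim the QP. Below is a minimal, illustrative sketch of that consumer pattern; the ulp_* names are assumptions, and the real IPoIB CM code defers the QP to a reap list rather than blocking in the teardown path.

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-connection state for a ULP whose QPs share an SRQ. */
struct ulp_conn {
	struct ib_qp		*qp;
	struct completion	last_wqe;	/* signalled by the handler below */
};

static void ulp_init_conn(struct ulp_conn *conn, struct ib_qp *qp)
{
	conn->qp = qp;
	init_completion(&conn->last_wqe);
}

/* Passed as ib_qp_init_attr.event_handler when the QP is created. */
static void ulp_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ulp_conn *conn = qp_context;

	/*
	 * Before this patch, ehca never generated this event for SRQ
	 * base QPs, so a wait like the one below would never finish.
	 */
	if (event->event == IB_EVENT_QP_LAST_WQE_REACHED)
		complete(&conn->last_wqe);
}

/* Teardown path: only destroy the QP once its receive WQEs are flushed. */
static void ulp_destroy_conn(struct ulp_conn *conn)
{
	wait_for_completion(&conn->last_wqe);
	ib_destroy_qp(conn->qp);
}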