author | Bryan O'Sullivan <bos@pathscale.com> | 2006-03-29 18:23:34 -0500
committer | Roland Dreier <rolandd@cisco.com> | 2006-03-31 16:14:20 -0500
commit | 74ed6b5eb133b4acae7c47bc23457e5f8e7c1125 (patch)
tree | 3ab28b15184e2a28539fc81b53257a53fdb9fc6a /drivers/infiniband/hw
parent | aa735edf5dffbe43463c3d1218912fa54a8ec724 (diff)
IB/ipath: InfiniBand UC and UD protocol support

These files implement the InfiniBand UC ("unreliable connection") and UD
("unreliable datagram") protocols.
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_uc.c | 645
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_ud.c | 621
2 files changed, 1266 insertions, 0 deletions
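
For context, these protocol engines sit underneath the standard kernel verbs API; a hedged sketch of how a consumer would create a QP that exercises them (pd, send_cq and recv_cq are assumed to already exist, and the capability values are illustrative, not driver requirements):

        struct ib_qp_init_attr init_attr = {
                .send_cq     = send_cq,                 /* assumed: completion queues */
                .recv_cq     = recv_cq,
                .cap         = {
                        .max_send_wr  = 32,             /* illustrative sizes */
                        .max_recv_wr  = 32,
                        .max_send_sge = 1,
                        .max_recv_sge = 1,
                },
                .sq_sig_type = IB_SIGNAL_REQ_WR,        /* complete only IB_SEND_SIGNALED WRs */
                .qp_type     = IB_QPT_UC,               /* ipath_uc.c; use IB_QPT_UD for ipath_ud.c */
        };
        struct ib_qp *qp = ib_create_qp(pd, &init_attr);        /* assumed: protection domain pd */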
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
new file mode 100644
index 000000000000..0d6dbc0a541e
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -0,0 +1,645 @@
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
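/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST */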

static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
                               struct ib_wc *wc)
{
        if (++qp->s_last == qp->s_size)
                qp->s_last = 0;
        if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
                wc->wr_id = wqe->wr.wr_id;
                wc->status = IB_WC_SUCCESS;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                wc->vendor_err = 0;
                wc->byte_len = wqe->length;
                wc->qp_num = qp->ibqp.qp_num;
                wc->src_qp = qp->remote_qpn;
                wc->pkey_index = 0;
                wc->slid = qp->remote_ah_attr.dlid;
                wc->sl = qp->remote_ah_attr.sl;
                wc->dlid_path_bits = 0;
                wc->port_num = 0;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
        }
}

/**
 * ipath_do_uc_send - do a send on a UC queue
 * @data: contains a pointer to the QP to send on
 *
 * Process entries in the send work queue until the queue is exhausted.
 * Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP lock, two threads could send
 * packets out of order.
 * This is similar to ipath_do_rc_send() except we don't have
 * timeouts or resends.
 */
void ipath_do_uc_send(unsigned long data)
{
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe;
        unsigned long flags;
        u16 lrh0;
        u32 hwords;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u32 bth2;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        u32 len;
        struct ipath_other_headers *ohdr;
        struct ib_wc wc;

        if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
                goto bail;

        if (unlikely(qp->remote_ah_attr.dlid ==
                     ipath_layer_get_lid(dev->dd))) {
                /* Pass in an uninitialized ib_wc to save stack space. */
                ipath_ruc_loopback(qp, &wc);
                clear_bit(IPATH_S_BUSY, &qp->s_flags);
                goto bail;
        }

        ohdr = &qp->s_hdr.u.oth;
        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                ohdr = &qp->s_hdr.u.l.oth;

again:
        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return.
                 * An interrupt will call ipath_ib_piobufavail()
                 * when one is available.
                 */
                if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
                                     (u32 *) &qp->s_hdr,
                                     qp->s_cur_size,
                                     qp->s_cur_sge)) {
                        ipath_no_bufs_available(qp, dev);
                        goto bail;
                }
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */
                qp->s_hdrwords = 0;
        }

        lrh0 = IPS_LRH_BTH;
        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;

        /*
         * The lock is needed to synchronize between
         * setting qp->s_ack_state and post_send().
         */
        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
                goto done;

        bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);

        /* Send a request. */
        wqe = get_swqe_ptr(qp, qp->s_last);
        switch (qp->s_state) {
        default:
                /*
                 * Signal the completion of the last send (if there is
                 * one).
                 */
                if (qp->s_last != qp->s_tail) {
                        complete_last_send(qp, wqe, &wc);
                        wqe = get_swqe_ptr(qp, qp->s_last);
                }

                /* Check if send work queue is empty. */
                if (qp->s_tail == qp->s_head)
                        goto done;
                /*
                 * Start a new request.
                 */
                qp->s_psn = wqe->psn = qp->s_next_psn;
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_len = len = wqe->length;
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        if (len > pmtu) {
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND)
                                qp->s_state = OP(SEND_ONLY);
                        else {
                                qp->s_state =
                                        OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                        }
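                        /* Bit 23 of BTH word 0 is the solicited event bit. */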
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= 1 << 23;
                        break;

                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->wr.wr.rdma.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / 4;
                        if (len > pmtu) {
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        else {
                                qp->s_state =
                                        OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the RETH */
                                ohdr->u.rc.imm_data = wqe->wr.imm_data;
                                hwords += 1;
                                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= 1 << 23;
                        }
                        break;

                default:
                        goto done;
                }
                if (++qp->s_tail >= qp->s_size)
                        qp->s_tail = 0;
                break;

        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND)
                        qp->s_state = OP(SEND_LAST);
                else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= 1 << 23;
                break;

        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                        qp->s_state = OP(RDMA_WRITE_LAST);
                else {
                        qp->s_state =
                                OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= 1 << 23;
                }
                break;
        }
        bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
        qp->s_len -= len;
        bth0 |= qp->s_state << 24;

        spin_unlock_irqrestore(&qp->s_lock, flags);

        /* Construct the header. */
        extra_bytes = (4 - len) & 3;
        nwords = (len + extra_bytes) >> 2;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                /* Header size in 32-bit words. */
                hwords += 10;
                lrh0 = IPS_LRH_GRH;
                qp->s_hdr.u.l.grh.version_tclass_flow =
                        cpu_to_be32((6 << 28) |
                                    (qp->remote_ah_attr.grh.traffic_class
                                     << 20) |
                                    qp->remote_ah_attr.grh.flow_label);
                qp->s_hdr.u.l.grh.paylen =
                        cpu_to_be16(((hwords - 12) + nwords +
                                     SIZE_OF_CRC) << 2);
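                /* (paylen counts the bytes after the GRH: BTH onward, payload, and ICRC) */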
                /* next_hdr is defined by C8-7 in ch. 8.4.1 */
                qp->s_hdr.u.l.grh.next_hdr = 0x1B;
                qp->s_hdr.u.l.grh.hop_limit =
                        qp->remote_ah_attr.grh.hop_limit;
                /* The SGID is 32-bit aligned. */
                qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
                        dev->gid_prefix;
                qp->s_hdr.u.l.grh.sgid.global.interface_id =
                        ipath_layer_get_guid(dev->dd);
                qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
        }
        qp->s_hdrwords = hwords;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_cur_size = len;
        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        /* DEST LID */
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);

        /* Check for more work to do. */
        goto again;

done:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        clear_bit(IPATH_S_BUSY, &qp->s_flags);

bail:
        return;
}

/**
 * ipath_uc_rcv - handle an incoming UC packet
 * @dev: the device the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet.
 *
 * This is called from ipath_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
        struct ipath_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 psn;
        u32 pad;
        unsigned long flags;
        struct ib_wc wc;
        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
        struct ib_reth *reth;
        int header_in_data;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12;       /* LRH + BTH */
                psn = be32_to_cpu(ohdr->bth[2]);
                header_in_data = 0;
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
                /*
                 * The header with GRH is 60 bytes and the
                 * core driver sets the eager header buffer
                 * size to 56 bytes, so the last 4 bytes of
                 * the BTH header (PSN) are in the data buffer.
                 */
                header_in_data =
                        ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
                if (header_in_data) {
                        psn = be32_to_cpu(((__be32 *) data)[0]);
                        data += sizeof(__be32);
                } else
                        psn = be32_to_cpu(ohdr->bth[2]);
        }
        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;

        wc.imm_data = 0;
        wc.wc_flags = 0;

        spin_lock_irqsave(&qp->r_rq.lock, flags);

        /* Compare the PSN against the expected PSN. */
        if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
                /*
                 * Handle a sequence error.
                 * Silently drop any current message.
                 */
                qp->r_psn = psn;
        inv:
                qp->r_state = OP(SEND_LAST);
                switch (opcode) {
                case OP(SEND_FIRST):
                case OP(SEND_ONLY):
                case OP(SEND_ONLY_WITH_IMMEDIATE):
                        goto send_first;

                case OP(RDMA_WRITE_FIRST):
                case OP(RDMA_WRITE_ONLY):
                case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
                        goto rdma_first;

                default:
                        dev->n_pkt_drops++;
                        goto done;
                }
        }

        /* Check for opcode sequence errors. */
        switch (qp->r_state) {
        case OP(SEND_FIRST):
        case OP(SEND_MIDDLE):
                if (opcode == OP(SEND_MIDDLE) ||
                    opcode == OP(SEND_LAST) ||
                    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_MIDDLE):
                if (opcode == OP(RDMA_WRITE_MIDDLE) ||
                    opcode == OP(RDMA_WRITE_LAST) ||
                    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
                        break;
                goto inv;

        default:
                if (opcode == OP(SEND_FIRST) ||
                    opcode == OP(SEND_ONLY) ||
                    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
                    opcode == OP(RDMA_WRITE_FIRST) ||
                    opcode == OP(RDMA_WRITE_ONLY) ||
                    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        break;
                goto inv;
        }

        /* OK, process the packet. */
        switch (opcode) {
        case OP(SEND_FIRST):
        case OP(SEND_ONLY):
        case OP(SEND_ONLY_WITH_IMMEDIATE):
        send_first:
                if (qp->r_reuse_sge) {
                        qp->r_reuse_sge = 0;
                        qp->r_sge = qp->s_rdma_sge;
                } else if (!ipath_get_rwqe(qp, 0)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Save the WQE so we can reuse it in case of an error. */
                qp->s_rdma_sge = qp->r_sge;
                qp->r_rcv_len = 0;
                if (opcode == OP(SEND_ONLY))
                        goto send_last;
                else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
                        goto send_last_imm;
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                /* Check for invalid length: packet must be a full PMTU and fit the posted rwqe. */
                if (unlikely(tlen != (hdrsize + pmtu + 4))) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len)) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, pmtu);
                break;

        case OP(SEND_LAST_WITH_IMMEDIATE):
        send_last_imm:
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
                        wc.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                /* FALLTHROUGH */
        case OP(SEND_LAST):
        send_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                wc.byte_len = tlen + qp->r_rcv_len;
                if (unlikely(wc.byte_len > qp->r_len)) {
                        qp->r_reuse_sge = 1;
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* XXX Need to free SGEs */
        last_imm:
                ipath_copy_sge(&qp->r_sge, data, tlen);
                wc.wr_id = qp->r_wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.opcode = IB_WC_RECV;
                wc.vendor_err = 0;
                wc.qp_num = qp->ibqp.qp_num;
                wc.src_qp = qp->remote_qpn;
                wc.pkey_index = 0;
                wc.slid = qp->remote_ah_attr.dlid;
                wc.sl = qp->remote_ah_attr.sl;
                wc.dlid_path_bits = 0;
                wc.port_num = 0;
                /* Signal completion event if the solicited bit is set. */
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                               (ohdr->bth[0] &
                                __constant_cpu_to_be32(1 << 23)) != 0);
                break;

        case OP(RDMA_WRITE_FIRST):
        case OP(RDMA_WRITE_ONLY):
        case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):        /* consume RWQE */
        rdma_first:
                /* RETH comes after BTH */
                if (!header_in_data)
                        reth = &ohdr->u.rc.reth;
                else {
                        reth = (struct ib_reth *)data;
                        data += sizeof(*reth);
                }
                hdrsize += sizeof(*reth);
                qp->r_len = be32_to_cpu(reth->length);
                qp->r_rcv_len = 0;
                if (qp->r_len != 0) {
                        u32 rkey = be32_to_cpu(reth->rkey);
                        u64 vaddr = be64_to_cpu(reth->vaddr);

                        /* Check rkey */
                        if (unlikely(!ipath_rkey_ok(
                                             dev, &qp->r_sge, qp->r_len,
                                             vaddr, rkey,
                                             IB_ACCESS_REMOTE_WRITE))) {
                                dev->n_pkt_drops++;
                                goto done;
                        }
                } else {
                        qp->r_sge.sg_list = NULL;
                        qp->r_sge.sge.mr = NULL;
                        qp->r_sge.sge.vaddr = NULL;
                        qp->r_sge.sge.length = 0;
                        qp->r_sge.sge.sge_length = 0;
                }
                if (unlikely(!(qp->qp_access_flags &
                               IB_ACCESS_REMOTE_WRITE))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (opcode == OP(RDMA_WRITE_ONLY))
                        goto rdma_last;
                else if (opcode ==
                         OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                        goto rdma_last_imm;
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                /* Check for invalid length: packet must be a full PMTU and fit the RETH length. */
                if (unlikely(tlen != (hdrsize + pmtu + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                qp->r_rcv_len += pmtu;
                if (unlikely(qp->r_rcv_len > qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, pmtu);
                break;

        case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
        rdma_last_imm:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (qp->r_reuse_sge) {
                        qp->r_reuse_sge = 0;
                } else if (!ipath_get_rwqe(qp, 1)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else {
                        /* Immediate data comes after BTH */
                        wc.imm_data = ohdr->u.imm_data;
                }
                hdrsize += 4;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.byte_len = 0;
                goto last_imm;

        case OP(RDMA_WRITE_LAST):
        rdma_last:
                /* Get the number of bytes the message was padded by. */
                pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
                /* Check for invalid length. */
                /* XXX LAST len should be >= 1 */
                if (unlikely(tlen < (hdrsize + pad + 4))) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                /* Don't count the CRC. */
                tlen -= (hdrsize + pad + 4);
                if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
                        dev->n_pkt_drops++;
                        goto done;
                }
                ipath_copy_sge(&qp->r_sge, data, tlen);
                break;

        default:
                /* Drop packet for unknown opcodes. */
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
        qp->r_psn++;
        qp->r_state = opcode;
done:
        spin_unlock_irqrestore(&qp->r_rq.lock, flags);

bail:
        return;
}
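
The UC receive path above rejects out-of-sequence packets with ipath_cmp24(), which compares the 24-bit PSNs carried in BTH word 2 modulo 2^24. Its definition lives elsewhere in the driver; a minimal sketch of such a wrap-aware comparison, as an illustration rather than the exact in-tree code:

        /* Signed difference of two 24-bit PSNs: <0 if a precedes b, 0 if equal, >0 if a follows b. */
        static inline int cmp24(u32 a, u32 b)
        {
                return (((int) a) - ((int) b)) << 8 >> 8;       /* sign-extend the low 24 bits */
        }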
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
new file mode 100644
index 000000000000..5ff3de6128b2
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -0,0 +1,621 @@
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ips_common.h"

/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the QP
 * @ss: the SGE state
 * @length: the length of the data to send
 * @wr: the work request
 * @wc: the work completion entry
 *
 * This is called from ipath_post_ud_send() to forward a WQE addressed
 * to the same HCA.
 */
void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss,
                       u32 length, struct ib_send_wr *wr, struct ib_wc *wc)
{
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ib_ah_attr *ah_attr;
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_srq *srq;
        struct ipath_sge_state rsge;
        struct ipath_sge *sge;
        struct ipath_rwqe *wqe;

        qp = ipath_lookup_qpn(&dev->qp_table, wr->wr.ud.remote_qpn);
        if (!qp)
                return;

        /*
         * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        if (unlikely(qp->ibqp.qp_num &&
                     ((int) wr->wr.ud.remote_qkey < 0
                      ? qp->qkey : wr->wr.ud.remote_qkey) != qp->qkey)) {
                /* XXX OK to lose a count once in a while. */
                dev->qkey_violations++;
                dev->n_pkt_drops++;
                goto done;
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc->byte_len = length + sizeof(struct ib_grh);

        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                wc->wc_flags = IB_WC_WITH_IMM;
                wc->imm_data = wr->imm_data;
        } else {
                wc->wc_flags = 0;
                wc->imm_data = 0;
        }

        /*
         * Get the next work request entry to find where to put the data.
         * Note that it is safe to drop the lock after changing rq->tail
         * since ipath_post_receive() won't fill the empty slot.
         */
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                rq = &srq->rq;
        } else {
                srq = NULL;
                rq = &qp->r_rq;
        }
        spin_lock_irqsave(&rq->lock, flags);
        if (rq->tail == rq->head) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto done;
        }
        /* Silently drop packets which are too big. */
        wqe = get_rwqe_ptr(rq, rq->tail);
        if (wc->byte_len > wqe->length) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto done;
        }
        wc->wr_id = wqe->wr_id;
        rsge.sge = wqe->sg_list[0];
        rsge.sg_list = wqe->sg_list + 1;
        rsge.num_sge = wqe->num_sge;
        if (++rq->tail >= rq->size)
                rq->tail = 0;
        if (srq && srq->ibsrq.event_handler) {
                u32 n;

                if (rq->head < rq->tail)
                        n = rq->size + rq->head - rq->tail;
                else
                        n = rq->head - rq->tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        srq->ibsrq.event_handler(&ev,
                                                 srq->ibsrq.srq_context);
                } else
                        spin_unlock_irqrestore(&rq->lock, flags);
        } else
                spin_unlock_irqrestore(&rq->lock, flags);
        ah_attr = &to_iah(wr->wr.ud.ah)->attr;
        if (ah_attr->ah_flags & IB_AH_GRH) {
                ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
                wc->wc_flags |= IB_WC_GRH;
        } else
                ipath_skip_sge(&rsge, sizeof(struct ib_grh));
        sge = &ss->sge;
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                BUG_ON(len == 0);
                ipath_copy_sge(&rsge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ss->num_sge)
                                *sge = *ss->sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
        wc->status = IB_WC_SUCCESS;
        wc->opcode = IB_WC_RECV;
        wc->vendor_err = 0;
        wc->qp_num = qp->ibqp.qp_num;
        wc->src_qp = sqp->ibqp.qp_num;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc->pkey_index = 0;
        wc->slid = ipath_layer_get_lid(dev->dd) |
                (ah_attr->src_path_bits &
                 ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1));
        wc->sl = ah_attr->sl;
        wc->dlid_path_bits =
                ah_attr->dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), wc,
                       wr->send_flags & IB_SEND_SOLICITED);

done:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * ipath_post_ud_send - post a UD send on QP
 * @qp: the QP
 * @wr: the work request
 *
 * Note that we actually send the data as it is posted instead of putting
 * the request into a ring buffer.  If we wanted to use a ring buffer,
 * we would need to save a reference to the destination address in the SWQE.
 */
int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_other_headers *ohdr;
        struct ib_ah_attr *ah_attr;
        struct ipath_sge_state ss;
        struct ipath_sge *sg_list;
        struct ib_wc wc;
        u32 hwords;
        u32 nwords;
        u32 len;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
        int i;
        int ret;

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
                ret = 0;
                goto bail;
        }

        /* IB spec says that num_sge == 0 is OK. */
        if (wr->num_sge > qp->s_max_sge) {
                ret = -EINVAL;
                goto bail;
        }

        if (wr->num_sge > 1) {
                sg_list = kmalloc((qp->s_max_sge - 1) * sizeof(*sg_list),
                                  GFP_ATOMIC);
                if (!sg_list) {
                        ret = -ENOMEM;
                        goto bail;
                }
        } else
                sg_list = NULL;

        /* Check the buffer to send. */
        ss.sg_list = sg_list;
        ss.sge.mr = NULL;
        ss.sge.vaddr = NULL;
        ss.sge.length = 0;
        ss.sge.sge_length = 0;
        ss.num_sge = 0;
        len = 0;
        for (i = 0; i < wr->num_sge; i++) {
                /* Check LKEY */
                if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
                        ret = -EINVAL;
                        goto bail;
                }

                if (wr->sg_list[i].length == 0)
                        continue;
                if (!ipath_lkey_ok(&dev->lk_table, ss.num_sge ?
                                   sg_list + ss.num_sge - 1 : &ss.sge,
                                   &wr->sg_list[i], 0)) {
                        ret = -EINVAL;
                        goto bail;
                }
                len += wr->sg_list[i].length;
                ss.num_sge++;
        }
        extra_bytes = (4 - len) & 3;
        nwords = (len + extra_bytes) >> 2;

        /* Construct the header. */
        ah_attr = &to_iah(wr->wr.ud.ah)->attr;
        if (ah_attr->dlid == 0) {
                ret = -EINVAL;
                goto bail;
        }
        if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
                if (ah_attr->dlid != IPS_PERMISSIVE_LID)
                        dev->n_multicast_xmit++;
                else
                        dev->n_unicast_xmit++;
        } else {
                dev->n_unicast_xmit++;
                lid = ah_attr->dlid &
                        ~((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
                if (unlikely(lid == ipath_layer_get_lid(dev->dd))) {
                        /*
                         * Pass in an uninitialized ib_wc to save stack
                         * space.
                         */
                        ipath_ud_loopback(qp, &ss, len, wr, &wc);
                        goto done;
                }
        }
        if (ah_attr->ah_flags & IB_AH_GRH) {
                /* Header size in 32-bit words. */
                hwords = 17;
                lrh0 = IPS_LRH_GRH;
                ohdr = &qp->s_hdr.u.l.oth;
                qp->s_hdr.u.l.grh.version_tclass_flow =
                        cpu_to_be32((6 << 28) |
                                    (ah_attr->grh.traffic_class << 20) |
                                    ah_attr->grh.flow_label);
                qp->s_hdr.u.l.grh.paylen =
                        cpu_to_be16(((wr->opcode ==
                                      IB_WR_SEND_WITH_IMM ? 6 : 5) +
                                     nwords + SIZE_OF_CRC) << 2);
                /* next_hdr is defined by C8-7 in ch. 8.4.1 */
                qp->s_hdr.u.l.grh.next_hdr = 0x1B;
                qp->s_hdr.u.l.grh.hop_limit = ah_attr->grh.hop_limit;
                /* The SGID is 32-bit aligned. */
                qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
                        dev->gid_prefix;
                qp->s_hdr.u.l.grh.sgid.global.interface_id =
                        ipath_layer_get_guid(dev->dd);
                qp->s_hdr.u.l.grh.dgid = ah_attr->grh.dgid;
                /*
                 * Don't worry about sending to locally attached multicast
                 * QPs; the IB spec leaves the behavior unspecified.
                 */
        } else {
                /* Header size in 32-bit words. */
                hwords = 7;
                lrh0 = IPS_LRH_BTH;
                ohdr = &qp->s_hdr.u.oth;
        }
        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                ohdr->u.ud.imm_data = wr->imm_data;
                wc.imm_data = wr->imm_data;
                hwords += 1;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
        } else if (wr->opcode == IB_WR_SEND) {
                wc.imm_data = 0;
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
        } else {
                ret = -EINVAL;
                goto bail;
        }
        lrh0 |= ah_attr->sl << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
        qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
        lid = ipath_layer_get_lid(dev->dd);
        if (lid) {
                lid |= ah_attr->src_path_bits &
                        ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
                qp->s_hdr.lrh[3] = cpu_to_be16(lid);
        } else
                qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
        if (wr->send_flags & IB_SEND_SOLICITED)
                bth0 |= 1 << 23;
        bth0 |= extra_bytes << 20;
        bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
                ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        /*
         * Use the multicast QP if the destination LID is a multicast LID.
         */
        ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
                ah_attr->dlid != IPS_PERMISSIVE_LID ?
                __constant_cpu_to_be32(IPS_MULTICAST_QPN) :
                cpu_to_be32(wr->wr.ud.remote_qpn);
        /* XXX Could lose a PSN count but not worth locking */
        ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
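        /* (casting the qkey to int makes the high-order bit the sign bit) */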
        ohdr->u.ud.deth[0] = cpu_to_be32((int)wr->wr.ud.remote_qkey < 0 ?
                                         qp->qkey : wr->wr.ud.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
        if (ipath_verbs_send(dev->dd, hwords, (u32 *) &qp->s_hdr,
                             len, &ss))
                dev->n_no_piobuf++;

done:
        /* Queue the completion status entry. */
        if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
            (wr->send_flags & IB_SEND_SIGNALED)) {
                wc.wr_id = wr->wr_id;
                wc.status = IB_WC_SUCCESS;
                wc.vendor_err = 0;
                wc.opcode = IB_WC_SEND;
                wc.byte_len = len;
                wc.qp_num = qp->ibqp.qp_num;
                wc.src_qp = 0;
                wc.wc_flags = 0;
                /* XXX initialize other fields? */
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
        }
        kfree(sg_list);

        ret = 0;

bail:
        return ret;
}

/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
        struct ipath_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 pad;
        unsigned long flags;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
        struct ipath_rq *rq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        u16 dlid;
        int header_in_data;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
                qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
                src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                header_in_data = 0;
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12 + 8;      /* LRH + GRH + BTH + DETH */
                /*
                 * The header with GRH is 68 bytes and the core driver sets
                 * the eager header buffer size to 56 bytes, so the last 12
                 * bytes of the IB header are in the data buffer.
                 */
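                /* (those 12 bytes are the last BTH word, the PSN, followed by the 8-byte DETH) */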
                header_in_data =
                        ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
                if (header_in_data) {
                        qkey = be32_to_cpu(((__be32 *) data)[1]);
                        src_qp = be32_to_cpu(((__be32 *) data)[2]);
                        data += 12;
                } else {
                        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
                        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                }
        }
        src_qp &= IPS_QPN_MASK;

        /*
         * Check that the permissive LID is only used on QP0
         * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                             hdr->lrh[3] == IB_LID_PERMISSIVE)) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                if (unlikely(qkey != qp->qkey)) {
                        /* XXX OK to lose a count once in a while. */
                        dev->qkey_violations++;
                        dev->n_pkt_drops++;
                        goto bail;
                }
        } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
                   hdr->lrh[3] == IB_LID_PERMISSIVE) {
                struct ib_smp *smp = (struct ib_smp *) data;

                if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
        }

        /* Get the number of bytes the message was padded by. */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
        if (unlikely(tlen < (hdrsize + pad + 4))) {
                /* Drop incomplete packets. */
                dev->n_pkt_drops++;
                goto bail;
        }
        tlen -= hdrsize + pad + 4;

        /* Drop invalid MAD packets (see 13.5.3.1). */
        if (unlikely((qp->ibqp.qp_num == 0 &&
                      (tlen != 256 ||
                       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
                     (qp->ibqp.qp_num == 1 &&
                      (tlen != 256 ||
                       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
                dev->n_pkt_drops++;
                goto bail;
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc.byte_len = tlen + sizeof(struct ib_grh);

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                if (header_in_data) {
                        wc.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else
                        wc.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                hdrsize += sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.imm_data = 0;
                wc.wc_flags = 0;
        } else {
                dev->n_pkt_drops++;
                goto bail;
        }

        /*
         * Get the next work request entry to find where to put the data.
         * Note that it is safe to drop the lock after changing rq->tail
         * since ipath_post_receive() won't fill the empty slot.
         */
        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                rq = &srq->rq;
        } else {
                srq = NULL;
                rq = &qp->r_rq;
        }
        spin_lock_irqsave(&rq->lock, flags);
        if (rq->tail == rq->head) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
        /* Silently drop packets which are too big. */
        wqe = get_rwqe_ptr(rq, rq->tail);
        if (wc.byte_len > wqe->length) {
                spin_unlock_irqrestore(&rq->lock, flags);
                dev->n_pkt_drops++;
                goto bail;
        }
        wc.wr_id = wqe->wr_id;
        qp->r_sge.sge = wqe->sg_list[0];
        qp->r_sge.sg_list = wqe->sg_list + 1;
        qp->r_sge.num_sge = wqe->num_sge;
        if (++rq->tail >= rq->size)
                rq->tail = 0;
        if (srq && srq->ibsrq.event_handler) {
                u32 n;

                if (rq->head < rq->tail)
                        n = rq->size + rq->head - rq->tail;
                else
                        n = rq->head - rq->tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        srq->ibsrq.event_handler(&ev,
                                                 srq->ibsrq.srq_context);
                } else
                        spin_unlock_irqrestore(&rq->lock, flags);
        } else
                spin_unlock_irqrestore(&rq->lock, flags);
        if (has_grh) {
                ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                               sizeof(struct ib_grh));
                wc.wc_flags |= IB_WC_GRH;
        } else
                ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
        ipath_copy_sge(&qp->r_sge, data,
                       wc.byte_len - sizeof(struct ib_grh));
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.qp_num = qp->ibqp.qp_num;
        wc.src_qp = src_qp;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc.pkey_index = 0;
        wc.slid = be16_to_cpu(hdr->lrh[3]);
        wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
        dlid = be16_to_cpu(hdr->lrh[1]);
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
        wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
                dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       (ohdr->bth[0] &
                        __constant_cpu_to_be32(1 << 23)) != 0);

bail:;
}
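
A consumer reaches ipath_post_ud_send() through the usual ib_post_send() verbs entry point. A hedged sketch of posting a single-SGE UD send (the pd/ah/qp/mr objects and the buffer are assumed to already exist; the remote QPN and qkey are illustrative values):

        struct ib_sge sge = {
                .addr   = (u64) (unsigned long) buf,    /* assumed: registered buffer */
                .length = 64,
                .lkey   = mr->lkey,                     /* assumed: its memory region */
        };
        struct ib_send_wr wr = {
                .opcode     = IB_WR_SEND,
                .send_flags = IB_SEND_SIGNALED,
                .sg_list    = &sge,
                .num_sge    = 1,
        };
        struct ib_send_wr *bad_wr;

        wr.wr.ud.ah          = ah;              /* assumed: address handle for the peer */
        wr.wr.ud.remote_qpn  = 0x18;            /* illustrative */
        wr.wr.ud.remote_qkey = 0x11111111;      /* illustrative; high bit clear, so this qkey is used */
        if (ib_post_send(qp, &wr, &bad_wr))
                printk(KERN_ERR "UD send failed to post\n");

Note that, per the comment on ipath_post_ud_send(), the data is sent synchronously at post time rather than queued in a ring buffer.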