author     Bryan O'Sullivan <bos@pathscale.com>  2006-07-01 07:35:50 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-07-01 12:55:58 -0400
commit     ddd4bb22108417fdc5c35324bd13a3265581ae76 (patch)
tree       5cc35e54e8761af27746bae48ef66318237ad8a0 /drivers/infiniband/hw/ipath/ipath_uc.c
parent     759d57686dab8169ca68bbf938ce8e965d1e107a (diff)
[PATCH] IB/ipath: share more common code between RC and UC protocols
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_uc.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_uc.c  |  153
1 file changed, 29 insertions(+), 124 deletions(-)
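
The shape of the refactor shows in the first hunk below: the UC-specific send tasklet ipath_do_uc_send(), which mixed loopback handling, PIO-buffer retries, and LRH/GRH header construction into its send loop, becomes ipath_make_uc_req(), a pure packet constructor that common RC/UC send code can drive. A minimal sketch of the resulting calling convention, under stated assumptions: only ipath_make_uc_req() and its parameters come from this diff; the caller name and body are illustrative, and the real shared sender lives outside this file.

/*
 * Illustrative sketch, not driver code: example_shared_send() is an
 * assumed name standing in for the common RC/UC send logic.
 */
static void example_shared_send(struct ipath_qp *qp,
                                struct ipath_other_headers *ohdr, u32 pmtu)
{
        unsigned long flags;
        u32 bth0, bth2;

        /* The new kernel-doc requires s_lock held, interrupts disabled. */
        spin_lock_irqsave(&qp->s_lock, flags);
        if (ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2)) {
                /*
                 * A packet was constructed: qp->s_hdrwords, s_cur_sge and
                 * s_cur_size are set; bth0 is the BTH opcode/flags word
                 * and bth2 the masked PSN.  Shared code would now build
                 * the LRH/GRH, fill ohdr->bth[], and pass the packet to
                 * ipath_verbs_send().
                 */
        }
        spin_unlock_irqrestore(&qp->s_lock, flags);
}

The payoff is visible in the diffstat: 124 lines of UC-only send-path plumbing are deleted in exchange for 29, with the duplicated logic handled once in the shared path.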
diff --git a/drivers/infiniband/hw/ipath/ipath_uc.c b/drivers/infiniband/hw/ipath/ipath_uc.c
index b64d9ddc075c..89b3e1a5e3e3 100644
--- a/drivers/infiniband/hw/ipath/ipath_uc.c
+++ b/drivers/infiniband/hw/ipath/ipath_uc.c
@@ -62,90 +62,40 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
 }
 
 /**
- * ipath_do_uc_send - do a send on a UC queue
- * @data: contains a pointer to the QP to send on
+ * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
+ * @qp: a pointer to the QP
+ * @ohdr: a pointer to the IB header being constructed
+ * @pmtu: the path MTU
+ * @bth0p: pointer to the BTH opcode word
+ * @bth2p: pointer to the BTH PSN word
  *
- * Process entries in the send work queue until the queue is exhausted.
- * Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, after we drop the QP lock, two threads could send
- * packets out of order.
- * This is similar to ipath_do_rc_send() below except we don't have
- * timeouts or resends.
+ * Return 1 if constructed; otherwise, return 0.
+ * Note the QP s_lock must be held and interrupts disabled.
  */
-void ipath_do_uc_send(unsigned long data)
+int ipath_make_uc_req(struct ipath_qp *qp,
+                      struct ipath_other_headers *ohdr,
+                      u32 pmtu, u32 *bth0p, u32 *bth2p)
 {
-        struct ipath_qp *qp = (struct ipath_qp *)data;
-        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
         struct ipath_swqe *wqe;
-        unsigned long flags;
-        u16 lrh0;
         u32 hwords;
-        u32 nwords;
-        u32 extra_bytes;
         u32 bth0;
-        u32 bth2;
-        u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
         u32 len;
-        struct ipath_other_headers *ohdr;
         struct ib_wc wc;
 
-        if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
-                goto bail;
-
-        if (unlikely(qp->remote_ah_attr.dlid ==
-                     ipath_layer_get_lid(dev->dd))) {
-                /* Pass in an uninitialized ib_wc to save stack space. */
-                ipath_ruc_loopback(qp, &wc);
-                clear_bit(IPATH_S_BUSY, &qp->s_flags);
-                goto bail;
-        }
-
-        ohdr = &qp->s_hdr.u.oth;
-        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-                ohdr = &qp->s_hdr.u.l.oth;
-
-again:
-        /* Check for a constructed packet to be sent. */
-        if (qp->s_hdrwords != 0) {
-                /*
-                 * If no PIO bufs are available, return.
-                 * An interrupt will call ipath_ib_piobufavail()
-                 * when one is available.
-                 */
-                if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
-                                     (u32 *) &qp->s_hdr,
-                                     qp->s_cur_size,
-                                     qp->s_cur_sge)) {
-                        ipath_no_bufs_available(qp, dev);
-                        goto bail;
-                }
-                dev->n_unicast_xmit++;
-                /* Record that we sent the packet and s_hdr is empty. */
-                qp->s_hdrwords = 0;
-        }
-
-        lrh0 = IPS_LRH_BTH;
-        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-        hwords = 5;
-
-        /*
-         * The lock is needed to synchronize between
-         * setting qp->s_ack_state and post_send().
-         */
-        spin_lock_irqsave(&qp->s_lock, flags);
-
         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
                 goto done;
 
-        bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
+        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
+        hwords = 5;
+        bth0 = 0;
 
-        /* Send a request. */
+        /* Get the next send request. */
         wqe = get_swqe_ptr(qp, qp->s_last);
         switch (qp->s_state) {
         default:
                 /*
-                 * Signal the completion of the last send (if there is
-                 * one).
+                 * Signal the completion of the last send
+                 * (if there is one).
                  */
                 if (qp->s_last != qp->s_tail)
                         complete_last_send(qp, wqe, &wc);
@@ -258,61 +208,16 @@ again:
                 }
                 break;
         }
-        bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
         qp->s_len -= len;
-        bth0 |= qp->s_state << 24;
-
-        spin_unlock_irqrestore(&qp->s_lock, flags);
-
-        /* Construct the header. */
-        extra_bytes = (4 - len) & 3;
-        nwords = (len + extra_bytes) >> 2;
-        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-                /* Header size in 32-bit words. */
-                hwords += 10;
-                lrh0 = IPS_LRH_GRH;
-                qp->s_hdr.u.l.grh.version_tclass_flow =
-                        cpu_to_be32((6 << 28) |
-                                    (qp->remote_ah_attr.grh.traffic_class
-                                     << 20) |
-                                    qp->remote_ah_attr.grh.flow_label);
-                qp->s_hdr.u.l.grh.paylen =
-                        cpu_to_be16(((hwords - 12) + nwords +
-                                     SIZE_OF_CRC) << 2);
-                /* next_hdr is defined by C8-7 in ch. 8.4.1 */
-                qp->s_hdr.u.l.grh.next_hdr = 0x1B;
-                qp->s_hdr.u.l.grh.hop_limit =
-                        qp->remote_ah_attr.grh.hop_limit;
-                /* The SGID is 32-bit aligned. */
-                qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
-                        dev->gid_prefix;
-                qp->s_hdr.u.l.grh.sgid.global.interface_id =
-                        ipath_layer_get_guid(dev->dd);
-                qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
-        }
         qp->s_hdrwords = hwords;
         qp->s_cur_sge = &qp->s_sge;
         qp->s_cur_size = len;
-        lrh0 |= qp->remote_ah_attr.sl << 4;
-        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-        /* DEST LID */
-        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-        qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
-        qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
-        bth0 |= extra_bytes << 20;
-        ohdr->bth[0] = cpu_to_be32(bth0);
-        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-        ohdr->bth[2] = cpu_to_be32(bth2);
-
-        /* Check for more work to do. */
-        goto again;
+        *bth0p = bth0 | (qp->s_state << 24);
+        *bth2p = qp->s_next_psn++ & IPS_PSN_MASK;
+        return 1;
 
 done:
-        spin_unlock_irqrestore(&qp->s_lock, flags);
-        clear_bit(IPATH_S_BUSY, &qp->s_flags);
-
-bail:
-        return;
+        return 0;
 }
 
 /**
@@ -536,12 +441,13 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                 if (qp->r_len != 0) {
                         u32 rkey = be32_to_cpu(reth->rkey);
                         u64 vaddr = be64_to_cpu(reth->vaddr);
+                        int ok;
 
                         /* Check rkey */
-                        if (unlikely(!ipath_rkey_ok(
-                                     dev, &qp->r_sge, qp->r_len,
-                                     vaddr, rkey,
-                                     IB_ACCESS_REMOTE_WRITE))) {
+                        ok = ipath_rkey_ok(dev, &qp->r_sge, qp->r_len,
+                                           vaddr, rkey,
+                                           IB_ACCESS_REMOTE_WRITE);
+                        if (unlikely(!ok)) {
                                 dev->n_pkt_drops++;
                                 goto done;
                         }
@@ -559,8 +465,7 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                 }
                 if (opcode == OP(RDMA_WRITE_ONLY))
                         goto rdma_last;
-                else if (opcode ==
-                         OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
+                else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
                         goto rdma_last_imm;
                 /* FALLTHROUGH */
         case OP(RDMA_WRITE_MIDDLE):
@@ -593,9 +498,9 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                         dev->n_pkt_drops++;
                         goto done;
                 }
-                if (qp->r_reuse_sge) {
+                if (qp->r_reuse_sge)
                         qp->r_reuse_sge = 0;
-                } else if (!ipath_get_rwqe(qp, 1)) {
+                else if (!ipath_get_rwqe(qp, 1)) {
                         dev->n_pkt_drops++;
                         goto done;
                 }
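
For context on the receive-side hunks: ipath_rkey_ok() checks that the (vaddr, rkey, length) triple taken from the RETH resolves to a registered memory region granting the required access rights (here IB_ACCESS_REMOTE_WRITE), setting up qp->r_sge on success. A conceptual sketch of what such a check involves, with assumed names (struct toy_mr and toy_rkey_ok() are illustrations, not the driver's implementation):

/*
 * Conceptual sketch only: struct toy_mr and toy_rkey_ok() are assumed
 * shapes for exposition, not the ipath driver's code.
 */
struct toy_mr {
        u32 rkey;               /* key the remote peer must present */
        u64 base;               /* start of the registered region */
        u64 length;             /* size of the registered region */
        int access_flags;       /* e.g. IB_ACCESS_REMOTE_WRITE */
};

static int toy_rkey_ok(const struct toy_mr *mr, u64 vaddr, u32 rkey,
                       u64 len, int need)
{
        /* The presented key must name this region. */
        if (rkey != mr->rkey)
                return 0;
        /* The byte range must lie inside the region (overflow-safe). */
        if (len > mr->length || vaddr < mr->base ||
            vaddr - mr->base > mr->length - len)
                return 0;
        /* The region must grant the requested access. */
        if ((mr->access_flags & need) != need)
                return 0;
        return 1;
}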