Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ud.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c  41
1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index e606daf83210..3466129af804 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2006 QLogic, Inc. All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -33,7 +34,7 @@
 #include <rdma/ib_smi.h>
 
 #include "ipath_verbs.h"
-#include "ips_common.h"
+#include "ipath_common.h"
 
 /**
  * ipath_ud_loopback - handle send on loopback QPs
@@ -274,6 +275,11 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 		len += wr->sg_list[i].length;
 		ss.num_sge++;
 	}
+	/* Check for invalid packet size. */
+	if (len > ipath_layer_get_ibmtu(dev->dd)) {
+		ret = -EINVAL;
+		goto bail;
+	}
 	extra_bytes = (4 - len) & 3;
 	nwords = (len + extra_bytes) >> 2;
 
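The two context lines above compute the payload padding: (4 - len) & 3 is the number of bytes needed to round len up to a 4-byte boundary, and the padded length shifted right by two is the payload size in 32-bit words. A standalone user-space sketch of that arithmetic (not driver code, just the same formula applied to a few example lengths):

#include <stdio.h>

int main(void)
{
	unsigned int lengths[] = { 0, 1, 2, 3, 4, 5, 7, 8 };
	unsigned int i;

	for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned int len = lengths[i];
		/* Pad bytes needed to reach the next 4-byte boundary. */
		unsigned int extra_bytes = (4 - len) & 3;
		/* Padded payload length expressed in 32-bit words. */
		unsigned int nwords = (len + extra_bytes) >> 2;

		printf("len=%u pad=%u words=%u\n", len, extra_bytes, nwords);
	}
	return 0;
}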
@@ -283,8 +289,8 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 		ret = -EINVAL;
 		goto bail;
 	}
-	if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE) {
-		if (ah_attr->dlid != IPS_PERMISSIVE_LID)
+	if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
+		if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
 			dev->n_multicast_xmit++;
 		else
 			dev->n_unicast_xmit++;
@@ -304,7 +310,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	if (ah_attr->ah_flags & IB_AH_GRH) {
 		/* Header size in 32-bit words. */
 		hwords = 17;
-		lrh0 = IPS_LRH_GRH;
+		lrh0 = IPATH_LRH_GRH;
 		ohdr = &qp->s_hdr.u.l.oth;
 		qp->s_hdr.u.l.grh.version_tclass_flow =
 			cpu_to_be32((6 << 28) |
@@ -330,7 +336,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	} else {
 		/* Header size in 32-bit words. */
 		hwords = 7;
-		lrh0 = IPS_LRH_BTH;
+		lrh0 = IPATH_LRH_BTH;
 		ohdr = &qp->s_hdr.u.oth;
 	}
 	if (wr->opcode == IB_WR_SEND_WITH_IMM) {
@@ -361,18 +367,18 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	if (wr->send_flags & IB_SEND_SOLICITED)
 		bth0 |= 1 << 23;
 	bth0 |= extra_bytes << 20;
-	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPS_DEFAULT_P_KEY :
+	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
 		ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	/*
 	 * Use the multicast QP if the destination LID is a multicast LID.
 	 */
-	ohdr->bth[1] = ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
-		ah_attr->dlid != IPS_PERMISSIVE_LID ?
-		__constant_cpu_to_be32(IPS_MULTICAST_QPN) :
+	ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
+		ah_attr->dlid != IPATH_PERMISSIVE_LID ?
+		__constant_cpu_to_be32(IPATH_MULTICAST_QPN) :
 		cpu_to_be32(wr->wr.ud.remote_qpn);
 	/* XXX Could lose a PSN count but not worth locking */
-	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPS_PSN_MASK);
+	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
 	/*
 	 * Qkeys with the high order bit set mean use the
 	 * qkey from the QP context instead of the WR (see 10.2.5).
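For orientation, the bits OR'd into bth0 in this hunk follow the usual InfiniBand BTH word-0 layout: opcode in bits 31:24, the solicited-event bit at 23, the pad count at 21:20, and the P_Key in the low 16 bits. A minimal standalone sketch of that packing, assuming the standard layout; the opcode and P_Key values below are only examples, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_bth0(uint8_t opcode, int solicited,
			  uint8_t pad_count, uint16_t pkey)
{
	uint32_t bth0 = (uint32_t)opcode << 24;	/* opcode in bits 31:24 */

	if (solicited)
		bth0 |= 1u << 23;		/* solicited event bit */
	bth0 |= (uint32_t)(pad_count & 3) << 20;	/* pad count, 0-3 bytes */
	bth0 |= pkey;				/* partition key in bits 15:0 */
	return bth0;
}

int main(void)
{
	/* Example values: UD SEND-only opcode, 2 pad bytes, default P_Key. */
	printf("bth0 = 0x%08x\n", pack_bth0(0x64, 1, 2, 0xffff));
	return 0;
}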
@@ -463,7 +469,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 			src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
 		}
 	}
-	src_qp &= IPS_QPN_MASK;
+	src_qp &= IPATH_QPN_MASK;
 
 	/*
 	 * Check that the permissive LID is only used on QP0
@@ -554,7 +560,16 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	spin_lock_irqsave(&rq->lock, flags);
 	if (rq->tail == rq->head) {
 		spin_unlock_irqrestore(&rq->lock, flags);
-		dev->n_pkt_drops++;
+		/*
+		 * Count VL15 packets dropped due to no receive buffer.
+		 * Otherwise, count them as buffer overruns since usually,
+		 * the HW will be able to receive packets even if there are
+		 * no QPs with posted receive buffers.
+		 */
+		if (qp->ibqp.qp_num == 0)
+			dev->n_vl15_dropped++;
+		else
+			dev->rcv_errors++;
 		goto bail;
 	}
 	/* Silently drop packets which are too big. */
@@ -612,7 +627,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	/*
 	 * Save the LMC lower bits if the destination LID is a unicast LID.
 	 */
-	wc.dlid_path_bits = dlid >= IPS_MULTICAST_LID_BASE ? 0 :
+	wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
 		dlid & ((1 << (dev->mkeyprot_resv_lmc & 7)) - 1);
 	/* Signal completion event if the solicited bit is set. */
 	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
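As a side note on the dlid_path_bits line above: in InfiniBand, a port with LMC lmc answers to 2^lmc consecutive LIDs, and the low lmc bits of the destination LID identify which path was used, which is why the driver masks them out of the DLID here (and forces them to 0 for multicast LIDs). A small standalone sketch of that mask, using made-up example values:

#include <stdint.h>
#include <stdio.h>

static uint16_t dlid_path_bits(uint16_t dlid, unsigned int lmc)
{
	/* Keep only the low lmc bits of the destination LID. */
	return dlid & ((1u << lmc) - 1);
}

int main(void)
{
	/* Example: base LID 0x40 with LMC 2 covers LIDs 0x40..0x43. */
	uint16_t dlid;

	for (dlid = 0x40; dlid <= 0x43; dlid++)
		printf("dlid=0x%x path_bits=%u\n", dlid, dlid_path_bits(dlid, 2));
	return 0;
}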