author    Yevgeny Petrilin <yevgenyp@mellanox.co.il>    2008-08-06 23:14:06 -0400
committer Roland Dreier <rolandd@cisco.com>             2008-08-06 23:14:06 -0400
commit    f780a9f119caa48088b230836a7fa73d1096de7c (patch)
tree      513fb3aa4342a481aa1f4101675ea2e9c41bc28a /drivers/infiniband/hw/mlx4/cq.c
parent    6e86841d05f371b5b9b86ce76c02aaee83352298 (diff)
mlx4_core: Add ethernet fields to CQE struct
Add ethernet-related fields to struct mlx4_cqe so that the mlx4_en
ethernet NIC driver can share the same definition.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
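For orientation, here is a minimal, illustrative sketch of the shared CQE fields this patch refers to. Only the field names and byte-order widths that appear in the diff below are taken from the patch; the field order, padding, and the omitted members are assumptions, not the kernel's actual struct mlx4_cqe definition.

/*
 * Illustrative sketch only: field names and __be* widths come from the
 * diff below; ordering, padding and the omitted members are assumptions,
 * not the kernel's real struct mlx4_cqe layout.
 */
#include <linux/types.h>	/* __be16, __be32, u8 */

struct mlx4_cqe_sketch {
	__be32	vlan_my_qpn;		/* QPN in the low 24 bits (MLX4_CQE_QPN_MASK) */
	__be32	immed_rss_invalid;
	__be32	g_mlpath_rqpn;
	__be16	sl_vid;			/* service level in the top 4 bits */
	__be16	rlid;
	__be16	status;			/* MLX4_CQE_STATUS_* bits, was ipoib_status */
	__be16	checksum;
	__be16	wqe_index;
	u8	owner_sr_opcode;
	/* ... remaining bytes omitted ... */
};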
Diffstat (limited to 'drivers/infiniband/hw/mlx4/cq.c')
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c	33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a1464574bfdd..d0866a3636e2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -515,17 +515,17 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
 	wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
-static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 {
-	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-				      MLX4_CQE_IPOIB_STATUS_IPV4F |
-				      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
-				      MLX4_CQE_IPOIB_STATUS_IPV6 |
-				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
-		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-			    MLX4_CQE_IPOIB_STATUS_IPOK)) &&
-		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
-				      MLX4_CQE_IPOIB_STATUS_TCP)) &&
+	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+				      MLX4_CQE_STATUS_IPV4F |
+				      MLX4_CQE_STATUS_IPV4OPT |
+				      MLX4_CQE_STATUS_IPV6 |
+				      MLX4_CQE_STATUS_IPOK)) ==
+		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+			    MLX4_CQE_STATUS_IPOK)) &&
+		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
+				      MLX4_CQE_STATUS_TCP)) &&
 		checksum == cpu_to_be16(0xffff);
 }
 
@@ -582,17 +582,17 @@ repoll:
 	}
 
 	if (!*cur_qp ||
-	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
 		/*
 		 * We do not have to take the QP table lock here,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
-				       be32_to_cpu(cqe->my_qpn));
+				       be32_to_cpu(cqe->vlan_my_qpn));
 		if (unlikely(!mqp)) {
 			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
-			       cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
 			return -EINVAL;
 		}
 
@@ -692,14 +692,13 @@ repoll:
 		}
 
 		wc->slid	   = be16_to_cpu(cqe->rlid);
-		wc->sl		   = cqe->sl >> 4;
+		wc->sl		   = be16_to_cpu(cqe->sl_vid >> 12);
 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index	   = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
-							   cqe->checksum);
+		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
 	}
 
 	return 0;
@@ -767,7 +766,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	 */
 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-		if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
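As a closing usage note, the renamed fields keep the bit positions the old open-coded masks implied: the QPN still occupies the low 24 bits of vlan_my_qpn (the former 0xffffff mask, now MLX4_CQE_QPN_MASK), and the service level sits in the top 4 bits of sl_vid. A hedged sketch of how a consumer might pick those apart, reusing the illustrative struct from the sketch above:

#include <linux/types.h>
#include <asm/byteorder.h>	/* be16_to_cpu(), be32_to_cpu() */

/* Sketch, not kernel code: bit positions assumed from the masks and
 * shifts used in this patch. */
static u32 sketch_cqe_qpn(const struct mlx4_cqe_sketch *cqe)
{
	/* QPN lives in the low 24 bits of vlan_my_qpn */
	return be32_to_cpu(cqe->vlan_my_qpn) & 0xffffff;	/* MLX4_CQE_QPN_MASK */
}

static u8 sketch_cqe_sl(const struct mlx4_cqe_sketch *cqe)
{
	/* service level in the top 4 bits of sl_vid */
	return be16_to_cpu(cqe->sl_vid) >> 12;
}

The sketch byte-swaps sl_vid before shifting so that the top nibble is taken from the host-order value rather than from the raw big-endian representation.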