author		Robert Walsh <robert.walsh@qlogic.com>	2007-03-15 17:45:17 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-04-18 23:21:01 -0400
commit		40b90430ecac40cc9adb26b808cc12a3d569da5d (patch)
tree		4c87d4b4d3c4f270a0371fdd66b02d78ac8ce306 /drivers/infiniband/hw/ipath/ipath_cq.c
parent		6ce73b07db7aa05d4a30716d6a99c832b6d9db4a (diff)
IB/ipath: Fix WC format drift between user and kernel space
The kernel ib_wc structure now uses a QP pointer, but the user space equivalent uses a QP number instead. This means we can no longer use a simple structure copy to copy stuff into user space.

Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
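To make the mismatch concrete, here is a minimal, self-contained sketch of the idea (illustration only: the names kernel_wc, user_wc and copy_wc_to_user, the stand-in typedefs, and the trimmed field lists are hypothetical, not the driver's actual definitions). The kernel-side completion carries a QP pointer, while the entry written into the user-visible queue carries only the QP number, so fields have to be copied one by one and the pointer translated:

	/* Stand-in types so the sketch builds outside the kernel tree. */
	typedef unsigned long long u64;
	typedef unsigned int u32;

	struct ib_qp {
		u32 qp_num;		/* the number user space knows the QP by */
		/* ... */
	};

	/* Kernel-side completion (simplified): holds a pointer to the QP. */
	struct kernel_wc {
		u64 wr_id;
		struct ib_qp *qp;	/* kernel pointer, meaningless to user space */
		u32 byte_len;
	};

	/* User-visible queue entry (hypothetical layout): holds a QP number. */
	struct user_wc {
		u64 wr_id;
		u32 qp_num;		/* stable across the user/kernel boundary */
		u32 byte_len;
	};

	/*
	 * A plain "*dst = *src" no longer works once the layouts differ, and it
	 * would also leak a kernel pointer into user memory; instead each field
	 * is copied explicitly and the QP pointer is translated to its number,
	 * which is what the patch below does in ipath_cq_enter().
	 */
	static void copy_wc_to_user(struct user_wc *dst, const struct kernel_wc *src)
	{
		dst->wr_id = src->wr_id;
		dst->qp_num = src->qp->qp_num;	/* pointer -> number */
		dst->byte_len = src->byte_len;
	}

The poll path goes the other way: ipath_poll_cq() looks the QP back up by number with ipath_lookup_qpn() and then drops the reference that lookup took, which is why the second hunk below decrements qp->refcount and wakes any waiter.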
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_cq.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_cq.c	38
1 files changed, 36 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 87462e0cb4d2..ea78e6dddc90 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -76,7 +76,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 		}
 		return;
 	}
-	wc->queue[head] = *entry;
+	wc->queue[head].wr_id = entry->wr_id;
+	wc->queue[head].status = entry->status;
+	wc->queue[head].opcode = entry->opcode;
+	wc->queue[head].vendor_err = entry->vendor_err;
+	wc->queue[head].byte_len = entry->byte_len;
+	wc->queue[head].imm_data = (__u32 __force)entry->imm_data;
+	wc->queue[head].qp_num = entry->qp->qp_num;
+	wc->queue[head].src_qp = entry->src_qp;
+	wc->queue[head].wc_flags = entry->wc_flags;
+	wc->queue[head].pkey_index = entry->pkey_index;
+	wc->queue[head].slid = entry->slid;
+	wc->queue[head].sl = entry->sl;
+	wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
+	wc->queue[head].port_num = entry->port_num;
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -122,9 +135,30 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 	if (tail > (u32) cq->ibcq.cqe)
 		tail = (u32) cq->ibcq.cqe;
 	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
+		struct ipath_qp *qp;
+
 		if (tail == wc->head)
 			break;
-		*entry = wc->queue[tail];
+
+		qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
+				      wc->queue[tail].qp_num);
+		entry->qp = &qp->ibqp;
+		if (atomic_dec_and_test(&qp->refcount))
+			wake_up(&qp->wait);
+
+		entry->wr_id = wc->queue[tail].wr_id;
+		entry->status = wc->queue[tail].status;
+		entry->opcode = wc->queue[tail].opcode;
+		entry->vendor_err = wc->queue[tail].vendor_err;
+		entry->byte_len = wc->queue[tail].byte_len;
+		entry->imm_data = wc->queue[tail].imm_data;
+		entry->src_qp = wc->queue[tail].src_qp;
+		entry->wc_flags = wc->queue[tail].wc_flags;
+		entry->pkey_index = wc->queue[tail].pkey_index;
+		entry->slid = wc->queue[tail].slid;
+		entry->sl = wc->queue[tail].sl;
+		entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
+		entry->port_num = wc->queue[tail].port_num;
 		if (tail >= cq->ibcq.cqe)
 			tail = 0;
 		else