Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_cq.c		94
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_verbs.h	 6
2 files changed, 53 insertions, 47 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index a6f04d27ec57..645ed71fd797 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -76,22 +76,25 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 		}
 		return;
 	}
-	wc->queue[head].wr_id = entry->wr_id;
-	wc->queue[head].status = entry->status;
-	wc->queue[head].opcode = entry->opcode;
-	wc->queue[head].vendor_err = entry->vendor_err;
-	wc->queue[head].byte_len = entry->byte_len;
-	wc->queue[head].imm_data = (__u32 __force)entry->imm_data;
-	wc->queue[head].qp_num = entry->qp->qp_num;
-	wc->queue[head].src_qp = entry->src_qp;
-	wc->queue[head].wc_flags = entry->wc_flags;
-	wc->queue[head].pkey_index = entry->pkey_index;
-	wc->queue[head].slid = entry->slid;
-	wc->queue[head].sl = entry->sl;
-	wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
-	wc->queue[head].port_num = entry->port_num;
-	/* Make sure queue entry is written before the head index. */
-	smp_wmb();
+	if (cq->ip) {
+		wc->uqueue[head].wr_id = entry->wr_id;
+		wc->uqueue[head].status = entry->status;
+		wc->uqueue[head].opcode = entry->opcode;
+		wc->uqueue[head].vendor_err = entry->vendor_err;
+		wc->uqueue[head].byte_len = entry->byte_len;
+		wc->uqueue[head].imm_data = (__u32 __force)entry->imm_data;
+		wc->uqueue[head].qp_num = entry->qp->qp_num;
+		wc->uqueue[head].src_qp = entry->src_qp;
+		wc->uqueue[head].wc_flags = entry->wc_flags;
+		wc->uqueue[head].pkey_index = entry->pkey_index;
+		wc->uqueue[head].slid = entry->slid;
+		wc->uqueue[head].sl = entry->sl;
+		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
+		wc->uqueue[head].port_num = entry->port_num;
+		/* Make sure entry is written before the head index. */
+		smp_wmb();
+	} else
+		wc->kqueue[head] = *entry;
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
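The cq->ip branch above exists because a user-mapped CQ is filled in field by field in a page shared with userspace, and the reader on the other side never takes cq->lock; the smp_wmb() is what guarantees the entry's stores are visible before the head index that publishes it. A minimal sketch of that publish pattern follows; every name in it (struct ring, struct entry, RING_SIZE, ring_publish) is hypothetical, not the driver's code:

/*
 * Illustrative single-producer publish. The caller is assumed to hold
 * the ring's lock against other producers; the barrier is for lock-free
 * readers such as a userspace mapping.
 */
#define RING_SIZE 64

struct entry {
	u64 wr_id;
	u32 status;
};

struct ring {
	u32 head;			/* written here, read by the consumer */
	u32 tail;			/* advanced by the consumer */
	struct entry queue[RING_SIZE];
};

static void ring_publish(struct ring *r, const struct entry *e)
{
	u32 head = r->head;

	r->queue[head] = *e;		/* fill the slot completely first */
	smp_wmb();			/* slot stores must land before the index store */
	r->head = (head + 1) % RING_SIZE;	/* entry is now visible */
}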
@@ -130,6 +133,12 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 	int npolled;
 	u32 tail;
 
+	/* The kernel can only poll a kernel completion queue */
+	if (cq->ip) {
+		npolled = -EINVAL;
+		goto bail;
+	}
+
 	spin_lock_irqsave(&cq->lock, flags);
 
 	wc = cq->queue;
@@ -137,31 +146,10 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 	if (tail > (u32) cq->ibcq.cqe)
 		tail = (u32) cq->ibcq.cqe;
 	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-		struct ipath_qp *qp;
-
 		if (tail == wc->head)
 			break;
-		/* Make sure entry is read after head index is read. */
-		smp_rmb();
-		qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
-				      wc->queue[tail].qp_num);
-		entry->qp = &qp->ibqp;
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
-
-		entry->wr_id = wc->queue[tail].wr_id;
-		entry->status = wc->queue[tail].status;
-		entry->opcode = wc->queue[tail].opcode;
-		entry->vendor_err = wc->queue[tail].vendor_err;
-		entry->byte_len = wc->queue[tail].byte_len;
-		entry->imm_data = wc->queue[tail].imm_data;
-		entry->src_qp = wc->queue[tail].src_qp;
-		entry->wc_flags = wc->queue[tail].wc_flags;
-		entry->pkey_index = wc->queue[tail].pkey_index;
-		entry->slid = wc->queue[tail].slid;
-		entry->sl = wc->queue[tail].sl;
-		entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
-		entry->port_num = wc->queue[tail].port_num;
+		/* The kernel doesn't need a RMB since it has the lock. */
+		*entry = wc->kqueue[tail];
 		if (tail >= cq->ibcq.cqe)
 			tail = 0;
 		else
@@ -171,6 +159,7 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
 	spin_unlock_irqrestore(&cq->lock, flags);
 
+bail:
 	return npolled;
 }
 
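With the union in place, the kernel consumer above is just a struct copy under cq->lock, and a user-mapped CQ (cq->ip set) is drained from the mmap'ed page by userspace instead, which is why ipath_poll_cq() now bails out with -EINVAL for it. The matching consumer for the hypothetical ring sketch earlier would look roughly like this; because the same lock serializes the producer, no smp_rmb() is needed:

/* Continues the hypothetical struct ring sketch above. */
static int ring_poll(struct ring *r, struct entry *out, spinlock_t *lock)
{
	unsigned long flags;
	int npolled = 0;

	spin_lock_irqsave(lock, flags);
	if (r->tail != r->head) {
		/* No smp_rmb(): taking the lock the producer held
		 * already orders its entry stores before this read. */
		*out = r->queue[r->tail];
		r->tail = (r->tail + 1) % RING_SIZE;
		npolled = 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return npolled;
}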
@@ -215,6 +204,7 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vec
 	struct ipath_cq *cq;
 	struct ipath_cq_wc *wc;
 	struct ib_cq *ret;
+	u32 sz;
 
 	if (entries < 1 || entries > ib_ipath_max_cqes) {
 		ret = ERR_PTR(-EINVAL);
@@ -235,7 +225,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vec
 	 * We need to use vmalloc() in order to support mmap and large
 	 * numbers of entries.
 	 */
-	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
+	sz = sizeof(*wc);
+	if (udata && udata->outlen >= sizeof(__u64))
+		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
+	else
+		sz += sizeof(struct ib_wc) * (entries + 1);
+	wc = vmalloc_user(sz);
 	if (!wc) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail_cq;
@@ -247,9 +242,8 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vec
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
 		int err;
-		u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;
 
-		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
+		cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
 		if (!cq->ip) {
 			ret = ERR_PTR(-ENOMEM);
 			goto bail_wc;
@@ -380,6 +374,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	struct ipath_cq_wc *wc;
 	u32 head, tail, n;
 	int ret;
+	u32 sz;
 
 	if (cqe < 1 || cqe > ib_ipath_max_cqes) {
 		ret = -EINVAL;
@@ -389,7 +384,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	/*
 	 * Need to use vmalloc() if we want to support large #s of entries.
 	 */
-	wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
+	sz = sizeof(*wc);
+	if (udata && udata->outlen >= sizeof(__u64))
+		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
+	else
+		sz += sizeof(struct ib_wc) * (cqe + 1);
+	wc = vmalloc_user(sz);
 	if (!wc) {
 		ret = -ENOMEM;
 		goto bail;
@@ -430,7 +430,10 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 		goto bail;
 	}
 	for (n = 0; tail != head; n++) {
-		wc->queue[n] = old_wc->queue[tail];
+		if (cq->ip)
+			wc->uqueue[n] = old_wc->uqueue[tail];
+		else
+			wc->kqueue[n] = old_wc->kqueue[tail];
 		if (tail == (u32) cq->ibcq.cqe)
 			tail = 0;
 		else
@@ -447,9 +450,8 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	if (cq->ip) {
 		struct ipath_ibdev *dev = to_idev(ibcq->device);
 		struct ipath_mmap_info *ip = cq->ip;
-		u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;
 
-		ipath_update_mmap_info(dev, ip, s, wc);
+		ipath_update_mmap_info(dev, ip, sz, wc);
 		spin_lock_irq(&dev->pending_lock);
 		if (list_empty(&ip->pending_mmaps))
 			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
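Both ipath_create_cq() and ipath_resize_cq() now compute the buffer size once, keyed on whether the CQ will be mapped into userspace (udata with room for the mmap offset) and so holds ib_uverbs_wc entries, or stays kernel-only and holds ib_wc entries; reusing that sz for ipath_create_mmap_info()/ipath_update_mmap_info() keeps the mmap length in step with the vmalloc_user() allocation. The computation both paths open-code is equivalent to this sketch (the helper name is hypothetical):

static u32 ipath_cq_wc_size(int entries, bool user_mapped)
{
	u32 sz = sizeof(struct ipath_cq_wc);	/* head/tail header only */

	if (user_mapped)	/* layout shared with userspace */
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else			/* kernel-internal layout */
		sz += sizeof(struct ib_wc) * (entries + 1);
	return sz;
}

Note the slot count: the old expression added sizeof(struct ib_wc) * entries on top of a struct whose queue[1] member already contained one slot, while the zero-length union members contribute nothing, so the entries + 1 now appears explicitly.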
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index a1972295bffd..9be9bf91f4f0 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -191,7 +191,11 @@ struct ipath_mmap_info {
 struct ipath_cq_wc {
 	u32 head;		/* index of next entry to fill */
 	u32 tail;		/* index of next ib_poll_cq() entry */
-	struct ib_uverbs_wc queue[1]; /* this is actually size ibcq.cqe + 1 */
+	union {
+		/* these are actually size ibcq.cqe + 1 */
+		struct ib_uverbs_wc uqueue[0];
+		struct ib_wc kqueue[0];
+	};
 };
 
 /*
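The union of zero-length arrays lets a single vmalloc_user() allocation carry either element layout behind the common head/tail header, with cq->ip deciding once, at creation time, which member is live. Since both arrays are zero-sized, the union itself adds no storage, which is what the explicit entries + 1 sizing in create/resize accounts for. A small userspace-compilable sketch of the trick, with stand-in types (uwc and kwc are hypothetical; the real ib_uverbs_wc and ib_wc are larger):

#include <stdio.h>

/* Stand-in element types, only to show the layout. */
struct uwc { unsigned long long wr_id; unsigned int status; };
struct kwc { unsigned long long wr_id; void *qp; unsigned int status; };

struct cq_wc {
	unsigned int head;
	unsigned int tail;
	union {		/* zero-length arrays: a GNU C extension */
		struct uwc uqueue[0];
		struct kwc kqueue[0];
	};
};

int main(void)
{
	/* The union contributes no bytes of its own, so whoever allocates
	 * a cq_wc must append (entries + 1) * element-size itself. */
	printf("sizeof(struct cq_wc) = %zu\n", sizeof(struct cq_wc));
	return 0;
}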