author     Bryan O'Sullivan <bos@pathscale.com>       2006-09-28 12:00:23 -0400
committer  Roland Dreier <rolandd@cisco.com>          2006-09-28 14:17:12 -0400
commit     7a26c47412b201e1977ad42b760885f825158915
tree       523874b1a1ad97cbfee58737952dc38ce717a518 /drivers
parent     bf3258ec418a008ab4672787ebff2c5837dd1e69
IB/ipath: Fix races with ib_resize_cq()
The resize CQ function replaces the memory used to store the queue.
Other routines therefore need to take the lock before reading the
pointer to the queue, and must verify that the head and tail indexes
are in range.
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
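The pattern the patch applies is: never cache cq->queue across the lock (a concurrent ipath_resize_cq() may swap it), and treat the head/tail indexes as untrusted because user space can write to them. Below is a minimal userspace sketch of the enter side of that pattern. The struct cq/cq_ring types, the pthread mutex standing in for the driver's spin_lock_irqsave() on cq->lock, and the cq_enter() helper are simplified stand-ins invented for illustration, not the driver's real definitions.

#include <pthread.h>
#include <stdint.h>

/* Hypothetical, simplified analogue of the ipath CQ structures: a ring of
 * completion entries whose head/tail indexes may be overwritten by user
 * space, plus the lock that the resize path holds while swapping the ring. */
struct cq_ring {
        uint32_t head;          /* next slot to fill (user-writable) */
        uint32_t tail;          /* next slot to drain (user-writable) */
        int      entries[64];   /* completion payload, elided */
};

struct cq {
        pthread_mutex_t lock;   /* stands in for cq->lock */
        uint32_t        cqe;    /* last valid index, as in cq->ibcq.cqe */
        struct cq_ring *queue;  /* may be replaced by a concurrent resize */
};

/* Sketch of the fixed completion-entry path: read the queue pointer only
 * after taking the lock, and clamp the user-writable head before use. */
void cq_enter(struct cq *cq, int entry)
{
        struct cq_ring *wc;
        uint32_t head, next;

        pthread_mutex_lock(&cq->lock);
        wc = cq->queue;                 /* re-read under the lock */
        head = wc->head;
        if (head > cq->cqe)             /* sanity-check untrusted index */
                head = cq->cqe;
        next = (head == cq->cqe) ? 0 : head + 1;
        if (next != wc->tail) {         /* ring not full */
                wc->entries[head] = entry;
                wc->head = next;
        }
        pthread_mutex_unlock(&cq->lock);
}

The two points mirror the patch: the queue pointer is only dereferenced while the lock is held, so a concurrent resize cannot pull the ring out from under the writer, and the user-writable index is clamped before it is used to address the ring.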
Diffstat (limited to 'drivers')
 drivers/infiniband/hw/ipath/ipath_cq.c | 30
 1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 00440d5c91e0..87462e0cb4d2 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -46,7 +46,7 @@
  */
 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 {
-        struct ipath_cq_wc *wc = cq->queue;
+        struct ipath_cq_wc *wc;
         unsigned long flags;
         u32 head;
         u32 next;
@@ -57,6 +57,7 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
          * Note that the head pointer might be writable by user processes.
          * Take care to verify it is a sane value.
          */
+        wc = cq->queue;
         head = wc->head;
         if (head >= (unsigned) cq->ibcq.cqe) {
                 head = cq->ibcq.cqe;
@@ -109,21 +110,27 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 {
         struct ipath_cq *cq = to_icq(ibcq);
-        struct ipath_cq_wc *wc = cq->queue;
+        struct ipath_cq_wc *wc;
         unsigned long flags;
         int npolled;
+        u32 tail;
 
         spin_lock_irqsave(&cq->lock, flags);
 
+        wc = cq->queue;
+        tail = wc->tail;
+        if (tail > (u32) cq->ibcq.cqe)
+                tail = (u32) cq->ibcq.cqe;
         for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-                if (wc->tail == wc->head)
+                if (tail == wc->head)
                         break;
-                *entry = wc->queue[wc->tail];
-                if (wc->tail >= cq->ibcq.cqe)
-                        wc->tail = 0;
+                *entry = wc->queue[tail];
+                if (tail >= cq->ibcq.cqe)
+                        tail = 0;
                 else
-                        wc->tail++;
+                        tail++;
         }
+        wc->tail = tail;
 
         spin_unlock_irqrestore(&cq->lock, flags);
 
@@ -322,10 +329,16 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
         return 0;
 }
 
+/**
+ * ipath_resize_cq - change the size of the CQ
+ * @ibcq: the completion queue
+ *
+ * Returns 0 for success.
+ */
 int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 {
         struct ipath_cq *cq = to_icq(ibcq);
-        struct ipath_cq_wc *old_wc = cq->queue;
+        struct ipath_cq_wc *old_wc;
         struct ipath_cq_wc *wc;
         u32 head, tail, n;
         int ret;
@@ -361,6 +374,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
          * Make sure head and tail are sane since they
          * might be user writable.
          */
+        old_wc = cq->queue;
         head = old_wc->head;
         if (head > (u32) cq->ibcq.cqe)
                 head = (u32) cq->ibcq.cqe;
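On the polling side, the patch additionally snapshots the user-writable tail into a local variable, clamps it once, consumes entries through the local copy, and publishes it back with a single store before dropping the lock. A hedged sketch of that approach, reusing the simplified cq/cq_ring types and pthread lock from the sketch above (again illustrative stand-ins, not the driver's real API):

/* Sketch of the fixed poll path: work on a local, clamped copy of the
 * user-writable tail index and write it back once, all under the lock. */
int cq_poll(struct cq *cq, int num_entries, int *out)
{
        struct cq_ring *wc;
        uint32_t tail;
        int npolled;

        pthread_mutex_lock(&cq->lock);
        wc = cq->queue;                 /* resize may have swapped the ring */
        tail = wc->tail;
        if (tail > cq->cqe)             /* sanity-check untrusted index */
                tail = cq->cqe;
        for (npolled = 0; npolled < num_entries; ++npolled, ++out) {
                if (tail == wc->head)   /* ring empty */
                        break;
                *out = wc->entries[tail];
                tail = (tail >= cq->cqe) ? 0 : tail + 1;
        }
        wc->tail = tail;                /* single write-back of the index */
        pthread_mutex_unlock(&cq->lock);
        return npolled;
}

Working on a local copy keeps a misbehaving user-space index from steering the ring accesses out of range partway through the loop.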