author		Mike Marciniszyn <mike.marciniszyn@intel.com>	2013-06-15 17:06:58 -0400
committer	Roland Dreier <roland@purestorage.com>	2013-06-21 20:19:46 -0400
commit		f7cf9a618b48212394c07b169864d20beb23b8e5 (patch)
tree		ed7fb723991bb6b8f7ce35591f3d946418952724
parent		8469ba39a6b77917e8879680aed17229bf72f263 (diff)
IB/qib: Remove atomic_inc_not_zero() from QP RCU
Follow the guidance in Documentation/RCU/rcuref.txt and remove
atomic_inc_not_zero() from the QP RCU implementation.
This patch also removes an unneeded synchronize_rcu() in the add path.
Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
 drivers/infiniband/hw/qib/qib_qp.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)
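The patch follows the pattern described in Documentation/RCU/rcuref.txt: take the data structure's reference before publishing an element, and on removal unlink the element, wait for an RCU grace period, and only then drop that reference. Because the count can no longer reach zero while a reader can still find the element, readers may take references with a plain atomic_inc() rather than atomic_inc_not_zero(). The sketch below illustrates the update-side half of that pattern under those assumptions; all identifiers (struct element, elem_add, elem_del, table_lock) are illustrative and are not the qib driver's own.

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct element {
	struct element __rcu *next;
	atomic_t refcount;
	int key;
};

static DEFINE_SPINLOCK(table_lock);

static void elem_add(struct element __rcu **head, struct element *e)
{
	unsigned long flags;

	/*
	 * Take the table's reference before the element becomes visible;
	 * nothing needs to wait for a grace period on the add path.
	 */
	atomic_inc(&e->refcount);

	spin_lock_irqsave(&table_lock, flags);
	RCU_INIT_POINTER(e->next,
		rcu_dereference_protected(*head, lockdep_is_held(&table_lock)));
	rcu_assign_pointer(*head, e);
	spin_unlock_irqrestore(&table_lock, flags);
}

static void elem_del(struct element __rcu **slot, struct element *e)
{
	unsigned long flags;

	/* @slot is the pointer that currently references @e. */
	spin_lock_irqsave(&table_lock, flags);
	rcu_assign_pointer(*slot,
		rcu_dereference_protected(e->next, lockdep_is_held(&table_lock)));
	spin_unlock_irqrestore(&table_lock, flags);

	/* Wait until no RCU reader can still reach the element ... */
	synchronize_rcu();
	/*
	 * ... and only then drop the table's reference.  A reader that found
	 * the element before the unlink has already taken its own reference,
	 * so the count never falls to zero while the element is reachable.
	 */
	if (atomic_dec_and_test(&e->refcount))
		kfree(e);
}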
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index a6a2cc2ba260..c1f573a331c7 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
  * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
@@ -222,8 +222,8 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 	unsigned long flags;
 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 
-	spin_lock_irqsave(&dev->qpt_lock, flags);
 	atomic_inc(&qp->refcount);
+	spin_lock_irqsave(&dev->qpt_lock, flags);
 
 	if (qp->ibqp.qp_num == 0)
 		rcu_assign_pointer(ibp->qp0, qp);
@@ -235,7 +235,6 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 	}
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
-	synchronize_rcu();
 }
 
 /*
@@ -247,36 +246,39 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 	unsigned long flags;
+	int removed = 1;
 
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 
 	if (rcu_dereference_protected(ibp->qp0,
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		atomic_dec(&qp->refcount);
 		rcu_assign_pointer(ibp->qp0, NULL);
 	} else if (rcu_dereference_protected(ibp->qp1,
 			lockdep_is_held(&dev->qpt_lock)) == qp) {
-		atomic_dec(&qp->refcount);
 		rcu_assign_pointer(ibp->qp1, NULL);
 	} else {
 		struct qib_qp *q;
 		struct qib_qp __rcu **qpp;
 
+		removed = 0;
 		qpp = &dev->qp_table[n];
 		for (; (q = rcu_dereference_protected(*qpp,
 			lockdep_is_held(&dev->qpt_lock))) != NULL;
 			qpp = &q->next)
 			if (q == qp) {
-				atomic_dec(&qp->refcount);
 				rcu_assign_pointer(*qpp,
 					rcu_dereference_protected(qp->next,
 					 lockdep_is_held(&dev->qpt_lock)));
+				removed = 1;
 				break;
 			}
 	}
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
-	synchronize_rcu();
+	if (removed) {
+		synchronize_rcu();
+		atomic_dec(&qp->refcount);
+	}
 }
 
 /**
@@ -334,26 +336,25 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 {
 	struct qib_qp *qp = NULL;
 
+	rcu_read_lock();
 	if (unlikely(qpn <= 1)) {
-		rcu_read_lock();
 		if (qpn == 0)
 			qp = rcu_dereference(ibp->qp0);
 		else
 			qp = rcu_dereference(ibp->qp1);
+		if (qp)
+			atomic_inc(&qp->refcount);
 	} else {
 		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
 		unsigned n = qpn_hash(dev, qpn);
 
-		rcu_read_lock();
 		for (qp = rcu_dereference(dev->qp_table[n]); qp;
 			qp = rcu_dereference(qp->next))
-			if (qp->ibqp.qp_num == qpn)
+			if (qp->ibqp.qp_num == qpn) {
+				atomic_inc(&qp->refcount);
 				break;
+			}
 	}
-	if (qp)
-		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
-			qp = NULL;
-
 	rcu_read_unlock();
 	return qp;
 }
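For completeness, here is the read-side half of the same sketch, again with illustrative names. Because elem_del() above lets a grace period elapse before dropping the table's reference, an element found under rcu_read_lock() is guaranteed to have a nonzero refcount, so a plain atomic_inc() suffices; this is the same reasoning the reworked qib_lookup_qpn() relies on.

static struct element *elem_get(struct element __rcu **head, int key)
{
	struct element *e;

	rcu_read_lock();
	for (e = rcu_dereference(*head); e; e = rcu_dereference(e->next))
		if (e->key == key) {
			/* Safe: removal waits a grace period before its dec. */
			atomic_inc(&e->refcount);
			break;
		}
	rcu_read_unlock();
	return e;	/* NULL if no matching element was found */
}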