author    Mike Marciniszyn <mike.marciniszyn@qlogic.com>  2011-09-23 13:16:44 -0400
committer Roland Dreier <roland@purestorage.com>          2011-10-21 12:38:54 -0400
commit    af061a644a0e4d4778fe6cd2246479c1962e153b (patch)
tree      01ed5c508274adc6e46f99d4091fffe70632711f /drivers
parent    9e1c0e43257b6df1ef012dd37c3f0f93b1ee47af (diff)
IB/qib: Use RCU for qpn lookup
The heavyweight spinlock in qib_lookup_qpn() is replaced with RCU.
The hash bucket is now selected with jhash instead of a simple modulo
of the QPN.
The changes should benefit multiple receive contexts running on
different processors, since they no longer contend for the lock just
to read the hash structures.
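For reference, a minimal sketch of the read-side pattern this describes
(the helper names here are illustrative, not the driver's; the struct
fields match those the patch adds to struct qib_ibdev and struct qib_qp):

/*
 * Hypothetical sketch, assuming the qib driver types from qib_verbs.h.
 * An RCU reader hashes the QPN with jhash to pick a bucket, walks the
 * chain without taking the table spinlock, and takes a reference only
 * if the QP's refcount has not already dropped to zero.
 */
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/atomic.h>

static inline unsigned example_qpn_hash(u32 qpn, u32 rnd, u32 table_size)
{
	/* table_size is assumed to be a power of two */
	return jhash_1word(qpn, rnd) & (table_size - 1);
}

static struct qib_qp *example_lookup(struct qib_ibdev *dev, u32 qpn)
{
	struct qib_qp *qp;
	unsigned n = example_qpn_hash(qpn, dev->qp_rnd, dev->qp_table_size);

	rcu_read_lock();
	for (qp = rcu_dereference(dev->qp_table[n]); qp;
	     qp = rcu_dereference(qp->next))
		if (qp->ibqp.qp_num == qpn)
			break;
	/* keep the QP only if it is still live */
	if (qp && !atomic_inc_not_zero(&qp->refcount))
		qp = NULL;
	rcu_read_unlock();
	return qp;
}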
The patch also adds a lookaside_qp pointer and a lookaside_qpn to the
receive context. The interrupt handler tests the current packet's QPN
against lookaside_qpn when the lookaside_qp pointer is non-NULL, so a
matching packet reuses the cached QP instead of doing a full lookup.
The pointer is set back to NULL when the interrupt handler exits.
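A condensed sketch of that lookaside flow follows (the helper name is
hypothetical; in the actual patch this logic is inlined in qib_ib_rcv()
and the cached reference is released by the interrupt handler on exit):

/*
 * Hypothetical sketch of the lookaside cache, simplified from the
 * qib_ib_rcv() hunk below; unrelated receive-path details omitted.
 */
static struct qib_qp *example_get_rcv_qp(struct qib_ctxtdata *rcd,
					 struct qib_ibport *ibp, u32 qp_num)
{
	/* Drop the cached QP if it does not match this packet's QPN. */
	if (rcd->lookaside_qp && rcd->lookaside_qpn != qp_num) {
		if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
			wake_up(&rcd->lookaside_qp->wait);
		rcd->lookaside_qp = NULL;
	}
	/* Cache miss: do the full (now RCU-based) lookup and remember it. */
	if (!rcd->lookaside_qp) {
		rcd->lookaside_qp = qib_lookup_qpn(ibp, qp_num);
		if (rcd->lookaside_qp)
			rcd->lookaside_qpn = qp_num;
	}
	return rcd->lookaside_qp;
}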
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers')
 drivers/infiniband/hw/qib/qib.h        |  3
 drivers/infiniband/hw/qib/qib_driver.c |  9
 drivers/infiniband/hw/qib/qib_qp.c     | 77
 drivers/infiniband/hw/qib/qib_verbs.c  | 36
 drivers/infiniband/hw/qib/qib_verbs.h  |  3
 5 files changed, 87 insertions(+), 41 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 97e623383e1a..b881bdc401f5 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -223,6 +223,9 @@ struct qib_ctxtdata {
 	/* ctxt rcvhdrq head offset */
 	u32 head;
 	u32 pkt_count;
+	/* lookaside fields */
+	struct qib_qp *lookaside_qp;
+	u32 lookaside_qpn;
 	/* QPs waiting for context processing */
 	struct list_head qp_wait_list;
 };
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 89264ffc7ee9..d35c9d38ceee 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -547,6 +547,15 @@ move_along:
 			updegr = 0;
 		}
 	}
+	/*
+	 * Notify qib_destroy_qp() if it is waiting
+	 * for lookaside_qp to finish.
+	 */
+	if (rcd->lookaside_qp) {
+		if (atomic_dec_and_test(&rcd->lookaside_qp->refcount))
+			wake_up(&rcd->lookaside_qp->wait);
+		rcd->lookaside_qp = NULL;
+	}
 
 	rcd->head = l;
 	rcd->pkt_count += i;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 9d094f910360..500981bce9c0 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -34,6 +34,7 @@
 
 #include <linux/err.h>
 #include <linux/vmalloc.h>
+#include <linux/jhash.h>
 
 #include "qib.h"
 
@@ -204,6 +205,13 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 	clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
 }
 
+static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
+{
+	return jhash_1word(qpn, dev->qp_rnd) &
+		(dev->qp_table_size - 1);
+}
+
+
 /*
  * Put the QP into the hash table.
  * The hash table holds a reference to the QP.
@@ -211,22 +219,23 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-	unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
 	unsigned long flags;
+	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 
 	spin_lock_irqsave(&dev->qpt_lock, flags);
+	atomic_inc(&qp->refcount);
 
 	if (qp->ibqp.qp_num == 0)
-		ibp->qp0 = qp;
+		rcu_assign_pointer(ibp->qp0, qp);
 	else if (qp->ibqp.qp_num == 1)
-		ibp->qp1 = qp;
+		rcu_assign_pointer(ibp->qp1, qp);
 	else {
 		qp->next = dev->qp_table[n];
-		dev->qp_table[n] = qp;
+		rcu_assign_pointer(dev->qp_table[n], qp);
 	}
-	atomic_inc(&qp->refcount);
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	synchronize_rcu();
 }
 
 /*
@@ -236,29 +245,32 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-	struct qib_qp *q, **qpp;
+	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 	unsigned long flags;
 
-	qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
-
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 
 	if (ibp->qp0 == qp) {
-		ibp->qp0 = NULL;
 		atomic_dec(&qp->refcount);
+		rcu_assign_pointer(ibp->qp0, NULL);
 	} else if (ibp->qp1 == qp) {
-		ibp->qp1 = NULL;
 		atomic_dec(&qp->refcount);
-	} else
+		rcu_assign_pointer(ibp->qp1, NULL);
+	} else {
+		struct qib_qp *q, **qpp;
+
+		qpp = &dev->qp_table[n];
 		for (; (q = *qpp) != NULL; qpp = &q->next)
 			if (q == qp) {
-				*qpp = qp->next;
-				qp->next = NULL;
 				atomic_dec(&qp->refcount);
+				rcu_assign_pointer(*qpp, qp->next);
+				qp->next = NULL;
 				break;
 			}
+	}
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	synchronize_rcu();
 }
 
 /**
@@ -280,21 +292,24 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 
 		if (!qib_mcast_tree_empty(ibp))
 			qp_inuse++;
-		if (ibp->qp0)
+		rcu_read_lock();
+		if (rcu_dereference(ibp->qp0))
 			qp_inuse++;
-		if (ibp->qp1)
+		if (rcu_dereference(ibp->qp1))
 			qp_inuse++;
+		rcu_read_unlock();
 	}
 
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 	for (n = 0; n < dev->qp_table_size; n++) {
 		qp = dev->qp_table[n];
-		dev->qp_table[n] = NULL;
+		rcu_assign_pointer(dev->qp_table[n], NULL);
 
 		for (; qp; qp = qp->next)
 			qp_inuse++;
 	}
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	synchronize_rcu();
 
 	return qp_inuse;
 }
@@ -309,25 +324,28 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
  */
 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 {
-	struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
-	unsigned long flags;
-	struct qib_qp *qp;
+	struct qib_qp *qp = NULL;
 
-	spin_lock_irqsave(&dev->qpt_lock, flags);
+	if (unlikely(qpn <= 1)) {
+		rcu_read_lock();
+		if (qpn == 0)
+			qp = rcu_dereference(ibp->qp0);
+		else
+			qp = rcu_dereference(ibp->qp1);
+	} else {
+		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+		unsigned n = qpn_hash(dev, qpn);
 
-	if (qpn == 0)
-		qp = ibp->qp0;
-	else if (qpn == 1)
-		qp = ibp->qp1;
-	else
-		for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
-		     qp = qp->next)
+		rcu_read_lock();
+		for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
 			if (qp->ibqp.qp_num == qpn)
 				break;
+	}
 	if (qp)
-		atomic_inc(&qp->refcount);
+		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
+			qp = NULL;
 
-	spin_unlock_irqrestore(&dev->qpt_lock, flags);
+	rcu_read_unlock();
 	return qp;
 }
 
@@ -1015,6 +1033,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 		ret = ERR_PTR(-ENOMEM);
 		goto bail_swq;
 	}
+	RCU_INIT_POINTER(qp->next, NULL);
 	if (init_attr->srq)
 		sz = 0;
 	else {
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 9fab40488850..9627cb737125 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -38,11 +38,12 @@
 #include <linux/utsname.h>
 #include <linux/rculist.h>
 #include <linux/mm.h>
+#include <linux/random.h>
 
 #include "qib.h"
 #include "qib_common.h"
 
-static unsigned int ib_qib_qp_table_size = 251;
+static unsigned int ib_qib_qp_table_size = 256;
 module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(qp_table_size, "QP table size");
 
@@ -659,17 +660,25 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
 		if (atomic_dec_return(&mcast->refcount) <= 1)
 			wake_up(&mcast->wait);
 	} else {
-		qp = qib_lookup_qpn(ibp, qp_num);
-		if (!qp)
-			goto drop;
+		if (rcd->lookaside_qp) {
+			if (rcd->lookaside_qpn != qp_num) {
+				if (atomic_dec_and_test(
+					&rcd->lookaside_qp->refcount))
+					wake_up(
+					 &rcd->lookaside_qp->wait);
+				rcd->lookaside_qp = NULL;
+			}
+		}
+		if (!rcd->lookaside_qp) {
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+			rcd->lookaside_qp = qp;
+			rcd->lookaside_qpn = qp_num;
+		} else
+			qp = rcd->lookaside_qp;
 		ibp->n_unicast_rcv++;
 		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
-		/*
-		 * Notify qib_destroy_qp() if it is waiting
-		 * for us to finish.
-		 */
-		if (atomic_dec_and_test(&qp->refcount))
-			wake_up(&qp->wait);
 	}
 	return;
 
@@ -1974,6 +1983,8 @@ static void init_ibport(struct qib_pportdata *ppd)
 	ibp->z_excessive_buffer_overrun_errors =
 		cntrs.excessive_buffer_overrun_errors;
 	ibp->z_vl15_dropped = cntrs.vl15_dropped;
+	RCU_INIT_POINTER(ibp->qp0, NULL);
+	RCU_INIT_POINTER(ibp->qp1, NULL);
 }
 
 /**
@@ -1990,12 +2001,15 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	int ret;
 
 	dev->qp_table_size = ib_qib_qp_table_size;
-	dev->qp_table = kzalloc(dev->qp_table_size * sizeof *dev->qp_table,
+	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
+	dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
 				GFP_KERNEL);
 	if (!dev->qp_table) {
 		ret = -ENOMEM;
 		goto err_qpt;
 	}
+	for (i = 0; i < dev->qp_table_size; i++)
+		RCU_INIT_POINTER(dev->qp_table[i], NULL);
 
 	for (i = 0; i < dd->num_pports; i++)
 		init_ibport(ppd + i);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index ec3711f743c5..d7b6109528a4 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -724,7 +724,8 @@ struct qib_ibdev {
 	dma_addr_t pio_hdrs_phys;
 	/* list of QPs waiting for RNR timer */
 	spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */
-	unsigned qp_table_size; /* size of the hash table */
+	u32 qp_table_size; /* size of the hash table */
+	u32 qp_rnd; /* random bytes for hash */
 	spinlock_t qpt_lock;
 
 	u32 n_piowait;