author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/infiniband/hw/qib/qib_qp.c
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_qp.c')
-rw-r--r--	drivers/infiniband/hw/qib/qib_qp.c	145
1 file changed, 53 insertions, 92 deletions
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4850d03870c..e16751f8639 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2012 Intel Corporation. All rights reserved.
- * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -34,7 +34,6 @@
 
 #include <linux/err.h>
 #include <linux/vmalloc.h>
-#include <linux/jhash.h>
 
 #include "qib.h"
 
@@ -205,13 +204,6 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 	clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
 }
 
-static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
-{
-	return jhash_1word(qpn, dev->qp_rnd) &
-		(dev->qp_table_size - 1);
-}
-
-
 /*
  * Put the QP into the hash table.
  * The hash table holds a reference to the QP.
@@ -219,23 +211,22 @@ static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
 	unsigned long flags;
-	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
 
 	spin_lock_irqsave(&dev->qpt_lock, flags);
-	atomic_inc(&qp->refcount);
 
 	if (qp->ibqp.qp_num == 0)
-		rcu_assign_pointer(ibp->qp0, qp);
+		ibp->qp0 = qp;
 	else if (qp->ibqp.qp_num == 1)
-		rcu_assign_pointer(ibp->qp1, qp);
+		ibp->qp1 = qp;
 	else {
 		qp->next = dev->qp_table[n];
-		rcu_assign_pointer(dev->qp_table[n], qp);
+		dev->qp_table[n] = qp;
 	}
+	atomic_inc(&qp->refcount);
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
-	synchronize_rcu();
 }
 
 /*
@@ -245,42 +236,29 @@ static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 {
 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
-	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
+	struct qib_qp *q, **qpp;
 	unsigned long flags;
 
+	qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];
+
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 
-	if (rcu_dereference_protected(ibp->qp0,
-			lockdep_is_held(&dev->qpt_lock)) == qp) {
+	if (ibp->qp0 == qp) {
+		ibp->qp0 = NULL;
 		atomic_dec(&qp->refcount);
-		rcu_assign_pointer(ibp->qp0, NULL);
-	} else if (rcu_dereference_protected(ibp->qp1,
-			lockdep_is_held(&dev->qpt_lock)) == qp) {
+	} else if (ibp->qp1 == qp) {
+		ibp->qp1 = NULL;
 		atomic_dec(&qp->refcount);
-		rcu_assign_pointer(ibp->qp1, NULL);
-	} else {
-		struct qib_qp *q;
-		struct qib_qp __rcu **qpp;
-
-		qpp = &dev->qp_table[n];
-		q = rcu_dereference_protected(*qpp,
-			lockdep_is_held(&dev->qpt_lock));
-		for (; q; qpp = &q->next) {
+	} else
+		for (; (q = *qpp) != NULL; qpp = &q->next)
 			if (q == qp) {
-				atomic_dec(&qp->refcount);
 				*qpp = qp->next;
-				rcu_assign_pointer(qp->next, NULL);
-				q = rcu_dereference_protected(*qpp,
-					lockdep_is_held(&dev->qpt_lock));
+				qp->next = NULL;
+				atomic_dec(&qp->refcount);
 				break;
 			}
-			q = rcu_dereference_protected(*qpp,
-				lockdep_is_held(&dev->qpt_lock));
-		}
-	}
 
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
-	synchronize_rcu();
 }
 
 /**
@@ -302,26 +280,21 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
 
 		if (!qib_mcast_tree_empty(ibp))
 			qp_inuse++;
-		rcu_read_lock();
-		if (rcu_dereference(ibp->qp0))
+		if (ibp->qp0)
 			qp_inuse++;
-		if (rcu_dereference(ibp->qp1))
+		if (ibp->qp1)
 			qp_inuse++;
-		rcu_read_unlock();
 	}
 
 	spin_lock_irqsave(&dev->qpt_lock, flags);
 	for (n = 0; n < dev->qp_table_size; n++) {
-		qp = rcu_dereference_protected(dev->qp_table[n],
-			lockdep_is_held(&dev->qpt_lock));
-		rcu_assign_pointer(dev->qp_table[n], NULL);
+		qp = dev->qp_table[n];
+		dev->qp_table[n] = NULL;
 
-		for (; qp; qp = rcu_dereference_protected(qp->next,
-				lockdep_is_held(&dev->qpt_lock)))
+		for (; qp; qp = qp->next)
 			qp_inuse++;
 	}
 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
-	synchronize_rcu();
 
 	return qp_inuse;
 }
@@ -336,29 +309,25 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
  */
 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
 {
-	struct qib_qp *qp = NULL;
+	struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
+	unsigned long flags;
+	struct qib_qp *qp;
 
-	if (unlikely(qpn <= 1)) {
-		rcu_read_lock();
-		if (qpn == 0)
-			qp = rcu_dereference(ibp->qp0);
-		else
-			qp = rcu_dereference(ibp->qp1);
-	} else {
-		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
-		unsigned n = qpn_hash(dev, qpn);
+	spin_lock_irqsave(&dev->qpt_lock, flags);
 
-		rcu_read_lock();
-		for (qp = rcu_dereference(dev->qp_table[n]); qp;
-			qp = rcu_dereference(qp->next))
+	if (qpn == 0)
+		qp = ibp->qp0;
+	else if (qpn == 1)
+		qp = ibp->qp1;
+	else
+		for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
+				qp = qp->next)
 			if (qp->ibqp.qp_num == qpn)
 				break;
-	}
 	if (qp)
-		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
-			qp = NULL;
+		atomic_inc(&qp->refcount);
 
-	rcu_read_unlock();
+	spin_unlock_irqrestore(&dev->qpt_lock, flags);
 	return qp;
 }
 
@@ -419,9 +388,18 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
 	unsigned n;
 
 	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
-		qib_put_ss(&qp->s_rdma_read_sge);
+		while (qp->s_rdma_read_sge.num_sge) {
+			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
+			if (--qp->s_rdma_read_sge.num_sge)
+				qp->s_rdma_read_sge.sge =
+					*qp->s_rdma_read_sge.sg_list++;
+		}
 
-	qib_put_ss(&qp->r_sge);
+	while (qp->r_sge.num_sge) {
+		atomic_dec(&qp->r_sge.sge.mr->refcount);
+		if (--qp->r_sge.num_sge)
+			qp->r_sge.sge = *qp->r_sge.sg_list++;
+	}
 
 	if (clr_sends) {
 		while (qp->s_last != qp->s_head) {
@@ -431,7 +409,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
 			for (i = 0; i < wqe->wr.num_sge; i++) {
 				struct qib_sge *sge = &wqe->sg_list[i];
 
-				qib_put_mr(sge->mr);
+				atomic_dec(&sge->mr->refcount);
 			}
 			if (qp->ibqp.qp_type == IB_QPT_UD ||
 			    qp->ibqp.qp_type == IB_QPT_SMI ||
@@ -441,7 +419,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
 				qp->s_last = 0;
 		}
 		if (qp->s_rdma_mr) {
-			qib_put_mr(qp->s_rdma_mr);
+			atomic_dec(&qp->s_rdma_mr->refcount);
 			qp->s_rdma_mr = NULL;
 		}
 	}
@@ -454,7 +432,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
 
 		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
 		    e->rdma_sge.mr) {
-			qib_put_mr(e->rdma_sge.mr);
+			atomic_dec(&e->rdma_sge.mr->refcount);
 			e->rdma_sge.mr = NULL;
 		}
 	}
@@ -499,7 +477,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
 	if (!(qp->s_flags & QIB_S_BUSY)) {
 		qp->s_hdrwords = 0;
 		if (qp->s_rdma_mr) {
-			qib_put_mr(qp->s_rdma_mr);
+			atomic_dec(&qp->s_rdma_mr->refcount);
 			qp->s_rdma_mr = NULL;
 		}
 		if (qp->s_tx) {
@@ -787,10 +765,8 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 	}
 
-	if (attr_mask & IB_QP_PATH_MTU) {
+	if (attr_mask & IB_QP_PATH_MTU)
 		qp->path_mtu = pmtu;
-		qp->pmtu = ib_mtu_enum_to_int(pmtu);
-	}
 
 	if (attr_mask & IB_QP_RETRY_CNT) {
 		qp->s_retry_cnt = attr->retry_cnt;
@@ -805,12 +781,8 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
 		qp->r_min_rnr_timer = attr->min_rnr_timer;
 
-	if (attr_mask & IB_QP_TIMEOUT) {
+	if (attr_mask & IB_QP_TIMEOUT)
 		qp->timeout = attr->timeout;
-		qp->timeout_jiffies =
-			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-				1000UL);
-	}
 
 	if (attr_mask & IB_QP_QKEY)
 		qp->qkey = attr->qkey;
@@ -1041,15 +1013,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
 			ret = ERR_PTR(-ENOMEM);
 			goto bail_swq;
 		}
-		RCU_INIT_POINTER(qp->next, NULL);
-		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
-		if (!qp->s_hdr) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_qp;
-		}
-		qp->timeout_jiffies =
-			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
-				1000UL);
 		if (init_attr->srq)
 			sz = 0;
 		else {
@@ -1168,7 +1131,6 @@ bail_ip:
 		vfree(qp->r_rq.wq);
 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
 bail_qp:
-	kfree(qp->s_hdr);
 	kfree(qp);
 bail_swq:
 	vfree(swq);
@@ -1224,7 +1186,6 @@ int qib_destroy_qp(struct ib_qp *ibqp)
 	else
 		vfree(qp->r_rq.wq);
 	vfree(qp->s_wq);
-	kfree(qp->s_hdr);
 	kfree(qp);
 	return 0;
 }