author     Zach Brown <zach.brown@oracle.com>      2006-07-04 05:57:52 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-07-04 13:24:57 -0400
commit     a46f9484f8926aacb2e79a0e1676de3a6a6fbae8
tree       21d90306af4677091547465c1ba02e0545276d1a /drivers/infiniband
parent     dd8041f16b117f63f40fb844d6cdebe8b03514d2
[PATCH] mthca: initialize send and receive queue locks separately
lockdep identifies a lock by the call site of its initialization. By
initializing the send and receive queue locks in mthca_wq_init() we confuse
lockdep. It warns that the ordered acquisition of both locks in
mthca_modify_qp() is a recursive acquisition of one lock:
=============================================
[ INFO: possible recursive locking detected ]
---------------------------------------------
modprobe/1192 is trying to acquire lock:
(&wq->lock){....}, at: [<f892b4db>] mthca_modify_qp+0x60/0xa7b [ib_mthca]
but task is already holding lock:
(&wq->lock){....}, at: [<f892b4ce>] mthca_modify_qp+0x53/0xa7b [ib_mthca]
Initializing the locks separately in mthca_alloc_qp_common() stops the
warning and will let lockdep enforce proper ordering on paths that acquire
both locks.
Signed-off-by: Zach Brown <zach.brown@oracle.com>
Cc: Roland Dreier <rolandd@cisco.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
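As background for the explanation above, here is a minimal illustrative sketch (not part of the patch; the demo_* types and functions are invented for this example) of why the spin_lock_init() call site matters: lockdep keys each lock's class to the site that initialized it, so two locks set up through one shared helper share a single class and taking them nested looks recursive, while separate init sites give them separate classes.

/*
 * Illustrative only -- not mthca code.  lockdep assigns a lock class per
 * spin_lock_init() call site, so where the init happens decides whether
 * two locks look like one class or two.
 */
#include <linux/spinlock.h>

struct demo_queue {
        spinlock_t lock;
};

struct demo_qp {
        struct demo_queue sq;
        struct demo_queue rq;
};

/* Shared helper: sq.lock and rq.lock both get the class of this one site. */
static void demo_wq_init(struct demo_queue *wq)
{
        spin_lock_init(&wq->lock);      /* single call site -> single class */
}

/* Separate call sites: each lock gets its own lockdep class. */
static void demo_qp_init(struct demo_qp *qp)
{
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
}

/*
 * Nested acquisition of both locks.  With the shared-helper init this is
 * reported as recursive locking of one class; with per-lock init sites
 * lockdep sees two classes and can track their ordering instead.
 */
static void demo_modify(struct demo_qp *qp)
{
        spin_lock_irq(&qp->sq.lock);
        spin_lock(&qp->rq.lock);
        /* ... modify queue state under both locks ... */
        spin_unlock(&qp->rq.lock);
        spin_unlock_irq(&qp->sq.lock);
}

The patch below makes exactly this change in mthca's real mthca_wq_init() and mthca_alloc_qp_common().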
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/hw/mthca/mthca_qp.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 16c387d8170c..490fc783bb0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -224,7 +224,7 @@ static void *get_send_wqe(struct mthca_qp *qp, int n)
 
 static void mthca_wq_init(struct mthca_wq *wq)
 {
-        spin_lock_init(&wq->lock);
+        /* mthca_alloc_qp_common() initializes the locks */
         wq->next_ind  = 0;
         wq->last_comp = wq->max - 1;
         wq->head      = 0;
@@ -1114,6 +1114,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
         qp->sq_policy   = send_policy;
         mthca_wq_init(&qp->sq);
         mthca_wq_init(&qp->rq);
+        /* these are initialized separately so lockdep can tell them apart */
+        spin_lock_init(&qp->sq.lock);
+        spin_lock_init(&qp->rq.lock);
 
         ret = mthca_map_memfree(dev, qp);
         if (ret)