| author | Roland Dreier <rolandd@cisco.com> | 2005-06-27 17:36:46 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-06-27 18:11:47 -0400 |
| commit | dae4c1d2362292ccd3318ff67d18aa5c22ee820c (patch) | |
| tree | 2bab3983a784bd6baef0fda8f8a62a555f38cb99 | |
| parent | cae54bdf6f8b643e0e7a36ed531951f19e14fe56 (diff) | |
[PATCH] IB: Fix race in sa_query
Use a copy of the id we'll return to the consumer so that we don't
dereference query->sa_query after calling send_mad(). A completion may
occur very quickly and end up freeing the query before we get to do
anything after send_mad().
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
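The pattern the patch relies on, copying an identifier out of an object before handing that object to an asynchronous path whose completion may free it, can be sketched outside the kernel. The example below is illustrative only (plain C with pthreads, not the real ib_sa/MAD API); the `struct work_request`, `submit()`, and `completion()` names are invented for the sketch.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct query {
	int id;                   /* identifier handed back to the consumer */
};

struct work_request {
	int wr_id;                /* copy of query->id, safe to read after handoff */
	struct query *q;
};

/* Completion handler: runs in another thread and frees the query. */
static void *completion(void *arg)
{
	struct work_request *wr = arg;

	free(wr->q);              /* the query may be gone before submit() returns */
	return NULL;
}

/* Submit the query; return its id (>= 0) on success, negative on error. */
static int submit(struct query *q, struct work_request *wr, pthread_t *thread)
{
	wr->wr_id = q->id;        /* take the copy *before* the handoff */
	wr->q = q;

	if (pthread_create(thread, NULL, completion, wr))
		return -1;

	/*
	 * Wrong:  return q->id;      -- q may already have been freed.
	 * Right:  return wr->wr_id;  -- local copy taken before the handoff.
	 */
	return wr->wr_id;
}

int main(void)
{
	struct query *q = malloc(sizeof(*q));
	struct work_request wr;
	pthread_t thread;
	int id;

	if (!q)
		return 1;
	q->id = 42;

	id = submit(q, &wr, &thread);
	if (id < 0) {
		free(q);          /* completion thread was never started */
		return 1;
	}
	printf("submitted query %d\n", id);
	pthread_join(thread, NULL);
	return 0;
}
```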
| -rw-r--r-- | drivers/infiniband/core/sa_query.c | 18 |

1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 276e1a53010d..5a08e81fa827 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -507,7 +507,13 @@ retry:
 		spin_unlock_irqrestore(&idr_lock, flags);
 	}
 
-	return ret;
+	/*
+	 * It's not safe to dereference query any more, because the
+	 * send may already have completed and freed the query in
+	 * another context. So use wr.wr_id, which has a copy of the
+	 * query's id.
+	 */
+	return ret ? ret : wr.wr_id;
 }
 
 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
@@ -598,14 +604,15 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 				rec, query->sa_query.mad->data);
 
 	*sa_query = &query->sa_query;
+
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret) {
+	if (ret < 0) {
 		*sa_query = NULL;
 		kfree(query->sa_query.mad);
 		kfree(query);
 	}
 
-	return ret ? ret : query->sa_query.id;
+	return ret;
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);
 
@@ -674,14 +681,15 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 				rec, query->sa_query.mad->data);
 
 	*sa_query = &query->sa_query;
+
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret) {
+	if (ret < 0) {
 		*sa_query = NULL;
 		kfree(query->sa_query.mad);
 		kfree(query);
 	}
 
-	return ret ? ret : query->sa_query.id;
+	return ret;
 }
 EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
 
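A side effect of the fix is a change of return convention: send_mad() now returns the query id (a non-negative value) on success and a negative error code on failure, which is why the callers switch from `if (ret)` to `if (ret < 0)` and simply return `ret`. The toy example below shows that convention only; `example_submit()` is a made-up stand-in, not an ib_sa entry point.

```c
#include <errno.h>
#include <stdio.h>

/*
 * Made-up submitter following the ">= 0 id on success, negative errno on
 * failure" convention that send_mad() adopts in this patch.
 */
static int example_submit(int make_it_fail)
{
	if (make_it_fail)
		return -ENOMEM;   /* failure: negative error code */
	return 1234;              /* success: the request id */
}

int main(void)
{
	int ret = example_submit(0);

	/* "if (ret)" would misread a non-zero id as an error; test the sign. */
	if (ret < 0)
		fprintf(stderr, "submit failed: %d\n", ret);
	else
		printf("request id %d\n", ret);
	return 0;
}
```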