| author | David Teigland <teigland@redhat.com> | 2006-08-10 14:31:23 -0400 |
| committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-08-11 09:44:00 -0400 |
| commit | fcc8abc8d4fcdbddc383091449f3696b411aa8fb (patch) | |
| tree | 281f7d7af595153904ed3458329f3c868f4e092d /fs/dlm/lowcomms.c | |
| parent | 8872187780f6104216c67e7b60c11f708b513c38 (diff) | |
[DLM] move kmap to after spin_unlock
Doing the kmap() while holding the spinlock was causing recursive spinlock
problems. It seems the kmap was scheduling, although there was no warning
as I'd expect. Patrick, do we need locking around the kmap?
Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
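For context, here is a minimal sketch (not the DLM code, and not part of the commit) of the locking rule the patch restores: kmap() may sleep on highmem configurations, so it must not be called while a spinlock is held; copy what you need under the lock, drop the lock, then map the page. All identifiers below (my_entry, my_queue, map_next_entry) are hypothetical.

```c
/* Hypothetical types and names; not the DLM structures. */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/highmem.h>

struct my_entry {
	struct list_head list;
	struct page *page;
	int len;
};

static DEFINE_SPINLOCK(my_queue_lock);
static LIST_HEAD(my_queue);

/* Map the page of the first queued entry and return its kernel address. */
static void *map_next_entry(int *len)
{
	struct my_entry *e;

	spin_lock(&my_queue_lock);
	if (list_empty(&my_queue)) {
		spin_unlock(&my_queue_lock);
		return NULL;
	}
	e = list_entry(my_queue.next, struct my_entry, list);
	*len = e->len;			/* copy what we need while locked */
	spin_unlock(&my_queue_lock);

	/*
	 * kmap() may sleep, so call it only after the spinlock has been
	 * dropped -- the same reordering this patch makes in send_to_sock().
	 */
	return kmap(e->page);
}
```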
Diffstat (limited to 'fs/dlm/lowcomms.c')
-rw-r--r-- | fs/dlm/lowcomms.c | 2 |
1 files changed, 1 insertions, 1 deletions
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 7ab40422ab57..23f5ce12080b 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -934,11 +934,11 @@ static int send_to_sock(struct nodeinfo *ni)
 			break;
 		e = list_entry(ni->writequeue.next, struct writequeue_entry,
 			       list);
-		kmap(e->page);
 		len = e->len;
 		offset = e->offset;
 		BUG_ON(len == 0 && e->users == 0);
 		spin_unlock(&ni->writequeue_lock);
+		kmap(e->page);
 
 		ret = 0;
 		if (len) {