author      Kurt Hackel <kurt.hackel@oracle.com>    2005-12-15 17:31:23 -0500
committer   Joel Becker <joel.becker@oracle.com>    2006-01-03 14:45:47 -0500
commit      6714d8e86bf443f6f7af50f9d432025649f091f5 (patch)
tree        2c484bd1894a90cad7020869c7054f192d3bf34d /fs/ocfs2/dlm
parent      98211489d4147e41b11703e4245846d60b3acce4 (diff)
[PATCH] OCFS2: The Second Oracle Cluster Filesystem
A distributed lock manager built with the cluster file system use case
in mind. The OCFS2 dlm exposes a VMS-style API, though things have
been simplified internally. The only lock levels currently implemented
are NLMODE, PRMODE, and EXMODE.
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Diffstat (limited to 'fs/ocfs2/dlm')
-rw-r--r--   fs/ocfs2/dlm/Makefile          6
-rw-r--r--   fs/ocfs2/dlm/dlmapi.h        214
-rw-r--r--   fs/ocfs2/dlm/dlmast.c        466
-rw-r--r--   fs/ocfs2/dlm/dlmcommon.h     884
-rw-r--r--   fs/ocfs2/dlm/dlmconvert.c    530
-rw-r--r--   fs/ocfs2/dlm/dlmconvert.h     35
-rw-r--r--   fs/ocfs2/dlm/dlmdebug.c      246
-rw-r--r--   fs/ocfs2/dlm/dlmdebug.h       30
-rw-r--r--   fs/ocfs2/dlm/dlmdomain.c    1469
-rw-r--r--   fs/ocfs2/dlm/dlmdomain.h      36
-rw-r--r--   fs/ocfs2/dlm/dlmlock.c       676
-rw-r--r--   fs/ocfs2/dlm/dlmmaster.c    2666
-rw-r--r--   fs/ocfs2/dlm/dlmrecovery.c  2132
-rw-r--r--   fs/ocfs2/dlm/dlmthread.c     695
-rw-r--r--   fs/ocfs2/dlm/dlmunlock.c     672
-rw-r--r--   fs/ocfs2/dlm/dlmver.c         42
-rw-r--r--   fs/ocfs2/dlm/dlmver.h         31
17 files changed, 10830 insertions, 0 deletions
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
new file mode 100644
index 000000000000..2a5274bcc8bb
--- /dev/null
+++ b/fs/ocfs2/dlm/Makefile
@@ -0,0 +1,6 @@
1 | EXTRA_CFLAGS += -Ifs/ocfs2
2 |
3 | obj-$(CONFIG_OCFS2_FS) += ocfs2_dlm.o
4 |
5 | ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
6 | 	dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
new file mode 100644
index 000000000000..53652f51c0e1
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -0,0 +1,214 @@
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmapi.h | ||
5 | * | ||
6 | * externally exported dlm interfaces | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA.
24 | * | ||
25 | */ | ||
26 | |||
27 | #ifndef DLMAPI_H | ||
28 | #define DLMAPI_H | ||
29 | |||
30 | struct dlm_lock; | ||
31 | struct dlm_ctxt; | ||
32 | |||
33 | /* NOTE: changes made to this enum should be reflected in dlmdebug.c */ | ||
34 | enum dlm_status { | ||
35 | DLM_NORMAL = 0, /* 0: request in progress */ | ||
36 | DLM_GRANTED, /* 1: request granted */ | ||
37 | DLM_DENIED, /* 2: request denied */ | ||
38 | DLM_DENIED_NOLOCKS, /* 3: request denied, out of system resources */ | ||
39 | DLM_WORKING, /* 4: async request in progress */ | ||
40 | DLM_BLOCKED, /* 5: lock request blocked */ | ||
41 | DLM_BLOCKED_ORPHAN, /* 6: lock request blocked by an orphan lock */
42 | DLM_DENIED_GRACE_PERIOD, /* 7: topological change in progress */ | ||
43 | DLM_SYSERR, /* 8: system error */ | ||
44 | DLM_NOSUPPORT, /* 9: unsupported */ | ||
45 | DLM_CANCELGRANT, /* 10: can't cancel convert: already granted */ | ||
46 | DLM_IVLOCKID, /* 11: bad lockid */ | ||
47 | DLM_SYNC, /* 12: synchronous request granted */ | ||
48 | DLM_BADTYPE, /* 13: bad resource type */ | ||
49 | DLM_BADRESOURCE, /* 14: bad resource handle */ | ||
50 | DLM_MAXHANDLES, /* 15: no more resource handles */ | ||
51 | DLM_NOCLINFO, /* 16: can't contact cluster manager */ | ||
52 | DLM_NOLOCKMGR, /* 17: can't contact lock manager */ | ||
53 | DLM_NOPURGED, /* 18: can't contact purge daemon */ | ||
54 | DLM_BADARGS, /* 19: bad api args */ | ||
55 | DLM_VOID, /* 20: no status */ | ||
56 | DLM_NOTQUEUED, /* 21: NOQUEUE was specified and request failed */ | ||
57 | DLM_IVBUFLEN, /* 22: invalid resource name length */ | ||
58 | DLM_CVTUNGRANT, /* 23: attempted to convert ungranted lock */ | ||
59 | DLM_BADPARAM, /* 24: invalid lock mode specified */ | ||
60 | DLM_VALNOTVALID, /* 25: value block has been invalidated */ | ||
61 | DLM_REJECTED, /* 26: request rejected, unrecognized client */ | ||
62 | DLM_ABORT, /* 27: blocked lock request cancelled */ | ||
63 | DLM_CANCEL, /* 28: conversion request cancelled */ | ||
64 | DLM_IVRESHANDLE, /* 29: invalid resource handle */ | ||
65 | DLM_DEADLOCK, /* 30: deadlock recovery refused this request */ | ||
66 | DLM_DENIED_NOASTS, /* 31: failed to allocate AST */ | ||
67 | DLM_FORWARD, /* 32: request must wait for primary's response */ | ||
68 | DLM_TIMEOUT, /* 33: timeout value for lock has expired */ | ||
69 | DLM_IVGROUPID, /* 34: invalid group specification */ | ||
70 | DLM_VERS_CONFLICT, /* 35: version conflicts prevent request handling */ | ||
71 | DLM_BAD_DEVICE_PATH, /* 36: Locks device does not exist or path wrong */ | ||
72 | DLM_NO_DEVICE_PERMISSION, /* 37: Client has insufficient perms for device */
73 | DLM_NO_CONTROL_DEVICE, /* 38: Cannot set options on opened device */ | ||
74 | |||
75 | DLM_RECOVERING, /* 39: extension, allows caller to fail a lock | ||
76 | request if it is being recovered */ | ||
77 | DLM_MIGRATING, /* 40: extension, allows caller to fail a lock | ||
78 | request if it is being migrated */ | ||
79 | DLM_MAXSTATS, /* 41: upper limit for return code validation */ | ||
80 | }; | ||
81 | |||
82 | /* for pretty-printing dlm_status error messages */ | ||
83 | const char *dlm_errmsg(enum dlm_status err); | ||
84 | /* for pretty-printing dlm_status error names */ | ||
85 | const char *dlm_errname(enum dlm_status err); | ||
86 | |||
87 | /* Eventually the DLM will use standard errno values, but in the | ||
88 | * meantime this lets us track dlm errors as they bubble up. When we | ||
89 | * bring its error reporting into line with the rest of the stack, | ||
90 | * these can just be replaced with calls to mlog_errno. */ | ||
91 | #define dlm_error(st) do { \ | ||
92 | if ((st) != DLM_RECOVERING && \ | ||
93 | (st) != DLM_MIGRATING && \ | ||
94 | (st) != DLM_FORWARD) \ | ||
95 | mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \ | ||
96 | } while (0) | ||
97 | |||
98 | #define DLM_LKSB_UNUSED1 0x01 | ||
99 | #define DLM_LKSB_PUT_LVB 0x02 | ||
100 | #define DLM_LKSB_GET_LVB 0x04 | ||
101 | #define DLM_LKSB_UNUSED2 0x08 | ||
102 | #define DLM_LKSB_UNUSED3 0x10 | ||
103 | #define DLM_LKSB_UNUSED4 0x20 | ||
104 | #define DLM_LKSB_UNUSED5 0x40 | ||
105 | #define DLM_LKSB_UNUSED6 0x80 | ||
106 | |||
107 | #define DLM_LVB_LEN 64 | ||
108 | |||
109 | /* Callers are only allowed access to the lvb and status members of | ||
110 | * this struct. */ | ||
111 | struct dlm_lockstatus { | ||
112 | enum dlm_status status; | ||
113 | u32 flags; | ||
114 | struct dlm_lock *lockid; | ||
115 | char lvb[DLM_LVB_LEN]; | ||
116 | }; | ||
117 | |||
118 | /* Valid lock modes. */ | ||
119 | #define LKM_IVMODE (-1) /* invalid mode */ | ||
120 | #define LKM_NLMODE 0 /* null lock */ | ||
121 | #define LKM_CRMODE 1 /* concurrent read unsupported */ | ||
122 | #define LKM_CWMODE 2 /* concurrent write unsupported */ | ||
123 | #define LKM_PRMODE 3 /* protected read */ | ||
124 | #define LKM_PWMODE 4 /* protected write unsupported */ | ||
125 | #define LKM_EXMODE 5 /* exclusive */ | ||
126 | #define LKM_MAXMODE 5 | ||
127 | #define LKM_MODEMASK 0xff | ||
128 | |||
129 | /* Flags passed to dlmlock and dlmunlock: | ||
130 | * reserved: flags used by the "real" dlm | ||
131 | * only a few are supported by this dlm | ||
132 | * (U) = unsupported by ocfs2 dlm */ | ||
133 | #define LKM_ORPHAN 0x00000010 /* this lock is orphanable (U) */ | ||
134 | #define LKM_PARENTABLE 0x00000020 /* this lock was orphaned (U) */ | ||
135 | #define LKM_BLOCK 0x00000040 /* blocking lock request (U) */ | ||
136 | #define LKM_LOCAL 0x00000080 /* local lock request */ | ||
137 | #define LKM_VALBLK 0x00000100 /* lock value block request */ | ||
138 | #define LKM_NOQUEUE 0x00000200 /* non blocking request */ | ||
139 | #define LKM_CONVERT 0x00000400 /* conversion request */ | ||
140 | #define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
141 | #define LKM_UNLOCK 0x00001000 /* deallocate this lock */ | ||
142 | #define LKM_CANCEL 0x00002000 /* cancel conversion request */ | ||
143 | #define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */ | ||
144 | #define LKM_INVVALBLK 0x00008000 /* invalidate lock value block */ | ||
145 | #define LKM_SYNCSTS 0x00010000 /* return synchronous status if poss (U) */ | ||
146 | #define LKM_TIMEOUT 0x00020000 /* lock request contains timeout (U) */ | ||
147 | #define LKM_SNGLDLCK 0x00040000 /* request can self-deadlock (U) */ | ||
148 | #define LKM_FINDLOCAL 0x00080000 /* find local lock request (U) */ | ||
149 | #define LKM_PROC_OWNED 0x00100000 /* owned by process, not group (U) */ | ||
150 | #define LKM_XID 0x00200000 /* use transaction id for deadlock (U) */ | ||
151 | #define LKM_XID_CONFLICT 0x00400000 /* do not allow lock inheritance (U) */ | ||
152 | #define LKM_FORCE 0x00800000 /* force unlock flag */ | ||
153 | #define LKM_REVVALBLK 0x01000000 /* temporary solution: re-validate | ||
154 | lock value block (U) */ | ||
155 | /* unused */ | ||
156 | #define LKM_UNUSED1 0x00000001 /* unused */ | ||
157 | #define LKM_UNUSED2 0x00000002 /* unused */ | ||
158 | #define LKM_UNUSED3 0x00000004 /* unused */ | ||
159 | #define LKM_UNUSED4 0x00000008 /* unused */ | ||
160 | #define LKM_UNUSED5 0x02000000 /* unused */ | ||
161 | #define LKM_UNUSED6 0x04000000 /* unused */ | ||
162 | #define LKM_UNUSED7 0x08000000 /* unused */ | ||
163 | |||
164 | /* ocfs2 extensions: internal only | ||
165 | * should never be used by caller */ | ||
166 | #define LKM_MIGRATION 0x10000000 /* extension: lockres is to be migrated | ||
167 | to another node */ | ||
168 | #define LKM_PUT_LVB 0x20000000 /* extension: lvb is being passed | ||
169 | should be applied to lockres */ | ||
170 | #define LKM_GET_LVB 0x40000000 /* extension: lvb should be copied | ||
171 | from lockres when lock is granted */ | ||
172 | #define LKM_RECOVERY 0x80000000 /* extension: flag for recovery lock | ||
173 | used to avoid recovery rwsem */ | ||
174 | |||
175 | |||
176 | typedef void (dlm_astlockfunc_t)(void *); | ||
177 | typedef void (dlm_bastlockfunc_t)(void *, int); | ||
178 | typedef void (dlm_astunlockfunc_t)(void *, enum dlm_status); | ||
179 | |||
180 | enum dlm_status dlmlock(struct dlm_ctxt *dlm, | ||
181 | int mode, | ||
182 | struct dlm_lockstatus *lksb, | ||
183 | int flags, | ||
184 | const char *name, | ||
185 | dlm_astlockfunc_t *ast, | ||
186 | void *data, | ||
187 | dlm_bastlockfunc_t *bast); | ||
188 | |||
189 | enum dlm_status dlmunlock(struct dlm_ctxt *dlm, | ||
190 | struct dlm_lockstatus *lksb, | ||
191 | int flags, | ||
192 | dlm_astunlockfunc_t *unlockast, | ||
193 | void *data); | ||
194 | |||
195 | struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key); | ||
196 | |||
197 | void dlm_unregister_domain(struct dlm_ctxt *dlm); | ||
198 | |||
199 | void dlm_print_one_lock(struct dlm_lock *lockid); | ||
200 | |||
201 | typedef void (dlm_eviction_func)(int, void *); | ||
202 | struct dlm_eviction_cb { | ||
203 | struct list_head ec_item; | ||
204 | dlm_eviction_func *ec_func; | ||
205 | void *ec_data; | ||
206 | }; | ||
207 | void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb, | ||
208 | dlm_eviction_func *f, | ||
209 | void *data); | ||
210 | void dlm_register_eviction_cb(struct dlm_ctxt *dlm, | ||
211 | struct dlm_eviction_cb *cb); | ||
212 | void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb); | ||
213 | |||
214 | #endif /* DLMAPI_H */ | ||
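
To make the calling convention above concrete, here is a minimal,
hypothetical caller of the exported API (an editor's sketch, not part of
the patch). The domain name, key, lock name, and the example_* helpers
are all invented for illustration. Note that dlmlock() and dlmunlock()
are asynchronous: DLM_NORMAL means "request in progress", the AST fires
when the request resolves, and the final status lands in lksb.status.

#include <linux/completion.h>

static struct dlm_lockstatus example_lksb;
static DECLARE_COMPLETION(example_done);

static void example_ast(void *astdata)
{
	/* request resolved; final result is in example_lksb.status */
	complete(&example_done);
}

static void example_bast(void *astdata, int blocked_type)
{
	/* another node wants an incompatible mode; release soon */
}

static void example_unlockast(void *astdata, enum dlm_status status)
{
	complete(&example_done);
}

static int example_take_and_drop(void)
{
	struct dlm_ctxt *dlm;
	enum dlm_status st;

	dlm = dlm_register_domain("exampledomain", 0x12345678);
	if (!dlm)	/* assuming NULL is returned on failure */
		return -ENOMEM;

	st = dlmlock(dlm, LKM_EXMODE, &example_lksb, LKM_VALBLK,
		     "example_lock", example_ast, NULL, example_bast);
	if (st == DLM_NORMAL)
		wait_for_completion(&example_done);
	else
		dlm_error(st);

	st = dlmunlock(dlm, &example_lksb, 0, example_unlockast, NULL);
	if (st == DLM_NORMAL)
		wait_for_completion(&example_done);

	dlm_unregister_domain(dlm);
	return 0;
}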
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
new file mode 100644
index 000000000000..8d17d28ef91c
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -0,0 +1,466 @@
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmast.c | ||
5 | * | ||
6 | * AST and BAST functionality for local and remote nodes | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA.
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | |||
42 | |||
43 | #include "cluster/heartbeat.h" | ||
44 | #include "cluster/nodemanager.h" | ||
45 | #include "cluster/tcp.h" | ||
46 | #include "cluster/endian.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | |||
51 | #define MLOG_MASK_PREFIX ML_DLM | ||
52 | #include "cluster/masklog.h" | ||
53 | |||
54 | static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
55 | struct dlm_lock *lock); | ||
56 | static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
57 | |||
58 | /* Should be called as an ast gets queued to see if the new | ||
59 | * lock level will obsolete a pending bast. | ||
60 | * For example, if dlm_thread queued a bast for an EX lock that | ||
61 | * was blocking another EX, but before sending the bast the | ||
62 | * lock owner downconverted to NL, the bast is now obsolete. | ||
63 | * Only the ast should be sent. | ||
64 | * This is needed because the lock and convert paths can queue | ||
65 | * asts out-of-band (not waiting for dlm_thread) in order to | ||
66 | * allow for LKM_NOQUEUE to get immediate responses. */ | ||
67 | static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
68 | { | ||
69 | assert_spin_locked(&dlm->ast_lock); | ||
70 | assert_spin_locked(&lock->spinlock); | ||
71 | |||
72 | if (lock->ml.highest_blocked == LKM_IVMODE) | ||
73 | return 0; | ||
74 | BUG_ON(lock->ml.highest_blocked == LKM_NLMODE); | ||
75 | |||
76 | if (lock->bast_pending && | ||
77 | list_empty(&lock->bast_list)) | ||
78 | /* old bast already sent, ok */ | ||
79 | return 0; | ||
80 | |||
81 | if (lock->ml.type == LKM_EXMODE) | ||
82 | /* EX blocks anything left, any bast still valid */ | ||
83 | return 0; | ||
84 | else if (lock->ml.type == LKM_NLMODE) | ||
85 | /* NL blocks nothing, no reason to send any bast, cancel it */ | ||
86 | return 1; | ||
87 | else if (lock->ml.highest_blocked != LKM_EXMODE) | ||
88 | /* PR only blocks EX */ | ||
89 | return 1; | ||
90 | |||
91 | return 0; | ||
92 | } | ||
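/*
 * Editor's note (not part of the patch): the decision above reduces to
 * this table. Given the lock's newly granted mode and the mode recorded
 * in ml.highest_blocked, is the still-queued bast obsolete?
 *
 *   granted mode   highest_blocked   cancel bast?
 *   EXMODE         PR or EX          no  (EX still blocks everything)
 *   NLMODE         PR or EX          yes (NL blocks nothing)
 *   PRMODE         EXMODE            no  (PR still blocks EX)
 *   PRMODE         PRMODE            yes (PR no longer blocks PR)
 */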
93 | |||
94 | static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
95 | { | ||
96 | mlog_entry_void(); | ||
97 | |||
98 | BUG_ON(!dlm); | ||
99 | BUG_ON(!lock); | ||
100 | |||
101 | assert_spin_locked(&dlm->ast_lock); | ||
102 | if (!list_empty(&lock->ast_list)) { | ||
103 | mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", | ||
104 | lock->ast_pending, lock->ml.type); | ||
105 | BUG(); | ||
106 | } | ||
107 | BUG_ON(!list_empty(&lock->ast_list)); | ||
108 | if (lock->ast_pending) | ||
109 | mlog(0, "lock has an ast getting flushed right now\n"); | ||
110 | |||
111 | /* putting lock on list, add a ref */ | ||
112 | dlm_lock_get(lock); | ||
113 | spin_lock(&lock->spinlock); | ||
114 | |||
115 | /* check to see if this ast obsoletes the bast */ | ||
116 | if (dlm_should_cancel_bast(dlm, lock)) { | ||
117 | struct dlm_lock_resource *res = lock->lockres; | ||
118 | mlog(0, "%s: cancelling bast for %.*s\n", | ||
119 | dlm->name, res->lockname.len, res->lockname.name); | ||
120 | lock->bast_pending = 0; | ||
121 | list_del_init(&lock->bast_list); | ||
122 | lock->ml.highest_blocked = LKM_IVMODE; | ||
123 | /* removing lock from list, remove a ref. guaranteed | ||
124 | * this won't be the last ref because of the get above, | ||
125 | * so res->spinlock will not be taken here */ | ||
126 | dlm_lock_put(lock); | ||
127 | /* free up the reserved bast that we are cancelling. | ||
128 | * guaranteed that this will not be the last reserved | ||
129 | * ast because *both* an ast and a bast were reserved | ||
130 | * to get to this point. the res->spinlock will not be | ||
131 | * taken here */ | ||
132 | dlm_lockres_release_ast(dlm, res); | ||
133 | } | ||
134 | list_add_tail(&lock->ast_list, &dlm->pending_asts); | ||
135 | lock->ast_pending = 1; | ||
136 | spin_unlock(&lock->spinlock); | ||
137 | } | ||
138 | |||
139 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
140 | { | ||
141 | mlog_entry_void(); | ||
142 | |||
143 | BUG_ON(!dlm); | ||
144 | BUG_ON(!lock); | ||
145 | |||
146 | spin_lock(&dlm->ast_lock); | ||
147 | __dlm_queue_ast(dlm, lock); | ||
148 | spin_unlock(&dlm->ast_lock); | ||
149 | } | ||
150 | |||
151 | |||
152 | static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
153 | { | ||
154 | mlog_entry_void(); | ||
155 | |||
156 | BUG_ON(!dlm); | ||
157 | BUG_ON(!lock); | ||
158 | assert_spin_locked(&dlm->ast_lock); | ||
159 | |||
160 | BUG_ON(!list_empty(&lock->bast_list)); | ||
161 | if (lock->bast_pending) | ||
162 | mlog(0, "lock has a bast getting flushed right now\n"); | ||
163 | |||
164 | /* putting lock on list, add a ref */ | ||
165 | dlm_lock_get(lock); | ||
166 | spin_lock(&lock->spinlock); | ||
167 | list_add_tail(&lock->bast_list, &dlm->pending_basts); | ||
168 | lock->bast_pending = 1; | ||
169 | spin_unlock(&lock->spinlock); | ||
170 | } | ||
171 | |||
172 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
173 | { | ||
174 | mlog_entry_void(); | ||
175 | |||
176 | BUG_ON(!dlm); | ||
177 | BUG_ON(!lock); | ||
178 | |||
179 | spin_lock(&dlm->ast_lock); | ||
180 | __dlm_queue_bast(dlm, lock); | ||
181 | spin_unlock(&dlm->ast_lock); | ||
182 | } | ||
183 | |||
184 | static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
185 | struct dlm_lock *lock) | ||
186 | { | ||
187 | struct dlm_lockstatus *lksb = lock->lksb; | ||
188 | BUG_ON(!lksb); | ||
189 | |||
190 | /* only updates if this node masters the lockres */ | ||
191 | if (res->owner == dlm->node_num) { | ||
192 | |||
193 | spin_lock(&res->spinlock); | ||
194 | /* check the lksb flags for the direction */ | ||
195 | if (lksb->flags & DLM_LKSB_GET_LVB) { | ||
196 | mlog(0, "getting lvb from lockres for %s node\n", | ||
197 | lock->ml.node == dlm->node_num ? "master" : | ||
198 | "remote"); | ||
199 | memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN); | ||
200 | } else if (lksb->flags & DLM_LKSB_PUT_LVB) { | ||
201 | mlog(0, "setting lvb from lockres for %s node\n", | ||
202 | lock->ml.node == dlm->node_num ? "master" : | ||
203 | "remote"); | ||
204 | memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); | ||
205 | } | ||
206 | spin_unlock(&res->spinlock); | ||
207 | } | ||
208 | |||
209 | /* reset any lvb flags on the lksb */ | ||
210 | lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); | ||
211 | } | ||
212 | |||
213 | void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
214 | struct dlm_lock *lock) | ||
215 | { | ||
216 | dlm_astlockfunc_t *fn; | ||
217 | struct dlm_lockstatus *lksb; | ||
218 | |||
219 | mlog_entry_void(); | ||
220 | |||
221 | lksb = lock->lksb; | ||
222 | fn = lock->ast; | ||
223 | BUG_ON(lock->ml.node != dlm->node_num); | ||
224 | |||
225 | dlm_update_lvb(dlm, res, lock); | ||
226 | (*fn)(lock->astdata); | ||
227 | } | ||
228 | |||
229 | |||
230 | int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
231 | struct dlm_lock *lock) | ||
232 | { | ||
233 | int ret; | ||
234 | struct dlm_lockstatus *lksb; | ||
235 | int lksbflags; | ||
236 | |||
237 | mlog_entry_void(); | ||
238 | |||
239 | lksb = lock->lksb; | ||
240 | BUG_ON(lock->ml.node == dlm->node_num); | ||
241 | |||
242 | lksbflags = lksb->flags; | ||
243 | dlm_update_lvb(dlm, res, lock); | ||
244 | |||
245 | /* lock request came from another node | ||
246 | * go do the ast over there */ | ||
247 | ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags); | ||
248 | return ret; | ||
249 | } | ||
250 | |||
251 | void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
252 | struct dlm_lock *lock, int blocked_type) | ||
253 | { | ||
254 | dlm_bastlockfunc_t *fn = lock->bast; | ||
255 | |||
256 | mlog_entry_void(); | ||
257 | BUG_ON(lock->ml.node != dlm->node_num); | ||
258 | |||
259 | (*fn)(lock->astdata, blocked_type); | ||
260 | } | ||
261 | |||
262 | |||
263 | |||
264 | int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data) | ||
265 | { | ||
266 | int ret; | ||
267 | unsigned int locklen; | ||
268 | struct dlm_ctxt *dlm = data; | ||
269 | struct dlm_lock_resource *res = NULL; | ||
270 | struct dlm_lock *lock = NULL; | ||
271 | struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; | ||
272 | char *name; | ||
273 | struct list_head *iter, *head=NULL; | ||
274 | u64 cookie; | ||
275 | u32 flags; | ||
276 | |||
277 | if (!dlm_grab(dlm)) { | ||
278 | dlm_error(DLM_REJECTED); | ||
279 | return DLM_REJECTED; | ||
280 | } | ||
281 | |||
282 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
283 | "Domain %s not fully joined!\n", dlm->name); | ||
284 | |||
285 | name = past->name; | ||
286 | locklen = past->namelen; | ||
287 | cookie = be64_to_cpu(past->cookie); | ||
288 | flags = be32_to_cpu(past->flags); | ||
289 | |||
290 | if (locklen > DLM_LOCKID_NAME_MAX) { | ||
291 | ret = DLM_IVBUFLEN; | ||
292 | mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n"); | ||
293 | goto leave; | ||
294 | } | ||
295 | |||
296 | if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == | ||
297 | (LKM_PUT_LVB|LKM_GET_LVB)) { | ||
298 | mlog(ML_ERROR, "both PUT and GET lvb specified\n"); | ||
299 | ret = DLM_BADARGS; | ||
300 | goto leave; | ||
301 | } | ||
302 | |||
303 | mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : | ||
304 | (flags & LKM_GET_LVB ? "get lvb" : "none")); | ||
305 | |||
306 | mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type); | ||
307 | |||
308 | if (past->type != DLM_AST && | ||
309 | past->type != DLM_BAST) { | ||
310 | mlog(ML_ERROR, "Unknown ast type! %d, cookie=%"MLFu64", " | ||
311 | "name=%.*s\n", past->type, cookie, locklen, name); | ||
312 | ret = DLM_IVLOCKID; | ||
313 | goto leave; | ||
314 | } | ||
315 | |||
316 | res = dlm_lookup_lockres(dlm, name, locklen); | ||
317 | if (!res) { | ||
318 | mlog(ML_ERROR, "got %sast for unknown lockres! " | ||
319 | "cookie=%"MLFu64", name=%.*s, namelen=%u\n", | ||
320 | past->type == DLM_AST ? "" : "b", | ||
321 | cookie, locklen, name, locklen); | ||
322 | ret = DLM_IVLOCKID; | ||
323 | goto leave; | ||
324 | } | ||
325 | |||
326 | /* cannot get a proxy ast message if this node owns it */ | ||
327 | BUG_ON(res->owner == dlm->node_num); | ||
328 | |||
329 | mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); | ||
330 | |||
331 | spin_lock(&res->spinlock); | ||
332 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
333 | mlog(0, "responding with DLM_RECOVERING!\n"); | ||
334 | ret = DLM_RECOVERING; | ||
335 | goto unlock_out; | ||
336 | } | ||
337 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
338 | mlog(0, "responding with DLM_MIGRATING!\n"); | ||
339 | ret = DLM_MIGRATING; | ||
340 | goto unlock_out; | ||
341 | } | ||
342 | /* try convert queue for both ast/bast */ | ||
343 | head = &res->converting; | ||
344 | lock = NULL; | ||
345 | list_for_each(iter, head) { | ||
346 | lock = list_entry (iter, struct dlm_lock, list); | ||
347 | if (be64_to_cpu(lock->ml.cookie) == cookie) | ||
348 | goto do_ast; | ||
349 | } | ||
350 | |||
351 | /* if not on convert, try blocked for ast, granted for bast */ | ||
352 | if (past->type == DLM_AST) | ||
353 | head = &res->blocked; | ||
354 | else | ||
355 | head = &res->granted; | ||
356 | |||
357 | list_for_each(iter, head) { | ||
358 | lock = list_entry (iter, struct dlm_lock, list); | ||
359 | if (be64_to_cpu(lock->ml.cookie) == cookie) | ||
360 | goto do_ast; | ||
361 | } | ||
362 | |||
363 | mlog(ML_ERROR, "got %sast for unknown lock! cookie=%"MLFu64", " | ||
364 | "name=%.*s, namelen=%u\n", | ||
365 | past->type == DLM_AST ? "" : "b", cookie, locklen, name, locklen); | ||
366 | |||
367 | ret = DLM_NORMAL; | ||
368 | unlock_out: | ||
369 | spin_unlock(&res->spinlock); | ||
370 | goto leave; | ||
371 | |||
372 | do_ast: | ||
373 | ret = DLM_NORMAL; | ||
374 | if (past->type == DLM_AST) { | ||
375 | /* do not alter lock refcount. switching lists. */ | ||
376 | list_del_init(&lock->list); | ||
377 | list_add_tail(&lock->list, &res->granted); | ||
378 | mlog(0, "ast: adding to granted list... type=%d, " | ||
379 | "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); | ||
380 | if (lock->ml.convert_type != LKM_IVMODE) { | ||
381 | lock->ml.type = lock->ml.convert_type; | ||
382 | lock->ml.convert_type = LKM_IVMODE; | ||
383 | } else { | ||
384 | // should already be there.... | ||
385 | } | ||
386 | |||
387 | lock->lksb->status = DLM_NORMAL; | ||
388 | |||
389 | /* if we requested the lvb, fetch it into our lksb now */ | ||
390 | if (flags & LKM_GET_LVB) { | ||
391 | BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB)); | ||
392 | memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN); | ||
393 | } | ||
394 | } | ||
395 | spin_unlock(&res->spinlock); | ||
396 | |||
397 | if (past->type == DLM_AST) | ||
398 | dlm_do_local_ast(dlm, res, lock); | ||
399 | else | ||
400 | dlm_do_local_bast(dlm, res, lock, past->blocked_type); | ||
401 | |||
402 | leave: | ||
403 | |||
404 | if (res) | ||
405 | dlm_lockres_put(res); | ||
406 | |||
407 | dlm_put(dlm); | ||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | |||
412 | |||
413 | int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
414 | struct dlm_lock *lock, int msg_type, | ||
415 | int blocked_type, int flags) | ||
416 | { | ||
417 | int ret = 0; | ||
418 | struct dlm_proxy_ast past; | ||
419 | struct kvec vec[2]; | ||
420 | size_t veclen = 1; | ||
421 | int status; | ||
422 | |||
423 | mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", | ||
424 | res->lockname.len, res->lockname.name, lock->ml.node, | ||
425 | msg_type, blocked_type); | ||
426 | |||
427 | memset(&past, 0, sizeof(struct dlm_proxy_ast)); | ||
428 | past.node_idx = dlm->node_num; | ||
429 | past.type = msg_type; | ||
430 | past.blocked_type = blocked_type; | ||
431 | past.namelen = res->lockname.len; | ||
432 | memcpy(past.name, res->lockname.name, past.namelen); | ||
433 | past.cookie = lock->ml.cookie; | ||
434 | |||
435 | vec[0].iov_len = sizeof(struct dlm_proxy_ast); | ||
436 | vec[0].iov_base = &past; | ||
437 | if (flags & DLM_LKSB_GET_LVB) { | ||
438 | mlog(0, "returning requested LVB data\n"); | ||
439 | be32_add_cpu(&past.flags, LKM_GET_LVB); | ||
440 | vec[1].iov_len = DLM_LVB_LEN; | ||
441 | vec[1].iov_base = lock->lksb->lvb; | ||
442 | veclen++; | ||
443 | } | ||
444 | |||
445 | ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, | ||
446 | lock->ml.node, &status); | ||
447 | if (ret < 0) | ||
448 | mlog_errno(ret); | ||
449 | else { | ||
450 | if (status == DLM_RECOVERING) { | ||
451 | mlog(ML_ERROR, "sent AST to node %u, it thinks this " | ||
452 | "node is dead!\n", lock->ml.node); | ||
453 | BUG(); | ||
454 | } else if (status == DLM_MIGRATING) { | ||
455 | mlog(ML_ERROR, "sent AST to node %u, it returned " | ||
456 | "DLM_MIGRATING!\n", lock->ml.node); | ||
457 | BUG(); | ||
458 | } else if (status != DLM_NORMAL) { | ||
459 | mlog(ML_ERROR, "AST to node %u returned %d!\n", | ||
460 | lock->ml.node, status); | ||
461 | /* ignore it */ | ||
462 | } | ||
463 | ret = 0; | ||
464 | } | ||
465 | return ret; | ||
466 | } | ||
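
Before moving on to the shared definitions, the proxy AST round trip
that dlmast.c implements can be summarized as follows (an editor's
summary, not part of the patch):

/*
 *   lockres master                           node owning the lock
 *   --------------                           --------------------
 *   dlm_do_remote_ast() / dlm_send_proxy_bast()
 *     -> dlm_send_proxy_ast_msg()  ------->  dlm_proxy_ast_handler()
 *                                              -> dlm_do_local_ast() or
 *                                                 dlm_do_local_bast()
 *     <------- DLM_NORMAL, or DLM_RECOVERING / DLM_MIGRATING
 *              if the lockres is in flux on the receiving node
 */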
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
new file mode 100644
index 000000000000..3fecba0a6023
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -0,0 +1,884 @@
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmcommon.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 02111-1307, USA.
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMCOMMON_H | ||
26 | #define DLMCOMMON_H | ||
27 | |||
28 | #include <linux/kref.h> | ||
29 | |||
30 | #define DLM_HB_NODE_DOWN_PRI (0xf000000) | ||
31 | #define DLM_HB_NODE_UP_PRI (0x8000000) | ||
32 | |||
33 | #define DLM_LOCKID_NAME_MAX 32 | ||
34 | |||
35 | #define DLM_DOMAIN_NAME_MAX_LEN 255 | ||
36 | #define DLM_LOCK_RES_OWNER_UNKNOWN O2NM_MAX_NODES | ||
37 | #define DLM_THREAD_SHUFFLE_INTERVAL 5 // flush everything every 5 passes | ||
38 | #define DLM_THREAD_MS 200 // flush at least every 200 ms | ||
39 | |||
40 | #define DLM_HASH_BITS 7 | ||
41 | #define DLM_HASH_SIZE (1 << DLM_HASH_BITS) | ||
42 | #define DLM_HASH_MASK (DLM_HASH_SIZE - 1) | ||
43 | |||
44 | enum dlm_ast_type { | ||
45 | DLM_AST = 0, | ||
46 | DLM_BAST, | ||
47 | DLM_ASTUNLOCK | ||
48 | }; | ||
49 | |||
50 | |||
51 | #define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \ | ||
52 | LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \ | ||
53 | LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE) | ||
54 | |||
55 | #define DLM_RECOVERY_LOCK_NAME "$RECOVERY" | ||
56 | #define DLM_RECOVERY_LOCK_NAME_LEN 9 | ||
57 | |||
58 | static inline int dlm_is_recovery_lock(const char *lock_name, int name_len) | ||
59 | { | ||
60 | if (name_len == DLM_RECOVERY_LOCK_NAME_LEN && | ||
61 | memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len)==0) | ||
62 | return 1; | ||
63 | return 0; | ||
64 | } | ||
65 | |||
66 | #define DLM_RECO_STATE_ACTIVE 0x0001 | ||
67 | |||
68 | struct dlm_recovery_ctxt | ||
69 | { | ||
70 | struct list_head resources; | ||
71 | struct list_head received; | ||
72 | struct list_head node_data; | ||
73 | u8 new_master; | ||
74 | u8 dead_node; | ||
75 | u16 state; | ||
76 | unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
77 | wait_queue_head_t event; | ||
78 | }; | ||
79 | |||
80 | enum dlm_ctxt_state { | ||
81 | DLM_CTXT_NEW = 0, | ||
82 | DLM_CTXT_JOINED, | ||
83 | DLM_CTXT_IN_SHUTDOWN, | ||
84 | DLM_CTXT_LEAVING, | ||
85 | }; | ||
86 | |||
87 | struct dlm_ctxt | ||
88 | { | ||
89 | struct list_head list; | ||
90 | struct list_head *resources; | ||
91 | struct list_head dirty_list; | ||
92 | struct list_head purge_list; | ||
93 | struct list_head pending_asts; | ||
94 | struct list_head pending_basts; | ||
95 | unsigned int purge_count; | ||
96 | spinlock_t spinlock; | ||
97 | spinlock_t ast_lock; | ||
98 | char *name; | ||
99 | u8 node_num; | ||
100 | u32 key; | ||
101 | u8 joining_node; | ||
102 | wait_queue_head_t dlm_join_events; | ||
103 | unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
104 | unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
105 | unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
106 | struct dlm_recovery_ctxt reco; | ||
107 | spinlock_t master_lock; | ||
108 | struct list_head master_list; | ||
109 | struct list_head mle_hb_events; | ||
110 | |||
111 | /* these give a really vague idea of the system load */ | ||
112 | atomic_t local_resources; | ||
113 | atomic_t remote_resources; | ||
114 | atomic_t unknown_resources; | ||
115 | |||
116 | /* NOTE: Next three are protected by dlm_domain_lock */ | ||
117 | struct kref dlm_refs; | ||
118 | enum dlm_ctxt_state dlm_state; | ||
119 | unsigned int num_joins; | ||
120 | |||
121 | struct o2hb_callback_func dlm_hb_up; | ||
122 | struct o2hb_callback_func dlm_hb_down; | ||
123 | struct task_struct *dlm_thread_task; | ||
124 | struct task_struct *dlm_reco_thread_task; | ||
125 | wait_queue_head_t dlm_thread_wq; | ||
126 | wait_queue_head_t dlm_reco_thread_wq; | ||
127 | wait_queue_head_t ast_wq; | ||
128 | wait_queue_head_t migration_wq; | ||
129 | |||
130 | struct work_struct dispatched_work; | ||
131 | struct list_head work_list; | ||
132 | spinlock_t work_lock; | ||
133 | struct list_head dlm_domain_handlers; | ||
134 | struct list_head dlm_eviction_callbacks; | ||
135 | }; | ||
136 | |||
137 | /* these keventd work queue items are for less-frequently | ||
138 | * called functions that cannot be directly called from the | ||
139 | * net message handlers for some reason, usually because | ||
140 | * they need to send net messages of their own. */ | ||
141 | void dlm_dispatch_work(void *data); | ||
142 | |||
143 | struct dlm_lock_resource; | ||
144 | struct dlm_work_item; | ||
145 | |||
146 | typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *); | ||
147 | |||
148 | struct dlm_request_all_locks_priv | ||
149 | { | ||
150 | u8 reco_master; | ||
151 | u8 dead_node; | ||
152 | }; | ||
153 | |||
154 | struct dlm_mig_lockres_priv | ||
155 | { | ||
156 | struct dlm_lock_resource *lockres; | ||
157 | u8 real_master; | ||
158 | }; | ||
159 | |||
160 | struct dlm_assert_master_priv | ||
161 | { | ||
162 | struct dlm_lock_resource *lockres; | ||
163 | u8 request_from; | ||
164 | u32 flags; | ||
165 | unsigned ignore_higher:1; | ||
166 | }; | ||
167 | |||
168 | |||
169 | struct dlm_work_item | ||
170 | { | ||
171 | struct list_head list; | ||
172 | dlm_workfunc_t *func; | ||
173 | struct dlm_ctxt *dlm; | ||
174 | void *data; | ||
175 | union { | ||
176 | struct dlm_request_all_locks_priv ral; | ||
177 | struct dlm_mig_lockres_priv ml; | ||
178 | struct dlm_assert_master_priv am; | ||
179 | } u; | ||
180 | }; | ||
181 | |||
182 | static inline void dlm_init_work_item(struct dlm_ctxt *dlm, | ||
183 | struct dlm_work_item *i, | ||
184 | dlm_workfunc_t *f, void *data) | ||
185 | { | ||
186 | memset(i, 0, sizeof(*i)); | ||
187 | i->func = f; | ||
188 | INIT_LIST_HEAD(&i->list); | ||
189 | i->data = data; | ||
190 | i->dlm = dlm; /* must have already done a dlm_grab on this! */ | ||
191 | } | ||
192 | |||
193 | |||
194 | |||
195 | static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm, | ||
196 | u8 node) | ||
197 | { | ||
198 | assert_spin_locked(&dlm->spinlock); | ||
199 | |||
200 | dlm->joining_node = node; | ||
201 | wake_up(&dlm->dlm_join_events); | ||
202 | } | ||
203 | |||
204 | #define DLM_LOCK_RES_UNINITED 0x00000001 | ||
205 | #define DLM_LOCK_RES_RECOVERING 0x00000002 | ||
206 | #define DLM_LOCK_RES_READY 0x00000004 | ||
207 | #define DLM_LOCK_RES_DIRTY 0x00000008 | ||
208 | #define DLM_LOCK_RES_IN_PROGRESS 0x00000010 | ||
209 | #define DLM_LOCK_RES_MIGRATING 0x00000020 | ||
210 | |||
211 | #define DLM_PURGE_INTERVAL_MS (8 * 1000) | ||
212 | |||
213 | struct dlm_lock_resource | ||
214 | { | ||
215 | /* WARNING: Please see the comment in dlm_init_lockres before | ||
216 | * adding fields here. */ | ||
217 | struct list_head list; | ||
218 | struct kref refs; | ||
219 | |||
220 | /* please keep these next 3 in this order | ||
221 | * some funcs want to iterate over all lists */ | ||
222 | struct list_head granted; | ||
223 | struct list_head converting; | ||
224 | struct list_head blocked; | ||
225 | |||
226 | struct list_head dirty; | ||
227 | struct list_head recovering; // dlm_recovery_ctxt.resources list | ||
228 | |||
229 | /* unused lock resources have their last_used stamped and are | ||
230 | * put on a list for the dlm thread to run. */ | ||
231 | struct list_head purge; | ||
232 | unsigned long last_used; | ||
233 | |||
234 | unsigned migration_pending:1; | ||
235 | atomic_t asts_reserved; | ||
236 | spinlock_t spinlock; | ||
237 | wait_queue_head_t wq; | ||
238 | u8 owner; //node which owns the lock resource, or unknown | ||
239 | u16 state; | ||
240 | struct qstr lockname; | ||
241 | char lvb[DLM_LVB_LEN]; | ||
242 | }; | ||
243 | |||
244 | struct dlm_migratable_lock | ||
245 | { | ||
246 | __be64 cookie; | ||
247 | |||
248 | /* these 3 are just padding for the in-memory structure, but | ||
249 | * list and flags are actually used when sent over the wire */ | ||
250 | __be16 pad1; | ||
251 | u8 list; // 0=granted, 1=converting, 2=blocked | ||
252 | u8 flags; | ||
253 | |||
254 | s8 type; | ||
255 | s8 convert_type; | ||
256 | s8 highest_blocked; | ||
257 | u8 node; | ||
258 | }; // 16 bytes | ||
259 | |||
260 | struct dlm_lock | ||
261 | { | ||
262 | struct dlm_migratable_lock ml; | ||
263 | |||
264 | struct list_head list; | ||
265 | struct list_head ast_list; | ||
266 | struct list_head bast_list; | ||
267 | struct dlm_lock_resource *lockres; | ||
268 | spinlock_t spinlock; | ||
269 | struct kref lock_refs; | ||
270 | |||
271 | // ast and bast must be callable while holding a spinlock! | ||
272 | dlm_astlockfunc_t *ast; | ||
273 | dlm_bastlockfunc_t *bast; | ||
274 | void *astdata; | ||
275 | struct dlm_lockstatus *lksb; | ||
276 | unsigned ast_pending:1, | ||
277 | bast_pending:1, | ||
278 | convert_pending:1, | ||
279 | lock_pending:1, | ||
280 | cancel_pending:1, | ||
281 | unlock_pending:1, | ||
282 | lksb_kernel_allocated:1; | ||
283 | }; | ||
284 | |||
285 | |||
286 | #define DLM_LKSB_UNUSED1 0x01 | ||
287 | #define DLM_LKSB_PUT_LVB 0x02 | ||
288 | #define DLM_LKSB_GET_LVB 0x04 | ||
289 | #define DLM_LKSB_UNUSED2 0x08 | ||
290 | #define DLM_LKSB_UNUSED3 0x10 | ||
291 | #define DLM_LKSB_UNUSED4 0x20 | ||
292 | #define DLM_LKSB_UNUSED5 0x40 | ||
293 | #define DLM_LKSB_UNUSED6 0x80 | ||
294 | |||
295 | |||
296 | enum dlm_lockres_list { | ||
297 | DLM_GRANTED_LIST = 0, | ||
298 | DLM_CONVERTING_LIST, | ||
299 | DLM_BLOCKED_LIST | ||
300 | }; | ||
301 | |||
302 | static inline struct list_head * | ||
303 | dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) | ||
304 | { | ||
305 | struct list_head *ret = NULL; | ||
306 | if (idx == DLM_GRANTED_LIST) | ||
307 | ret = &res->granted; | ||
308 | else if (idx == DLM_CONVERTING_LIST) | ||
309 | ret = &res->converting; | ||
310 | else if (idx == DLM_BLOCKED_LIST) | ||
311 | ret = &res->blocked; | ||
312 | else | ||
313 | BUG(); | ||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | |||
318 | |||
319 | |||
320 | struct dlm_node_iter | ||
321 | { | ||
322 | unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
323 | int curnode; | ||
324 | }; | ||
325 | |||
326 | |||
327 | enum { | ||
328 | DLM_MASTER_REQUEST_MSG = 500, | ||
329 | DLM_UNUSED_MSG1, /* 501 */ | ||
330 | DLM_ASSERT_MASTER_MSG, /* 502 */ | ||
331 | DLM_CREATE_LOCK_MSG, /* 503 */ | ||
332 | DLM_CONVERT_LOCK_MSG, /* 504 */ | ||
333 | DLM_PROXY_AST_MSG, /* 505 */ | ||
334 | DLM_UNLOCK_LOCK_MSG, /* 506 */ | ||
335 | DLM_UNUSED_MSG2, /* 507 */ | ||
336 | DLM_MIGRATE_REQUEST_MSG, /* 508 */ | ||
337 | DLM_MIG_LOCKRES_MSG, /* 509 */ | ||
338 | DLM_QUERY_JOIN_MSG, /* 510 */ | ||
339 | DLM_ASSERT_JOINED_MSG, /* 511 */ | ||
340 | DLM_CANCEL_JOIN_MSG, /* 512 */ | ||
341 | DLM_EXIT_DOMAIN_MSG, /* 513 */ | ||
342 | DLM_MASTER_REQUERY_MSG, /* 514 */ | ||
343 | DLM_LOCK_REQUEST_MSG, /* 515 */ | ||
344 | DLM_RECO_DATA_DONE_MSG, /* 516 */ | ||
345 | DLM_BEGIN_RECO_MSG, /* 517 */ | ||
346 | DLM_FINALIZE_RECO_MSG /* 518 */ | ||
347 | }; | ||
348 | |||
349 | struct dlm_reco_node_data | ||
350 | { | ||
351 | int state; | ||
352 | u8 node_num; | ||
353 | struct list_head list; | ||
354 | }; | ||
355 | |||
356 | enum { | ||
357 | DLM_RECO_NODE_DATA_DEAD = -1, | ||
358 | DLM_RECO_NODE_DATA_INIT = 0, | ||
359 | DLM_RECO_NODE_DATA_REQUESTING, | ||
360 | DLM_RECO_NODE_DATA_REQUESTED, | ||
361 | DLM_RECO_NODE_DATA_RECEIVING, | ||
362 | DLM_RECO_NODE_DATA_DONE, | ||
363 | DLM_RECO_NODE_DATA_FINALIZE_SENT, | ||
364 | }; | ||
365 | |||
366 | |||
367 | enum { | ||
368 | DLM_MASTER_RESP_NO = 0, | ||
369 | DLM_MASTER_RESP_YES, | ||
370 | DLM_MASTER_RESP_MAYBE, | ||
371 | DLM_MASTER_RESP_ERROR | ||
372 | }; | ||
373 | |||
374 | |||
375 | struct dlm_master_request | ||
376 | { | ||
377 | u8 node_idx; | ||
378 | u8 namelen; | ||
379 | __be16 pad1; | ||
380 | __be32 flags; | ||
381 | |||
382 | u8 name[O2NM_MAX_NAME_LEN]; | ||
383 | }; | ||
384 | |||
385 | #define DLM_ASSERT_MASTER_MLE_CLEANUP 0x00000001 | ||
386 | #define DLM_ASSERT_MASTER_REQUERY 0x00000002 | ||
387 | #define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004 | ||
388 | struct dlm_assert_master | ||
389 | { | ||
390 | u8 node_idx; | ||
391 | u8 namelen; | ||
392 | __be16 pad1; | ||
393 | __be32 flags; | ||
394 | |||
395 | u8 name[O2NM_MAX_NAME_LEN]; | ||
396 | }; | ||
397 | |||
398 | struct dlm_migrate_request | ||
399 | { | ||
400 | u8 master; | ||
401 | u8 new_master; | ||
402 | u8 namelen; | ||
403 | u8 pad1; | ||
404 | __be32 pad2; | ||
405 | u8 name[O2NM_MAX_NAME_LEN]; | ||
406 | }; | ||
407 | |||
408 | struct dlm_master_requery | ||
409 | { | ||
410 | u8 pad1; | ||
411 | u8 pad2; | ||
412 | u8 node_idx; | ||
413 | u8 namelen; | ||
414 | __be32 pad3; | ||
415 | u8 name[O2NM_MAX_NAME_LEN]; | ||
416 | }; | ||
417 | |||
418 | #define DLM_MRES_RECOVERY 0x01 | ||
419 | #define DLM_MRES_MIGRATION 0x02 | ||
420 | #define DLM_MRES_ALL_DONE 0x04 | ||
421 | |||
422 | /* | ||
423 | * We would like to get one whole lockres into a single network | ||
424 | * message whenever possible. Generally speaking, there will be | ||
425 | * at most one dlm_lock on a lockres for each node in the cluster, | ||
426 | * plus (infrequently) any additional locks coming in from userdlm. | ||
427 | * | ||
428 | * struct _dlm_lockres_page | ||
429 | * { | ||
430 | * dlm_migratable_lockres mres; | ||
431 | * dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS]; | ||
432 | * u8 pad[DLM_MIG_LOCKRES_RESERVED]; | ||
433 | * }; | ||
434 | * | ||
435 | * from ../cluster/tcp.h | ||
436 | * NET_MAX_PAYLOAD_BYTES (4096 - sizeof(net_msg)) | ||
437 | * (roughly 4080 bytes) | ||
438 | * and sizeof(dlm_migratable_lockres) = 112 bytes | ||
439 | * and sizeof(dlm_migratable_lock) = 16 bytes | ||
440 | * | ||
441 | * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and | ||
442 | * DLM_MIG_LOCKRES_RESERVED=128 means we have this: | ||
443 | * | ||
444 | * (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) + | ||
445 | * sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED = | ||
446 | * NET_MAX_PAYLOAD_BYTES | ||
447 | * (240 * 16) + 112 + 128 = 4080 | ||
448 | * | ||
449 | * So a lockres would need more than 240 locks before it would | ||
450 | * use more than one network packet to recover. Not too bad. | ||
451 | */ | ||
452 | #define DLM_MAX_MIGRATABLE_LOCKS 240 | ||
453 | |||
454 | struct dlm_migratable_lockres | ||
455 | { | ||
456 | u8 master; | ||
457 | u8 lockname_len; | ||
458 | u8 num_locks; // locks sent in this structure | ||
459 | u8 flags; | ||
460 | __be32 total_locks; // locks to be sent for this migration cookie | ||
461 | __be64 mig_cookie; // cookie for this lockres migration | ||
462 | // or zero if not needed | ||
463 | // 16 bytes | ||
464 | u8 lockname[DLM_LOCKID_NAME_MAX]; | ||
465 | // 48 bytes | ||
466 | u8 lvb[DLM_LVB_LEN]; | ||
467 | // 112 bytes | ||
468 | struct dlm_migratable_lock ml[0]; // 16 bytes each, begins at byte 112 | ||
469 | }; | ||
470 | #define DLM_MIG_LOCKRES_MAX_LEN \ | ||
471 | (sizeof(struct dlm_migratable_lockres) + \ | ||
472 | (sizeof(struct dlm_migratable_lock) * \ | ||
473 | DLM_MAX_MIGRATABLE_LOCKS) ) | ||
474 | |||
475 | /* from above, 128 bytes | ||
476 | * for some undetermined future use */ | ||
477 | #define DLM_MIG_LOCKRES_RESERVED (NET_MAX_PAYLOAD_BYTES - \ | ||
478 | DLM_MIG_LOCKRES_MAX_LEN) | ||
479 | |||
480 | struct dlm_create_lock | ||
481 | { | ||
482 | __be64 cookie; | ||
483 | |||
484 | __be32 flags; | ||
485 | u8 pad1; | ||
486 | u8 node_idx; | ||
487 | s8 requested_type; | ||
488 | u8 namelen; | ||
489 | |||
490 | u8 name[O2NM_MAX_NAME_LEN]; | ||
491 | }; | ||
492 | |||
493 | struct dlm_convert_lock | ||
494 | { | ||
495 | __be64 cookie; | ||
496 | |||
497 | __be32 flags; | ||
498 | u8 pad1; | ||
499 | u8 node_idx; | ||
500 | s8 requested_type; | ||
501 | u8 namelen; | ||
502 | |||
503 | u8 name[O2NM_MAX_NAME_LEN]; | ||
504 | |||
505 | s8 lvb[0]; | ||
506 | }; | ||
507 | #define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN) | ||
508 | |||
509 | struct dlm_unlock_lock | ||
510 | { | ||
511 | __be64 cookie; | ||
512 | |||
513 | __be32 flags; | ||
514 | __be16 pad1; | ||
515 | u8 node_idx; | ||
516 | u8 namelen; | ||
517 | |||
518 | u8 name[O2NM_MAX_NAME_LEN]; | ||
519 | |||
520 | s8 lvb[0]; | ||
521 | }; | ||
522 | #define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN) | ||
523 | |||
524 | struct dlm_proxy_ast | ||
525 | { | ||
526 | __be64 cookie; | ||
527 | |||
528 | __be32 flags; | ||
529 | u8 node_idx; | ||
530 | u8 type; | ||
531 | u8 blocked_type; | ||
532 | u8 namelen; | ||
533 | |||
534 | u8 name[O2NM_MAX_NAME_LEN]; | ||
535 | |||
536 | s8 lvb[0]; | ||
537 | }; | ||
538 | #define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) | ||
539 | |||
540 | #define DLM_MOD_KEY (0x666c6172) | ||
541 | enum dlm_query_join_response { | ||
542 | JOIN_DISALLOW = 0, | ||
543 | JOIN_OK, | ||
544 | JOIN_OK_NO_MAP, | ||
545 | }; | ||
546 | |||
547 | struct dlm_lock_request | ||
548 | { | ||
549 | u8 node_idx; | ||
550 | u8 dead_node; | ||
551 | __be16 pad1; | ||
552 | __be32 pad2; | ||
553 | }; | ||
554 | |||
555 | struct dlm_reco_data_done | ||
556 | { | ||
557 | u8 node_idx; | ||
558 | u8 dead_node; | ||
559 | __be16 pad1; | ||
560 | __be32 pad2; | ||
561 | |||
562 | /* unused for now */ | ||
563 | /* eventually we can use this to attempt | ||
564 | * lvb recovery based on each node's info */ | ||
565 | u8 reco_lvb[DLM_LVB_LEN]; | ||
566 | }; | ||
567 | |||
568 | struct dlm_begin_reco | ||
569 | { | ||
570 | u8 node_idx; | ||
571 | u8 dead_node; | ||
572 | __be16 pad1; | ||
573 | __be32 pad2; | ||
574 | }; | ||
575 | |||
576 | |||
577 | struct dlm_query_join_request | ||
578 | { | ||
579 | u8 node_idx; | ||
580 | u8 pad1[2]; | ||
581 | u8 name_len; | ||
582 | u8 domain[O2NM_MAX_NAME_LEN]; | ||
583 | }; | ||
584 | |||
585 | struct dlm_assert_joined | ||
586 | { | ||
587 | u8 node_idx; | ||
588 | u8 pad1[2]; | ||
589 | u8 name_len; | ||
590 | u8 domain[O2NM_MAX_NAME_LEN]; | ||
591 | }; | ||
592 | |||
593 | struct dlm_cancel_join | ||
594 | { | ||
595 | u8 node_idx; | ||
596 | u8 pad1[2]; | ||
597 | u8 name_len; | ||
598 | u8 domain[O2NM_MAX_NAME_LEN]; | ||
599 | }; | ||
600 | |||
601 | struct dlm_exit_domain | ||
602 | { | ||
603 | u8 node_idx; | ||
604 | u8 pad1[3]; | ||
605 | }; | ||
606 | |||
607 | struct dlm_finalize_reco | ||
608 | { | ||
609 | u8 node_idx; | ||
610 | u8 dead_node; | ||
611 | __be16 pad1; | ||
612 | __be32 pad2; | ||
613 | }; | ||
614 | |||
615 | static inline enum dlm_status | ||
616 | __dlm_lockres_state_to_status(struct dlm_lock_resource *res) | ||
617 | { | ||
618 | enum dlm_status status = DLM_NORMAL; | ||
619 | |||
620 | assert_spin_locked(&res->spinlock); | ||
621 | |||
622 | if (res->state & DLM_LOCK_RES_RECOVERING) | ||
623 | status = DLM_RECOVERING; | ||
624 | else if (res->state & DLM_LOCK_RES_MIGRATING) | ||
625 | status = DLM_MIGRATING; | ||
626 | else if (res->state & DLM_LOCK_RES_IN_PROGRESS) | ||
627 | status = DLM_FORWARD; | ||
628 | |||
629 | return status; | ||
630 | } | ||
631 | |||
632 | struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, | ||
633 | struct dlm_lockstatus *lksb); | ||
634 | void dlm_lock_get(struct dlm_lock *lock); | ||
635 | void dlm_lock_put(struct dlm_lock *lock); | ||
636 | |||
637 | void dlm_lock_attach_lockres(struct dlm_lock *lock, | ||
638 | struct dlm_lock_resource *res); | ||
639 | |||
640 | int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data); | ||
641 | int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data); | ||
642 | int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data); | ||
643 | |||
644 | void dlm_revert_pending_convert(struct dlm_lock_resource *res, | ||
645 | struct dlm_lock *lock); | ||
646 | void dlm_revert_pending_lock(struct dlm_lock_resource *res, | ||
647 | struct dlm_lock *lock); | ||
648 | |||
649 | int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data); | ||
650 | void dlm_commit_pending_cancel(struct dlm_lock_resource *res, | ||
651 | struct dlm_lock *lock); | ||
652 | void dlm_commit_pending_unlock(struct dlm_lock_resource *res, | ||
653 | struct dlm_lock *lock); | ||
654 | |||
655 | int dlm_launch_thread(struct dlm_ctxt *dlm); | ||
656 | void dlm_complete_thread(struct dlm_ctxt *dlm); | ||
657 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); | ||
658 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | ||
659 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm); | ||
660 | |||
661 | void dlm_put(struct dlm_ctxt *dlm); | ||
662 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); | ||
663 | int dlm_domain_fully_joined(struct dlm_ctxt *dlm); | ||
664 | |||
665 | void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
666 | struct dlm_lock_resource *res); | ||
667 | void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
668 | struct dlm_lock_resource *res); | ||
669 | void dlm_purge_lockres(struct dlm_ctxt *dlm, | ||
670 | struct dlm_lock_resource *lockres); | ||
671 | void dlm_lockres_get(struct dlm_lock_resource *res); | ||
672 | void dlm_lockres_put(struct dlm_lock_resource *res); | ||
673 | void __dlm_unhash_lockres(struct dlm_lock_resource *res); | ||
674 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | ||
675 | struct dlm_lock_resource *res); | ||
676 | struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
677 | const char *name, | ||
678 | unsigned int len); | ||
679 | struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
680 | const char *name, | ||
681 | unsigned int len); | ||
682 | |||
683 | int dlm_is_host_down(int errno); | ||
684 | void dlm_change_lockres_owner(struct dlm_ctxt *dlm, | ||
685 | struct dlm_lock_resource *res, | ||
686 | u8 owner); | ||
687 | struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, | ||
688 | const char *lockid, | ||
689 | int flags); | ||
690 | struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | ||
691 | const char *name, | ||
692 | unsigned int namelen); | ||
693 | |||
694 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
695 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
696 | void dlm_do_local_ast(struct dlm_ctxt *dlm, | ||
697 | struct dlm_lock_resource *res, | ||
698 | struct dlm_lock *lock); | ||
699 | int dlm_do_remote_ast(struct dlm_ctxt *dlm, | ||
700 | struct dlm_lock_resource *res, | ||
701 | struct dlm_lock *lock); | ||
702 | void dlm_do_local_bast(struct dlm_ctxt *dlm, | ||
703 | struct dlm_lock_resource *res, | ||
704 | struct dlm_lock *lock, | ||
705 | int blocked_type); | ||
706 | int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, | ||
707 | struct dlm_lock_resource *res, | ||
708 | struct dlm_lock *lock, | ||
709 | int msg_type, | ||
710 | int blocked_type, int flags); | ||
711 | static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm, | ||
712 | struct dlm_lock_resource *res, | ||
713 | struct dlm_lock *lock, | ||
714 | int blocked_type) | ||
715 | { | ||
716 | return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST, | ||
717 | blocked_type, 0); | ||
718 | } | ||
719 | |||
720 | static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm, | ||
721 | struct dlm_lock_resource *res, | ||
722 | struct dlm_lock *lock, | ||
723 | int flags) | ||
724 | { | ||
725 | return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST, | ||
726 | 0, flags); | ||
727 | } | ||
728 | |||
729 | void dlm_print_one_lock_resource(struct dlm_lock_resource *res); | ||
730 | void __dlm_print_one_lock_resource(struct dlm_lock_resource *res); | ||
731 | |||
732 | u8 dlm_nm_this_node(struct dlm_ctxt *dlm); | ||
733 | void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); | ||
734 | void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); | ||
735 | |||
736 | |||
737 | int dlm_nm_init(struct dlm_ctxt *dlm); | ||
738 | int dlm_heartbeat_init(struct dlm_ctxt *dlm); | ||
739 | void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data); | ||
740 | void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data); | ||
741 | |||
742 | int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); | ||
743 | int dlm_migrate_lockres(struct dlm_ctxt *dlm, | ||
744 | struct dlm_lock_resource *res, | ||
745 | u8 target); | ||
746 | int dlm_finish_migration(struct dlm_ctxt *dlm, | ||
747 | struct dlm_lock_resource *res, | ||
748 | u8 old_master); | ||
749 | void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | ||
750 | struct dlm_lock_resource *res); | ||
751 | void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res); | ||
752 | |||
753 | int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data); | ||
754 | int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data); | ||
755 | int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data); | ||
756 | int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data); | ||
757 | int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data); | ||
758 | int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data); | ||
759 | int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data); | ||
760 | int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data); | ||
761 | int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data); | ||
762 | |||
763 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, | ||
764 | struct dlm_lock_resource *res, | ||
765 | int ignore_higher, | ||
766 | u8 request_from, | ||
767 | u32 flags); | ||
768 | |||
769 | |||
770 | int dlm_send_one_lockres(struct dlm_ctxt *dlm, | ||
771 | struct dlm_lock_resource *res, | ||
772 | struct dlm_migratable_lockres *mres, | ||
773 | u8 send_to, | ||
774 | u8 flags); | ||
775 | void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, | ||
776 | struct dlm_lock_resource *res); | ||
777 | |||
778 | /* will exit holding res->spinlock, but may drop in function */ | ||
779 | void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags); | ||
780 | void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags); | ||
781 | |||
782 | /* will exit holding res->spinlock, but may drop in function */ | ||
783 | static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res) | ||
784 | { | ||
785 | __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS| | ||
786 | DLM_LOCK_RES_RECOVERING| | ||
787 | DLM_LOCK_RES_MIGRATING)); | ||
788 | } | ||
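/*
 * Illustrative caller pattern (a sketch, mirroring dlmconvert_master
 * later in this patch): take res->spinlock, wait for the resource to
 * go idle, mark the operation in progress, and wake waiters when done:
 *
 *	spin_lock(&res->spinlock);
 *	__dlm_wait_on_lockres(res);
 *	res->state |= DLM_LOCK_RES_IN_PROGRESS;
 *	... do work ...
 *	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 *	spin_unlock(&res->spinlock);
 *	wake_up(&res->wq);
 */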
789 | |||
790 | |||
791 | int dlm_init_mle_cache(void); | ||
792 | void dlm_destroy_mle_cache(void); | ||
793 | void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up); | ||
794 | void dlm_clean_master_list(struct dlm_ctxt *dlm, | ||
795 | u8 dead_node); | ||
796 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
797 | |||
798 | |||
799 | static inline const char * dlm_lock_mode_name(int mode) | ||
800 | { | ||
801 | switch (mode) { | ||
802 | case LKM_EXMODE: | ||
803 | return "EX"; | ||
804 | case LKM_PRMODE: | ||
805 | return "PR"; | ||
806 | case LKM_NLMODE: | ||
807 | return "NL"; | ||
808 | } | ||
809 | return "UNKNOWN"; | ||
810 | } | ||
811 | |||
812 | |||
813 | static inline int dlm_lock_compatible(int existing, int request) | ||
814 | { | ||
815 | /* NO_LOCK compatible with all */ | ||
816 | if (request == LKM_NLMODE || | ||
817 | existing == LKM_NLMODE) | ||
818 | return 1; | ||
819 | |||
820 | /* EX incompatible with all non-NO_LOCK */ | ||
821 | if (request == LKM_EXMODE) | ||
822 | return 0; | ||
823 | |||
824 | /* request must be PR, which is compatible with PR */ | ||
825 | if (existing == LKM_PRMODE) | ||
826 | return 1; | ||
827 | |||
828 | return 0; | ||
829 | } | ||
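/*
 * Equivalent compatibility matrix for the function above
 * (1 = compatible):
 *
 *	existing \ request:	NL	PR	EX
 *	NL			1	1	1
 *	PR			1	1	0
 *	EX			1	0	0
 */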
830 | |||
831 | static inline int dlm_lock_on_list(struct list_head *head, | ||
832 | struct dlm_lock *lock) | ||
833 | { | ||
834 | struct list_head *iter; | ||
835 | struct dlm_lock *tmplock; | ||
836 | |||
837 | list_for_each(iter, head) { | ||
838 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
839 | if (tmplock == lock) | ||
840 | return 1; | ||
841 | } | ||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | |||
846 | static inline enum dlm_status dlm_err_to_dlm_status(int err) | ||
847 | { | ||
848 | enum dlm_status ret; | ||
849 | if (err == -ENOMEM) | ||
850 | ret = DLM_SYSERR; | ||
851 | else if (err == -ETIMEDOUT || o2net_link_down(err, NULL)) | ||
852 | ret = DLM_NOLOCKMGR; | ||
853 | else if (err == -EINVAL) | ||
854 | ret = DLM_BADPARAM; | ||
855 | else if (err == -ENAMETOOLONG) | ||
856 | ret = DLM_IVBUFLEN; | ||
857 | else | ||
858 | ret = DLM_BADARGS; | ||
859 | return ret; | ||
860 | } | ||
861 | |||
862 | |||
863 | static inline void dlm_node_iter_init(unsigned long *map, | ||
864 | struct dlm_node_iter *iter) | ||
865 | { | ||
866 | memcpy(iter->node_map, map, sizeof(iter->node_map)); | ||
867 | iter->curnode = -1; | ||
868 | } | ||
869 | |||
870 | static inline int dlm_node_iter_next(struct dlm_node_iter *iter) | ||
871 | { | ||
872 | int bit; | ||
873 | bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1); | ||
874 | if (bit >= O2NM_MAX_NODES) { | ||
875 | iter->curnode = O2NM_MAX_NODES; | ||
876 | return -ENOENT; | ||
877 | } | ||
878 | iter->curnode = bit; | ||
879 | return bit; | ||
880 | } | ||
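/*
 * Illustrative iteration sketch (assuming dlm->domain_map as the
 * bitmap): visit every node set in the map until the iterator is
 * exhausted:
 *
 *	struct dlm_node_iter iter;
 *	int node;
 *
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	while ((node = dlm_node_iter_next(&iter)) >= 0) {
 *		... contact 'node' ...
 *	}
 */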
881 | |||
882 | |||
883 | |||
884 | #endif /* DLMCOMMON_H */ | ||
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c new file mode 100644 index 000000000000..6001b22a997d --- /dev/null +++ b/fs/ocfs2/dlm/dlmconvert.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmconvert.c | ||
5 | * | ||
6 | * underlying calls for lock conversion | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | |||
42 | |||
43 | #include "cluster/heartbeat.h" | ||
44 | #include "cluster/nodemanager.h" | ||
45 | #include "cluster/tcp.h" | ||
46 | |||
47 | #include "dlmapi.h" | ||
48 | #include "dlmcommon.h" | ||
49 | |||
50 | #include "dlmconvert.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX ML_DLM | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | /* NOTE: __dlmconvert_master is the only function in here that | ||
56 | * needs a spinlock held on entry (res->spinlock) and it is the | ||
57 | * only one that holds a lock on exit (res->spinlock). | ||
58 | * All other functions in here need no locks and drop all of | ||
59 | * the locks that they acquire. */ | ||
60 | static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | ||
61 | struct dlm_lock_resource *res, | ||
62 | struct dlm_lock *lock, int flags, | ||
63 | int type, int *call_ast, | ||
64 | int *kick_thread); | ||
65 | static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, | ||
66 | struct dlm_lock_resource *res, | ||
67 | struct dlm_lock *lock, int flags, int type); | ||
68 | |||
69 | /* | ||
70 | * this is only called directly by dlmlock(), and only when the | ||
71 | * local node is the owner of the lockres | ||
72 | * locking: | ||
73 | * caller needs: none | ||
74 | * taken: takes and drops res->spinlock | ||
75 | * held on exit: none | ||
76 | * returns: see __dlmconvert_master | ||
77 | */ | ||
78 | enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, | ||
79 | struct dlm_lock_resource *res, | ||
80 | struct dlm_lock *lock, int flags, int type) | ||
81 | { | ||
82 | int call_ast = 0, kick_thread = 0; | ||
83 | enum dlm_status status; | ||
84 | |||
85 | spin_lock(&res->spinlock); | ||
86 | /* we are not in a network handler, this is fine */ | ||
87 | __dlm_wait_on_lockres(res); | ||
88 | __dlm_lockres_reserve_ast(res); | ||
89 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
90 | |||
91 | status = __dlmconvert_master(dlm, res, lock, flags, type, | ||
92 | &call_ast, &kick_thread); | ||
93 | |||
94 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
95 | spin_unlock(&res->spinlock); | ||
96 | wake_up(&res->wq); | ||
97 | if (status != DLM_NORMAL && status != DLM_NOTQUEUED) | ||
98 | dlm_error(status); | ||
99 | |||
100 | /* either queue the ast or release it */ | ||
101 | if (call_ast) | ||
102 | dlm_queue_ast(dlm, lock); | ||
103 | else | ||
104 | dlm_lockres_release_ast(dlm, res); | ||
105 | |||
106 | if (kick_thread) | ||
107 | dlm_kick_thread(dlm, res); | ||
108 | |||
109 | return status; | ||
110 | } | ||
111 | |||
112 | /* performs lock conversion at the lockres master site | ||
113 | * locking: | ||
114 | * caller needs: res->spinlock | ||
115 | * taken: takes and drops lock->spinlock | ||
116 | * held on exit: res->spinlock | ||
117 | * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED | ||
118 | * call_ast: whether ast should be called for this lock | ||
119 | * kick_thread: whether dlm_kick_thread should be called | ||
120 | */ | ||
121 | static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | ||
122 | struct dlm_lock_resource *res, | ||
123 | struct dlm_lock *lock, int flags, | ||
124 | int type, int *call_ast, | ||
125 | int *kick_thread) | ||
126 | { | ||
127 | enum dlm_status status = DLM_NORMAL; | ||
128 | struct list_head *iter; | ||
129 | struct dlm_lock *tmplock = NULL; | ||
130 | |||
131 | assert_spin_locked(&res->spinlock); | ||
132 | |||
133 | mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", | ||
134 | lock->ml.type, lock->ml.convert_type, type); | ||
135 | |||
136 | spin_lock(&lock->spinlock); | ||
137 | |||
138 | /* already converting? */ | ||
139 | if (lock->ml.convert_type != LKM_IVMODE) { | ||
140 | mlog(ML_ERROR, "attempted to convert a lock with a lock " | ||
141 | "conversion pending\n"); | ||
142 | status = DLM_DENIED; | ||
143 | goto unlock_exit; | ||
144 | } | ||
145 | |||
146 | /* must be on grant queue to convert */ | ||
147 | if (!dlm_lock_on_list(&res->granted, lock)) { | ||
148 | mlog(ML_ERROR, "attempted to convert a lock not on grant " | ||
149 | "queue\n"); | ||
150 | status = DLM_DENIED; | ||
151 | goto unlock_exit; | ||
152 | } | ||
153 | |||
154 | if (flags & LKM_VALBLK) { | ||
155 | switch (lock->ml.type) { | ||
156 | case LKM_EXMODE: | ||
157 | /* EX + LKM_VALBLK + convert == set lvb */ | ||
158 | mlog(0, "will set lvb: converting %s->%s\n", | ||
159 | dlm_lock_mode_name(lock->ml.type), | ||
160 | dlm_lock_mode_name(type)); | ||
161 | lock->lksb->flags |= DLM_LKSB_PUT_LVB; | ||
162 | break; | ||
163 | case LKM_PRMODE: | ||
164 | case LKM_NLMODE: | ||
165 | /* refetch if new level is not NL */ | ||
166 | if (type > LKM_NLMODE) { | ||
167 | mlog(0, "will fetch new value into " | ||
168 | "lvb: converting %s->%s\n", | ||
169 | dlm_lock_mode_name(lock->ml.type), | ||
170 | dlm_lock_mode_name(type)); | ||
171 | lock->lksb->flags |= DLM_LKSB_GET_LVB; | ||
172 | } else { | ||
173 | mlog(0, "will NOT fetch new value " | ||
174 | "into lvb: converting %s->%s\n", | ||
175 | dlm_lock_mode_name(lock->ml.type), | ||
176 | dlm_lock_mode_name(type)); | ||
177 | flags &= ~(LKM_VALBLK); | ||
178 | } | ||
179 | break; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | |||
184 | /* in-place downconvert? */ | ||
185 | if (type <= lock->ml.type) | ||
186 | goto grant; | ||
187 | |||
188 | /* upconvert from here on */ | ||
189 | status = DLM_NORMAL; | ||
190 | list_for_each(iter, &res->granted) { | ||
191 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
192 | if (tmplock == lock) | ||
193 | continue; | ||
194 | if (!dlm_lock_compatible(tmplock->ml.type, type)) | ||
195 | goto switch_queues; | ||
196 | } | ||
197 | |||
198 | list_for_each(iter, &res->converting) { | ||
199 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
200 | if (!dlm_lock_compatible(tmplock->ml.type, type)) | ||
201 | goto switch_queues; | ||
202 | /* existing conversion requests take precedence */ | ||
203 | if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) | ||
204 | goto switch_queues; | ||
205 | } | ||
206 | |||
207 | /* fall thru to grant */ | ||
208 | |||
209 | grant: | ||
210 | mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, | ||
211 | res->lockname.name, dlm_lock_mode_name(type)); | ||
212 | /* immediately grant the new lock type */ | ||
213 | lock->lksb->status = DLM_NORMAL; | ||
214 | if (lock->ml.node != dlm->node_num) | ||
215 | mlog(0, "doing in-place convert for nonlocal lock\n"); | ||
216 | lock->ml.type = type; | ||
217 | status = DLM_NORMAL; | ||
218 | *call_ast = 1; | ||
219 | goto unlock_exit; | ||
220 | |||
221 | switch_queues: | ||
222 | if (flags & LKM_NOQUEUE) { | ||
223 | mlog(0, "failed to convert NOQUEUE lock %.*s from " | ||
224 | "%d to %d...\n", res->lockname.len, res->lockname.name, | ||
225 | lock->ml.type, type); | ||
226 | status = DLM_NOTQUEUED; | ||
227 | goto unlock_exit; | ||
228 | } | ||
229 | mlog(0, "res %.*s, queueing...\n", res->lockname.len, | ||
230 | res->lockname.name); | ||
231 | |||
232 | lock->ml.convert_type = type; | ||
233 | /* do not alter lock refcount. switching lists. */ | ||
234 | list_del_init(&lock->list); | ||
235 | list_add_tail(&lock->list, &res->converting); | ||
236 | |||
237 | unlock_exit: | ||
238 | spin_unlock(&lock->spinlock); | ||
239 | if (status == DLM_DENIED) { | ||
240 | __dlm_print_one_lock_resource(res); | ||
241 | } | ||
242 | if (status == DLM_NORMAL) | ||
243 | *kick_thread = 1; | ||
244 | return status; | ||
245 | } | ||
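/*
 * Summary of the decision above: a downconvert (requested mode <=
 * current mode) is always granted in place; an upconvert is granted
 * only if the requested mode is compatible with every other granted
 * lock and every pending conversion.  Otherwise the lock moves to the
 * converting queue, or fails with DLM_NOTQUEUED if LKM_NOQUEUE was
 * passed.
 */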
246 | |||
247 | void dlm_revert_pending_convert(struct dlm_lock_resource *res, | ||
248 | struct dlm_lock *lock) | ||
249 | { | ||
250 | /* do not alter lock refcount. switching lists. */ | ||
251 | list_del_init(&lock->list); | ||
252 | list_add_tail(&lock->list, &res->granted); | ||
253 | lock->ml.convert_type = LKM_IVMODE; | ||
254 | lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); | ||
255 | } | ||
256 | |||
257 | /* messages the master site to do lock conversion | ||
258 | * locking: | ||
259 | * caller needs: none | ||
260 | * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS | ||
261 | * held on exit: none | ||
262 | * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node | ||
263 | */ | ||
264 | enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, | ||
265 | struct dlm_lock_resource *res, | ||
266 | struct dlm_lock *lock, int flags, int type) | ||
267 | { | ||
268 | enum dlm_status status; | ||
269 | |||
270 | mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, | ||
271 | lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); | ||
272 | |||
273 | spin_lock(&res->spinlock); | ||
274 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
275 | mlog(0, "bailing out early since res is RECOVERING " | ||
276 | "on secondary queue\n"); | ||
277 | /* __dlm_print_one_lock_resource(res); */ | ||
278 | status = DLM_RECOVERING; | ||
279 | goto bail; | ||
280 | } | ||
281 | /* will exit this call with spinlock held */ | ||
282 | __dlm_wait_on_lockres(res); | ||
283 | |||
284 | if (lock->ml.convert_type != LKM_IVMODE) { | ||
285 | __dlm_print_one_lock_resource(res); | ||
286 | mlog(ML_ERROR, "converting a remote lock that is already " | ||
287 | "converting! (cookie=%"MLFu64", conv=%d)\n", | ||
288 | lock->ml.cookie, lock->ml.convert_type); | ||
289 | status = DLM_DENIED; | ||
290 | goto bail; | ||
291 | } | ||
292 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
293 | /* move lock to local convert queue */ | ||
294 | /* do not alter lock refcount. switching lists. */ | ||
295 | list_del_init(&lock->list); | ||
296 | list_add_tail(&lock->list, &res->converting); | ||
297 | lock->convert_pending = 1; | ||
298 | lock->ml.convert_type = type; | ||
299 | |||
300 | if (flags & LKM_VALBLK) { | ||
301 | if (lock->ml.type == LKM_EXMODE) { | ||
302 | flags |= LKM_PUT_LVB; | ||
303 | lock->lksb->flags |= DLM_LKSB_PUT_LVB; | ||
304 | } else { | ||
305 | if (lock->ml.convert_type == LKM_NLMODE) | ||
306 | flags &= ~LKM_VALBLK; | ||
307 | else { | ||
308 | flags |= LKM_GET_LVB; | ||
309 | lock->lksb->flags |= DLM_LKSB_GET_LVB; | ||
310 | } | ||
311 | } | ||
312 | } | ||
313 | spin_unlock(&res->spinlock); | ||
314 | |||
315 | /* no locks held here. | ||
316 | * need to wait for a reply as to whether it got queued or not. */ | ||
317 | status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); | ||
318 | |||
319 | spin_lock(&res->spinlock); | ||
320 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
321 | lock->convert_pending = 0; | ||
322 | /* if it failed, move it back to granted queue */ | ||
323 | if (status != DLM_NORMAL) { | ||
324 | if (status != DLM_NOTQUEUED) | ||
325 | dlm_error(status); | ||
326 | dlm_revert_pending_convert(res, lock); | ||
327 | } | ||
328 | bail: | ||
329 | spin_unlock(&res->spinlock); | ||
330 | |||
331 | /* TODO: should this be a wake_one? */ | ||
332 | /* wake up any IN_PROGRESS waiters */ | ||
333 | wake_up(&res->wq); | ||
334 | |||
335 | return status; | ||
336 | } | ||
337 | |||
338 | /* sends DLM_CONVERT_LOCK_MSG to master site | ||
339 | * locking: | ||
340 | * caller needs: none | ||
341 | * taken: none | ||
342 | * held on exit: none | ||
343 | * returns: DLM_NOLOCKMGR, status from remote node | ||
344 | */ | ||
345 | static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, | ||
346 | struct dlm_lock_resource *res, | ||
347 | struct dlm_lock *lock, int flags, int type) | ||
348 | { | ||
349 | struct dlm_convert_lock convert; | ||
350 | int tmpret; | ||
351 | enum dlm_status ret; | ||
352 | int status = 0; | ||
353 | struct kvec vec[2]; | ||
354 | size_t veclen = 1; | ||
355 | |||
356 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
357 | |||
358 | memset(&convert, 0, sizeof(struct dlm_convert_lock)); | ||
359 | convert.node_idx = dlm->node_num; | ||
360 | convert.requested_type = type; | ||
361 | convert.cookie = lock->ml.cookie; | ||
362 | convert.namelen = res->lockname.len; | ||
363 | convert.flags = cpu_to_be32(flags); | ||
364 | memcpy(convert.name, res->lockname.name, convert.namelen); | ||
365 | |||
366 | vec[0].iov_len = sizeof(struct dlm_convert_lock); | ||
367 | vec[0].iov_base = &convert; | ||
368 | |||
369 | if (flags & LKM_PUT_LVB) { | ||
370 | /* extra data to send if we are updating lvb */ | ||
371 | vec[1].iov_len = DLM_LVB_LEN; | ||
372 | vec[1].iov_base = lock->lksb->lvb; | ||
373 | veclen++; | ||
374 | } | ||
375 | |||
376 | tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key, | ||
377 | vec, veclen, res->owner, &status); | ||
378 | if (tmpret >= 0) { | ||
379 | /* successfully sent and received */ | ||
380 | ret = status;  /* this is already a dlm_status */ | ||
381 | if (ret == DLM_RECOVERING) { | ||
382 | mlog(0, "node %u returned DLM_RECOVERING from convert " | ||
383 | "message!\n", res->owner); | ||
384 | } else if (ret == DLM_MIGRATING) { | ||
385 | mlog(0, "node %u returned DLM_MIGRATING from convert " | ||
386 | "message!\n", res->owner); | ||
387 | } else if (ret == DLM_FORWARD) { | ||
388 | mlog(0, "node %u returned DLM_FORWARD from convert " | ||
389 | "message!\n", res->owner); | ||
390 | } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED) | ||
391 | dlm_error(ret); | ||
392 | } else { | ||
393 | mlog_errno(tmpret); | ||
394 | if (dlm_is_host_down(tmpret)) { | ||
395 | ret = DLM_RECOVERING; | ||
396 | mlog(0, "node %u died so returning DLM_RECOVERING " | ||
397 | "from convert message!\n", res->owner); | ||
398 | } else { | ||
399 | ret = dlm_err_to_dlm_status(tmpret); | ||
400 | } | ||
401 | } | ||
402 | |||
403 | return ret; | ||
404 | } | ||
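/*
 * Wire layout built above, for reference: the first kvec entry always
 * carries the fixed-size struct dlm_convert_lock; when LKM_PUT_LVB is
 * set, a second entry appends DLM_LVB_LEN bytes of lvb data, so the
 * master receives [ dlm_convert_lock | optional lvb ] as one message.
 */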
405 | |||
406 | /* handler for DLM_CONVERT_LOCK_MSG on master site | ||
407 | * locking: | ||
408 | * caller needs: none | ||
409 | * taken: takes and drop res->spinlock | ||
410 | * held on exit: none | ||
411 | * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, | ||
412 | * status from __dlmconvert_master | ||
413 | */ | ||
414 | int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) | ||
415 | { | ||
416 | struct dlm_ctxt *dlm = data; | ||
417 | struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; | ||
418 | struct dlm_lock_resource *res = NULL; | ||
419 | struct list_head *iter; | ||
420 | struct dlm_lock *lock = NULL; | ||
421 | struct dlm_lockstatus *lksb; | ||
422 | enum dlm_status status = DLM_NORMAL; | ||
423 | u32 flags; | ||
424 | int call_ast = 0, kick_thread = 0; | ||
425 | |||
426 | if (!dlm_grab(dlm)) { | ||
427 | dlm_error(DLM_REJECTED); | ||
428 | return DLM_REJECTED; | ||
429 | } | ||
430 | |||
431 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
432 | "Domain %s not fully joined!\n", dlm->name); | ||
433 | |||
434 | if (cnv->namelen > DLM_LOCKID_NAME_MAX) { | ||
435 | status = DLM_IVBUFLEN; | ||
436 | dlm_error(status); | ||
437 | goto leave; | ||
438 | } | ||
439 | |||
440 | flags = be32_to_cpu(cnv->flags); | ||
441 | |||
442 | if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == | ||
443 | (LKM_PUT_LVB|LKM_GET_LVB)) { | ||
444 | mlog(ML_ERROR, "both PUT and GET lvb specified\n"); | ||
445 | status = DLM_BADARGS; | ||
446 | goto leave; | ||
447 | } | ||
448 | |||
449 | mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : | ||
450 | (flags & LKM_GET_LVB ? "get lvb" : "none")); | ||
451 | |||
452 | status = DLM_IVLOCKID; | ||
453 | res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen); | ||
454 | if (!res) { | ||
455 | dlm_error(status); | ||
456 | goto leave; | ||
457 | } | ||
458 | |||
459 | spin_lock(&res->spinlock); | ||
460 | list_for_each(iter, &res->granted) { | ||
461 | lock = list_entry(iter, struct dlm_lock, list); | ||
462 | if (lock->ml.cookie == cnv->cookie && | ||
463 | lock->ml.node == cnv->node_idx) { | ||
464 | dlm_lock_get(lock); | ||
465 | break; | ||
466 | } | ||
467 | lock = NULL; | ||
468 | } | ||
469 | spin_unlock(&res->spinlock); | ||
470 | if (!lock) { | ||
471 | status = DLM_IVLOCKID; | ||
472 | dlm_error(status); | ||
473 | goto leave; | ||
474 | } | ||
475 | |||
476 | /* found the lock */ | ||
477 | lksb = lock->lksb; | ||
478 | |||
479 | /* see if caller needed to get/put lvb */ | ||
480 | if (flags & LKM_PUT_LVB) { | ||
481 | BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | ||
482 | lksb->flags |= DLM_LKSB_PUT_LVB; | ||
483 | memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN); | ||
484 | } else if (flags & LKM_GET_LVB) { | ||
485 | BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | ||
486 | lksb->flags |= DLM_LKSB_GET_LVB; | ||
487 | } | ||
488 | |||
489 | spin_lock(&res->spinlock); | ||
490 | status = __dlm_lockres_state_to_status(res); | ||
491 | if (status == DLM_NORMAL) { | ||
492 | __dlm_lockres_reserve_ast(res); | ||
493 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
494 | status = __dlmconvert_master(dlm, res, lock, flags, | ||
495 | cnv->requested_type, | ||
496 | &call_ast, &kick_thread); | ||
497 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
498 | } | ||
499 | spin_unlock(&res->spinlock); | ||
500 | |||
501 | if (status != DLM_NORMAL) { | ||
502 | if (status != DLM_NOTQUEUED) | ||
503 | dlm_error(status); | ||
504 | lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); | ||
505 | } | ||
506 | |||
507 | leave: | ||
508 | if (!lock) | ||
509 | mlog(ML_ERROR, "did not find lock to convert on grant queue! " | ||
510 | "cookie=%"MLFu64"\n", | ||
511 | cnv->cookie); | ||
512 | else | ||
513 | dlm_lock_put(lock); | ||
514 | |||
515 | /* either queue the ast or release it */ | ||
516 | if (call_ast) | ||
517 | dlm_queue_ast(dlm, lock); | ||
518 | else | ||
519 | dlm_lockres_release_ast(dlm, res); | ||
520 | |||
521 | if (kick_thread) | ||
522 | dlm_kick_thread(dlm, res); | ||
523 | |||
524 | if (res) | ||
525 | dlm_lockres_put(res); | ||
526 | |||
527 | dlm_put(dlm); | ||
528 | |||
529 | return status; | ||
530 | } | ||
diff --git a/fs/ocfs2/dlm/dlmconvert.h b/fs/ocfs2/dlm/dlmconvert.h new file mode 100644 index 000000000000..b2e3677df878 --- /dev/null +++ b/fs/ocfs2/dlm/dlmconvert.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmconvert.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 021110-1307, USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMCONVERT_H | ||
26 | #define DLMCONVERT_H | ||
27 | |||
28 | enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, | ||
29 | struct dlm_lock_resource *res, | ||
30 | struct dlm_lock *lock, int flags, int type); | ||
31 | enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, | ||
32 | struct dlm_lock_resource *res, | ||
33 | struct dlm_lock *lock, int flags, int type); | ||
34 | |||
35 | #endif | ||
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c new file mode 100644 index 000000000000..f339fe27975a --- /dev/null +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdebug.c | ||
5 | * | ||
6 | * debug functionality for the dlm | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/utsname.h> | ||
31 | #include <linux/sysctl.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | |||
34 | #include "cluster/heartbeat.h" | ||
35 | #include "cluster/nodemanager.h" | ||
36 | #include "cluster/tcp.h" | ||
37 | |||
38 | #include "dlmapi.h" | ||
39 | #include "dlmcommon.h" | ||
40 | #include "dlmdebug.h" | ||
41 | |||
42 | #include "dlmdomain.h" | ||
43 | #include "dlmdebug.h" | ||
44 | |||
45 | #define MLOG_MASK_PREFIX ML_DLM | ||
46 | #include "cluster/masklog.h" | ||
47 | |||
48 | void dlm_print_one_lock_resource(struct dlm_lock_resource *res) | ||
49 | { | ||
50 | mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n", | ||
51 | res->lockname.len, res->lockname.name, | ||
52 | res->owner, res->state); | ||
53 | spin_lock(&res->spinlock); | ||
54 | __dlm_print_one_lock_resource(res); | ||
55 | spin_unlock(&res->spinlock); | ||
56 | } | ||
57 | |||
58 | void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) | ||
59 | { | ||
60 | struct list_head *iter2; | ||
61 | struct dlm_lock *lock; | ||
62 | |||
63 | assert_spin_locked(&res->spinlock); | ||
64 | |||
65 | mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n", | ||
66 | res->lockname.len, res->lockname.name, | ||
67 | res->owner, res->state); | ||
68 | mlog(ML_NOTICE, " last used: %lu, on purge list: %s\n", | ||
69 | res->last_used, list_empty(&res->purge) ? "no" : "yes"); | ||
70 | mlog(ML_NOTICE, " granted queue: \n"); | ||
71 | list_for_each(iter2, &res->granted) { | ||
72 | lock = list_entry(iter2, struct dlm_lock, list); | ||
73 | spin_lock(&lock->spinlock); | ||
74 | mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " | ||
75 | "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", | ||
76 | lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie, | ||
77 | list_empty(&lock->ast_list) ? 'y' : 'n', | ||
78 | lock->ast_pending ? 'y' : 'n', | ||
79 | list_empty(&lock->bast_list) ? 'y' : 'n', | ||
80 | lock->bast_pending ? 'y' : 'n'); | ||
81 | spin_unlock(&lock->spinlock); | ||
82 | } | ||
83 | mlog(ML_NOTICE, " converting queue: \n"); | ||
84 | list_for_each(iter2, &res->converting) { | ||
85 | lock = list_entry(iter2, struct dlm_lock, list); | ||
86 | spin_lock(&lock->spinlock); | ||
87 | mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " | ||
88 | "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", | ||
89 | lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie, | ||
90 | list_empty(&lock->ast_list) ? 'y' : 'n', | ||
91 | lock->ast_pending ? 'y' : 'n', | ||
92 | list_empty(&lock->bast_list) ? 'y' : 'n', | ||
93 | lock->bast_pending ? 'y' : 'n'); | ||
94 | spin_unlock(&lock->spinlock); | ||
95 | } | ||
96 | mlog(ML_NOTICE, " blocked queue: \n"); | ||
97 | list_for_each(iter2, &res->blocked) { | ||
98 | lock = list_entry(iter2, struct dlm_lock, list); | ||
99 | spin_lock(&lock->spinlock); | ||
100 | mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " | ||
101 | "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", | ||
102 | lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie, | ||
103 | list_empty(&lock->ast_list) ? 'y' : 'n', | ||
104 | lock->ast_pending ? 'y' : 'n', | ||
105 | list_empty(&lock->bast_list) ? 'y' : 'n', | ||
106 | lock->bast_pending ? 'y' : 'n'); | ||
107 | spin_unlock(&lock->spinlock); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | void dlm_print_one_lock(struct dlm_lock *lockid) | ||
112 | { | ||
113 | dlm_print_one_lock_resource(lockid->lockres); | ||
114 | } | ||
115 | EXPORT_SYMBOL_GPL(dlm_print_one_lock); | ||
116 | |||
117 | void dlm_dump_lock_resources(struct dlm_ctxt *dlm) | ||
118 | { | ||
119 | struct dlm_lock_resource *res; | ||
120 | struct list_head *iter; | ||
121 | struct list_head *bucket; | ||
122 | int i; | ||
123 | |||
124 | mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n", | ||
125 | dlm->name, dlm->node_num, dlm->key); | ||
126 | if (!dlm || !dlm->name) { | ||
127 | mlog(ML_ERROR, "dlm=%p\n", dlm); | ||
128 | return; | ||
129 | } | ||
130 | |||
131 | spin_lock(&dlm->spinlock); | ||
132 | for (i = 0; i < DLM_HASH_SIZE; i++) { | ||
133 | bucket = &(dlm->resources[i]); | ||
134 | list_for_each(iter, bucket) { | ||
135 | res = list_entry(iter, struct dlm_lock_resource, list); | ||
136 | dlm_print_one_lock_resource(res); | ||
137 | } | ||
138 | } | ||
139 | spin_unlock(&dlm->spinlock); | ||
140 | } | ||
141 | |||
142 | static const char *dlm_errnames[] = { | ||
143 | [DLM_NORMAL] = "DLM_NORMAL", | ||
144 | [DLM_GRANTED] = "DLM_GRANTED", | ||
145 | [DLM_DENIED] = "DLM_DENIED", | ||
146 | [DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS", | ||
147 | [DLM_WORKING] = "DLM_WORKING", | ||
148 | [DLM_BLOCKED] = "DLM_BLOCKED", | ||
149 | [DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN", | ||
150 | [DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD", | ||
151 | [DLM_SYSERR] = "DLM_SYSERR", | ||
152 | [DLM_NOSUPPORT] = "DLM_NOSUPPORT", | ||
153 | [DLM_CANCELGRANT] = "DLM_CANCELGRANT", | ||
154 | [DLM_IVLOCKID] = "DLM_IVLOCKID", | ||
155 | [DLM_SYNC] = "DLM_SYNC", | ||
156 | [DLM_BADTYPE] = "DLM_BADTYPE", | ||
157 | [DLM_BADRESOURCE] = "DLM_BADRESOURCE", | ||
158 | [DLM_MAXHANDLES] = "DLM_MAXHANDLES", | ||
159 | [DLM_NOCLINFO] = "DLM_NOCLINFO", | ||
160 | [DLM_NOLOCKMGR] = "DLM_NOLOCKMGR", | ||
161 | [DLM_NOPURGED] = "DLM_NOPURGED", | ||
162 | [DLM_BADARGS] = "DLM_BADARGS", | ||
163 | [DLM_VOID] = "DLM_VOID", | ||
164 | [DLM_NOTQUEUED] = "DLM_NOTQUEUED", | ||
165 | [DLM_IVBUFLEN] = "DLM_IVBUFLEN", | ||
166 | [DLM_CVTUNGRANT] = "DLM_CVTUNGRANT", | ||
167 | [DLM_BADPARAM] = "DLM_BADPARAM", | ||
168 | [DLM_VALNOTVALID] = "DLM_VALNOTVALID", | ||
169 | [DLM_REJECTED] = "DLM_REJECTED", | ||
170 | [DLM_ABORT] = "DLM_ABORT", | ||
171 | [DLM_CANCEL] = "DLM_CANCEL", | ||
172 | [DLM_IVRESHANDLE] = "DLM_IVRESHANDLE", | ||
173 | [DLM_DEADLOCK] = "DLM_DEADLOCK", | ||
174 | [DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS", | ||
175 | [DLM_FORWARD] = "DLM_FORWARD", | ||
176 | [DLM_TIMEOUT] = "DLM_TIMEOUT", | ||
177 | [DLM_IVGROUPID] = "DLM_IVGROUPID", | ||
178 | [DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT", | ||
179 | [DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH", | ||
180 | [DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION", | ||
181 | [DLM_NO_CONTROL_DEVICE] = "DLM_NO_CONTROL_DEVICE", | ||
182 | [DLM_RECOVERING] = "DLM_RECOVERING", | ||
183 | [DLM_MIGRATING] = "DLM_MIGRATING", | ||
184 | [DLM_MAXSTATS] = "DLM_MAXSTATS", | ||
185 | }; | ||
186 | |||
187 | static const char *dlm_errmsgs[] = { | ||
188 | [DLM_NORMAL] = "request in progress", | ||
189 | [DLM_GRANTED] = "request granted", | ||
190 | [DLM_DENIED] = "request denied", | ||
191 | [DLM_DENIED_NOLOCKS] = "request denied, out of system resources", | ||
192 | [DLM_WORKING] = "async request in progress", | ||
193 | [DLM_BLOCKED] = "lock request blocked", | ||
194 | [DLM_BLOCKED_ORPHAN] = "lock request blocked by an orphan lock", | ||
195 | [DLM_DENIED_GRACE_PERIOD] = "topological change in progress", | ||
196 | [DLM_SYSERR] = "system error", | ||
197 | [DLM_NOSUPPORT] = "unsupported", | ||
198 | [DLM_CANCELGRANT] = "can't cancel convert: already granted", | ||
199 | [DLM_IVLOCKID] = "bad lockid", | ||
200 | [DLM_SYNC] = "synchronous request granted", | ||
201 | [DLM_BADTYPE] = "bad resource type", | ||
202 | [DLM_BADRESOURCE] = "bad resource handle", | ||
203 | [DLM_MAXHANDLES] = "no more resource handles", | ||
204 | [DLM_NOCLINFO] = "can't contact cluster manager", | ||
205 | [DLM_NOLOCKMGR] = "can't contact lock manager", | ||
206 | [DLM_NOPURGED] = "can't contact purge daemon", | ||
207 | [DLM_BADARGS] = "bad api args", | ||
208 | [DLM_VOID] = "no status", | ||
209 | [DLM_NOTQUEUED] = "NOQUEUE was specified and request failed", | ||
210 | [DLM_IVBUFLEN] = "invalid resource name length", | ||
211 | [DLM_CVTUNGRANT] = "attempted to convert ungranted lock", | ||
212 | [DLM_BADPARAM] = "invalid lock mode specified", | ||
213 | [DLM_VALNOTVALID] = "value block has been invalidated", | ||
214 | [DLM_REJECTED] = "request rejected, unrecognized client", | ||
215 | [DLM_ABORT] = "blocked lock request cancelled", | ||
216 | [DLM_CANCEL] = "conversion request cancelled", | ||
217 | [DLM_IVRESHANDLE] = "invalid resource handle", | ||
218 | [DLM_DEADLOCK] = "deadlock recovery refused this request", | ||
219 | [DLM_DENIED_NOASTS] = "failed to allocate AST", | ||
220 | [DLM_FORWARD] = "request must wait for primary's response", | ||
221 | [DLM_TIMEOUT] = "timeout value for lock has expired", | ||
222 | [DLM_IVGROUPID] = "invalid group specification", | ||
223 | [DLM_VERS_CONFLICT] = "version conflicts prevent request handling", | ||
224 | [DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong", | ||
225 | [DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device", | ||
226 | [DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device", | ||
227 | [DLM_RECOVERING] = "lock resource being recovered", | ||
228 | [DLM_MIGRATING] = "lock resource being migrated", | ||
229 | [DLM_MAXSTATS] = "invalid error number", | ||
230 | }; | ||
231 | |||
232 | const char *dlm_errmsg(enum dlm_status err) | ||
233 | { | ||
234 | if (err >= DLM_MAXSTATS || err < 0) | ||
235 | return dlm_errmsgs[DLM_MAXSTATS]; | ||
236 | return dlm_errmsgs[err]; | ||
237 | } | ||
238 | EXPORT_SYMBOL_GPL(dlm_errmsg); | ||
239 | |||
240 | const char *dlm_errname(enum dlm_status err) | ||
241 | { | ||
242 | if (err >= DLM_MAXSTATS || err < 0) | ||
243 | return dlm_errnames[DLM_MAXSTATS]; | ||
244 | return dlm_errnames[err]; | ||
245 | } | ||
246 | EXPORT_SYMBOL_GPL(dlm_errname); | ||
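/*
 * Illustrative use of the two lookups above when reporting a failure
 * (similar in spirit to what the dlm_error() macro does):
 *
 *	mlog(ML_ERROR, "convert failed: %s (%s)\n",
 *	     dlm_errname(status), dlm_errmsg(status));
 */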
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h new file mode 100644 index 000000000000..6858510c3ccd --- /dev/null +++ b/fs/ocfs2/dlm/dlmdebug.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdebug.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 021110-1307, USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMDEBUG_H | ||
26 | #define DLMDEBUG_H | ||
27 | |||
28 | void dlm_dump_lock_resources(struct dlm_ctxt *dlm); | ||
29 | |||
30 | #endif | ||
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c new file mode 100644 index 000000000000..da3c22045f89 --- /dev/null +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -0,0 +1,1469 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdomain.c | ||
5 | * | ||
6 | * defines domain join / leave apis | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/utsname.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/err.h> | ||
36 | |||
37 | #include "cluster/heartbeat.h" | ||
38 | #include "cluster/nodemanager.h" | ||
39 | #include "cluster/tcp.h" | ||
40 | |||
41 | #include "dlmapi.h" | ||
42 | #include "dlmcommon.h" | ||
43 | |||
44 | #include "dlmdebug.h" | ||
45 | #include "dlmdomain.h" | ||
46 | |||
47 | #include "dlmver.h" | ||
48 | |||
49 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) | ||
50 | #include "cluster/masklog.h" | ||
51 | |||
52 | /* | ||
53 | * | ||
54 | * spinlock lock ordering: if multiple locks are needed, obey this ordering: | ||
55 | * dlm_domain_lock | ||
56 | * struct dlm_ctxt->spinlock | ||
57 | * struct dlm_lock_resource->spinlock | ||
58 | * struct dlm_ctxt->master_lock | ||
59 | * struct dlm_ctxt->ast_lock | ||
60 | * dlm_master_list_entry->spinlock | ||
61 | * dlm_lock->spinlock | ||
62 | * | ||
63 | */ | ||
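/*
 * For example (an illustrative sketch), a path that needs both the
 * dlm and a lockres lock must nest them top-down per the list above:
 *
 *	spin_lock(&dlm->spinlock);
 *	spin_lock(&res->spinlock);
 *	...
 *	spin_unlock(&res->spinlock);
 *	spin_unlock(&dlm->spinlock);
 */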
64 | |||
65 | spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED; | ||
66 | LIST_HEAD(dlm_domains); | ||
67 | static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); | ||
68 | |||
69 | #define DLM_DOMAIN_BACKOFF_MS 200 | ||
70 | |||
71 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data); | ||
72 | static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data); | ||
73 | static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data); | ||
74 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data); | ||
75 | |||
76 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); | ||
77 | |||
78 | void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) | ||
79 | { | ||
80 | list_del_init(&lockres->list); | ||
81 | dlm_lockres_put(lockres); | ||
82 | } | ||
83 | |||
84 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | ||
85 | struct dlm_lock_resource *res) | ||
86 | { | ||
87 | struct list_head *bucket; | ||
88 | struct qstr *q; | ||
89 | |||
90 | assert_spin_locked(&dlm->spinlock); | ||
91 | |||
92 | q = &res->lockname; | ||
93 | q->hash = full_name_hash(q->name, q->len); | ||
94 | bucket = &(dlm->resources[q->hash & DLM_HASH_MASK]); | ||
95 | |||
96 | /* get a reference for our hashtable */ | ||
97 | dlm_lockres_get(res); | ||
98 | |||
99 | list_add_tail(&res->list, bucket); | ||
100 | } | ||
101 | |||
102 | struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
103 | const char *name, | ||
104 | unsigned int len) | ||
105 | { | ||
106 | unsigned int hash; | ||
107 | struct list_head *iter; | ||
108 | struct dlm_lock_resource *tmpres = NULL; | ||
109 | struct list_head *bucket; | ||
110 | |||
111 | mlog_entry("%.*s\n", len, name); | ||
112 | |||
113 | assert_spin_locked(&dlm->spinlock); | ||
114 | |||
115 | hash = full_name_hash(name, len); | ||
116 | |||
117 | bucket = &(dlm->resources[hash & DLM_HASH_MASK]); | ||
118 | |||
119 | /* check for pre-existing lock */ | ||
120 | list_for_each(iter, bucket) { | ||
121 | tmpres = list_entry(iter, struct dlm_lock_resource, list); | ||
122 | if (tmpres->lockname.len == len && | ||
123 | memcmp(tmpres->lockname.name, name, len) == 0) { | ||
124 | dlm_lockres_get(tmpres); | ||
125 | break; | ||
126 | } | ||
127 | |||
128 | tmpres = NULL; | ||
129 | } | ||
130 | return tmpres; | ||
131 | } | ||
132 | |||
133 | struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
134 | const char *name, | ||
135 | unsigned int len) | ||
136 | { | ||
137 | struct dlm_lock_resource *res; | ||
138 | |||
139 | spin_lock(&dlm->spinlock); | ||
140 | res = __dlm_lookup_lockres(dlm, name, len); | ||
141 | spin_unlock(&dlm->spinlock); | ||
142 | return res; | ||
143 | } | ||
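/*
 * Both lookup variants return the lockres with an extra reference
 * held, so a successful caller must balance the call with
 * dlm_lockres_put(), e.g. (illustrative):
 *
 *	res = dlm_lookup_lockres(dlm, name, len);
 *	if (res) {
 *		... use res ...
 *		dlm_lockres_put(res);
 *	}
 */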
144 | |||
145 | static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) | ||
146 | { | ||
147 | struct dlm_ctxt *tmp = NULL; | ||
148 | struct list_head *iter; | ||
149 | |||
150 | assert_spin_locked(&dlm_domain_lock); | ||
151 | |||
152 | /* tmp->name here is always NULL terminated, | ||
153 | * but domain may not be! */ | ||
154 | list_for_each(iter, &dlm_domains) { | ||
155 | tmp = list_entry(iter, struct dlm_ctxt, list); | ||
156 | if (strlen(tmp->name) == len && | ||
157 | memcmp(tmp->name, domain, len)==0) | ||
158 | break; | ||
159 | tmp = NULL; | ||
160 | } | ||
161 | |||
162 | return tmp; | ||
163 | } | ||
164 | |||
165 | /* For null terminated domain strings ONLY */ | ||
166 | static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) | ||
167 | { | ||
168 | assert_spin_locked(&dlm_domain_lock); | ||
169 | |||
170 | return __dlm_lookup_domain_full(domain, strlen(domain)); | ||
171 | } | ||
172 | |||
173 | |||
174 | /* returns true on one of two conditions: | ||
175 | * 1) the domain does not exist | ||
176 | * 2) the domain exists and its state is "joined" */ | ||
177 | static int dlm_wait_on_domain_helper(const char *domain) | ||
178 | { | ||
179 | int ret = 0; | ||
180 | struct dlm_ctxt *tmp = NULL; | ||
181 | |||
182 | spin_lock(&dlm_domain_lock); | ||
183 | |||
184 | tmp = __dlm_lookup_domain(domain); | ||
185 | if (!tmp) | ||
186 | ret = 1; | ||
187 | else if (tmp->dlm_state == DLM_CTXT_JOINED) | ||
188 | ret = 1; | ||
189 | |||
190 | spin_unlock(&dlm_domain_lock); | ||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm) | ||
195 | { | ||
196 | if (dlm->resources) | ||
197 | free_page((unsigned long) dlm->resources); | ||
198 | |||
199 | if (dlm->name) | ||
200 | kfree(dlm->name); | ||
201 | |||
202 | kfree(dlm); | ||
203 | } | ||
204 | |||
205 | /* A little strange - this function will be called while holding | ||
206 | * dlm_domain_lock and is expected to be holding it on the way out. We | ||
207 | * will however drop and reacquire it multiple times */ | ||
208 | static void dlm_ctxt_release(struct kref *kref) | ||
209 | { | ||
210 | struct dlm_ctxt *dlm; | ||
211 | |||
212 | dlm = container_of(kref, struct dlm_ctxt, dlm_refs); | ||
213 | |||
214 | BUG_ON(dlm->num_joins); | ||
215 | BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED); | ||
216 | |||
217 | /* we may still be in the list if we hit an error during join. */ | ||
218 | list_del_init(&dlm->list); | ||
219 | |||
220 | spin_unlock(&dlm_domain_lock); | ||
221 | |||
222 | mlog(0, "freeing memory from domain %s\n", dlm->name); | ||
223 | |||
224 | wake_up(&dlm_domain_events); | ||
225 | |||
226 | dlm_free_ctxt_mem(dlm); | ||
227 | |||
228 | spin_lock(&dlm_domain_lock); | ||
229 | } | ||
230 | |||
231 | void dlm_put(struct dlm_ctxt *dlm) | ||
232 | { | ||
233 | spin_lock(&dlm_domain_lock); | ||
234 | kref_put(&dlm->dlm_refs, dlm_ctxt_release); | ||
235 | spin_unlock(&dlm_domain_lock); | ||
236 | } | ||
237 | |||
238 | static void __dlm_get(struct dlm_ctxt *dlm) | ||
239 | { | ||
240 | kref_get(&dlm->dlm_refs); | ||
241 | } | ||
242 | |||
243 | /* given a questionable reference to a dlm object, gets a reference if | ||
244 | * it can find it in the list, otherwise returns NULL in which case | ||
245 | * you shouldn't trust your pointer. */ | ||
246 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm) | ||
247 | { | ||
248 | struct list_head *iter; | ||
249 | struct dlm_ctxt *target = NULL; | ||
250 | |||
251 | spin_lock(&dlm_domain_lock); | ||
252 | |||
253 | list_for_each(iter, &dlm_domains) { | ||
254 | target = list_entry(iter, struct dlm_ctxt, list); | ||
255 | |||
256 | if (target == dlm) { | ||
257 | __dlm_get(target); | ||
258 | break; | ||
259 | } | ||
260 | |||
261 | target = NULL; | ||
262 | } | ||
263 | |||
264 | spin_unlock(&dlm_domain_lock); | ||
265 | |||
266 | return target; | ||
267 | } | ||
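/*
 * Illustrative handler pattern: validate the questionable context
 * with dlm_grab() before touching it, and balance with dlm_put()
 * (see dlm_exit_domain_handler() below):
 *
 *	if (!dlm_grab(dlm))
 *		return 0;
 *	... handle the message ...
 *	dlm_put(dlm);
 */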
268 | |||
269 | int dlm_domain_fully_joined(struct dlm_ctxt *dlm) | ||
270 | { | ||
271 | int ret; | ||
272 | |||
273 | spin_lock(&dlm_domain_lock); | ||
274 | ret = (dlm->dlm_state == DLM_CTXT_JOINED) || | ||
275 | (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN); | ||
276 | spin_unlock(&dlm_domain_lock); | ||
277 | |||
278 | return ret; | ||
279 | } | ||
280 | |||
281 | static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm) | ||
282 | { | ||
283 | dlm_unregister_domain_handlers(dlm); | ||
284 | dlm_complete_thread(dlm); | ||
285 | dlm_complete_recovery_thread(dlm); | ||
286 | |||
287 | /* We've left the domain. Now we can take ourselves out of the | ||
288 | * list and allow the kref stuff to help us free the | ||
289 | * memory. */ | ||
290 | spin_lock(&dlm_domain_lock); | ||
291 | list_del_init(&dlm->list); | ||
292 | spin_unlock(&dlm_domain_lock); | ||
293 | |||
294 | /* Wake up anyone waiting for us to remove this domain */ | ||
295 | wake_up(&dlm_domain_events); | ||
296 | } | ||
297 | |||
298 | static void dlm_migrate_all_locks(struct dlm_ctxt *dlm) | ||
299 | { | ||
300 | int i; | ||
301 | struct dlm_lock_resource *res; | ||
302 | |||
303 | mlog(0, "Migrating locks from domain %s\n", dlm->name); | ||
304 | restart: | ||
305 | spin_lock(&dlm->spinlock); | ||
306 | for (i = 0; i < DLM_HASH_SIZE; i++) { | ||
307 | while (!list_empty(&dlm->resources[i])) { | ||
308 | res = list_entry(dlm->resources[i].next, | ||
309 | struct dlm_lock_resource, list); | ||
310 | /* need reference when manually grabbing lockres */ | ||
311 | dlm_lockres_get(res); | ||
312 | /* this should unhash the lockres | ||
313 | * and exit with dlm->spinlock */ | ||
314 | mlog(0, "purging res=%p\n", res); | ||
315 | if (dlm_lockres_is_dirty(dlm, res)) { | ||
316 | /* HACK! this should absolutely go. | ||
317 | * need to figure out why some empty | ||
318 | * lockreses are still marked dirty */ | ||
319 | mlog(ML_ERROR, "lockres %.*s dirty!\n", | ||
320 | res->lockname.len, res->lockname.name); | ||
321 | |||
322 | spin_unlock(&dlm->spinlock); | ||
323 | dlm_kick_thread(dlm, res); | ||
324 | wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); | ||
325 | dlm_lockres_put(res); | ||
326 | goto restart; | ||
327 | } | ||
328 | dlm_purge_lockres(dlm, res); | ||
329 | dlm_lockres_put(res); | ||
330 | } | ||
331 | } | ||
332 | spin_unlock(&dlm->spinlock); | ||
333 | |||
334 | mlog(0, "DONE Migrating locks from domain %s\n", dlm->name); | ||
335 | } | ||
336 | |||
337 | static int dlm_no_joining_node(struct dlm_ctxt *dlm) | ||
338 | { | ||
339 | int ret; | ||
340 | |||
341 | spin_lock(&dlm->spinlock); | ||
342 | ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN; | ||
343 | spin_unlock(&dlm->spinlock); | ||
344 | |||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm) | ||
349 | { | ||
350 | /* Yikes, a double spinlock! I need domain_lock for the dlm | ||
351 | * state and the dlm spinlock for join state... Sorry! */ | ||
352 | again: | ||
353 | spin_lock(&dlm_domain_lock); | ||
354 | spin_lock(&dlm->spinlock); | ||
355 | |||
356 | if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
357 | mlog(0, "Node %d is joining, we wait on it.\n", | ||
358 | dlm->joining_node); | ||
359 | spin_unlock(&dlm->spinlock); | ||
360 | spin_unlock(&dlm_domain_lock); | ||
361 | |||
362 | wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm)); | ||
363 | goto again; | ||
364 | } | ||
365 | |||
366 | dlm->dlm_state = DLM_CTXT_LEAVING; | ||
367 | spin_unlock(&dlm->spinlock); | ||
368 | spin_unlock(&dlm_domain_lock); | ||
369 | } | ||
370 | |||
371 | static void __dlm_print_nodes(struct dlm_ctxt *dlm) | ||
372 | { | ||
373 | int node = -1; | ||
374 | |||
375 | assert_spin_locked(&dlm->spinlock); | ||
376 | |||
377 | mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name); | ||
378 | |||
379 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, | ||
380 | node + 1)) < O2NM_MAX_NODES) { | ||
381 | mlog(ML_NOTICE, " node %d\n", node); | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data) | ||
386 | { | ||
387 | struct dlm_ctxt *dlm = data; | ||
388 | unsigned int node; | ||
389 | struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; | ||
390 | |||
391 | mlog_entry("%p %u %p", msg, len, data); | ||
392 | |||
393 | if (!dlm_grab(dlm)) | ||
394 | return 0; | ||
395 | |||
396 | node = exit_msg->node_idx; | ||
397 | |||
398 | mlog(0, "Node %u leaves domain %s\n", node, dlm->name); | ||
399 | |||
400 | spin_lock(&dlm->spinlock); | ||
401 | clear_bit(node, dlm->domain_map); | ||
402 | __dlm_print_nodes(dlm); | ||
403 | |||
404 | /* notify anything attached to the heartbeat events */ | ||
405 | dlm_hb_event_notify_attached(dlm, node, 0); | ||
406 | |||
407 | spin_unlock(&dlm->spinlock); | ||
408 | |||
409 | dlm_put(dlm); | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, | ||
415 | unsigned int node) | ||
416 | { | ||
417 | int status; | ||
418 | struct dlm_exit_domain leave_msg; | ||
419 | |||
420 | mlog(0, "Asking node %u if we can leave the domain %s me = %u\n", | ||
421 | node, dlm->name, dlm->node_num); | ||
422 | |||
423 | memset(&leave_msg, 0, sizeof(leave_msg)); | ||
424 | leave_msg.node_idx = dlm->node_num; | ||
425 | |||
426 | status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key, | ||
427 | &leave_msg, sizeof(leave_msg), node, | ||
428 | NULL); | ||
429 | |||
430 | mlog(0, "status return %d from o2net_send_message\n", status); | ||
431 | |||
432 | return status; | ||
433 | } | ||
434 | |||
435 | |||
436 | static void dlm_leave_domain(struct dlm_ctxt *dlm) | ||
437 | { | ||
438 | int node, clear_node, status; | ||
439 | |||
440 | /* At this point we've migrated away all our locks and won't | ||
441 | * accept mastership of new ones. The dlm is responsible for | ||
442 | * almost nothing now. We make sure not to confuse any joining | ||
443 | * nodes and then commence the shutdown procedure. */ | ||
444 | |||
445 | spin_lock(&dlm->spinlock); | ||
446 | /* Clear ourselves from the domain map */ | ||
447 | clear_bit(dlm->node_num, dlm->domain_map); | ||
448 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, | ||
449 | 0)) < O2NM_MAX_NODES) { | ||
450 | /* Drop the dlm spinlock. This is safe wrt the domain_map. | ||
451 | * -nodes cannot be added now as the | ||
452 | * query_join_handlers knows to respond with OK_NO_MAP | ||
453 | * -we catch the right network errors if a node is | ||
454 | * removed from the map while we're sending him the | ||
455 | * exit message. */ | ||
456 | spin_unlock(&dlm->spinlock); | ||
457 | |||
458 | clear_node = 1; | ||
459 | |||
460 | status = dlm_send_one_domain_exit(dlm, node); | ||
461 | if (status < 0 && | ||
462 | status != -ENOPROTOOPT && | ||
463 | status != -ENOTCONN) { | ||
464 | mlog(ML_NOTICE, "Error %d sending domain exit message " | ||
465 | "to node %d\n", status, node); | ||
466 | |||
467 | /* Not sure what to do here but let's sleep for | ||
468 | * a bit in case this was a transient | ||
469 | * error... */ | ||
470 | msleep(DLM_DOMAIN_BACKOFF_MS); | ||
471 | clear_node = 0; | ||
472 | } | ||
473 | |||
474 | spin_lock(&dlm->spinlock); | ||
475 | /* If we're not clearing the node bit then we intend | ||
476 | * to loop back around to try again. */ | ||
477 | if (clear_node) | ||
478 | clear_bit(node, dlm->domain_map); | ||
479 | } | ||
480 | spin_unlock(&dlm->spinlock); | ||
481 | } | ||
482 | |||
483 | int dlm_joined(struct dlm_ctxt *dlm) | ||
484 | { | ||
485 | int ret = 0; | ||
486 | |||
487 | spin_lock(&dlm_domain_lock); | ||
488 | |||
489 | if (dlm->dlm_state == DLM_CTXT_JOINED) | ||
490 | ret = 1; | ||
491 | |||
492 | spin_unlock(&dlm_domain_lock); | ||
493 | |||
494 | return ret; | ||
495 | } | ||
496 | |||
497 | int dlm_shutting_down(struct dlm_ctxt *dlm) | ||
498 | { | ||
499 | int ret = 0; | ||
500 | |||
501 | spin_lock(&dlm_domain_lock); | ||
502 | |||
503 | if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) | ||
504 | ret = 1; | ||
505 | |||
506 | spin_unlock(&dlm_domain_lock); | ||
507 | |||
508 | return ret; | ||
509 | } | ||
510 | |||
511 | void dlm_unregister_domain(struct dlm_ctxt *dlm) | ||
512 | { | ||
513 | int leave = 0; | ||
514 | |||
515 | spin_lock(&dlm_domain_lock); | ||
516 | BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED); | ||
517 | BUG_ON(!dlm->num_joins); | ||
518 | |||
519 | dlm->num_joins--; | ||
520 | if (!dlm->num_joins) { | ||
521 | /* We mark it "in shutdown" now so new register | ||
522 | * requests wait until we've completely left the | ||
523 | * domain. Don't use DLM_CTXT_LEAVING yet as we still | ||
524 | * want new domain joins to communicate with us at | ||
525 | * least until we've completed migration of our | ||
526 | * resources. */ | ||
527 | dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN; | ||
528 | leave = 1; | ||
529 | } | ||
530 | spin_unlock(&dlm_domain_lock); | ||
531 | |||
532 | if (leave) { | ||
533 | mlog(0, "shutting down domain %s\n", dlm->name); | ||
534 | |||
535 | /* We changed dlm state, notify the thread */ | ||
536 | dlm_kick_thread(dlm, NULL); | ||
537 | |||
538 | dlm_migrate_all_locks(dlm); | ||
539 | dlm_mark_domain_leaving(dlm); | ||
540 | dlm_leave_domain(dlm); | ||
541 | dlm_complete_dlm_shutdown(dlm); | ||
542 | } | ||
543 | dlm_put(dlm); | ||
544 | } | ||
545 | EXPORT_SYMBOL_GPL(dlm_unregister_domain); | ||
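/*
 * Lifecycle note: each successful dlm_register_domain() call is
 * expected to be balanced by one dlm_unregister_domain() call; actual
 * shutdown only begins once num_joins drops to zero above.
 */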
546 | |||
547 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data) | ||
548 | { | ||
549 | struct dlm_query_join_request *query; | ||
550 | enum dlm_query_join_response response; | ||
551 | struct dlm_ctxt *dlm = NULL; | ||
552 | |||
553 | query = (struct dlm_query_join_request *) msg->buf; | ||
554 | |||
555 | mlog(0, "node %u wants to join domain %s\n", query->node_idx, | ||
556 | query->domain); | ||
557 | |||
558 | /* | ||
559 | * If heartbeat doesn't consider the node live, tell it | ||
560 | * to back off and try again. This gives heartbeat a chance | ||
561 | * to catch up. | ||
562 | */ | ||
563 | if (!o2hb_check_node_heartbeating(query->node_idx)) { | ||
564 | mlog(0, "node %u is not in our live map yet\n", | ||
565 | query->node_idx); | ||
566 | |||
567 | response = JOIN_DISALLOW; | ||
568 | goto respond; | ||
569 | } | ||
570 | |||
571 | response = JOIN_OK_NO_MAP; | ||
572 | |||
573 | spin_lock(&dlm_domain_lock); | ||
574 | dlm = __dlm_lookup_domain_full(query->domain, query->name_len); | ||
575 | /* Once the dlm ctxt is marked as leaving then we don't want | ||
576 | * to be put in someone's domain map. */ | ||
577 | if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { | ||
578 | spin_lock(&dlm->spinlock); | ||
579 | |||
580 | if (dlm->dlm_state == DLM_CTXT_NEW && | ||
581 | dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
582 | /* If this is a brand new context and we | ||
583 | * haven't started our join process yet, then | ||
584 | * the other node won the race. */ | ||
585 | response = JOIN_OK_NO_MAP; | ||
586 | } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
587 | /* Disallow parallel joins. */ | ||
588 | response = JOIN_DISALLOW; | ||
589 | } else { | ||
590 | /* Alright, we're fully a part of this domain, | ||
591 | * so we keep some state as to who's joining | ||
592 | * and indicate to him what needs to be fixed | ||
593 | * up. */ | ||
594 | response = JOIN_OK; | ||
595 | __dlm_set_joining_node(dlm, query->node_idx); | ||
596 | } | ||
597 | |||
598 | spin_unlock(&dlm->spinlock); | ||
599 | } | ||
600 | spin_unlock(&dlm_domain_lock); | ||
601 | |||
602 | respond: | ||
603 | mlog(0, "We respond with %u\n", response); | ||
604 | |||
605 | return response; | ||
606 | } | ||
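
The handler above boils down to a short decision tree: a node heartbeat hasn't seen is told to back off, an unknown (or still brand-new, or leaving) domain yields JOIN_OK_NO_MAP, a join already in flight is refused, and only a fully joined domain with no competing joiner records the node and answers JOIN_OK. The standalone C sketch below restates that tree; the parameter names and enum values are illustrative stand-ins for the heartbeat check, domain lookup, and state tests performed under the locks above.

    #include <stdio.h>

    enum join_resp { JOIN_DISALLOW, JOIN_OK, JOIN_OK_NO_MAP };

    /* Condensed restatement of dlm_query_join_handler(): node_live
     * stands in for the o2hb check, domain_known for a lookup that
     * found a non-LEAVING ctxt, ctxt_new for DLM_CTXT_NEW with no
     * joiner yet, joiner_busy for an already-set joining_node. */
    static enum join_resp query_join(int node_live, int domain_known,
                                     int ctxt_new, int joiner_busy)
    {
            if (!node_live)
                    return JOIN_DISALLOW;   /* let heartbeat catch up */
            if (!domain_known || ctxt_new)
                    return JOIN_OK_NO_MAP;  /* no map state to share yet */
            if (joiner_busy)
                    return JOIN_DISALLOW;   /* no parallel joins */
            return JOIN_OK;                 /* record joiner, fix up state */
    }

    int main(void)
    {
            printf("%d\n", query_join(1, 1, 0, 0)); /* JOIN_OK */
            printf("%d\n", query_join(0, 1, 0, 0)); /* JOIN_DISALLOW */
            return 0;
    }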
607 | |||
608 | static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data) | ||
609 | { | ||
610 | struct dlm_assert_joined *assert; | ||
611 | struct dlm_ctxt *dlm = NULL; | ||
612 | |||
613 | assert = (struct dlm_assert_joined *) msg->buf; | ||
614 | |||
615 | mlog(0, "node %u asserts join on domain %s\n", assert->node_idx, | ||
616 | assert->domain); | ||
617 | |||
618 | spin_lock(&dlm_domain_lock); | ||
619 | dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); | ||
620 | /* XXX should we consider no dlm ctxt an error? */ | ||
621 | if (dlm) { | ||
622 | spin_lock(&dlm->spinlock); | ||
623 | |||
624 | /* Alright, this node has officially joined our | ||
625 | * domain. Set him in the map and clean up our | ||
626 | * leftover join state. */ | ||
627 | BUG_ON(dlm->joining_node != assert->node_idx); | ||
628 | set_bit(assert->node_idx, dlm->domain_map); | ||
629 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
630 | |||
631 | __dlm_print_nodes(dlm); | ||
632 | |||
633 | /* notify anything attached to the heartbeat events */ | ||
634 | dlm_hb_event_notify_attached(dlm, assert->node_idx, 1); | ||
635 | |||
636 | spin_unlock(&dlm->spinlock); | ||
637 | } | ||
638 | spin_unlock(&dlm_domain_lock); | ||
639 | |||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data) | ||
644 | { | ||
645 | struct dlm_cancel_join *cancel; | ||
646 | struct dlm_ctxt *dlm = NULL; | ||
647 | |||
648 | cancel = (struct dlm_cancel_join *) msg->buf; | ||
649 | |||
650 | mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx, | ||
651 | cancel->domain); | ||
652 | |||
653 | spin_lock(&dlm_domain_lock); | ||
654 | dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len); | ||
655 | |||
656 | if (dlm) { | ||
657 | spin_lock(&dlm->spinlock); | ||
658 | |||
659 | /* Yikes, this guy wants to cancel his join. No | ||
660 | * problem, we simply clean up our join state. */ | ||
661 | BUG_ON(dlm->joining_node != cancel->node_idx); | ||
662 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
663 | |||
664 | spin_unlock(&dlm->spinlock); | ||
665 | } | ||
666 | spin_unlock(&dlm_domain_lock); | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm, | ||
672 | unsigned int node) | ||
673 | { | ||
674 | int status; | ||
675 | struct dlm_cancel_join cancel_msg; | ||
676 | |||
677 | memset(&cancel_msg, 0, sizeof(cancel_msg)); | ||
678 | cancel_msg.node_idx = dlm->node_num; | ||
679 | cancel_msg.name_len = strlen(dlm->name); | ||
680 | memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len); | ||
681 | |||
682 | status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, | ||
683 | &cancel_msg, sizeof(cancel_msg), node, | ||
684 | NULL); | ||
685 | if (status < 0) { | ||
686 | mlog_errno(status); | ||
687 | goto bail; | ||
688 | } | ||
689 | |||
690 | bail: | ||
691 | return status; | ||
692 | } | ||
693 | |||
694 | /* map_size should be in bytes. */ | ||
695 | static int dlm_send_join_cancels(struct dlm_ctxt *dlm, | ||
696 | unsigned long *node_map, | ||
697 | unsigned int map_size) | ||
698 | { | ||
699 | int status, tmpstat; | ||
700 | unsigned int node; | ||
701 | |||
702 | if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) * | ||
703 | sizeof(unsigned long))) { | ||
704 | mlog(ML_ERROR, | ||
705 | "map_size %u != expected %zu bytes\n", map_size, | ||
706 | BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)); | ||
707 | return -EINVAL; | ||
708 | } | ||
709 | |||
710 | status = 0; | ||
711 | node = -1; | ||
712 | while ((node = find_next_bit(node_map, O2NM_MAX_NODES, | ||
713 | node + 1)) < O2NM_MAX_NODES) { | ||
714 | if (node == dlm->node_num) | ||
715 | continue; | ||
716 | |||
717 | tmpstat = dlm_send_one_join_cancel(dlm, node); | ||
718 | if (tmpstat) { | ||
719 | mlog(ML_ERROR, "Error return %d cancelling join on " | ||
720 | "node %d\n", tmpstat, node); | ||
721 | if (!status) | ||
722 | status = tmpstat; | ||
723 | } | ||
724 | } | ||
725 | |||
726 | if (status) | ||
727 | mlog_errno(status); | ||
728 | return status; | ||
729 | } | ||
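
Both this function and dlm_send_join_asserts() below walk the node bitmap with the "node = -1; while ((node = find_next_bit(...)) < max)" idiom, visiting exactly the set bits. A runnable userspace equivalent, with a minimal stand-in for the kernel's find_next_bit(), makes the pattern concrete:

    #include <stdio.h>

    #define MAX_NODES 255
    #define BPL (8 * sizeof(unsigned long))

    /* Minimal stand-in for the kernel's find_next_bit(): return the
     * index of the next set bit at or after 'start', or 'size' when
     * there is none. */
    static unsigned int find_next_bit(const unsigned long *map,
                                      unsigned int size, unsigned int start)
    {
            unsigned int i;

            for (i = start; i < size; i++)
                    if (map[i / BPL] & (1UL << (i % BPL)))
                            return i;
            return size;
    }

    int main(void)
    {
            unsigned long node_map[(MAX_NODES + BPL - 1) / BPL] = { 0 };
            unsigned int node;

            node_map[3 / BPL] |= 1UL << (3 % BPL);      /* nodes 3 and 70 */
            node_map[70 / BPL] |= 1UL << (70 % BPL);

            /* same loop shape as dlm_send_join_cancels() above */
            node = -1;
            while ((node = find_next_bit(node_map, MAX_NODES,
                                         node + 1)) < MAX_NODES)
                    printf("visiting node %u\n", node);
            return 0;
    }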
730 | |||
731 | static int dlm_request_join(struct dlm_ctxt *dlm, | ||
732 | int node, | ||
733 | enum dlm_query_join_response *response) | ||
734 | { | ||
735 | int status, retval; | ||
736 | struct dlm_query_join_request join_msg; | ||
737 | |||
738 | mlog(0, "querying node %d\n", node); | ||
739 | |||
740 | memset(&join_msg, 0, sizeof(join_msg)); | ||
741 | join_msg.node_idx = dlm->node_num; | ||
742 | join_msg.name_len = strlen(dlm->name); | ||
743 | memcpy(join_msg.domain, dlm->name, join_msg.name_len); | ||
744 | |||
745 | status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, | ||
746 | sizeof(join_msg), node, &retval); | ||
747 | if (status < 0 && status != -ENOPROTOOPT) { | ||
748 | mlog_errno(status); | ||
749 | goto bail; | ||
750 | } | ||
751 | |||
752 | /* -ENOPROTOOPT from the net code means the other side isn't | ||
753 | listening for our message type -- that's fine, it means | ||
754 | his dlm isn't up, so we can consider him a 'yes' but not | ||
755 | joined into the domain. */ | ||
756 | if (status == -ENOPROTOOPT) { | ||
757 | status = 0; | ||
758 | *response = JOIN_OK_NO_MAP; | ||
759 | } else if (retval == JOIN_DISALLOW || | ||
760 | retval == JOIN_OK || | ||
761 | retval == JOIN_OK_NO_MAP) { | ||
762 | *response = retval; | ||
763 | } else { | ||
764 | status = -EINVAL; | ||
765 | mlog(ML_ERROR, "invalid response %d from node %u\n", retval, | ||
766 | node); | ||
767 | } | ||
768 | |||
769 | mlog(0, "status %d, node %d response is %d\n", status, node, | ||
770 | *response); | ||
771 | |||
772 | bail: | ||
773 | return status; | ||
774 | } | ||
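
The result handling above folds two channels into one: the o2net transport status and the remote handler's return value. -ENOPROTOOPT is the one special transport error, meaning the peer has no handler registered for this message type, so its dlm is simply not up and counts as a "yes" without a domain map. A standalone sketch of the same mapping (enum values and names illustrative):

    #include <errno.h>
    #include <stdio.h>

    enum join_resp { JOIN_DISALLOW, JOIN_OK, JOIN_OK_NO_MAP };

    /* Restates dlm_request_join()'s classification: net_status is the
     * o2net send result, retval the remote handler's return value. */
    static int classify_join_reply(int net_status, int retval,
                                   enum join_resp *response)
    {
            if (net_status == -ENOPROTOOPT) {
                    *response = JOIN_OK_NO_MAP; /* peer's dlm isn't up */
                    return 0;
            }
            if (net_status < 0)
                    return net_status;          /* real network error */
            if (retval == JOIN_DISALLOW || retval == JOIN_OK ||
                retval == JOIN_OK_NO_MAP) {
                    *response = retval;
                    return 0;
            }
            return -EINVAL;                     /* garbage off the wire */
    }

    int main(void)
    {
            enum join_resp r;

            printf("%d\n", classify_join_reply(-ENOPROTOOPT, 0, &r)); /* 0 */
            printf("%d\n", classify_join_reply(0, JOIN_OK, &r));      /* 0 */
            return 0;
    }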
775 | |||
776 | static int dlm_send_one_join_assert(struct dlm_ctxt *dlm, | ||
777 | unsigned int node) | ||
778 | { | ||
779 | int status; | ||
780 | struct dlm_assert_joined assert_msg; | ||
781 | |||
782 | mlog(0, "Sending join assert to node %u\n", node); | ||
783 | |||
784 | memset(&assert_msg, 0, sizeof(assert_msg)); | ||
785 | assert_msg.node_idx = dlm->node_num; | ||
786 | assert_msg.name_len = strlen(dlm->name); | ||
787 | memcpy(assert_msg.domain, dlm->name, assert_msg.name_len); | ||
788 | |||
789 | status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, | ||
790 | &assert_msg, sizeof(assert_msg), node, | ||
791 | NULL); | ||
792 | if (status < 0) | ||
793 | mlog_errno(status); | ||
794 | |||
795 | return status; | ||
796 | } | ||
797 | |||
798 | static void dlm_send_join_asserts(struct dlm_ctxt *dlm, | ||
799 | unsigned long *node_map) | ||
800 | { | ||
801 | int status, node, live; | ||
802 | |||
803 | status = 0; | ||
804 | node = -1; | ||
805 | while ((node = find_next_bit(node_map, O2NM_MAX_NODES, | ||
806 | node + 1)) < O2NM_MAX_NODES) { | ||
807 | if (node == dlm->node_num) | ||
808 | continue; | ||
809 | |||
810 | do { | ||
811 | /* It is very important that this message be | ||
812 | * received so we spin until either the node | ||
813 | * has died or it gets the message. */ | ||
814 | status = dlm_send_one_join_assert(dlm, node); | ||
815 | |||
816 | spin_lock(&dlm->spinlock); | ||
817 | live = test_bit(node, dlm->live_nodes_map); | ||
818 | spin_unlock(&dlm->spinlock); | ||
819 | |||
820 | if (status) { | ||
821 | mlog(ML_ERROR, "Error return %d asserting " | ||
822 | "join on node %d\n", status, node); | ||
823 | |||
824 | /* give us some time between errors... */ | ||
825 | if (live) | ||
826 | msleep(DLM_DOMAIN_BACKOFF_MS); | ||
827 | } | ||
828 | } while (status && live); | ||
829 | } | ||
830 | } | ||
831 | |||
832 | struct domain_join_ctxt { | ||
833 | unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
834 | unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
835 | }; | ||
836 | |||
837 | static int dlm_should_restart_join(struct dlm_ctxt *dlm, | ||
838 | struct domain_join_ctxt *ctxt, | ||
839 | enum dlm_query_join_response response) | ||
840 | { | ||
841 | int ret; | ||
842 | |||
843 | if (response == JOIN_DISALLOW) { | ||
844 | mlog(0, "Latest response of disallow -- should restart\n"); | ||
845 | return 1; | ||
846 | } | ||
847 | |||
848 | spin_lock(&dlm->spinlock); | ||
849 | /* For now, we restart the process if the node maps have | ||
850 | * changed at all */ | ||
851 | ret = memcmp(ctxt->live_map, dlm->live_nodes_map, | ||
852 | sizeof(dlm->live_nodes_map)); | ||
853 | spin_unlock(&dlm->spinlock); | ||
854 | |||
855 | if (ret) | ||
856 | mlog(0, "Node maps changed -- should restart\n"); | ||
857 | |||
858 | return ret; | ||
859 | } | ||
860 | |||
861 | static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | ||
862 | { | ||
863 | int status = 0, tmpstat, node; | ||
864 | struct domain_join_ctxt *ctxt; | ||
865 | enum dlm_query_join_response response; | ||
866 | |||
867 | mlog_entry("%p", dlm); | ||
868 | |||
869 | ctxt = kcalloc(1, sizeof(*ctxt), GFP_KERNEL); | ||
870 | if (!ctxt) { | ||
871 | status = -ENOMEM; | ||
872 | mlog_errno(status); | ||
873 | goto bail; | ||
874 | } | ||
875 | |||
876 | /* group sem locking should work for us here -- we're already | ||
877 | * registered for heartbeat events so filling this should be | ||
878 | * atomic wrt getting those handlers called. */ | ||
879 | o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map)); | ||
880 | |||
881 | spin_lock(&dlm->spinlock); | ||
882 | memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map)); | ||
883 | |||
884 | __dlm_set_joining_node(dlm, dlm->node_num); | ||
885 | |||
886 | spin_unlock(&dlm->spinlock); | ||
887 | |||
888 | node = -1; | ||
889 | while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, | ||
890 | node + 1)) < O2NM_MAX_NODES) { | ||
891 | if (node == dlm->node_num) | ||
892 | continue; | ||
893 | |||
894 | status = dlm_request_join(dlm, node, &response); | ||
895 | if (status < 0) { | ||
896 | mlog_errno(status); | ||
897 | goto bail; | ||
898 | } | ||
899 | |||
900 | /* Ok, either we got a response or the node doesn't have a | ||
901 | * dlm up. */ | ||
902 | if (response == JOIN_OK) | ||
903 | set_bit(node, ctxt->yes_resp_map); | ||
904 | |||
905 | if (dlm_should_restart_join(dlm, ctxt, response)) { | ||
906 | status = -EAGAIN; | ||
907 | goto bail; | ||
908 | } | ||
909 | } | ||
910 | |||
911 | mlog(0, "Yay, done querying nodes!\n"); | ||
912 | |||
913 | /* Yay, everyone agrees we can join the domain. My domain | ||
914 | * comprises all nodes who were put in the | ||
915 | * yes_resp_map. Copy that into our domain map and send a join | ||
916 | * assert message to clean up everyone else's state. */ | ||
917 | spin_lock(&dlm->spinlock); | ||
918 | memcpy(dlm->domain_map, ctxt->yes_resp_map, | ||
919 | sizeof(ctxt->yes_resp_map)); | ||
920 | set_bit(dlm->node_num, dlm->domain_map); | ||
921 | spin_unlock(&dlm->spinlock); | ||
922 | |||
923 | dlm_send_join_asserts(dlm, ctxt->yes_resp_map); | ||
924 | |||
925 | /* Joined state *must* be set before the joining node | ||
926 | * information, otherwise the query_join handler may read no | ||
927 | * current joiner but a state of NEW and tell joining nodes | ||
928 | * we're not in the domain. */ | ||
929 | spin_lock(&dlm_domain_lock); | ||
930 | dlm->dlm_state = DLM_CTXT_JOINED; | ||
931 | dlm->num_joins++; | ||
932 | spin_unlock(&dlm_domain_lock); | ||
933 | |||
934 | bail: | ||
935 | spin_lock(&dlm->spinlock); | ||
936 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
937 | if (!status) | ||
938 | __dlm_print_nodes(dlm); | ||
939 | spin_unlock(&dlm->spinlock); | ||
940 | |||
941 | if (ctxt) { | ||
942 | /* Do we need to send a cancel message to any nodes? */ | ||
943 | if (status < 0) { | ||
944 | tmpstat = dlm_send_join_cancels(dlm, | ||
945 | ctxt->yes_resp_map, | ||
946 | sizeof(ctxt->yes_resp_map)); | ||
947 | if (tmpstat < 0) | ||
948 | mlog_errno(tmpstat); | ||
949 | } | ||
950 | kfree(ctxt); | ||
951 | } | ||
952 | |||
953 | mlog(0, "returning %d\n", status); | ||
954 | return status; | ||
955 | } | ||
956 | |||
957 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) | ||
958 | { | ||
959 | o2hb_unregister_callback(&dlm->dlm_hb_up); | ||
960 | o2hb_unregister_callback(&dlm->dlm_hb_down); | ||
961 | o2net_unregister_handler_list(&dlm->dlm_domain_handlers); | ||
962 | } | ||
963 | |||
964 | static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) | ||
965 | { | ||
966 | int status; | ||
967 | |||
968 | mlog(0, "registering handlers.\n"); | ||
969 | |||
970 | o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, | ||
971 | dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); | ||
972 | status = o2hb_register_callback(&dlm->dlm_hb_down); | ||
973 | if (status) | ||
974 | goto bail; | ||
975 | |||
976 | o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, | ||
977 | dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); | ||
978 | status = o2hb_register_callback(&dlm->dlm_hb_up); | ||
979 | if (status) | ||
980 | goto bail; | ||
981 | |||
982 | status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key, | ||
983 | sizeof(struct dlm_master_request), | ||
984 | dlm_master_request_handler, | ||
985 | dlm, &dlm->dlm_domain_handlers); | ||
986 | if (status) | ||
987 | goto bail; | ||
988 | |||
989 | status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key, | ||
990 | sizeof(struct dlm_assert_master), | ||
991 | dlm_assert_master_handler, | ||
992 | dlm, &dlm->dlm_domain_handlers); | ||
993 | if (status) | ||
994 | goto bail; | ||
995 | |||
996 | status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key, | ||
997 | sizeof(struct dlm_create_lock), | ||
998 | dlm_create_lock_handler, | ||
999 | dlm, &dlm->dlm_domain_handlers); | ||
1000 | if (status) | ||
1001 | goto bail; | ||
1002 | |||
1003 | status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key, | ||
1004 | DLM_CONVERT_LOCK_MAX_LEN, | ||
1005 | dlm_convert_lock_handler, | ||
1006 | dlm, &dlm->dlm_domain_handlers); | ||
1007 | if (status) | ||
1008 | goto bail; | ||
1009 | |||
1010 | status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key, | ||
1011 | DLM_UNLOCK_LOCK_MAX_LEN, | ||
1012 | dlm_unlock_lock_handler, | ||
1013 | dlm, &dlm->dlm_domain_handlers); | ||
1014 | if (status) | ||
1015 | goto bail; | ||
1016 | |||
1017 | status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key, | ||
1018 | DLM_PROXY_AST_MAX_LEN, | ||
1019 | dlm_proxy_ast_handler, | ||
1020 | dlm, &dlm->dlm_domain_handlers); | ||
1021 | if (status) | ||
1022 | goto bail; | ||
1023 | |||
1024 | status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key, | ||
1025 | sizeof(struct dlm_exit_domain), | ||
1026 | dlm_exit_domain_handler, | ||
1027 | dlm, &dlm->dlm_domain_handlers); | ||
1028 | if (status) | ||
1029 | goto bail; | ||
1030 | |||
1031 | status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key, | ||
1032 | sizeof(struct dlm_migrate_request), | ||
1033 | dlm_migrate_request_handler, | ||
1034 | dlm, &dlm->dlm_domain_handlers); | ||
1035 | if (status) | ||
1036 | goto bail; | ||
1037 | |||
1038 | status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key, | ||
1039 | DLM_MIG_LOCKRES_MAX_LEN, | ||
1040 | dlm_mig_lockres_handler, | ||
1041 | dlm, &dlm->dlm_domain_handlers); | ||
1042 | if (status) | ||
1043 | goto bail; | ||
1044 | |||
1045 | status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key, | ||
1046 | sizeof(struct dlm_master_requery), | ||
1047 | dlm_master_requery_handler, | ||
1048 | dlm, &dlm->dlm_domain_handlers); | ||
1049 | if (status) | ||
1050 | goto bail; | ||
1051 | |||
1052 | status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key, | ||
1053 | sizeof(struct dlm_lock_request), | ||
1054 | dlm_request_all_locks_handler, | ||
1055 | dlm, &dlm->dlm_domain_handlers); | ||
1056 | if (status) | ||
1057 | goto bail; | ||
1058 | |||
1059 | status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key, | ||
1060 | sizeof(struct dlm_reco_data_done), | ||
1061 | dlm_reco_data_done_handler, | ||
1062 | dlm, &dlm->dlm_domain_handlers); | ||
1063 | if (status) | ||
1064 | goto bail; | ||
1065 | |||
1066 | status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key, | ||
1067 | sizeof(struct dlm_begin_reco), | ||
1068 | dlm_begin_reco_handler, | ||
1069 | dlm, &dlm->dlm_domain_handlers); | ||
1070 | if (status) | ||
1071 | goto bail; | ||
1072 | |||
1073 | status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key, | ||
1074 | sizeof(struct dlm_finalize_reco), | ||
1075 | dlm_finalize_reco_handler, | ||
1076 | dlm, &dlm->dlm_domain_handlers); | ||
1077 | if (status) | ||
1078 | goto bail; | ||
1079 | |||
1080 | bail: | ||
1081 | if (status) | ||
1082 | dlm_unregister_domain_handlers(dlm); | ||
1083 | |||
1084 | return status; | ||
1085 | } | ||
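
The fourteen o2net_register_handler() calls above repeat one pattern with only the message type, payload size, and handler varying. A table-driven form is a natural refactor; the sketch below is hypothetical (not the committed code) and covers only the net handlers, not the two heartbeat callbacks. It assumes the surrounding dlmdomain.c context, with o2net_msg_handler_func being the handler typedef from cluster/tcp.h, and the two sample entries reusing message types and sizes from the calls they would replace.

    /* hypothetical table-driven registration sketch */
    struct dlm_handler_desc {
            u32 msg_type;
            u32 max_len;
            o2net_msg_handler_func *func;
    };

    static const struct dlm_handler_desc dlm_domain_handler_table[] = {
            { DLM_MASTER_REQUEST_MSG, sizeof(struct dlm_master_request),
              dlm_master_request_handler },
            { DLM_ASSERT_MASTER_MSG, sizeof(struct dlm_assert_master),
              dlm_assert_master_handler },
            /* ... remaining twelve entries elided ... */
    };

    static int dlm_register_domain_handlers_table(struct dlm_ctxt *dlm)
    {
            int i, status = 0;

            for (i = 0; i < ARRAY_SIZE(dlm_domain_handler_table); i++) {
                    const struct dlm_handler_desc *d =
                            &dlm_domain_handler_table[i];

                    status = o2net_register_handler(d->msg_type, dlm->key,
                                                    d->max_len, d->func, dlm,
                                                    &dlm->dlm_domain_handlers);
                    if (status) {
                            dlm_unregister_domain_handlers(dlm);
                            break;
                    }
            }
            return status;
    }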
1086 | |||
1087 | static int dlm_join_domain(struct dlm_ctxt *dlm) | ||
1088 | { | ||
1089 | int status; | ||
1090 | |||
1091 | BUG_ON(!dlm); | ||
1092 | |||
1093 | mlog(0, "Join domain %s\n", dlm->name); | ||
1094 | |||
1095 | status = dlm_register_domain_handlers(dlm); | ||
1096 | if (status) { | ||
1097 | mlog_errno(status); | ||
1098 | goto bail; | ||
1099 | } | ||
1100 | |||
1101 | status = dlm_launch_thread(dlm); | ||
1102 | if (status < 0) { | ||
1103 | mlog_errno(status); | ||
1104 | goto bail; | ||
1105 | } | ||
1106 | |||
1107 | status = dlm_launch_recovery_thread(dlm); | ||
1108 | if (status < 0) { | ||
1109 | mlog_errno(status); | ||
1110 | goto bail; | ||
1111 | } | ||
1112 | |||
1113 | do { | ||
1114 | unsigned int backoff; | ||
1115 | status = dlm_try_to_join_domain(dlm); | ||
1116 | |||
1117 | /* If we're racing another node to the join, then we | ||
1118 | * need to back off temporarily and let them | ||
1119 | * complete. */ | ||
1120 | if (status == -EAGAIN) { | ||
1121 | if (signal_pending(current)) { | ||
1122 | status = -ERESTARTSYS; | ||
1123 | goto bail; | ||
1124 | } | ||
1125 | |||
1126 | /* | ||
1127 | * <chip> After you! | ||
1128 | * <dale> No, after you! | ||
1129 | * <chip> I insist! | ||
1130 | * <dale> But you first! | ||
1131 | * ... | ||
1132 | */ | ||
1133 | backoff = (unsigned int)(jiffies & 0x3); | ||
1134 | backoff *= DLM_DOMAIN_BACKOFF_MS; | ||
1135 | mlog(0, "backoff %d\n", backoff); | ||
1136 | msleep(backoff); | ||
1137 | } | ||
1138 | } while (status == -EAGAIN); | ||
1139 | |||
1140 | if (status < 0) { | ||
1141 | mlog_errno(status); | ||
1142 | goto bail; | ||
1143 | } | ||
1144 | |||
1145 | status = 0; | ||
1146 | bail: | ||
1147 | wake_up(&dlm_domain_events); | ||
1148 | |||
1149 | if (status) { | ||
1150 | dlm_unregister_domain_handlers(dlm); | ||
1151 | dlm_complete_thread(dlm); | ||
1152 | dlm_complete_recovery_thread(dlm); | ||
1153 | } | ||
1154 | |||
1155 | return status; | ||
1156 | } | ||
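
The backoff computation deserves a second look: (jiffies & 0x3) keeps only the two low bits of the jiffies counter, a cheap pseudo-random pick of 0, 1, 2 or 3, so two racing nodes almost certainly sleep for different multiples of the backoff period and one gets through first. A small userspace illustration (DLM_DOMAIN_BACKOFF_MS is defined earlier in dlmdomain.c; 200 ms is assumed here):

    #include <stdio.h>

    #define DLM_DOMAIN_BACKOFF_MS 200   /* assumed value */

    int main(void)
    {
            unsigned long jiffies;

            for (jiffies = 0; jiffies < 4; jiffies++)
                    printf("jiffies & 0x3 = %lu -> backoff %lu ms\n",
                           jiffies & 0x3,
                           (jiffies & 0x3) * DLM_DOMAIN_BACKOFF_MS);
            return 0;
    }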
1157 | |||
1158 | static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, | ||
1159 | u32 key) | ||
1160 | { | ||
1161 | int i; | ||
1162 | struct dlm_ctxt *dlm = NULL; | ||
1163 | |||
1164 | dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL); | ||
1165 | if (!dlm) { | ||
1166 | mlog_errno(-ENOMEM); | ||
1167 | goto leave; | ||
1168 | } | ||
1169 | |||
1170 | dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL); | ||
1171 | if (dlm->name == NULL) { | ||
1172 | mlog_errno(-ENOMEM); | ||
1173 | kfree(dlm); | ||
1174 | dlm = NULL; | ||
1175 | goto leave; | ||
1176 | } | ||
1177 | |||
1178 | dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL); | ||
1179 | if (!dlm->resources) { | ||
1180 | mlog_errno(-ENOMEM); | ||
1181 | kfree(dlm->name); | ||
1182 | kfree(dlm); | ||
1183 | dlm = NULL; | ||
1184 | goto leave; | ||
1185 | } | ||
1186 | memset(dlm->resources, 0, PAGE_SIZE); | ||
1187 | |||
1188 | for (i = 0; i < DLM_HASH_SIZE; i++) | ||
1189 | INIT_LIST_HEAD(&dlm->resources[i]); | ||
1190 | |||
1191 | strcpy(dlm->name, domain); | ||
1192 | dlm->key = key; | ||
1193 | dlm->node_num = o2nm_this_node(); | ||
1194 | |||
1195 | spin_lock_init(&dlm->spinlock); | ||
1196 | spin_lock_init(&dlm->master_lock); | ||
1197 | spin_lock_init(&dlm->ast_lock); | ||
1198 | INIT_LIST_HEAD(&dlm->list); | ||
1199 | INIT_LIST_HEAD(&dlm->dirty_list); | ||
1200 | INIT_LIST_HEAD(&dlm->reco.resources); | ||
1201 | INIT_LIST_HEAD(&dlm->reco.received); | ||
1202 | INIT_LIST_HEAD(&dlm->reco.node_data); | ||
1203 | INIT_LIST_HEAD(&dlm->purge_list); | ||
1204 | INIT_LIST_HEAD(&dlm->dlm_domain_handlers); | ||
1205 | dlm->reco.state = 0; | ||
1206 | |||
1207 | INIT_LIST_HEAD(&dlm->pending_asts); | ||
1208 | INIT_LIST_HEAD(&dlm->pending_basts); | ||
1209 | |||
1210 | mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n", | ||
1211 | dlm->recovery_map, &(dlm->recovery_map[0])); | ||
1212 | |||
1213 | memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map)); | ||
1214 | memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map)); | ||
1215 | memset(dlm->domain_map, 0, sizeof(dlm->domain_map)); | ||
1216 | |||
1217 | dlm->dlm_thread_task = NULL; | ||
1218 | dlm->dlm_reco_thread_task = NULL; | ||
1219 | init_waitqueue_head(&dlm->dlm_thread_wq); | ||
1220 | init_waitqueue_head(&dlm->dlm_reco_thread_wq); | ||
1221 | init_waitqueue_head(&dlm->reco.event); | ||
1222 | init_waitqueue_head(&dlm->ast_wq); | ||
1223 | init_waitqueue_head(&dlm->migration_wq); | ||
1224 | INIT_LIST_HEAD(&dlm->master_list); | ||
1225 | INIT_LIST_HEAD(&dlm->mle_hb_events); | ||
1226 | |||
1227 | dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1228 | init_waitqueue_head(&dlm->dlm_join_events); | ||
1229 | |||
1230 | dlm->reco.new_master = O2NM_INVALID_NODE_NUM; | ||
1231 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
1232 | atomic_set(&dlm->local_resources, 0); | ||
1233 | atomic_set(&dlm->remote_resources, 0); | ||
1234 | atomic_set(&dlm->unknown_resources, 0); | ||
1235 | |||
1236 | spin_lock_init(&dlm->work_lock); | ||
1237 | INIT_LIST_HEAD(&dlm->work_list); | ||
1238 | INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); | ||
1239 | |||
1240 | kref_init(&dlm->dlm_refs); | ||
1241 | dlm->dlm_state = DLM_CTXT_NEW; | ||
1242 | |||
1243 | INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); | ||
1244 | |||
1245 | mlog(0, "context init: refcount %u\n", | ||
1246 | atomic_read(&dlm->dlm_refs.refcount)); | ||
1247 | |||
1248 | leave: | ||
1249 | return dlm; | ||
1250 | } | ||
1251 | |||
1252 | /* | ||
1253 | * dlm_register_domain: one-time setup per "domain" | ||
1254 | */ | ||
1255 | struct dlm_ctxt *dlm_register_domain(const char *domain, | ||
1256 | u32 key) | ||
1257 | { | ||
1258 | int ret; | ||
1259 | struct dlm_ctxt *dlm = NULL; | ||
1260 | struct dlm_ctxt *new_ctxt = NULL; | ||
1261 | |||
1262 | if (strlen(domain) > O2NM_MAX_NAME_LEN) { | ||
1263 | ret = -ENAMETOOLONG; | ||
1264 | mlog(ML_ERROR, "domain name too long\n"); | ||
1265 | goto leave; | ||
1266 | } | ||
1267 | |||
1268 | if (!o2hb_check_local_node_heartbeating()) { | ||
1269 | mlog(ML_ERROR, "the local node has not been configured, or is " | ||
1270 | "not heartbeating\n"); | ||
1271 | ret = -EPROTO; | ||
1272 | goto leave; | ||
1273 | } | ||
1274 | |||
1275 | mlog(0, "register called for domain \"%s\"\n", domain); | ||
1276 | |||
1277 | retry: | ||
1278 | dlm = NULL; | ||
1279 | if (signal_pending(current)) { | ||
1280 | ret = -ERESTARTSYS; | ||
1281 | mlog_errno(ret); | ||
1282 | goto leave; | ||
1283 | } | ||
1284 | |||
1285 | spin_lock(&dlm_domain_lock); | ||
1286 | |||
1287 | dlm = __dlm_lookup_domain(domain); | ||
1288 | if (dlm) { | ||
1289 | if (dlm->dlm_state != DLM_CTXT_JOINED) { | ||
1290 | spin_unlock(&dlm_domain_lock); | ||
1291 | |||
1292 | mlog(0, "This ctxt is not joined yet!\n"); | ||
1293 | wait_event_interruptible(dlm_domain_events, | ||
1294 | dlm_wait_on_domain_helper( | ||
1295 | domain)); | ||
1296 | goto retry; | ||
1297 | } | ||
1298 | |||
1299 | __dlm_get(dlm); | ||
1300 | dlm->num_joins++; | ||
1301 | |||
1302 | spin_unlock(&dlm_domain_lock); | ||
1303 | |||
1304 | ret = 0; | ||
1305 | goto leave; | ||
1306 | } | ||
1307 | |||
1308 | /* doesn't exist */ | ||
1309 | if (!new_ctxt) { | ||
1310 | spin_unlock(&dlm_domain_lock); | ||
1311 | |||
1312 | new_ctxt = dlm_alloc_ctxt(domain, key); | ||
1313 | if (new_ctxt) | ||
1314 | goto retry; | ||
1315 | |||
1316 | ret = -ENOMEM; | ||
1317 | mlog_errno(ret); | ||
1318 | goto leave; | ||
1319 | } | ||
1320 | |||
1321 | /* a little variable switch-a-roo here... */ | ||
1322 | dlm = new_ctxt; | ||
1323 | new_ctxt = NULL; | ||
1324 | |||
1325 | /* add the new domain */ | ||
1326 | list_add_tail(&dlm->list, &dlm_domains); | ||
1327 | spin_unlock(&dlm_domain_lock); | ||
1328 | |||
1329 | ret = dlm_join_domain(dlm); | ||
1330 | if (ret) { | ||
1331 | mlog_errno(ret); | ||
1332 | dlm_put(dlm); | ||
1333 | goto leave; | ||
1334 | } | ||
1335 | |||
1336 | ret = 0; | ||
1337 | leave: | ||
1338 | if (new_ctxt) | ||
1339 | dlm_free_ctxt_mem(new_ctxt); | ||
1340 | |||
1341 | if (ret < 0) | ||
1342 | dlm = ERR_PTR(ret); | ||
1343 | |||
1344 | return dlm; | ||
1345 | } | ||
1346 | EXPORT_SYMBOL_GPL(dlm_register_domain); | ||
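
From the consumer side, the contract is: dlm_register_domain() hands back a joined, referenced ctxt or an ERR_PTR-encoded errno, and every successful call must eventually be balanced by dlm_unregister_domain(), which drops one num_joins reference and tears the domain down when the last user leaves. A minimal sketch of a hypothetical caller (not ocfs2 code; only the dlm_* calls and the ERR_PTR convention are taken from this file):

    #include <linux/err.h>
    #include "dlmapi.h"

    static struct dlm_ctxt *example_attach(const char *name, u32 key)
    {
            struct dlm_ctxt *dlm;

            dlm = dlm_register_domain(name, key);
            if (IS_ERR(dlm))
                    return NULL;    /* PTR_ERR(dlm) holds the errno */
            return dlm;
    }

    static void example_detach(struct dlm_ctxt *dlm)
    {
            /* drops our num_joins reference; the last caller out
             * shuts the domain down and migrates its locks away */
            dlm_unregister_domain(dlm);
    }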
1347 | |||
1348 | static LIST_HEAD(dlm_join_handlers); | ||
1349 | |||
1350 | static void dlm_unregister_net_handlers(void) | ||
1351 | { | ||
1352 | o2net_unregister_handler_list(&dlm_join_handlers); | ||
1353 | } | ||
1354 | |||
1355 | static int dlm_register_net_handlers(void) | ||
1356 | { | ||
1357 | int status = 0; | ||
1358 | |||
1359 | status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, | ||
1360 | sizeof(struct dlm_query_join_request), | ||
1361 | dlm_query_join_handler, | ||
1362 | NULL, &dlm_join_handlers); | ||
1363 | if (status) | ||
1364 | goto bail; | ||
1365 | |||
1366 | status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, | ||
1367 | sizeof(struct dlm_assert_joined), | ||
1368 | dlm_assert_joined_handler, | ||
1369 | NULL, &dlm_join_handlers); | ||
1370 | if (status) | ||
1371 | goto bail; | ||
1372 | |||
1373 | status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, | ||
1374 | sizeof(struct dlm_cancel_join), | ||
1375 | dlm_cancel_join_handler, | ||
1376 | NULL, &dlm_join_handlers); | ||
1377 | |||
1378 | bail: | ||
1379 | if (status < 0) | ||
1380 | dlm_unregister_net_handlers(); | ||
1381 | |||
1382 | return status; | ||
1383 | } | ||
1384 | |||
1385 | /* Domain eviction callback handling. | ||
1386 | * | ||
1387 | * The file system requires notification of node death *before* the | ||
1388 | * dlm completes its recovery work, otherwise it may be able to | ||
1389 | * acquire locks on resources requiring recovery. Since the dlm can | ||
1390 | * evict a node from its domain *before* heartbeat fires, a similar | ||
1391 | * mechanism is required. */ | ||
1392 | |||
1393 | /* Eviction is not expected to happen often, so a per-domain lock is | ||
1394 | * not necessary. Eviction callbacks are allowed to sleep for short | ||
1395 | * periods of time. */ | ||
1396 | static DECLARE_RWSEM(dlm_callback_sem); | ||
1397 | |||
1398 | void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, | ||
1399 | int node_num) | ||
1400 | { | ||
1401 | struct list_head *iter; | ||
1402 | struct dlm_eviction_cb *cb; | ||
1403 | |||
1404 | down_read(&dlm_callback_sem); | ||
1405 | list_for_each(iter, &dlm->dlm_eviction_callbacks) { | ||
1406 | cb = list_entry(iter, struct dlm_eviction_cb, ec_item); | ||
1407 | |||
1408 | cb->ec_func(node_num, cb->ec_data); | ||
1409 | } | ||
1410 | up_read(&dlm_callback_sem); | ||
1411 | } | ||
1412 | |||
1413 | void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb, | ||
1414 | dlm_eviction_func *f, | ||
1415 | void *data) | ||
1416 | { | ||
1417 | INIT_LIST_HEAD(&cb->ec_item); | ||
1418 | cb->ec_func = f; | ||
1419 | cb->ec_data = data; | ||
1420 | } | ||
1421 | EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb); | ||
1422 | |||
1423 | void dlm_register_eviction_cb(struct dlm_ctxt *dlm, | ||
1424 | struct dlm_eviction_cb *cb) | ||
1425 | { | ||
1426 | down_write(&dlm_callback_sem); | ||
1427 | list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks); | ||
1428 | up_write(&dlm_callback_sem); | ||
1429 | } | ||
1430 | EXPORT_SYMBOL_GPL(dlm_register_eviction_cb); | ||
1431 | |||
1432 | void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb) | ||
1433 | { | ||
1434 | down_write(&dlm_callback_sem); | ||
1435 | list_del_init(&cb->ec_item); | ||
1436 | up_write(&dlm_callback_sem); | ||
1437 | } | ||
1438 | EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb); | ||
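
Putting the three calls together: a client embeds a struct dlm_eviction_cb, initializes it once with dlm_setup_eviction_cb(), and registers it per domain; the callback then runs under dlm_callback_sem whenever a node is evicted, before recovery completes. A hedged usage sketch (the example_* names are hypothetical; the dlm_* API is as declared above):

    #include <linux/kernel.h>
    #include "dlmapi.h"

    static void example_node_evicted(int node_num, void *data)
    {
            /* runs under dlm_callback_sem; may sleep briefly */
            printk(KERN_INFO "node %d evicted from our domain\n", node_num);
    }

    static struct dlm_eviction_cb example_cb;

    static void example_watch(struct dlm_ctxt *dlm)
    {
            dlm_setup_eviction_cb(&example_cb, example_node_evicted, NULL);
            dlm_register_eviction_cb(dlm, &example_cb);
    }

    static void example_unwatch(void)
    {
            dlm_unregister_eviction_cb(&example_cb);
    }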
1439 | |||
1440 | static int __init dlm_init(void) | ||
1441 | { | ||
1442 | int status; | ||
1443 | |||
1444 | dlm_print_version(); | ||
1445 | |||
1446 | status = dlm_init_mle_cache(); | ||
1447 | if (status) | ||
1448 | return -1; | ||
1449 | |||
1450 | status = dlm_register_net_handlers(); | ||
1451 | if (status) { | ||
1452 | dlm_destroy_mle_cache(); | ||
1453 | return -1; | ||
1454 | } | ||
1455 | |||
1456 | return 0; | ||
1457 | } | ||
1458 | |||
1459 | static void __exit dlm_exit(void) | ||
1460 | { | ||
1461 | dlm_unregister_net_handlers(); | ||
1462 | dlm_destroy_mle_cache(); | ||
1463 | } | ||
1464 | |||
1465 | MODULE_AUTHOR("Oracle"); | ||
1466 | MODULE_LICENSE("GPL"); | ||
1467 | |||
1468 | module_init(dlm_init); | ||
1469 | module_exit(dlm_exit); | ||
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h new file mode 100644 index 000000000000..2f7f60bfeb3b --- /dev/null +++ b/fs/ocfs2/dlm/dlmdomain.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdomain.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 02111-1307, USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMDOMAIN_H | ||
26 | #define DLMDOMAIN_H | ||
27 | |||
28 | extern spinlock_t dlm_domain_lock; | ||
29 | extern struct list_head dlm_domains; | ||
30 | |||
31 | int dlm_joined(struct dlm_ctxt *dlm); | ||
32 | int dlm_shutting_down(struct dlm_ctxt *dlm); | ||
33 | void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, | ||
34 | int node_num); | ||
35 | |||
36 | #endif | ||
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c new file mode 100644 index 000000000000..d1a0038557a3 --- /dev/null +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -0,0 +1,676 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmlock.c | ||
5 | * | ||
6 | * underlying calls for lock creation | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | |||
51 | #include "dlmconvert.h" | ||
52 | |||
53 | #define MLOG_MASK_PREFIX ML_DLM | ||
54 | #include "cluster/masklog.h" | ||
55 | |||
56 | static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED; | ||
57 | static u64 dlm_next_cookie = 1; | ||
58 | |||
59 | static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | ||
60 | struct dlm_lock_resource *res, | ||
61 | struct dlm_lock *lock, int flags); | ||
62 | static void dlm_init_lock(struct dlm_lock *newlock, int type, | ||
63 | u8 node, u64 cookie); | ||
64 | static void dlm_lock_release(struct kref *kref); | ||
65 | static void dlm_lock_detach_lockres(struct dlm_lock *lock); | ||
66 | |||
67 | /* Tell us whether we can grant a new lock request. | ||
68 | * locking: | ||
69 | * caller needs: res->spinlock | ||
70 | * taken: none | ||
71 | * held on exit: none | ||
72 | * returns: 1 if the lock can be granted, 0 otherwise. | ||
73 | */ | ||
74 | static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, | ||
75 | struct dlm_lock *lock) | ||
76 | { | ||
77 | struct list_head *iter; | ||
78 | struct dlm_lock *tmplock; | ||
79 | |||
80 | list_for_each(iter, &res->granted) { | ||
81 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
82 | |||
83 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | list_for_each(iter, &res->converting) { | ||
88 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
89 | |||
90 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | return 1; | ||
95 | } | ||
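
dlm_lock_compatible() (defined in dlmcommon.h) encodes the classic VMS compatibility rules for the three modes this dlm implements: NL is compatible with everything, PR with PR and NL, and EX only with NL. A new lock is grantable only if it is compatible with every lock already granted or converting, which is exactly what the two loops above check. The table below restates those rules as a runnable sketch; the mode ordering is illustrative, not the dlm's actual constants.

    #include <stdio.h>

    enum { NL, PR, EX };    /* illustrative ordering */

    /* held mode (row) vs. requested mode (column) */
    static const int compatible[3][3] = {
            /*         NL PR EX */
            /* NL */ {  1, 1, 1 },
            /* PR */ {  1, 1, 0 },
            /* EX */ {  1, 0, 0 },
    };

    int main(void)
    {
            printf("PR vs PR: %d\n", compatible[PR][PR]); /* 1: shared read */
            printf("PR vs EX: %d\n", compatible[PR][EX]); /* 0: must block */
            return 0;
    }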
96 | |||
97 | /* performs lock creation at the lockres master site | ||
98 | * locking: | ||
99 | * caller needs: none | ||
100 | * taken: takes and drops res->spinlock | ||
101 | * held on exit: none | ||
102 | * returns: DLM_NORMAL, DLM_NOTQUEUED | ||
103 | */ | ||
104 | static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, | ||
105 | struct dlm_lock_resource *res, | ||
106 | struct dlm_lock *lock, int flags) | ||
107 | { | ||
108 | int call_ast = 0, kick_thread = 0; | ||
109 | enum dlm_status status = DLM_NORMAL; | ||
110 | |||
111 | mlog_entry("type=%d\n", lock->ml.type); | ||
112 | |||
113 | spin_lock(&res->spinlock); | ||
114 | /* if called from dlm_create_lock_handler, need to | ||
115 | * ensure it will not sleep in dlm_wait_on_lockres */ | ||
116 | status = __dlm_lockres_state_to_status(res); | ||
117 | if (status != DLM_NORMAL && | ||
118 | lock->ml.node != dlm->node_num) { | ||
119 | /* erf. state changed after lock was dropped. */ | ||
120 | spin_unlock(&res->spinlock); | ||
121 | dlm_error(status); | ||
122 | return status; | ||
123 | } | ||
124 | __dlm_wait_on_lockres(res); | ||
125 | __dlm_lockres_reserve_ast(res); | ||
126 | |||
127 | if (dlm_can_grant_new_lock(res, lock)) { | ||
128 | mlog(0, "I can grant this lock right away\n"); | ||
129 | /* got it right away */ | ||
130 | lock->lksb->status = DLM_NORMAL; | ||
131 | status = DLM_NORMAL; | ||
132 | dlm_lock_get(lock); | ||
133 | list_add_tail(&lock->list, &res->granted); | ||
134 | |||
135 | /* for the recovery lock, we can't allow the ast | ||
136 | * to be queued since the dlmthread is already | ||
137 | * frozen. but the recovery lock is always locked | ||
138 | * with LKM_NOQUEUE so we do not need the ast in | ||
139 | * this special case */ | ||
140 | if (!dlm_is_recovery_lock(res->lockname.name, | ||
141 | res->lockname.len)) { | ||
142 | kick_thread = 1; | ||
143 | call_ast = 1; | ||
144 | } | ||
145 | } else { | ||
146 | /* for NOQUEUE request, unless we get the | ||
147 | * lock right away, return DLM_NOTQUEUED */ | ||
148 | if (flags & LKM_NOQUEUE) | ||
149 | status = DLM_NOTQUEUED; | ||
150 | else { | ||
151 | dlm_lock_get(lock); | ||
152 | list_add_tail(&lock->list, &res->blocked); | ||
153 | kick_thread = 1; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | spin_unlock(&res->spinlock); | ||
158 | wake_up(&res->wq); | ||
159 | |||
160 | /* either queue the ast or release it */ | ||
161 | if (call_ast) | ||
162 | dlm_queue_ast(dlm, lock); | ||
163 | else | ||
164 | dlm_lockres_release_ast(dlm, res); | ||
165 | |||
166 | dlm_lockres_calc_usage(dlm, res); | ||
167 | if (kick_thread) | ||
168 | dlm_kick_thread(dlm, res); | ||
169 | |||
170 | return status; | ||
171 | } | ||
172 | |||
173 | void dlm_revert_pending_lock(struct dlm_lock_resource *res, | ||
174 | struct dlm_lock *lock) | ||
175 | { | ||
176 | /* remove from local queue if it failed */ | ||
177 | list_del_init(&lock->list); | ||
178 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; | ||
179 | } | ||
180 | |||
181 | |||
182 | /* | ||
183 | * locking: | ||
184 | * caller needs: none | ||
185 | * taken: takes and drops res->spinlock | ||
186 | * held on exit: none | ||
187 | * returns: DLM_DENIED, DLM_RECOVERING, or net status | ||
188 | */ | ||
189 | static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, | ||
190 | struct dlm_lock_resource *res, | ||
191 | struct dlm_lock *lock, int flags) | ||
192 | { | ||
193 | enum dlm_status status = DLM_DENIED; | ||
194 | |||
195 | mlog_entry("type=%d\n", lock->ml.type); | ||
196 | mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, | ||
197 | res->lockname.name, flags); | ||
198 | |||
199 | spin_lock(&res->spinlock); | ||
200 | |||
201 | /* will exit this call with spinlock held */ | ||
202 | __dlm_wait_on_lockres(res); | ||
203 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
204 | |||
205 | /* add lock to local (secondary) queue */ | ||
206 | dlm_lock_get(lock); | ||
207 | list_add_tail(&lock->list, &res->blocked); | ||
208 | lock->lock_pending = 1; | ||
209 | spin_unlock(&res->spinlock); | ||
210 | |||
211 | /* spec seems to say that you will get DLM_NORMAL when the lock | ||
212 | * has been queued, meaning we need to wait for a reply here. */ | ||
213 | status = dlm_send_remote_lock_request(dlm, res, lock, flags); | ||
214 | |||
215 | spin_lock(&res->spinlock); | ||
216 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
217 | lock->lock_pending = 0; | ||
218 | if (status != DLM_NORMAL) { | ||
219 | if (status != DLM_NOTQUEUED) | ||
220 | dlm_error(status); | ||
221 | dlm_revert_pending_lock(res, lock); | ||
222 | dlm_lock_put(lock); | ||
223 | } | ||
224 | spin_unlock(&res->spinlock); | ||
225 | |||
226 | dlm_lockres_calc_usage(dlm, res); | ||
227 | |||
228 | wake_up(&res->wq); | ||
229 | return status; | ||
230 | } | ||
231 | |||
232 | |||
233 | /* for remote lock creation. | ||
234 | * locking: | ||
235 | * caller needs: none, but need res->state & DLM_LOCK_RES_IN_PROGRESS | ||
236 | * taken: none | ||
237 | * held on exit: none | ||
238 | * returns: DLM_NOLOCKMGR, or net status | ||
239 | */ | ||
240 | static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | ||
241 | struct dlm_lock_resource *res, | ||
242 | struct dlm_lock *lock, int flags) | ||
243 | { | ||
244 | struct dlm_create_lock create; | ||
245 | int tmpret, status = 0; | ||
246 | enum dlm_status ret; | ||
247 | |||
248 | mlog_entry_void(); | ||
249 | |||
250 | memset(&create, 0, sizeof(create)); | ||
251 | create.node_idx = dlm->node_num; | ||
252 | create.requested_type = lock->ml.type; | ||
253 | create.cookie = lock->ml.cookie; | ||
254 | create.namelen = res->lockname.len; | ||
255 | create.flags = cpu_to_be32(flags); | ||
256 | memcpy(create.name, res->lockname.name, create.namelen); | ||
257 | |||
258 | tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, | ||
259 | sizeof(create), res->owner, &status); | ||
260 | if (tmpret >= 0) { | ||
261 | /* successfully sent and received */ | ||
262 | ret = status; /* this is already a dlm_status */ | ||
263 | } else { | ||
264 | mlog_errno(tmpret); | ||
265 | if (dlm_is_host_down(tmpret)) { | ||
266 | ret = DLM_RECOVERING; | ||
267 | mlog(0, "node %u died so returning DLM_RECOVERING " | ||
268 | "from lock message!\n", res->owner); | ||
269 | } else { | ||
270 | ret = dlm_err_to_dlm_status(tmpret); | ||
271 | } | ||
272 | } | ||
273 | |||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | void dlm_lock_get(struct dlm_lock *lock) | ||
278 | { | ||
279 | kref_get(&lock->lock_refs); | ||
280 | } | ||
281 | |||
282 | void dlm_lock_put(struct dlm_lock *lock) | ||
283 | { | ||
284 | kref_put(&lock->lock_refs, dlm_lock_release); | ||
285 | } | ||
286 | |||
287 | static void dlm_lock_release(struct kref *kref) | ||
288 | { | ||
289 | struct dlm_lock *lock; | ||
290 | |||
291 | lock = container_of(kref, struct dlm_lock, lock_refs); | ||
292 | |||
293 | BUG_ON(!list_empty(&lock->list)); | ||
294 | BUG_ON(!list_empty(&lock->ast_list)); | ||
295 | BUG_ON(!list_empty(&lock->bast_list)); | ||
296 | BUG_ON(lock->ast_pending); | ||
297 | BUG_ON(lock->bast_pending); | ||
298 | |||
299 | dlm_lock_detach_lockres(lock); | ||
300 | |||
301 | if (lock->lksb_kernel_allocated) { | ||
302 | mlog(0, "freeing kernel-allocated lksb\n"); | ||
303 | kfree(lock->lksb); | ||
304 | } | ||
305 | kfree(lock); | ||
306 | } | ||
307 | |||
308 | /* associate a lock with its lockres, getting a ref on the lockres */ | ||
309 | void dlm_lock_attach_lockres(struct dlm_lock *lock, | ||
310 | struct dlm_lock_resource *res) | ||
311 | { | ||
312 | dlm_lockres_get(res); | ||
313 | lock->lockres = res; | ||
314 | } | ||
315 | |||
316 | /* drop ref on lockres, if there is still one associated with lock */ | ||
317 | static void dlm_lock_detach_lockres(struct dlm_lock *lock) | ||
318 | { | ||
319 | struct dlm_lock_resource *res; | ||
320 | |||
321 | res = lock->lockres; | ||
322 | if (res) { | ||
323 | lock->lockres = NULL; | ||
324 | mlog(0, "removing lock's lockres reference\n"); | ||
325 | dlm_lockres_put(res); | ||
326 | } | ||
327 | } | ||
328 | |||
329 | static void dlm_init_lock(struct dlm_lock *newlock, int type, | ||
330 | u8 node, u64 cookie) | ||
331 | { | ||
332 | INIT_LIST_HEAD(&newlock->list); | ||
333 | INIT_LIST_HEAD(&newlock->ast_list); | ||
334 | INIT_LIST_HEAD(&newlock->bast_list); | ||
335 | spin_lock_init(&newlock->spinlock); | ||
336 | newlock->ml.type = type; | ||
337 | newlock->ml.convert_type = LKM_IVMODE; | ||
338 | newlock->ml.highest_blocked = LKM_IVMODE; | ||
339 | newlock->ml.node = node; | ||
340 | newlock->ml.pad1 = 0; | ||
341 | newlock->ml.list = 0; | ||
342 | newlock->ml.flags = 0; | ||
343 | newlock->ast = NULL; | ||
344 | newlock->bast = NULL; | ||
345 | newlock->astdata = NULL; | ||
346 | newlock->ml.cookie = cpu_to_be64(cookie); | ||
347 | newlock->ast_pending = 0; | ||
348 | newlock->bast_pending = 0; | ||
349 | newlock->convert_pending = 0; | ||
350 | newlock->lock_pending = 0; | ||
351 | newlock->unlock_pending = 0; | ||
352 | newlock->cancel_pending = 0; | ||
353 | newlock->lksb_kernel_allocated = 0; | ||
354 | |||
355 | kref_init(&newlock->lock_refs); | ||
356 | } | ||
357 | |||
358 | struct dlm_lock *dlm_new_lock(int type, u8 node, u64 cookie, | ||
359 | struct dlm_lockstatus *lksb) | ||
360 | { | ||
361 | struct dlm_lock *lock; | ||
362 | int kernel_allocated = 0; | ||
363 | |||
364 | lock = kcalloc(1, sizeof(*lock), GFP_KERNEL); | ||
365 | if (!lock) | ||
366 | return NULL; | ||
367 | |||
368 | if (!lksb) { | ||
369 | /* zero memory only if kernel-allocated */ | ||
370 | lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL); | ||
371 | if (!lksb) { | ||
372 | kfree(lock); | ||
373 | return NULL; | ||
374 | } | ||
375 | kernel_allocated = 1; | ||
376 | } | ||
377 | |||
378 | dlm_init_lock(lock, type, node, cookie); | ||
379 | if (kernel_allocated) | ||
380 | lock->lksb_kernel_allocated = 1; | ||
381 | lock->lksb = lksb; | ||
382 | lksb->lockid = lock; | ||
383 | return lock; | ||
384 | } | ||
385 | |||
386 | /* handler for lock creation net message | ||
387 | * locking: | ||
388 | * caller needs: none | ||
389 | * taken: takes and drops res->spinlock | ||
390 | * held on exit: none | ||
391 | * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED | ||
392 | */ | ||
393 | int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data) | ||
394 | { | ||
395 | struct dlm_ctxt *dlm = data; | ||
396 | struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf; | ||
397 | struct dlm_lock_resource *res = NULL; | ||
398 | struct dlm_lock *newlock = NULL; | ||
399 | struct dlm_lockstatus *lksb = NULL; | ||
400 | enum dlm_status status = DLM_NORMAL; | ||
401 | char *name; | ||
402 | unsigned int namelen; | ||
403 | |||
404 | BUG_ON(!dlm); | ||
405 | |||
406 | mlog_entry_void(); | ||
407 | |||
408 | if (!dlm_grab(dlm)) | ||
409 | return DLM_REJECTED; | ||
410 | |||
411 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
412 | "Domain %s not fully joined!\n", dlm->name); | ||
413 | |||
414 | name = create->name; | ||
415 | namelen = create->namelen; | ||
416 | |||
417 | status = DLM_IVBUFLEN; | ||
418 | if (namelen > DLM_LOCKID_NAME_MAX) { | ||
419 | dlm_error(status); | ||
420 | goto leave; | ||
421 | } | ||
422 | |||
423 | status = DLM_SYSERR; | ||
424 | newlock = dlm_new_lock(create->requested_type, | ||
425 | create->node_idx, | ||
426 | be64_to_cpu(create->cookie), NULL); | ||
427 | if (!newlock) { | ||
428 | dlm_error(status); | ||
429 | goto leave; | ||
430 | } | ||
431 | |||
432 | lksb = newlock->lksb; | ||
433 | |||
434 | if (be32_to_cpu(create->flags) & LKM_GET_LVB) { | ||
435 | lksb->flags |= DLM_LKSB_GET_LVB; | ||
436 | mlog(0, "set DLM_LKSB_GET_LVB flag\n"); | ||
437 | } | ||
438 | |||
439 | status = DLM_IVLOCKID; | ||
440 | res = dlm_lookup_lockres(dlm, name, namelen); | ||
441 | if (!res) { | ||
442 | dlm_error(status); | ||
443 | goto leave; | ||
444 | } | ||
445 | |||
446 | spin_lock(&res->spinlock); | ||
447 | status = __dlm_lockres_state_to_status(res); | ||
448 | spin_unlock(&res->spinlock); | ||
449 | |||
450 | if (status != DLM_NORMAL) { | ||
451 | mlog(0, "lockres recovering/migrating/in-progress\n"); | ||
452 | goto leave; | ||
453 | } | ||
454 | |||
455 | dlm_lock_attach_lockres(newlock, res); | ||
456 | |||
457 | status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags)); | ||
458 | leave: | ||
459 | if (status != DLM_NORMAL) | ||
460 | if (newlock) | ||
461 | dlm_lock_put(newlock); | ||
462 | |||
463 | if (res) | ||
464 | dlm_lockres_put(res); | ||
465 | |||
466 | dlm_put(dlm); | ||
467 | |||
468 | return status; | ||
469 | } | ||
470 | |||
471 | |||
472 | /* fetch next node-local (u8 nodenum + u56 cookie) into u64 */ | ||
473 | static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie) | ||
474 | { | ||
475 | u64 tmpnode = node_num; | ||
476 | |||
477 | /* shift single byte of node num into top 8 bits */ | ||
478 | tmpnode <<= 56; | ||
479 | |||
480 | spin_lock(&dlm_cookie_lock); | ||
481 | *cookie = (dlm_next_cookie | tmpnode); | ||
482 | if (++dlm_next_cookie & 0xff00000000000000ull) { | ||
483 | mlog(0, "This node's cookie will now wrap!\n"); | ||
484 | dlm_next_cookie = 1; | ||
485 | } | ||
486 | spin_unlock(&dlm_cookie_lock); | ||
487 | } | ||
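
A worked example of the cookie layout helps: the node number occupies the top 8 bits and a per-node counter the low 56, so cookies are unique cluster-wide without any coordination beyond each node's local spinlock. Plain userspace C, with arbitrary values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t counter = 0x1234;  /* dlm_next_cookie stand-in */
            uint64_t tmpnode = 7;       /* node number */
            uint64_t cookie;

            tmpnode <<= 56;             /* node byte into the top 8 bits */
            cookie = counter | tmpnode;

            printf("cookie = 0x%016llx\n", (unsigned long long)cookie);
            printf("node   = %llu\n", (unsigned long long)(cookie >> 56));
            printf("seq    = 0x%llx\n",
                   (unsigned long long)(cookie & ((1ULL << 56) - 1)));
            return 0;
    }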
488 | |||
489 | enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode, | ||
490 | struct dlm_lockstatus *lksb, int flags, | ||
491 | const char *name, dlm_astlockfunc_t *ast, void *data, | ||
492 | dlm_bastlockfunc_t *bast) | ||
493 | { | ||
494 | enum dlm_status status; | ||
495 | struct dlm_lock_resource *res = NULL; | ||
496 | struct dlm_lock *lock = NULL; | ||
497 | int convert = 0, recovery = 0; | ||
498 | |||
499 | /* yes this function is a mess. | ||
500 | * TODO: clean this up. lots of common code in the | ||
501 | * lock and convert paths, especially in the retry blocks */ | ||
502 | if (!lksb) { | ||
503 | dlm_error(DLM_BADARGS); | ||
504 | return DLM_BADARGS; | ||
505 | } | ||
506 | |||
507 | status = DLM_BADPARAM; | ||
508 | if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) { | ||
509 | dlm_error(status); | ||
510 | goto error; | ||
511 | } | ||
512 | |||
513 | if (flags & ~LKM_VALID_FLAGS) { | ||
514 | dlm_error(status); | ||
515 | goto error; | ||
516 | } | ||
517 | |||
518 | convert = (flags & LKM_CONVERT); | ||
519 | recovery = (flags & LKM_RECOVERY); | ||
520 | |||
521 | if (recovery && | ||
522 | (!dlm_is_recovery_lock(name, strlen(name)) || convert) ) { | ||
523 | dlm_error(status); | ||
524 | goto error; | ||
525 | } | ||
526 | if (convert && (flags & LKM_LOCAL)) { | ||
527 | mlog(ML_ERROR, "strange LOCAL convert request!\n"); | ||
528 | goto error; | ||
529 | } | ||
530 | |||
531 | if (convert) { | ||
532 | /* CONVERT request */ | ||
533 | |||
534 | /* if converting, must pass in a valid dlm_lock */ | ||
535 | lock = lksb->lockid; | ||
536 | if (!lock) { | ||
537 | mlog(ML_ERROR, "NULL lock pointer in convert " | ||
538 | "request\n"); | ||
539 | goto error; | ||
540 | } | ||
541 | |||
542 | res = lock->lockres; | ||
543 | if (!res) { | ||
544 | mlog(ML_ERROR, "NULL lockres pointer in convert " | ||
545 | "request\n"); | ||
546 | goto error; | ||
547 | } | ||
548 | dlm_lockres_get(res); | ||
549 | |||
550 | /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are | ||
551 | * static after the original lock call. convert requests will | ||
552 | * ensure that everything is the same, or return DLM_BADARGS. | ||
553 | * this means that DLM_DENIED_NOASTS will never be returned. | ||
554 | */ | ||
555 | if (lock->lksb != lksb || lock->ast != ast || | ||
556 | lock->bast != bast || lock->astdata != data) { | ||
557 | status = DLM_BADARGS; | ||
558 | mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, " | ||
559 | "astdata=%p\n", lksb, ast, bast, data); | ||
560 | mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, " | ||
561 | "astdata=%p\n", lock->lksb, lock->ast, | ||
562 | lock->bast, lock->astdata); | ||
563 | goto error; | ||
564 | } | ||
565 | retry_convert: | ||
566 | dlm_wait_for_recovery(dlm); | ||
567 | |||
568 | if (res->owner == dlm->node_num) | ||
569 | status = dlmconvert_master(dlm, res, lock, flags, mode); | ||
570 | else | ||
571 | status = dlmconvert_remote(dlm, res, lock, flags, mode); | ||
572 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || | ||
573 | status == DLM_FORWARD) { | ||
574 | /* for now, see how this works without sleeping | ||
575 | * and just retry right away. I suspect the reco | ||
576 | * or migration will complete fast enough that | ||
577 | * no waiting will be necessary */ | ||
578 | mlog(0, "retrying convert with migration/recovery/" | ||
579 | "in-progress\n"); | ||
580 | msleep(100); | ||
581 | goto retry_convert; | ||
582 | } | ||
583 | } else { | ||
584 | u64 tmpcookie; | ||
585 | |||
586 | /* LOCK request */ | ||
587 | status = DLM_BADARGS; | ||
588 | if (!name) { | ||
589 | dlm_error(status); | ||
590 | goto error; | ||
591 | } | ||
592 | |||
593 | status = DLM_IVBUFLEN; | ||
594 | if (strlen(name) > DLM_LOCKID_NAME_MAX || strlen(name) < 1) { | ||
595 | dlm_error(status); | ||
596 | goto error; | ||
597 | } | ||
598 | |||
599 | dlm_get_next_cookie(dlm->node_num, &tmpcookie); | ||
600 | lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb); | ||
601 | if (!lock) { | ||
602 | dlm_error(status); | ||
603 | goto error; | ||
604 | } | ||
605 | |||
606 | if (!recovery) | ||
607 | dlm_wait_for_recovery(dlm); | ||
608 | |||
609 | /* find or create the lock resource */ | ||
610 | res = dlm_get_lock_resource(dlm, name, flags); | ||
611 | if (!res) { | ||
612 | status = DLM_IVLOCKID; | ||
613 | dlm_error(status); | ||
614 | goto error; | ||
615 | } | ||
616 | |||
617 | mlog(0, "type=%d, flags = 0x%x\n", mode, flags); | ||
618 | mlog(0, "creating lock: lock=%p res=%p\n", lock, res); | ||
619 | |||
620 | dlm_lock_attach_lockres(lock, res); | ||
621 | lock->ast = ast; | ||
622 | lock->bast = bast; | ||
623 | lock->astdata = data; | ||
624 | |||
625 | retry_lock: | ||
626 | if (flags & LKM_VALBLK) { | ||
627 | mlog(0, "LKM_VALBLK passed by caller\n"); | ||
628 | |||
629 | /* LVB requests for non PR, PW or EX locks are | ||
630 | * ignored. */ | ||
631 | if (mode < LKM_PRMODE) | ||
632 | flags &= ~LKM_VALBLK; | ||
633 | else { | ||
634 | flags |= LKM_GET_LVB; | ||
635 | lock->lksb->flags |= DLM_LKSB_GET_LVB; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | if (res->owner == dlm->node_num) | ||
640 | status = dlmlock_master(dlm, res, lock, flags); | ||
641 | else | ||
642 | status = dlmlock_remote(dlm, res, lock, flags); | ||
643 | |||
644 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || | ||
645 | status == DLM_FORWARD) { | ||
646 | mlog(0, "retrying lock with migration/" | ||
647 | "recovery/in progress\n"); | ||
648 | msleep(100); | ||
649 | dlm_wait_for_recovery(dlm); | ||
650 | goto retry_lock; | ||
651 | } | ||
652 | |||
653 | if (status != DLM_NORMAL) { | ||
654 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; | ||
655 | if (status != DLM_NOTQUEUED) | ||
656 | dlm_error(status); | ||
657 | goto error; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | error: | ||
662 | if (status != DLM_NORMAL) { | ||
663 | if (lock && !convert) | ||
664 | dlm_lock_put(lock); | ||
665 | /* this is kind of unnecessary */ | ||
666 | lksb->status = status; | ||
667 | } | ||
668 | |||
669 | /* put lockres ref from the convert path | ||
670 | * or from dlm_get_lock_resource */ | ||
671 | if (res) | ||
672 | dlm_lockres_put(res); | ||
673 | |||
674 | return status; | ||
675 | } | ||
676 | EXPORT_SYMBOL_GPL(dlmlock); | ||
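
A caller-side sketch ties the two halves of dlmlock() together: an initial request names the resource and installs the ast/bast pair, while a later convert passes LKM_CONVERT and must reuse exactly the same lksb, ast, bast and astdata, per the check above. The example_* names are hypothetical; the types and flags come from dlmapi.h.

    #include "dlmapi.h"

    static void example_ast(void *astdata)
    {
            /* astdata is the lksb here; lksb->status reports the grant */
    }

    static void example_bast(void *astdata, int blocked_type)
    {
            /* a blocked_type request is waiting on us; downconvert soon */
    }

    static enum dlm_status example_lock_then_convert(struct dlm_ctxt *dlm,
                                                     struct dlm_lockstatus *lksb)
    {
            enum dlm_status status;

            status = dlmlock(dlm, LKM_PRMODE, lksb, 0, "example_res",
                             example_ast, lksb, example_bast);
            if (status != DLM_NORMAL)
                    return status;

            /* convert path: lksb->lockid carries the existing lock, and
             * the lksb/ast/bast/astdata must match the original call.
             * (In real use, wait for the ast before converting.) */
            return dlmlock(dlm, LKM_EXMODE, lksb, LKM_CONVERT, "example_res",
                           example_ast, lksb, example_bast);
    }
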
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c new file mode 100644 index 000000000000..047279546b4f --- /dev/null +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -0,0 +1,2666 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmmaster.c | ||
5 | * | ||
6 | * standalone DLM module | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | #include "dlmdebug.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | enum dlm_mle_type { | ||
56 | DLM_MLE_BLOCK, | ||
57 | DLM_MLE_MASTER, | ||
58 | DLM_MLE_MIGRATION | ||
59 | }; | ||
60 | |||
61 | struct dlm_lock_name | ||
62 | { | ||
63 | u8 len; | ||
64 | u8 name[DLM_LOCKID_NAME_MAX]; | ||
65 | }; | ||
66 | |||
67 | struct dlm_master_list_entry | ||
68 | { | ||
69 | struct list_head list; | ||
70 | struct list_head hb_events; | ||
71 | struct dlm_ctxt *dlm; | ||
72 | spinlock_t spinlock; | ||
73 | wait_queue_head_t wq; | ||
74 | atomic_t woken; | ||
75 | struct kref mle_refs; | ||
76 | unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
77 | unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
78 | unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
79 | unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
80 | u8 master; | ||
81 | u8 new_master; | ||
82 | enum dlm_mle_type type; | ||
83 | struct o2hb_callback_func mle_hb_up; | ||
84 | struct o2hb_callback_func mle_hb_down; | ||
85 | union { | ||
86 | struct dlm_lock_resource *res; | ||
87 | struct dlm_lock_name name; | ||
88 | } u; | ||
89 | }; | ||
90 | |||
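The four node bitmaps drive the mastery protocol: node_map tracks live nodes, vote_map the nodes whose answer is required, response_map the nodes that have answered, and maybe_map the candidate masters. dlm_wait_for_lock_mastery() below derives its state purely by comparing these maps. A runnable userspace sketch of those comparisons (O2NM_MAX_NODES is assumed to be 255, as in the o2nm code):

```c
#include <stdio.h>
#include <string.h>

#define O2NM_MAX_NODES	255	/* assumed value from o2nm */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_node(unsigned long *map, int n)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static void clear_node(unsigned long *map, int n)
{
	map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
}

int main(void)
{
	unsigned long node[BITS_TO_LONGS(O2NM_MAX_NODES)] = {0};
	unsigned long vote[BITS_TO_LONGS(O2NM_MAX_NODES)] = {0};
	unsigned long resp[BITS_TO_LONGS(O2NM_MAX_NODES)] = {0};

	set_node(node, 2);		  /* live nodes: 2 and 5 */
	set_node(node, 5);
	memcpy(vote, node, sizeof(vote)); /* ask every live node */
	set_node(resp, 2);		  /* only node 2 answered so far */
	clear_node(node, 5);		  /* ...then node 5 died */

	/* the same tests dlm_wait_for_lock_mastery() does with memcmp() */
	printf("map_changed=%d voting_done=%d\n",
	       memcmp(vote, node, sizeof(vote)) != 0,
	       memcmp(vote, resp, sizeof(vote)) == 0);
	return 0;	/* prints "map_changed=1 voting_done=0" */
}
```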
91 | static void dlm_mle_node_down(struct dlm_ctxt *dlm, | ||
92 | struct dlm_master_list_entry *mle, | ||
93 | struct o2nm_node *node, | ||
94 | int idx); | ||
95 | static void dlm_mle_node_up(struct dlm_ctxt *dlm, | ||
96 | struct dlm_master_list_entry *mle, | ||
97 | struct o2nm_node *node, | ||
98 | int idx); | ||
99 | |||
100 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); | ||
101 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname, | ||
102 | unsigned int namelen, void *nodemap, | ||
103 | u32 flags); | ||
104 | |||
105 | static inline int dlm_mle_equal(struct dlm_ctxt *dlm, | ||
106 | struct dlm_master_list_entry *mle, | ||
107 | const char *name, | ||
108 | unsigned int namelen) | ||
109 | { | ||
110 | struct dlm_lock_resource *res; | ||
111 | |||
112 | if (dlm != mle->dlm) | ||
113 | return 0; | ||
114 | |||
115 | if (mle->type == DLM_MLE_BLOCK || | ||
116 | mle->type == DLM_MLE_MIGRATION) { | ||
117 | if (namelen != mle->u.name.len || | ||
118 | memcmp(name, mle->u.name.name, namelen)!=0) | ||
119 | return 0; | ||
120 | } else { | ||
121 | res = mle->u.res; | ||
122 | if (namelen != res->lockname.len || | ||
123 | memcmp(res->lockname.name, name, namelen) != 0) | ||
124 | return 0; | ||
125 | } | ||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | #if 0 | ||
130 | /* Code here is compiled out by default, but kept because it aids debugging */ | ||
131 | |||
132 | void dlm_print_one_mle(struct dlm_master_list_entry *mle) | ||
133 | { | ||
134 | int i = 0, refs; | ||
135 | char *type; | ||
136 | char attached; | ||
137 | u8 master; | ||
138 | unsigned int namelen; | ||
139 | const char *name; | ||
140 | struct kref *k; | ||
141 | |||
142 | k = &mle->mle_refs; | ||
143 | if (mle->type == DLM_MLE_BLOCK) | ||
144 | type = "BLK"; | ||
145 | else if (mle->type == DLM_MLE_MASTER) | ||
146 | type = "MAS"; | ||
147 | else | ||
148 | type = "MIG"; | ||
149 | refs = atomic_read(&k->refcount); | ||
150 | master = mle->master; | ||
151 | attached = (list_empty(&mle->hb_events) ? 'N' : 'Y'); | ||
152 | |||
153 | if (mle->type != DLM_MLE_MASTER) { | ||
154 | namelen = mle->u.name.len; | ||
155 | name = mle->u.name.name; | ||
156 | } else { | ||
157 | namelen = mle->u.res->lockname.len; | ||
158 | name = mle->u.res->lockname.name; | ||
159 | } | ||
160 | |||
161 | mlog(ML_NOTICE, " #%3d: %3s %3d %3u %3u %c (%d)%.*s\n", | ||
162 | i, type, refs, master, mle->new_master, attached, | ||
163 | namelen, namelen, name); | ||
164 | } | ||
165 | |||
166 | static void dlm_dump_mles(struct dlm_ctxt *dlm) | ||
167 | { | ||
168 | struct dlm_master_list_entry *mle; | ||
169 | struct list_head *iter; | ||
170 | |||
171 | mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name); | ||
172 | mlog(ML_NOTICE, " ####: type refs owner new events? lockname nodemap votemap respmap maybemap\n"); | ||
173 | spin_lock(&dlm->master_lock); | ||
174 | list_for_each(iter, &dlm->master_list) { | ||
175 | mle = list_entry(iter, struct dlm_master_list_entry, list); | ||
176 | dlm_print_one_mle(mle); | ||
177 | } | ||
178 | spin_unlock(&dlm->master_lock); | ||
179 | } | ||
180 | |||
181 | extern spinlock_t dlm_domain_lock; | ||
182 | extern struct list_head dlm_domains; | ||
183 | |||
184 | int dlm_dump_all_mles(const char __user *data, unsigned int len) | ||
185 | { | ||
186 | struct list_head *iter; | ||
187 | struct dlm_ctxt *dlm; | ||
188 | |||
189 | spin_lock(&dlm_domain_lock); | ||
190 | list_for_each(iter, &dlm_domains) { | ||
191 | dlm = list_entry(iter, struct dlm_ctxt, list); | ||
192 | mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name); | ||
193 | dlm_dump_mles(dlm); | ||
194 | } | ||
195 | spin_unlock(&dlm_domain_lock); | ||
196 | return len; | ||
197 | } | ||
198 | EXPORT_SYMBOL_GPL(dlm_dump_all_mles); | ||
199 | |||
200 | #endif /* 0 */ | ||
201 | |||
202 | |||
203 | static kmem_cache_t *dlm_mle_cache = NULL; | ||
204 | |||
205 | |||
206 | static void dlm_mle_release(struct kref *kref); | ||
207 | static void dlm_init_mle(struct dlm_master_list_entry *mle, | ||
208 | enum dlm_mle_type type, | ||
209 | struct dlm_ctxt *dlm, | ||
210 | struct dlm_lock_resource *res, | ||
211 | const char *name, | ||
212 | unsigned int namelen); | ||
213 | static void dlm_put_mle(struct dlm_master_list_entry *mle); | ||
214 | static void __dlm_put_mle(struct dlm_master_list_entry *mle); | ||
215 | static int dlm_find_mle(struct dlm_ctxt *dlm, | ||
216 | struct dlm_master_list_entry **mle, | ||
217 | char *name, unsigned int namelen); | ||
218 | |||
219 | static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to); | ||
220 | |||
221 | |||
222 | static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, | ||
223 | struct dlm_lock_resource *res, | ||
224 | struct dlm_master_list_entry *mle, | ||
225 | int *blocked); | ||
226 | static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | ||
227 | struct dlm_lock_resource *res, | ||
228 | struct dlm_master_list_entry *mle, | ||
229 | int blocked); | ||
230 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | ||
231 | struct dlm_lock_resource *res, | ||
232 | struct dlm_master_list_entry *mle, | ||
233 | struct dlm_master_list_entry **oldmle, | ||
234 | const char *name, unsigned int namelen, | ||
235 | u8 new_master, u8 master); | ||
236 | |||
237 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, | ||
238 | struct dlm_lock_resource *res); | ||
239 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | ||
240 | struct dlm_lock_resource *res); | ||
241 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | ||
242 | struct dlm_lock_resource *res, | ||
243 | u8 target); | ||
244 | |||
245 | |||
246 | int dlm_is_host_down(int errno) | ||
247 | { | ||
248 | switch (errno) { | ||
249 | case -EBADF: | ||
250 | case -ECONNREFUSED: | ||
251 | case -ENOTCONN: | ||
252 | case -ECONNRESET: | ||
253 | case -EPIPE: | ||
254 | case -EHOSTDOWN: | ||
255 | case -EHOSTUNREACH: | ||
256 | case -ETIMEDOUT: | ||
257 | case -ECONNABORTED: | ||
258 | case -ENETDOWN: | ||
259 | case -ENETUNREACH: | ||
260 | case -ENETRESET: | ||
261 | case -ESHUTDOWN: | ||
262 | case -ENOPROTOOPT: | ||
263 | case -EINVAL: /* if returned from our tcp code, | ||
264 | this means there is no socket */ | ||
265 | return 1; | ||
266 | } | ||
267 | return 0; | ||
268 | } | ||
269 | |||
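Every errno in this switch means "the target node died; finish out the remaining nodes", while any other negative return from o2net makes the callers (dlm_do_master_request, dlm_do_assert_master) BUG(). A runnable userspace replica of the classifier, for experimenting with the distinction:

```c
#include <errno.h>
#include <stdio.h>

static int is_host_down(int err)	/* userspace copy of the above */
{
	switch (err) {
	case -EBADF: case -ECONNREFUSED: case -ENOTCONN:
	case -ECONNRESET: case -EPIPE: case -EHOSTDOWN:
	case -EHOSTUNREACH: case -ETIMEDOUT: case -ECONNABORTED:
	case -ENETDOWN: case -ENETUNREACH: case -ENETRESET:
	case -ESHUTDOWN: case -ENOPROTOOPT:
	case -EINVAL:	/* from the o2net code: no socket */
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("%d %d\n", is_host_down(-ETIMEDOUT),
	       is_host_down(-ENOMEM));	/* prints "1 0" */
	return 0;
}
```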
270 | |||
271 | /* | ||
272 | * MASTER LIST FUNCTIONS | ||
273 | */ | ||
274 | |||
275 | |||
276 | /* | ||
277 | * regarding master list entries and heartbeat callbacks: | ||
278 | * | ||
279 | * in order to avoid sleeping and allocation that occurs in | ||
280 | * heartbeat, master list entries are simply attached to the | ||
281 | * dlm's established heartbeat callbacks. the mle is attached | ||
282 | * when it is created, and since the dlm->spinlock is held at | ||
283 | * that time, any heartbeat event will be properly discovered | ||
284 | * by the mle. the mle needs to be detached from the | ||
285 | * dlm->mle_hb_events list as soon as heartbeat events are no | ||
286 | * longer useful to the mle, and before the mle is freed. | ||
287 | * | ||
288 | * as a general rule, heartbeat events are no longer needed by | ||
289 | * the mle once an "answer" regarding the lock master has been | ||
290 | * received. | ||
291 | */ | ||
292 | static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, | ||
293 | struct dlm_master_list_entry *mle) | ||
294 | { | ||
295 | assert_spin_locked(&dlm->spinlock); | ||
296 | |||
297 | list_add_tail(&mle->hb_events, &dlm->mle_hb_events); | ||
298 | } | ||
299 | |||
300 | |||
301 | static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, | ||
302 | struct dlm_master_list_entry *mle) | ||
303 | { | ||
304 | if (!list_empty(&mle->hb_events)) | ||
305 | list_del_init(&mle->hb_events); | ||
306 | } | ||
307 | |||
308 | |||
309 | static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, | ||
310 | struct dlm_master_list_entry *mle) | ||
311 | { | ||
312 | spin_lock(&dlm->spinlock); | ||
313 | __dlm_mle_detach_hb_events(dlm, mle); | ||
314 | spin_unlock(&dlm->spinlock); | ||
315 | } | ||
316 | |||
317 | /* remove from list and free */ | ||
318 | static void __dlm_put_mle(struct dlm_master_list_entry *mle) | ||
319 | { | ||
320 | struct dlm_ctxt *dlm; | ||
321 | dlm = mle->dlm; | ||
322 | |||
323 | assert_spin_locked(&dlm->spinlock); | ||
324 | assert_spin_locked(&dlm->master_lock); | ||
325 | BUG_ON(!atomic_read(&mle->mle_refs.refcount)); | ||
326 | |||
327 | kref_put(&mle->mle_refs, dlm_mle_release); | ||
328 | } | ||
329 | |||
330 | |||
331 | /* must not have any spinlocks coming in */ | ||
332 | static void dlm_put_mle(struct dlm_master_list_entry *mle) | ||
333 | { | ||
334 | struct dlm_ctxt *dlm; | ||
335 | dlm = mle->dlm; | ||
336 | |||
337 | spin_lock(&dlm->spinlock); | ||
338 | spin_lock(&dlm->master_lock); | ||
339 | __dlm_put_mle(mle); | ||
340 | spin_unlock(&dlm->master_lock); | ||
341 | spin_unlock(&dlm->spinlock); | ||
342 | } | ||
343 | |||
344 | static inline void dlm_get_mle(struct dlm_master_list_entry *mle) | ||
345 | { | ||
346 | kref_get(&mle->mle_refs); | ||
347 | } | ||
348 | |||
349 | static void dlm_init_mle(struct dlm_master_list_entry *mle, | ||
350 | enum dlm_mle_type type, | ||
351 | struct dlm_ctxt *dlm, | ||
352 | struct dlm_lock_resource *res, | ||
353 | const char *name, | ||
354 | unsigned int namelen) | ||
355 | { | ||
356 | assert_spin_locked(&dlm->spinlock); | ||
357 | |||
358 | mle->dlm = dlm; | ||
359 | mle->type = type; | ||
360 | INIT_LIST_HEAD(&mle->list); | ||
361 | INIT_LIST_HEAD(&mle->hb_events); | ||
362 | memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); | ||
363 | spin_lock_init(&mle->spinlock); | ||
364 | init_waitqueue_head(&mle->wq); | ||
365 | atomic_set(&mle->woken, 0); | ||
366 | kref_init(&mle->mle_refs); | ||
367 | memset(mle->response_map, 0, sizeof(mle->response_map)); | ||
368 | mle->master = O2NM_MAX_NODES; | ||
369 | mle->new_master = O2NM_MAX_NODES; | ||
370 | |||
371 | if (mle->type == DLM_MLE_MASTER) { | ||
372 | BUG_ON(!res); | ||
373 | mle->u.res = res; | ||
374 | } else if (mle->type == DLM_MLE_BLOCK) { | ||
375 | BUG_ON(!name); | ||
376 | memcpy(mle->u.name.name, name, namelen); | ||
377 | mle->u.name.len = namelen; | ||
378 | } else /* DLM_MLE_MIGRATION */ { | ||
379 | BUG_ON(!name); | ||
380 | memcpy(mle->u.name.name, name, namelen); | ||
381 | mle->u.name.len = namelen; | ||
382 | } | ||
383 | |||
384 | /* copy off the node_map and register hb callbacks on our copy */ | ||
385 | memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); | ||
386 | memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); | ||
387 | clear_bit(dlm->node_num, mle->vote_map); | ||
388 | clear_bit(dlm->node_num, mle->node_map); | ||
389 | |||
390 | /* attach the mle to the domain node up/down events */ | ||
391 | __dlm_mle_attach_hb_events(dlm, mle); | ||
392 | } | ||
393 | |||
394 | |||
395 | /* returns 1 if found, 0 if not */ | ||
396 | static int dlm_find_mle(struct dlm_ctxt *dlm, | ||
397 | struct dlm_master_list_entry **mle, | ||
398 | char *name, unsigned int namelen) | ||
399 | { | ||
400 | struct dlm_master_list_entry *tmpmle; | ||
401 | struct list_head *iter; | ||
402 | |||
403 | assert_spin_locked(&dlm->master_lock); | ||
404 | |||
405 | list_for_each(iter, &dlm->master_list) { | ||
406 | tmpmle = list_entry(iter, struct dlm_master_list_entry, list); | ||
407 | if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) | ||
408 | continue; | ||
409 | dlm_get_mle(tmpmle); | ||
410 | *mle = tmpmle; | ||
411 | return 1; | ||
412 | } | ||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) | ||
417 | { | ||
418 | struct dlm_master_list_entry *mle; | ||
419 | struct list_head *iter; | ||
420 | |||
421 | assert_spin_locked(&dlm->spinlock); | ||
422 | |||
423 | list_for_each(iter, &dlm->mle_hb_events) { | ||
424 | mle = list_entry(iter, struct dlm_master_list_entry, | ||
425 | hb_events); | ||
426 | if (node_up) | ||
427 | dlm_mle_node_up(dlm, mle, NULL, idx); | ||
428 | else | ||
429 | dlm_mle_node_down(dlm, mle, NULL, idx); | ||
430 | } | ||
431 | } | ||
432 | |||
433 | static void dlm_mle_node_down(struct dlm_ctxt *dlm, | ||
434 | struct dlm_master_list_entry *mle, | ||
435 | struct o2nm_node *node, int idx) | ||
436 | { | ||
437 | spin_lock(&mle->spinlock); | ||
438 | |||
439 | if (!test_bit(idx, mle->node_map)) | ||
440 | mlog(0, "node %u already removed from nodemap!\n", idx); | ||
441 | else | ||
442 | clear_bit(idx, mle->node_map); | ||
443 | |||
444 | spin_unlock(&mle->spinlock); | ||
445 | } | ||
446 | |||
447 | static void dlm_mle_node_up(struct dlm_ctxt *dlm, | ||
448 | struct dlm_master_list_entry *mle, | ||
449 | struct o2nm_node *node, int idx) | ||
450 | { | ||
451 | spin_lock(&mle->spinlock); | ||
452 | |||
453 | if (test_bit(idx, mle->node_map)) | ||
454 | mlog(0, "node %u already in node map!\n", idx); | ||
455 | else | ||
456 | set_bit(idx, mle->node_map); | ||
457 | |||
458 | spin_unlock(&mle->spinlock); | ||
459 | } | ||
460 | |||
461 | |||
462 | int dlm_init_mle_cache(void) | ||
463 | { | ||
464 | dlm_mle_cache = kmem_cache_create("dlm_mle_cache", | ||
465 | sizeof(struct dlm_master_list_entry), | ||
466 | 0, SLAB_HWCACHE_ALIGN, | ||
467 | NULL, NULL); | ||
468 | if (dlm_mle_cache == NULL) | ||
469 | return -ENOMEM; | ||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | void dlm_destroy_mle_cache(void) | ||
474 | { | ||
475 | if (dlm_mle_cache) | ||
476 | kmem_cache_destroy(dlm_mle_cache); | ||
477 | } | ||
478 | |||
479 | static void dlm_mle_release(struct kref *kref) | ||
480 | { | ||
481 | struct dlm_master_list_entry *mle; | ||
482 | struct dlm_ctxt *dlm; | ||
483 | |||
484 | mlog_entry_void(); | ||
485 | |||
486 | mle = container_of(kref, struct dlm_master_list_entry, mle_refs); | ||
487 | dlm = mle->dlm; | ||
488 | |||
489 | if (mle->type != DLM_MLE_MASTER) { | ||
490 | mlog(0, "calling mle_release for %.*s, type %d\n", | ||
491 | mle->u.name.len, mle->u.name.name, mle->type); | ||
492 | } else { | ||
493 | mlog(0, "calling mle_release for %.*s, type %d\n", | ||
494 | mle->u.res->lockname.len, | ||
495 | mle->u.res->lockname.name, mle->type); | ||
496 | } | ||
497 | assert_spin_locked(&dlm->spinlock); | ||
498 | assert_spin_locked(&dlm->master_lock); | ||
499 | |||
500 | /* remove from list if not already */ | ||
501 | if (!list_empty(&mle->list)) | ||
502 | list_del_init(&mle->list); | ||
503 | |||
504 | /* detach the mle from the domain node up/down events */ | ||
505 | __dlm_mle_detach_hb_events(dlm, mle); | ||
506 | |||
507 | /* NOTE: kfree under spinlock here. | ||
508 | * if this is bad, we can move this to a freelist. */ | ||
509 | kmem_cache_free(dlm_mle_cache, mle); | ||
510 | } | ||
511 | |||
512 | |||
513 | /* | ||
514 | * LOCK RESOURCE FUNCTIONS | ||
515 | */ | ||
516 | |||
517 | static void dlm_set_lockres_owner(struct dlm_ctxt *dlm, | ||
518 | struct dlm_lock_resource *res, | ||
519 | u8 owner) | ||
520 | { | ||
521 | assert_spin_locked(&res->spinlock); | ||
522 | |||
523 | mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner); | ||
524 | |||
525 | if (owner == dlm->node_num) | ||
526 | atomic_inc(&dlm->local_resources); | ||
527 | else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN) | ||
528 | atomic_inc(&dlm->unknown_resources); | ||
529 | else | ||
530 | atomic_inc(&dlm->remote_resources); | ||
531 | |||
532 | res->owner = owner; | ||
533 | } | ||
534 | |||
535 | void dlm_change_lockres_owner(struct dlm_ctxt *dlm, | ||
536 | struct dlm_lock_resource *res, u8 owner) | ||
537 | { | ||
538 | assert_spin_locked(&res->spinlock); | ||
539 | |||
540 | if (owner == res->owner) | ||
541 | return; | ||
542 | |||
543 | if (res->owner == dlm->node_num) | ||
544 | atomic_dec(&dlm->local_resources); | ||
545 | else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) | ||
546 | atomic_dec(&dlm->unknown_resources); | ||
547 | else | ||
548 | atomic_dec(&dlm->remote_resources); | ||
549 | |||
550 | dlm_set_lockres_owner(dlm, res, owner); | ||
551 | } | ||
552 | |||
553 | |||
554 | static void dlm_lockres_release(struct kref *kref) | ||
555 | { | ||
556 | struct dlm_lock_resource *res; | ||
557 | |||
558 | res = container_of(kref, struct dlm_lock_resource, refs); | ||
559 | |||
560 | /* This should not happen -- all lockres structures have a name | ||
561 | * associated with them at init time. */ | ||
562 | BUG_ON(!res->lockname.name); | ||
563 | |||
564 | mlog(0, "destroying lockres %.*s\n", res->lockname.len, | ||
565 | res->lockname.name); | ||
566 | |||
567 | /* By the time we're ready to blow this guy away, we shouldn't | ||
568 | * be on any lists. */ | ||
569 | BUG_ON(!list_empty(&res->list)); | ||
570 | BUG_ON(!list_empty(&res->granted)); | ||
571 | BUG_ON(!list_empty(&res->converting)); | ||
572 | BUG_ON(!list_empty(&res->blocked)); | ||
573 | BUG_ON(!list_empty(&res->dirty)); | ||
574 | BUG_ON(!list_empty(&res->recovering)); | ||
575 | BUG_ON(!list_empty(&res->purge)); | ||
576 | |||
577 | kfree(res->lockname.name); | ||
578 | |||
579 | kfree(res); | ||
580 | } | ||
581 | |||
582 | void dlm_lockres_get(struct dlm_lock_resource *res) | ||
583 | { | ||
584 | kref_get(&res->refs); | ||
585 | } | ||
586 | |||
587 | void dlm_lockres_put(struct dlm_lock_resource *res) | ||
588 | { | ||
589 | kref_put(&res->refs, dlm_lockres_release); | ||
590 | } | ||
591 | |||
592 | static void dlm_init_lockres(struct dlm_ctxt *dlm, | ||
593 | struct dlm_lock_resource *res, | ||
594 | const char *name, unsigned int namelen) | ||
595 | { | ||
596 | char *qname; | ||
597 | |||
598 | /* If we memset here, we lose our reference to the kmalloc'd | ||
599 | * res->lockname.name, so be sure to init every field | ||
600 | * correctly! */ | ||
601 | |||
602 | qname = (char *) res->lockname.name; | ||
603 | memcpy(qname, name, namelen); | ||
604 | |||
605 | res->lockname.len = namelen; | ||
606 | res->lockname.hash = full_name_hash(name, namelen); | ||
607 | |||
608 | init_waitqueue_head(&res->wq); | ||
609 | spin_lock_init(&res->spinlock); | ||
610 | INIT_LIST_HEAD(&res->list); | ||
611 | INIT_LIST_HEAD(&res->granted); | ||
612 | INIT_LIST_HEAD(&res->converting); | ||
613 | INIT_LIST_HEAD(&res->blocked); | ||
614 | INIT_LIST_HEAD(&res->dirty); | ||
615 | INIT_LIST_HEAD(&res->recovering); | ||
616 | INIT_LIST_HEAD(&res->purge); | ||
617 | atomic_set(&res->asts_reserved, 0); | ||
618 | res->migration_pending = 0; | ||
619 | |||
620 | kref_init(&res->refs); | ||
621 | |||
622 | /* just for consistency */ | ||
623 | spin_lock(&res->spinlock); | ||
624 | dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
625 | spin_unlock(&res->spinlock); | ||
626 | |||
627 | res->state = DLM_LOCK_RES_IN_PROGRESS; | ||
628 | |||
629 | res->last_used = 0; | ||
630 | |||
631 | memset(res->lvb, 0, DLM_LVB_LEN); | ||
632 | } | ||
633 | |||
634 | struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | ||
635 | const char *name, | ||
636 | unsigned int namelen) | ||
637 | { | ||
638 | struct dlm_lock_resource *res; | ||
639 | |||
640 | res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL); | ||
641 | if (!res) | ||
642 | return NULL; | ||
643 | |||
644 | res->lockname.name = kmalloc(namelen, GFP_KERNEL); | ||
645 | if (!res->lockname.name) { | ||
646 | kfree(res); | ||
647 | return NULL; | ||
648 | } | ||
649 | |||
650 | dlm_init_lockres(dlm, res, name, namelen); | ||
651 | return res; | ||
652 | } | ||
653 | |||
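dlm_new_lockres() allocates the name buffer separately from the lockres itself, and both are freed together only when the kref in dlm_lockres_release() drops to zero. A runnable userspace miniature of that two-allocation, last-put-frees pattern (all names here are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct mini_res {
	int refs;	/* stand-in for the kref */
	char *name;	/* allocated separately, like res->lockname.name */
};

static struct mini_res *mini_new(const char *name)
{
	struct mini_res *res = malloc(sizeof(*res));
	if (!res)
		return NULL;
	res->name = strdup(name);
	if (!res->name) {	/* mirror dlm_new_lockres() error path */
		free(res);
		return NULL;
	}
	res->refs = 1;
	return res;
}

static void mini_put(struct mini_res *res)
{
	if (--res->refs == 0) {	/* "release": free name, then container */
		free(res->name);
		free(res);
	}
}

int main(void)
{
	struct mini_res *res = mini_new("example_lockid");
	res->refs++;		/* dlm_lockres_get() */
	mini_put(res);
	mini_put(res);		/* last put frees everything */
	return 0;
}
```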
654 | /* | ||
655 | * lookup a lock resource by name. | ||
656 | * it may already exist in the hashtable. | ||
657 | * lockid is NUL-terminated. | ||
658 | * | ||
659 | * if not, allocate enough for the lockres and for | ||
660 | * the temporary structure used in doing the mastering. | ||
661 | * | ||
662 | * also, do a lookup in the dlm->master_list to see | ||
663 | * if another node has begun mastering the same lock. | ||
664 | * if so, there should be a block entry in there | ||
665 | * for this name, and we should *not* attempt to master | ||
666 | * the lock here. need to wait around for that node | ||
667 | * to assert_master (or die). | ||
668 | * | ||
669 | */ | ||
670 | struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, | ||
671 | const char *lockid, | ||
672 | int flags) | ||
673 | { | ||
674 | struct dlm_lock_resource *tmpres=NULL, *res=NULL; | ||
675 | struct dlm_master_list_entry *mle = NULL; | ||
676 | struct dlm_master_list_entry *alloc_mle = NULL; | ||
677 | int blocked = 0; | ||
678 | int ret, nodenum; | ||
679 | struct dlm_node_iter iter; | ||
680 | unsigned int namelen; | ||
681 | int tries = 0; | ||
682 | |||
683 | BUG_ON(!lockid); | ||
684 | |||
685 | namelen = strlen(lockid); | ||
686 | |||
687 | mlog(0, "get lockres %s (len %d)\n", lockid, namelen); | ||
688 | |||
689 | lookup: | ||
690 | spin_lock(&dlm->spinlock); | ||
691 | tmpres = __dlm_lookup_lockres(dlm, lockid, namelen); | ||
692 | if (tmpres) { | ||
693 | spin_unlock(&dlm->spinlock); | ||
694 | mlog(0, "found in hash!\n"); | ||
695 | if (res) | ||
696 | dlm_lockres_put(res); | ||
697 | res = tmpres; | ||
698 | goto leave; | ||
699 | } | ||
700 | |||
701 | if (!res) { | ||
702 | spin_unlock(&dlm->spinlock); | ||
703 | mlog(0, "allocating a new resource\n"); | ||
704 | /* nothing found and we need to allocate one. */ | ||
705 | alloc_mle = (struct dlm_master_list_entry *) | ||
706 | kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); | ||
707 | if (!alloc_mle) | ||
708 | goto leave; | ||
709 | res = dlm_new_lockres(dlm, lockid, namelen); | ||
710 | if (!res) | ||
711 | goto leave; | ||
712 | goto lookup; | ||
713 | } | ||
714 | |||
715 | mlog(0, "no lockres found, allocated our own: %p\n", res); | ||
716 | |||
717 | if (flags & LKM_LOCAL) { | ||
718 | /* caller knows it's safe to assume it's not mastered elsewhere | ||
719 | * DONE! return right away */ | ||
720 | spin_lock(&res->spinlock); | ||
721 | dlm_change_lockres_owner(dlm, res, dlm->node_num); | ||
722 | __dlm_insert_lockres(dlm, res); | ||
723 | spin_unlock(&res->spinlock); | ||
724 | spin_unlock(&dlm->spinlock); | ||
725 | /* lockres still marked IN_PROGRESS */ | ||
726 | goto wake_waiters; | ||
727 | } | ||
728 | |||
729 | /* check master list to see if another node has started mastering it */ | ||
730 | spin_lock(&dlm->master_lock); | ||
731 | |||
732 | /* if we found a block, wait for lock to be mastered by another node */ | ||
733 | blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); | ||
734 | if (blocked) { | ||
735 | if (mle->type == DLM_MLE_MASTER) { | ||
736 | mlog(ML_ERROR, "master entry for nonexistent lock!\n"); | ||
737 | BUG(); | ||
738 | } else if (mle->type == DLM_MLE_MIGRATION) { | ||
739 | /* migration is in progress! */ | ||
740 | /* the good news is that we now know the | ||
741 | * "current" master (mle->master). */ | ||
742 | |||
743 | spin_unlock(&dlm->master_lock); | ||
744 | assert_spin_locked(&dlm->spinlock); | ||
745 | |||
746 | /* set the lockres owner and hash it */ | ||
747 | spin_lock(&res->spinlock); | ||
748 | dlm_set_lockres_owner(dlm, res, mle->master); | ||
749 | __dlm_insert_lockres(dlm, res); | ||
750 | spin_unlock(&res->spinlock); | ||
751 | spin_unlock(&dlm->spinlock); | ||
752 | |||
753 | /* master is known, detach */ | ||
754 | dlm_mle_detach_hb_events(dlm, mle); | ||
755 | dlm_put_mle(mle); | ||
756 | mle = NULL; | ||
757 | goto wake_waiters; | ||
758 | } | ||
759 | } else { | ||
760 | /* go ahead and try to master lock on this node */ | ||
761 | mle = alloc_mle; | ||
762 | /* make sure this does not get freed below */ | ||
763 | alloc_mle = NULL; | ||
764 | dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); | ||
765 | set_bit(dlm->node_num, mle->maybe_map); | ||
766 | list_add(&mle->list, &dlm->master_list); | ||
767 | } | ||
768 | |||
769 | /* at this point there is either a DLM_MLE_BLOCK or a | ||
770 | * DLM_MLE_MASTER on the master list, so it's safe to add the | ||
771 | * lockres to the hashtable. anyone who finds the lock will | ||
772 | * still have to wait on the IN_PROGRESS. */ | ||
773 | |||
774 | /* finally add the lockres to its hash bucket */ | ||
775 | __dlm_insert_lockres(dlm, res); | ||
776 | /* get an extra ref on the mle in case this is a BLOCK | ||
777 | * if so, the creator of the BLOCK may try to put the last | ||
778 | * ref at this time in the assert master handler, so we | ||
779 | * need an extra one to keep from a bad ptr deref. */ | ||
780 | dlm_get_mle(mle); | ||
781 | spin_unlock(&dlm->master_lock); | ||
782 | spin_unlock(&dlm->spinlock); | ||
783 | |||
784 | /* must wait for lock to be mastered elsewhere */ | ||
785 | if (blocked) | ||
786 | goto wait; | ||
787 | |||
788 | redo_request: | ||
789 | ret = -EINVAL; | ||
790 | dlm_node_iter_init(mle->vote_map, &iter); | ||
791 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
792 | ret = dlm_do_master_request(mle, nodenum); | ||
793 | if (ret < 0) | ||
794 | mlog_errno(ret); | ||
795 | if (mle->master != O2NM_MAX_NODES) { | ||
796 | /* found a master! */ | ||
797 | break; | ||
798 | } | ||
799 | } | ||
800 | |||
801 | wait: | ||
802 | /* keep going until the response map includes all nodes */ | ||
803 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); | ||
804 | if (ret < 0) { | ||
805 | mlog(0, "%s:%.*s: node map changed, redo the " | ||
806 | "master request now, blocked=%d\n", | ||
807 | dlm->name, res->lockname.len, | ||
808 | res->lockname.name, blocked); | ||
809 | if (++tries > 20) { | ||
810 | mlog(ML_ERROR, "%s:%.*s: spinning on " | ||
811 | "dlm_wait_for_lock_mastery, blocked=%d\n", | ||
812 | dlm->name, res->lockname.len, | ||
813 | res->lockname.name, blocked); | ||
814 | dlm_print_one_lock_resource(res); | ||
815 | /* dlm_print_one_mle(mle); */ | ||
816 | tries = 0; | ||
817 | } | ||
818 | goto redo_request; | ||
819 | } | ||
820 | |||
821 | mlog(0, "lockres mastered by %u\n", res->owner); | ||
822 | /* make sure we never continue without this */ | ||
823 | BUG_ON(res->owner == O2NM_MAX_NODES); | ||
824 | |||
825 | /* master is known, detach if not already detached */ | ||
826 | dlm_mle_detach_hb_events(dlm, mle); | ||
827 | dlm_put_mle(mle); | ||
828 | /* put the extra ref */ | ||
829 | dlm_put_mle(mle); | ||
830 | |||
831 | wake_waiters: | ||
832 | spin_lock(&res->spinlock); | ||
833 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
834 | spin_unlock(&res->spinlock); | ||
835 | wake_up(&res->wq); | ||
836 | |||
837 | leave: | ||
838 | /* need to free the unused mle */ | ||
839 | if (alloc_mle) | ||
840 | kmem_cache_free(dlm_mle_cache, alloc_mle); | ||
841 | |||
842 | return res; | ||
843 | } | ||
844 | |||
845 | |||
846 | #define DLM_MASTERY_TIMEOUT_MS 5000 | ||
847 | |||
848 | static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, | ||
849 | struct dlm_lock_resource *res, | ||
850 | struct dlm_master_list_entry *mle, | ||
851 | int *blocked) | ||
852 | { | ||
853 | u8 m; | ||
854 | int ret, bit; | ||
855 | int map_changed, voting_done; | ||
856 | int assert, sleep; | ||
857 | |||
858 | recheck: | ||
859 | ret = 0; | ||
860 | assert = 0; | ||
861 | |||
862 | /* check if another node has already become the owner */ | ||
863 | spin_lock(&res->spinlock); | ||
864 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
865 | spin_unlock(&res->spinlock); | ||
866 | goto leave; | ||
867 | } | ||
868 | spin_unlock(&res->spinlock); | ||
869 | |||
870 | spin_lock(&mle->spinlock); | ||
871 | m = mle->master; | ||
872 | map_changed = (memcmp(mle->vote_map, mle->node_map, | ||
873 | sizeof(mle->vote_map)) != 0); | ||
874 | voting_done = (memcmp(mle->vote_map, mle->response_map, | ||
875 | sizeof(mle->vote_map)) == 0); | ||
876 | |||
877 | /* restart if we hit any errors */ | ||
878 | if (map_changed) { | ||
879 | int b; | ||
880 | mlog(0, "%s: %.*s: node map changed, restarting\n", | ||
881 | dlm->name, res->lockname.len, res->lockname.name); | ||
882 | ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); | ||
883 | b = (mle->type == DLM_MLE_BLOCK); | ||
884 | if ((*blocked && !b) || (!*blocked && b)) { | ||
885 | mlog(0, "%s:%.*s: status change: old=%d new=%d\n", | ||
886 | dlm->name, res->lockname.len, res->lockname.name, | ||
887 | *blocked, b); | ||
888 | *blocked = b; | ||
889 | } | ||
890 | spin_unlock(&mle->spinlock); | ||
891 | if (ret < 0) { | ||
892 | mlog_errno(ret); | ||
893 | goto leave; | ||
894 | } | ||
895 | mlog(0, "%s:%.*s: restart lock mastery succeeded, " | ||
896 | "rechecking now\n", dlm->name, res->lockname.len, | ||
897 | res->lockname.name); | ||
898 | goto recheck; | ||
899 | } | ||
900 | |||
901 | if (m != O2NM_MAX_NODES) { | ||
902 | /* another node has done an assert! | ||
903 | * all done! */ | ||
904 | sleep = 0; | ||
905 | } else { | ||
906 | sleep = 1; | ||
907 | /* have all nodes responded? */ | ||
908 | if (voting_done && !*blocked) { | ||
909 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | ||
910 | if (dlm->node_num <= bit) { | ||
911 | /* my node number is lowest. | ||
912 | * now tell other nodes that I am | ||
913 | * mastering this. */ | ||
914 | mle->master = dlm->node_num; | ||
915 | assert = 1; | ||
916 | sleep = 0; | ||
917 | } | ||
918 | /* if voting is done, but we have not received | ||
919 | * an assert master yet, we must sleep */ | ||
920 | } | ||
921 | } | ||
922 | |||
923 | spin_unlock(&mle->spinlock); | ||
924 | |||
925 | /* sleep if we haven't finished voting yet */ | ||
926 | if (sleep) { | ||
927 | unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); | ||
928 | |||
929 | /* | ||
930 | if (atomic_read(&mle->mle_refs.refcount) < 2) | ||
931 | mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, | ||
932 | atomic_read(&mle->mle_refs.refcount), | ||
933 | res->lockname.len, res->lockname.name); | ||
934 | */ | ||
935 | atomic_set(&mle->woken, 0); | ||
936 | (void)wait_event_timeout(mle->wq, | ||
937 | (atomic_read(&mle->woken) == 1), | ||
938 | timeo); | ||
939 | if (res->owner == O2NM_MAX_NODES) { | ||
940 | mlog(0, "waiting again\n"); | ||
941 | goto recheck; | ||
942 | } | ||
943 | mlog(0, "done waiting, master is %u\n", res->owner); | ||
944 | ret = 0; | ||
945 | goto leave; | ||
946 | } | ||
947 | |||
948 | ret = 0; /* done */ | ||
949 | if (assert) { | ||
950 | m = dlm->node_num; | ||
951 | mlog(0, "about to master %.*s here, this=%u\n", | ||
952 | res->lockname.len, res->lockname.name, m); | ||
953 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
954 | res->lockname.len, mle->vote_map, 0); | ||
955 | if (ret) { | ||
956 | /* This is a failure in the network path, | ||
957 | * not in the response to the assert_master | ||
958 | * (any nonzero response is a BUG on this node). | ||
959 | * Most likely a socket just got disconnected | ||
960 | * due to node death. */ | ||
961 | mlog_errno(ret); | ||
962 | } | ||
963 | /* no longer need to restart lock mastery. | ||
964 | * all living nodes have been contacted. */ | ||
965 | ret = 0; | ||
966 | } | ||
967 | |||
968 | /* set the lockres owner */ | ||
969 | spin_lock(&res->spinlock); | ||
970 | dlm_change_lockres_owner(dlm, res, m); | ||
971 | spin_unlock(&res->spinlock); | ||
972 | |||
973 | leave: | ||
974 | return ret; | ||
975 | } | ||
976 | |||
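The election rule in the function above: once every node in vote_map has responded and no assert has arrived, the lowest-numbered node in maybe_map claims mastery ("if (dlm->node_num <= bit)"); everyone else sleeps and waits for that node's assert_master. A runnable sketch of the rule for a single-word map (up to BITS_PER_LONG nodes):

```c
#include <stdio.h>

static int lowest_set_bit(unsigned long map)
{
	unsigned int i;
	for (i = 0; i < 8 * sizeof(map); i++)
		if (map & (1UL << i))
			return (int)i;
	return (int)(8 * sizeof(map));	/* none set */
}

int main(void)
{
	unsigned long maybe_map = (1UL << 2) | (1UL << 5) | (1UL << 7);
	int my_node = 2;
	int bit = lowest_set_bit(maybe_map);

	/* mirrors: if (dlm->node_num <= bit) mle->master = dlm->node_num */
	if (my_node <= bit)
		printf("node %d asserts mastery\n", my_node);
	else
		printf("node %d waits for node %d to assert\n", my_node, bit);
	return 0;
}
```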
977 | struct dlm_bitmap_diff_iter | ||
978 | { | ||
979 | int curnode; | ||
980 | unsigned long *orig_bm; | ||
981 | unsigned long *cur_bm; | ||
982 | unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
983 | }; | ||
984 | |||
985 | enum dlm_node_state_change | ||
986 | { | ||
987 | NODE_DOWN = -1, | ||
988 | NODE_NO_CHANGE = 0, | ||
989 | NODE_UP | ||
990 | }; | ||
991 | |||
992 | static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, | ||
993 | unsigned long *orig_bm, | ||
994 | unsigned long *cur_bm) | ||
995 | { | ||
996 | unsigned long p1, p2; | ||
997 | int i; | ||
998 | |||
999 | iter->curnode = -1; | ||
1000 | iter->orig_bm = orig_bm; | ||
1001 | iter->cur_bm = cur_bm; | ||
1002 | |||
1003 | for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { | ||
1004 | p1 = *(iter->orig_bm + i); | ||
1005 | p2 = *(iter->cur_bm + i); | ||
1006 | iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); | ||
1007 | } | ||
1008 | } | ||
1009 | |||
1010 | static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, | ||
1011 | enum dlm_node_state_change *state) | ||
1012 | { | ||
1013 | int bit; | ||
1014 | |||
1015 | if (iter->curnode >= O2NM_MAX_NODES) | ||
1016 | return -ENOENT; | ||
1017 | |||
1018 | bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, | ||
1019 | iter->curnode+1); | ||
1020 | if (bit >= O2NM_MAX_NODES) { | ||
1021 | iter->curnode = O2NM_MAX_NODES; | ||
1022 | return -ENOENT; | ||
1023 | } | ||
1024 | |||
1025 | /* if it was there in the original then this node died */ | ||
1026 | if (test_bit(bit, iter->orig_bm)) | ||
1027 | *state = NODE_DOWN; | ||
1028 | else | ||
1029 | *state = NODE_UP; | ||
1030 | |||
1031 | iter->curnode = bit; | ||
1032 | return bit; | ||
1033 | } | ||
1034 | |||
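The diff map computed above, (p1 & ~p2) | (p2 & ~p1), is simply the symmetric difference p1 ^ p2; the iterator then classifies each changed bit by whether it was set in the original map (node went down) or only in the current one (node came up). A runnable demonstration of the same logic:

```c
#include <stdio.h>

int main(void)
{
	unsigned long orig = 0x2CUL;	/* nodes 2, 3, 5 were up */
	unsigned long cur  = 0x4CUL;	/* nodes 2, 3, 6 are up now */
	unsigned long diff = orig ^ cur;
	int bit;

	for (bit = 0; bit < 8; bit++)
		if (diff & (1UL << bit))
			printf("node %d went %s\n", bit,
			       (orig & (1UL << bit)) ? "DOWN" : "UP");
	return 0;	/* prints: node 5 went DOWN / node 6 went UP */
}
```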
1035 | |||
1036 | static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | ||
1037 | struct dlm_lock_resource *res, | ||
1038 | struct dlm_master_list_entry *mle, | ||
1039 | int blocked) | ||
1040 | { | ||
1041 | struct dlm_bitmap_diff_iter bdi; | ||
1042 | enum dlm_node_state_change sc; | ||
1043 | int node; | ||
1044 | int ret = 0; | ||
1045 | |||
1046 | mlog(0, "something happened such that the " | ||
1047 | "mastery process may need to be restarted!\n"); | ||
1048 | |||
1049 | assert_spin_locked(&mle->spinlock); | ||
1050 | |||
1051 | dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); | ||
1052 | node = dlm_bitmap_diff_iter_next(&bdi, &sc); | ||
1053 | while (node >= 0) { | ||
1054 | if (sc == NODE_UP) { | ||
1055 | /* a node came up. easy. might not even need | ||
1056 | * to talk to it if its node number is higher | ||
1057 | * or if we are already blocked. */ | ||
1058 | mlog(0, "node up! %d\n", node); | ||
1059 | if (blocked) | ||
1060 | goto next; | ||
1061 | |||
1062 | if (node > dlm->node_num) { | ||
1063 | mlog(0, "node > this node. skipping.\n"); | ||
1064 | goto next; | ||
1065 | } | ||
1066 | |||
1067 | /* redo the master request, but only for the new node */ | ||
1068 | mlog(0, "sending request to new node\n"); | ||
1069 | clear_bit(node, mle->response_map); | ||
1070 | set_bit(node, mle->vote_map); | ||
1071 | } else { | ||
1072 | mlog(ML_ERROR, "node down! %d\n", node); | ||
1073 | |||
1074 | /* if the node wasn't involved in mastery skip it, | ||
1075 | * but clear it out from the maps so that it will | ||
1076 | * not affect mastery of this lockres */ | ||
1077 | clear_bit(node, mle->response_map); | ||
1078 | clear_bit(node, mle->vote_map); | ||
1079 | if (!test_bit(node, mle->maybe_map)) | ||
1080 | goto next; | ||
1081 | |||
1082 | /* if we're already blocked on lock mastery, and the | ||
1083 | * dead node wasn't the expected master, or there is | ||
1084 | * another node in the maybe_map, keep waiting */ | ||
1085 | if (blocked) { | ||
1086 | int lowest = find_next_bit(mle->maybe_map, | ||
1087 | O2NM_MAX_NODES, 0); | ||
1088 | |||
1089 | /* act like it was never there */ | ||
1090 | clear_bit(node, mle->maybe_map); | ||
1091 | |||
1092 | if (node != lowest) | ||
1093 | goto next; | ||
1094 | |||
1095 | mlog(ML_ERROR, "expected master %u died while " | ||
1096 | "this node was blocked waiting on it!\n", | ||
1097 | node); | ||
1098 | lowest = find_next_bit(mle->maybe_map, | ||
1099 | O2NM_MAX_NODES, | ||
1100 | lowest+1); | ||
1101 | if (lowest < O2NM_MAX_NODES) { | ||
1102 | mlog(0, "still blocked. waiting " | ||
1103 | "on %u now\n", lowest); | ||
1104 | goto next; | ||
1105 | } | ||
1106 | |||
1107 | /* mle is an MLE_BLOCK, but there is now | ||
1108 | * nothing left to block on. we need to return | ||
1109 | * all the way back out and try again with | ||
1110 | * an MLE_MASTER. dlm_do_local_recovery_cleanup | ||
1111 | * has already run, so the mle refcount is ok */ | ||
1112 | mlog(0, "no longer blocking. we can " | ||
1113 | "try to master this here\n"); | ||
1114 | mle->type = DLM_MLE_MASTER; | ||
1115 | memset(mle->maybe_map, 0, | ||
1116 | sizeof(mle->maybe_map)); | ||
1117 | memset(mle->response_map, 0, | ||
1118 | sizeof(mle->response_map)); | ||
1119 | memcpy(mle->vote_map, mle->node_map, | ||
1120 | sizeof(mle->node_map)); | ||
1121 | mle->u.res = res; | ||
1122 | set_bit(dlm->node_num, mle->maybe_map); | ||
1123 | |||
1124 | ret = -EAGAIN; | ||
1125 | goto next; | ||
1126 | } | ||
1127 | |||
1128 | clear_bit(node, mle->maybe_map); | ||
1129 | if (node > dlm->node_num) | ||
1130 | goto next; | ||
1131 | |||
1132 | mlog(0, "dead node in map!\n"); | ||
1133 | /* yuck. go back and re-contact all nodes | ||
1134 | * in the vote_map, removing this node. */ | ||
1135 | memset(mle->response_map, 0, | ||
1136 | sizeof(mle->response_map)); | ||
1137 | } | ||
1138 | ret = -EAGAIN; | ||
1139 | next: | ||
1140 | node = dlm_bitmap_diff_iter_next(&bdi, &sc); | ||
1141 | } | ||
1142 | return ret; | ||
1143 | } | ||
1144 | |||
1145 | |||
1146 | /* | ||
1147 | * DLM_MASTER_REQUEST_MSG | ||
1148 | * | ||
1149 | * returns: 0 on success, | ||
1150 | * -errno on a network error | ||
1151 | * | ||
1152 | * on error, the caller should assume the target node is "dead" | ||
1153 | * | ||
1154 | */ | ||
1155 | |||
1156 | static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to) | ||
1157 | { | ||
1158 | struct dlm_ctxt *dlm = mle->dlm; | ||
1159 | struct dlm_master_request request; | ||
1160 | int ret, response=0, resend; | ||
1161 | |||
1162 | memset(&request, 0, sizeof(request)); | ||
1163 | request.node_idx = dlm->node_num; | ||
1164 | |||
1165 | BUG_ON(mle->type == DLM_MLE_MIGRATION); | ||
1166 | |||
1167 | if (mle->type != DLM_MLE_MASTER) { | ||
1168 | request.namelen = mle->u.name.len; | ||
1169 | memcpy(request.name, mle->u.name.name, request.namelen); | ||
1170 | } else { | ||
1171 | request.namelen = mle->u.res->lockname.len; | ||
1172 | memcpy(request.name, mle->u.res->lockname.name, | ||
1173 | request.namelen); | ||
1174 | } | ||
1175 | |||
1176 | again: | ||
1177 | ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, | ||
1178 | sizeof(request), to, &response); | ||
1179 | if (ret < 0) { | ||
1180 | if (ret == -ESRCH) { | ||
1181 | /* should never happen */ | ||
1182 | mlog(ML_ERROR, "TCP stack not ready!\n"); | ||
1183 | BUG(); | ||
1184 | } else if (ret == -EINVAL) { | ||
1185 | mlog(ML_ERROR, "bad args passed to o2net!\n"); | ||
1186 | BUG(); | ||
1187 | } else if (ret == -ENOMEM) { | ||
1188 | mlog(ML_ERROR, "out of memory while trying to send " | ||
1189 | "network message! retrying\n"); | ||
1190 | /* this is totally crude */ | ||
1191 | msleep(50); | ||
1192 | goto again; | ||
1193 | } else if (!dlm_is_host_down(ret)) { | ||
1194 | /* not a network error. bad. */ | ||
1195 | mlog_errno(ret); | ||
1196 | mlog(ML_ERROR, "unhandled error!\n"); | ||
1197 | BUG(); | ||
1198 | } | ||
1199 | /* all other errors should be network errors, | ||
1200 | * and likely indicate node death */ | ||
1201 | mlog(ML_ERROR, "link to %d went down!\n", to); | ||
1202 | goto out; | ||
1203 | } | ||
1204 | |||
1205 | ret = 0; | ||
1206 | resend = 0; | ||
1207 | spin_lock(&mle->spinlock); | ||
1208 | switch (response) { | ||
1209 | case DLM_MASTER_RESP_YES: | ||
1210 | set_bit(to, mle->response_map); | ||
1211 | mlog(0, "node %u is the master, response=YES\n", to); | ||
1212 | mle->master = to; | ||
1213 | break; | ||
1214 | case DLM_MASTER_RESP_NO: | ||
1215 | mlog(0, "node %u not master, response=NO\n", to); | ||
1216 | set_bit(to, mle->response_map); | ||
1217 | break; | ||
1218 | case DLM_MASTER_RESP_MAYBE: | ||
1219 | mlog(0, "node %u not master, response=MAYBE\n", to); | ||
1220 | set_bit(to, mle->response_map); | ||
1221 | set_bit(to, mle->maybe_map); | ||
1222 | break; | ||
1223 | case DLM_MASTER_RESP_ERROR: | ||
1224 | mlog(0, "node %u hit an error, resending\n", to); | ||
1225 | resend = 1; | ||
1226 | response = 0; | ||
1227 | break; | ||
1228 | default: | ||
1229 | mlog(ML_ERROR, "bad response! %u\n", response); | ||
1230 | BUG(); | ||
1231 | } | ||
1232 | spin_unlock(&mle->spinlock); | ||
1233 | if (resend) { | ||
1234 | /* this is also totally crude */ | ||
1235 | msleep(50); | ||
1236 | goto again; | ||
1237 | } | ||
1238 | |||
1239 | out: | ||
1240 | return ret; | ||
1241 | } | ||
1242 | |||
1243 | /* | ||
1244 | * locks that can be taken here: | ||
1245 | * dlm->spinlock | ||
1246 | * res->spinlock | ||
1247 | * mle->spinlock | ||
1248 | * dlm->master_list | ||
1249 | * | ||
1250 | * if possible, TRIM THIS DOWN!!! | ||
1251 | */ | ||
1252 | int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1253 | { | ||
1254 | u8 response = DLM_MASTER_RESP_MAYBE; | ||
1255 | struct dlm_ctxt *dlm = data; | ||
1256 | struct dlm_lock_resource *res; | ||
1257 | struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; | ||
1258 | struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; | ||
1259 | char *name; | ||
1260 | unsigned int namelen; | ||
1261 | int found, ret; | ||
1262 | int set_maybe; | ||
1263 | |||
1264 | if (!dlm_grab(dlm)) | ||
1265 | return DLM_MASTER_RESP_NO; | ||
1266 | |||
1267 | if (!dlm_domain_fully_joined(dlm)) { | ||
1268 | response = DLM_MASTER_RESP_NO; | ||
1269 | goto send_response; | ||
1270 | } | ||
1271 | |||
1272 | name = request->name; | ||
1273 | namelen = request->namelen; | ||
1274 | |||
1275 | if (namelen > DLM_LOCKID_NAME_MAX) { | ||
1276 | response = DLM_IVBUFLEN; | ||
1277 | goto send_response; | ||
1278 | } | ||
1279 | |||
1280 | way_up_top: | ||
1281 | spin_lock(&dlm->spinlock); | ||
1282 | res = __dlm_lookup_lockres(dlm, name, namelen); | ||
1283 | if (res) { | ||
1284 | spin_unlock(&dlm->spinlock); | ||
1285 | |||
1286 | /* take care of the easy cases up front */ | ||
1287 | spin_lock(&res->spinlock); | ||
1288 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1289 | spin_unlock(&res->spinlock); | ||
1290 | mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " | ||
1291 | "being recovered\n"); | ||
1292 | response = DLM_MASTER_RESP_ERROR; | ||
1293 | if (mle) | ||
1294 | kmem_cache_free(dlm_mle_cache, mle); | ||
1295 | goto send_response; | ||
1296 | } | ||
1297 | |||
1298 | if (res->owner == dlm->node_num) { | ||
1299 | u32 flags = DLM_ASSERT_MASTER_MLE_CLEANUP; | ||
1300 | spin_unlock(&res->spinlock); | ||
1301 | // mlog(0, "this node is the master\n"); | ||
1302 | response = DLM_MASTER_RESP_YES; | ||
1303 | if (mle) | ||
1304 | kmem_cache_free(dlm_mle_cache, mle); | ||
1305 | |||
1306 | /* this node is the owner. | ||
1307 | * there is some extra work that needs to | ||
1308 | * happen now. the requesting node has | ||
1309 | * caused all nodes up to this one to | ||
1310 | * create mles. this node now needs to | ||
1311 | * go back and clean those up. */ | ||
1312 | mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", | ||
1313 | dlm->node_num, res->lockname.len, res->lockname.name); | ||
1314 | ret = dlm_dispatch_assert_master(dlm, res, 1, | ||
1315 | request->node_idx, | ||
1316 | flags); | ||
1317 | if (ret < 0) { | ||
1318 | mlog(ML_ERROR, "failed to dispatch assert " | ||
1319 | "master work\n"); | ||
1320 | response = DLM_MASTER_RESP_ERROR; | ||
1321 | } | ||
1322 | goto send_response; | ||
1323 | } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1324 | spin_unlock(&res->spinlock); | ||
1325 | // mlog(0, "node %u is the master\n", res->owner); | ||
1326 | response = DLM_MASTER_RESP_NO; | ||
1327 | if (mle) | ||
1328 | kmem_cache_free(dlm_mle_cache, mle); | ||
1329 | goto send_response; | ||
1330 | } | ||
1331 | |||
1332 | /* ok, there is no owner. either this node is | ||
1333 | * being blocked, or it is actively trying to | ||
1334 | * master this lock. */ | ||
1335 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | ||
1336 | mlog(ML_ERROR, "lock with no owner should be " | ||
1337 | "in-progress!\n"); | ||
1338 | BUG(); | ||
1339 | } | ||
1340 | |||
1341 | // mlog(0, "lockres is in progress...\n"); | ||
1342 | spin_lock(&dlm->master_lock); | ||
1343 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | ||
1344 | if (!found) { | ||
1345 | mlog(ML_ERROR, "no mle found for this lock!\n"); | ||
1346 | BUG(); | ||
1347 | } | ||
1348 | set_maybe = 1; | ||
1349 | spin_lock(&tmpmle->spinlock); | ||
1350 | if (tmpmle->type == DLM_MLE_BLOCK) { | ||
1351 | // mlog(0, "this node is waiting for " | ||
1352 | // "lockres to be mastered\n"); | ||
1353 | response = DLM_MASTER_RESP_NO; | ||
1354 | } else if (tmpmle->type == DLM_MLE_MIGRATION) { | ||
1355 | mlog(0, "node %u is master, but trying to migrate to " | ||
1356 | "node %u.\n", tmpmle->master, tmpmle->new_master); | ||
1357 | if (tmpmle->master == dlm->node_num) { | ||
1358 | response = DLM_MASTER_RESP_YES; | ||
1359 | mlog(ML_ERROR, "no owner on lockres, but this " | ||
1360 | "node is trying to migrate it to %u?!\n", | ||
1361 | tmpmle->new_master); | ||
1362 | BUG(); | ||
1363 | } else { | ||
1364 | /* the real master can respond on its own */ | ||
1365 | response = DLM_MASTER_RESP_NO; | ||
1366 | } | ||
1367 | } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1368 | set_maybe = 0; | ||
1369 | if (tmpmle->master == dlm->node_num) | ||
1370 | response = DLM_MASTER_RESP_YES; | ||
1371 | else | ||
1372 | response = DLM_MASTER_RESP_NO; | ||
1373 | } else { | ||
1374 | // mlog(0, "this node is attempting to " | ||
1375 | // "master lockres\n"); | ||
1376 | response = DLM_MASTER_RESP_MAYBE; | ||
1377 | } | ||
1378 | if (set_maybe) | ||
1379 | set_bit(request->node_idx, tmpmle->maybe_map); | ||
1380 | spin_unlock(&tmpmle->spinlock); | ||
1381 | |||
1382 | spin_unlock(&dlm->master_lock); | ||
1383 | spin_unlock(&res->spinlock); | ||
1384 | |||
1385 | /* keep the mle attached to heartbeat events */ | ||
1386 | dlm_put_mle(tmpmle); | ||
1387 | if (mle) | ||
1388 | kmem_cache_free(dlm_mle_cache, mle); | ||
1389 | goto send_response; | ||
1390 | } | ||
1391 | |||
1392 | /* | ||
1393 | * lockres doesn't exist on this node | ||
1394 | * if there is an MLE_BLOCK, return NO | ||
1395 | * if there is an MLE_MASTER, return MAYBE | ||
1396 | * otherwise, add an MLE_BLOCK, return NO | ||
1397 | */ | ||
1398 | spin_lock(&dlm->master_lock); | ||
1399 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | ||
1400 | if (!found) { | ||
1401 | /* this lockid has never been seen on this node yet */ | ||
1402 | // mlog(0, "no mle found\n"); | ||
1403 | if (!mle) { | ||
1404 | spin_unlock(&dlm->master_lock); | ||
1405 | spin_unlock(&dlm->spinlock); | ||
1406 | |||
1407 | mle = (struct dlm_master_list_entry *) | ||
1408 | kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); | ||
1409 | if (!mle) { | ||
1410 | // allocation failed; respond ERROR so the requester retries | ||
1411 | response = DLM_MASTER_RESP_ERROR; | ||
1412 | goto send_response; | ||
1413 | } | ||
1414 | spin_lock(&dlm->spinlock); | ||
1415 | dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, | ||
1416 | name, namelen); | ||
1417 | spin_unlock(&dlm->spinlock); | ||
1418 | goto way_up_top; | ||
1419 | } | ||
1420 | |||
1421 | // mlog(0, "this is second time thru, already allocated, " | ||
1422 | // "add the block.\n"); | ||
1423 | set_bit(request->node_idx, mle->maybe_map); | ||
1424 | list_add(&mle->list, &dlm->master_list); | ||
1425 | response = DLM_MASTER_RESP_NO; | ||
1426 | } else { | ||
1427 | // mlog(0, "mle was found\n"); | ||
1428 | set_maybe = 1; | ||
1429 | spin_lock(&tmpmle->spinlock); | ||
1430 | if (tmpmle->type == DLM_MLE_BLOCK) | ||
1431 | response = DLM_MASTER_RESP_NO; | ||
1432 | else if (tmpmle->type == DLM_MLE_MIGRATION) { | ||
1433 | mlog(0, "migration mle was found (%u->%u)\n", | ||
1434 | tmpmle->master, tmpmle->new_master); | ||
1435 | if (tmpmle->master == dlm->node_num) { | ||
1436 | mlog(ML_ERROR, "no lockres, but migration mle " | ||
1437 | "says that this node is master!\n"); | ||
1438 | BUG(); | ||
1439 | } | ||
1440 | /* real master can respond on its own */ | ||
1441 | response = DLM_MASTER_RESP_NO; | ||
1442 | } else { | ||
1443 | if (tmpmle->master == dlm->node_num) { | ||
1444 | response = DLM_MASTER_RESP_YES; | ||
1445 | set_maybe = 0; | ||
1446 | } else | ||
1447 | response = DLM_MASTER_RESP_MAYBE; | ||
1448 | } | ||
1449 | if (set_maybe) | ||
1450 | set_bit(request->node_idx, tmpmle->maybe_map); | ||
1451 | spin_unlock(&tmpmle->spinlock); | ||
1452 | } | ||
1453 | spin_unlock(&dlm->master_lock); | ||
1454 | spin_unlock(&dlm->spinlock); | ||
1455 | |||
1456 | if (found) { | ||
1457 | /* keep the mle attached to heartbeat events */ | ||
1458 | dlm_put_mle(tmpmle); | ||
1459 | } | ||
1460 | send_response: | ||
1461 | dlm_put(dlm); | ||
1462 | return response; | ||
1463 | } | ||
1464 | |||
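Condensing the handler above: a node answers YES only when it already owns the lockres (or its in-progress MLE says it won), MAYBE when it is itself racing for mastery, and NO otherwise (blocked, migration pending, someone else owns it, or this is the first sighting and an MLE_BLOCK gets created). A runnable sketch of the no-lockres branch; the enum and function names here are illustrative, not kernel API:

```c
#include <stdio.h>

enum mle_type { MLE_NONE, MLE_BLOCK, MLE_IN_PROGRESS, MLE_MIGRATION };
enum resp { RESP_NO, RESP_MAYBE, RESP_YES };

static enum resp master_request(enum mle_type t, int i_am_known_master)
{
	if (i_am_known_master)
		return RESP_YES;	/* plus a dispatched assert_master */
	switch (t) {
	case MLE_NONE:			/* first sighting: create MLE_BLOCK */
	case MLE_BLOCK:			/* already waiting on someone else */
	case MLE_MIGRATION:		/* real master answers for itself */
		return RESP_NO;
	default:			/* MLE_IN_PROGRESS: racing too */
		return RESP_MAYBE;
	}
}

int main(void)
{
	printf("%d %d\n", master_request(MLE_NONE, 0),
	       master_request(MLE_IN_PROGRESS, 0)); /* "0 1": NO, MAYBE */
	return 0;
}
```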
1465 | /* | ||
1466 | * DLM_ASSERT_MASTER_MSG | ||
1467 | */ | ||
1468 | |||
1469 | |||
1470 | /* | ||
1471 | * NOTE: this can be used for debugging | ||
1472 | * can periodically run all locks owned by this node | ||
1473 | * and re-assert across the cluster... | ||
1474 | */ | ||
1475 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname, | ||
1476 | unsigned int namelen, void *nodemap, | ||
1477 | u32 flags) | ||
1478 | { | ||
1479 | struct dlm_assert_master assert; | ||
1480 | int to, tmpret; | ||
1481 | struct dlm_node_iter iter; | ||
1482 | int ret = 0; | ||
1483 | |||
1484 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | ||
1485 | |||
1486 | /* note that if this nodemap is empty, it returns 0 */ | ||
1487 | dlm_node_iter_init(nodemap, &iter); | ||
1488 | while ((to = dlm_node_iter_next(&iter)) >= 0) { | ||
1489 | int r = 0; | ||
1490 | mlog(0, "sending assert master to %d (%.*s)\n", to, | ||
1491 | namelen, lockname); | ||
1492 | memset(&assert, 0, sizeof(assert)); | ||
1493 | assert.node_idx = dlm->node_num; | ||
1494 | assert.namelen = namelen; | ||
1495 | memcpy(assert.name, lockname, namelen); | ||
1496 | assert.flags = cpu_to_be32(flags); | ||
1497 | |||
1498 | tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, | ||
1499 | &assert, sizeof(assert), to, &r); | ||
1500 | if (tmpret < 0) { | ||
1501 | mlog(ML_ERROR, "assert_master returned %d!\n", tmpret); | ||
1502 | if (!dlm_is_host_down(tmpret)) { | ||
1503 | mlog(ML_ERROR, "unhandled error!\n"); | ||
1504 | BUG(); | ||
1505 | } | ||
1506 | /* a node died. finish out the rest of the nodes. */ | ||
1507 | mlog(ML_ERROR, "link to %d went down!\n", to); | ||
1508 | /* any nonzero status return will do */ | ||
1509 | ret = tmpret; | ||
1510 | } else if (r < 0) { | ||
1511 | /* ok, something is horribly messed up. kill thyself. */ | ||
1512 | mlog(ML_ERROR,"during assert master of %.*s to %u, " | ||
1513 | "got %d.\n", namelen, lockname, to, r); | ||
1514 | dlm_dump_lock_resources(dlm); | ||
1515 | BUG(); | ||
1516 | } | ||
1517 | } | ||
1518 | |||
1519 | return ret; | ||
1520 | } | ||
1521 | |||
1522 | /* | ||
1523 | * locks that can be taken here: | ||
1524 | * dlm->spinlock | ||
1525 | * res->spinlock | ||
1526 | * mle->spinlock | ||
1527 | * dlm->master_list | ||
1528 | * | ||
1529 | * if possible, TRIM THIS DOWN!!! | ||
1530 | */ | ||
1531 | int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1532 | { | ||
1533 | struct dlm_ctxt *dlm = data; | ||
1534 | struct dlm_master_list_entry *mle = NULL; | ||
1535 | struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; | ||
1536 | struct dlm_lock_resource *res = NULL; | ||
1537 | char *name; | ||
1538 | unsigned int namelen; | ||
1539 | u32 flags; | ||
1540 | |||
1541 | if (!dlm_grab(dlm)) | ||
1542 | return 0; | ||
1543 | |||
1544 | name = assert->name; | ||
1545 | namelen = assert->namelen; | ||
1546 | flags = be32_to_cpu(assert->flags); | ||
1547 | |||
1548 | if (namelen > DLM_LOCKID_NAME_MAX) { | ||
1549 | mlog(ML_ERROR, "Invalid name length!"); | ||
1550 | goto done; | ||
1551 | } | ||
1552 | |||
1553 | spin_lock(&dlm->spinlock); | ||
1554 | |||
1555 | if (flags) | ||
1556 | mlog(0, "assert_master with flags: %u\n", flags); | ||
1557 | |||
1558 | /* find the MLE */ | ||
1559 | spin_lock(&dlm->master_lock); | ||
1560 | if (!dlm_find_mle(dlm, &mle, name, namelen)) { | ||
1561 | /* not an error, could be master just re-asserting */ | ||
1562 | mlog(0, "just got an assert_master from %u, but no " | ||
1563 | "MLE for it! (%.*s)\n", assert->node_idx, | ||
1564 | namelen, name); | ||
1565 | } else { | ||
1566 | int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | ||
1567 | if (bit >= O2NM_MAX_NODES) { | ||
1568 | /* not necessarily an error, though less likely. | ||
1569 | * could be master just re-asserting. */ | ||
1570 | mlog(ML_ERROR, "no bits set in the maybe_map, but %u " | ||
1571 | "is asserting! (%.*s)\n", assert->node_idx, | ||
1572 | namelen, name); | ||
1573 | } else if (bit != assert->node_idx) { | ||
1574 | if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { | ||
1575 | mlog(0, "master %u was found, %u should " | ||
1576 | "back off\n", assert->node_idx, bit); | ||
1577 | } else { | ||
1578 | /* with the fix for bug 569, a higher node | ||
1579 | * number winning the mastery will respond | ||
1580 | * YES to mastery requests, but this node | ||
1581 | * had no way of knowing. let it pass. */ | ||
1582 | mlog(ML_ERROR, "%u is the lowest node, " | ||
1583 | "%u is asserting. (%.*s) %u must " | ||
1584 | "have begun after %u won.\n", bit, | ||
1585 | assert->node_idx, namelen, name, bit, | ||
1586 | assert->node_idx); | ||
1587 | } | ||
1588 | } | ||
1589 | } | ||
1590 | spin_unlock(&dlm->master_lock); | ||
1591 | |||
1592 | /* ok everything checks out with the MLE | ||
1593 | * now check to see if there is a lockres */ | ||
1594 | res = __dlm_lookup_lockres(dlm, name, namelen); | ||
1595 | if (res) { | ||
1596 | spin_lock(&res->spinlock); | ||
1597 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1598 | mlog(ML_ERROR, "%u asserting but %.*s is " | ||
1599 | "RECOVERING!\n", assert->node_idx, namelen, name); | ||
1600 | goto kill; | ||
1601 | } | ||
1602 | if (!mle) { | ||
1603 | if (res->owner != assert->node_idx) { | ||
1604 | mlog(ML_ERROR, "assert_master from " | ||
1605 | "%u, but current owner is " | ||
1606 | "%u! (%.*s)\n", | ||
1607 | assert->node_idx, res->owner, | ||
1608 | namelen, name); | ||
1609 | goto kill; | ||
1610 | } | ||
1611 | } else if (mle->type != DLM_MLE_MIGRATION) { | ||
1612 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1613 | /* owner is just re-asserting */ | ||
1614 | if (res->owner == assert->node_idx) { | ||
1615 | mlog(0, "owner %u re-asserting on " | ||
1616 | "lock %.*s\n", assert->node_idx, | ||
1617 | namelen, name); | ||
1618 | goto ok; | ||
1619 | } | ||
1620 | mlog(ML_ERROR, "got assert_master from " | ||
1621 | "node %u, but %u is the owner! " | ||
1622 | "(%.*s)\n", assert->node_idx, | ||
1623 | res->owner, namelen, name); | ||
1624 | goto kill; | ||
1625 | } | ||
1626 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | ||
1627 | mlog(ML_ERROR, "got assert from %u, but lock " | ||
1628 | "with no owner should be " | ||
1629 | "in-progress! (%.*s)\n", | ||
1630 | assert->node_idx, | ||
1631 | namelen, name); | ||
1632 | goto kill; | ||
1633 | } | ||
1634 | } else /* mle->type == DLM_MLE_MIGRATION */ { | ||
1635 | /* should only be getting an assert from new master */ | ||
1636 | if (assert->node_idx != mle->new_master) { | ||
1637 | mlog(ML_ERROR, "got assert from %u, but " | ||
1638 | "new master is %u, and old master " | ||
1639 | "was %u (%.*s)\n", | ||
1640 | assert->node_idx, mle->new_master, | ||
1641 | mle->master, namelen, name); | ||
1642 | goto kill; | ||
1643 | } | ||
1644 | |||
1645 | } | ||
1646 | ok: | ||
1647 | spin_unlock(&res->spinlock); | ||
1648 | } | ||
1649 | spin_unlock(&dlm->spinlock); | ||
1650 | |||
1651 | // mlog(0, "woo! got an assert_master from node %u!\n", | ||
1652 | // assert->node_idx); | ||
1653 | if (mle) { | ||
1654 | int extra_ref; | ||
1655 | |||
1656 | spin_lock(&mle->spinlock); | ||
1657 | extra_ref = !!(mle->type == DLM_MLE_BLOCK | ||
1658 | || mle->type == DLM_MLE_MIGRATION); | ||
1659 | mle->master = assert->node_idx; | ||
1660 | atomic_set(&mle->woken, 1); | ||
1661 | wake_up(&mle->wq); | ||
1662 | spin_unlock(&mle->spinlock); | ||
1663 | |||
1664 | if (mle->type == DLM_MLE_MIGRATION && res) { | ||
1665 | mlog(0, "finishing off migration of lockres %.*s, " | ||
1666 | "from %u to %u\n", | ||
1667 | res->lockname.len, res->lockname.name, | ||
1668 | dlm->node_num, mle->new_master); | ||
1669 | spin_lock(&res->spinlock); | ||
1670 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
1671 | dlm_change_lockres_owner(dlm, res, mle->new_master); | ||
1672 | BUG_ON(res->state & DLM_LOCK_RES_DIRTY); | ||
1673 | spin_unlock(&res->spinlock); | ||
1674 | } | ||
1675 | /* master is known, detach if not already detached */ | ||
1676 | dlm_mle_detach_hb_events(dlm, mle); | ||
1677 | dlm_put_mle(mle); | ||
1678 | |||
1679 | if (extra_ref) { | ||
1680 | /* the assert master message now balances the extra | ||
1681 | * ref given by the master / migration request message. | ||
1682 | * if this is the last put, it will be removed | ||
1683 | * from the list. */ | ||
1684 | dlm_put_mle(mle); | ||
1685 | } | ||
1686 | } | ||
1687 | |||
1688 | done: | ||
1689 | if (res) | ||
1690 | dlm_lockres_put(res); | ||
1691 | dlm_put(dlm); | ||
1692 | return 0; | ||
1693 | |||
1694 | kill: | ||
1695 | /* kill the caller! */ | ||
1696 | spin_unlock(&res->spinlock); | ||
1697 | spin_unlock(&dlm->spinlock); | ||
1698 | dlm_lockres_put(res); | ||
1699 | mlog(ML_ERROR, "Bad message received from another node. Dumping state " | ||
1700 | "and killing the other node now! This node is OK and can continue.\n"); | ||
1701 | dlm_dump_lock_resources(dlm); | ||
1702 | dlm_put(dlm); | ||
1703 | return -EINVAL; | ||
1704 | } | ||
1705 | |||
1706 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, | ||
1707 | struct dlm_lock_resource *res, | ||
1708 | int ignore_higher, u8 request_from, u32 flags) | ||
1709 | { | ||
1710 | struct dlm_work_item *item; | ||
1711 | item = kcalloc(1, sizeof(*item), GFP_KERNEL); | ||
1712 | if (!item) | ||
1713 | return -ENOMEM; | ||
1714 | |||
1715 | |||
1716 | /* queue up work for dlm_assert_master_worker */ | ||
1717 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
1718 | dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); | ||
1719 | item->u.am.lockres = res; /* already have a ref */ | ||
1720 | /* can optionally ignore node numbers higher than this node */ | ||
1721 | item->u.am.ignore_higher = ignore_higher; | ||
1722 | item->u.am.request_from = request_from; | ||
1723 | item->u.am.flags = flags; | ||
1724 | |||
1725 | spin_lock(&dlm->work_lock); | ||
1726 | list_add_tail(&item->list, &dlm->work_list); | ||
1727 | spin_unlock(&dlm->work_lock); | ||
1728 | |||
1729 | schedule_work(&dlm->dispatched_work); | ||
1730 | return 0; | ||
1731 | } | ||
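The work item queued here is picked up by dlm_dispatch_work() in dlmrecovery.c (later in this patch): it splices dlm->work_list onto a private list, invokes each item's workfunc (here, dlm_assert_master_worker()), and then drops the extra dlm reference taken above and frees the item.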
1732 | |||
1733 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) | ||
1734 | { | ||
1735 | struct dlm_ctxt *dlm = data; | ||
1736 | int ret = 0; | ||
1737 | struct dlm_lock_resource *res; | ||
1738 | unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
1739 | int ignore_higher; | ||
1740 | int bit; | ||
1741 | u8 request_from; | ||
1742 | u32 flags; | ||
1743 | |||
1744 | dlm = item->dlm; | ||
1745 | res = item->u.am.lockres; | ||
1746 | ignore_higher = item->u.am.ignore_higher; | ||
1747 | request_from = item->u.am.request_from; | ||
1748 | flags = item->u.am.flags; | ||
1749 | |||
1750 | spin_lock(&dlm->spinlock); | ||
1751 | memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); | ||
1752 | spin_unlock(&dlm->spinlock); | ||
1753 | |||
1754 | clear_bit(dlm->node_num, nodemap); | ||
1755 | if (ignore_higher) { | ||
1756 | /* if this is just to clear up mles for nodes below | ||
1757 | * this node, do not send the message to the original | ||
1758 | * caller or any node number higher than this */ | ||
1759 | clear_bit(request_from, nodemap); | ||
1760 | bit = dlm->node_num; | ||
1761 | while (1) { | ||
1762 | bit = find_next_bit(nodemap, O2NM_MAX_NODES, | ||
1763 | bit+1); | ||
1764 | if (bit >= O2NM_MAX_NODES) | ||
1765 | break; | ||
1766 | clear_bit(bit, nodemap); | ||
1767 | } | ||
1768 | } | ||
1769 | |||
1770 | /* this call now finishes out the nodemap | ||
1771 | * even if one or more nodes die */ | ||
1772 | mlog(0, "worker about to master %.*s here, this=%u\n", | ||
1773 | res->lockname.len, res->lockname.name, dlm->node_num); | ||
1774 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
1775 | res->lockname.len, | ||
1776 | nodemap, flags); | ||
1777 | if (ret < 0) { | ||
1778 | /* no need to restart, we are done */ | ||
1779 | mlog_errno(ret); | ||
1780 | } | ||
1781 | |||
1782 | dlm_lockres_put(res); | ||
1783 | |||
1784 | mlog(0, "finished with dlm_assert_master_worker\n"); | ||
1785 | } | ||
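A worked example of the nodemap trimming in the worker above: if this node is 3, the domain map holds nodes {1, 2, 3, 5, 7}, and the item was dispatched with ignore_higher set and request_from = 2, then the first clear_bit() drops 3 (self), the second drops 2 (the original caller), and the loop drops 5 and 7, so dlm_do_assert_master() contacts only node 1.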
1786 | |||
1787 | |||
1788 | /* | ||
1789 | * DLM_MIGRATE_LOCKRES | ||
1790 | */ | ||
1791 | |||
1792 | |||
1793 | int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
1794 | u8 target) | ||
1795 | { | ||
1796 | struct dlm_master_list_entry *mle = NULL; | ||
1797 | struct dlm_master_list_entry *oldmle = NULL; | ||
1798 | struct dlm_migratable_lockres *mres = NULL; | ||
1799 | int ret = -EINVAL; | ||
1800 | const char *name; | ||
1801 | unsigned int namelen; | ||
1802 | int mle_added = 0; | ||
1803 | struct list_head *queue, *iter; | ||
1804 | int i; | ||
1805 | struct dlm_lock *lock; | ||
1806 | int empty = 1; | ||
1807 | |||
1808 | if (!dlm_grab(dlm)) | ||
1809 | return -EINVAL; | ||
1810 | |||
1811 | name = res->lockname.name; | ||
1812 | namelen = res->lockname.len; | ||
1813 | |||
1814 | mlog(0, "migrating %.*s to %u\n", namelen, name, target); | ||
1815 | |||
1816 | /* | ||
1817 | * ensure this lockres is a proper candidate for migration | ||
1818 | */ | ||
1819 | spin_lock(&res->spinlock); | ||
1820 | if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1821 | mlog(0, "cannot migrate lockres with unknown owner!\n"); | ||
1822 | spin_unlock(&res->spinlock); | ||
1823 | goto leave; | ||
1824 | } | ||
1825 | if (res->owner != dlm->node_num) { | ||
1826 | mlog(0, "cannot migrate lockres this node doesn't own!\n"); | ||
1827 | spin_unlock(&res->spinlock); | ||
1828 | goto leave; | ||
1829 | } | ||
1830 | mlog(0, "checking queues...\n"); | ||
1831 | queue = &res->granted; | ||
1832 | for (i=0; i<3; i++) { | ||
1833 | list_for_each(iter, queue) { | ||
1834 | lock = list_entry (iter, struct dlm_lock, list); | ||
1835 | empty = 0; | ||
1836 | if (lock->ml.node == dlm->node_num) { | ||
1837 | mlog(0, "found a lock owned by this node " | ||
1838 | "still on the %s queue! will not " | ||
1839 | "migrate this lockres\n", | ||
1840 | i==0 ? "granted" : | ||
1841 | (i==1 ? "converting" : "blocked")); | ||
1842 | spin_unlock(&res->spinlock); | ||
1843 | ret = -ENOTEMPTY; | ||
1844 | goto leave; | ||
1845 | } | ||
1846 | } | ||
1847 | queue++; | ||
1848 | } | ||
1849 | mlog(0, "all locks on this lockres are nonlocal. continuing\n"); | ||
1850 | spin_unlock(&res->spinlock); | ||
1851 | |||
1852 | /* no work to do */ | ||
1853 | if (empty) { | ||
1854 | mlog(0, "no locks were found on this lockres! done!\n"); | ||
1855 | ret = 0; | ||
1856 | goto leave; | ||
1857 | } | ||
1858 | |||
1859 | /* | ||
1860 | * preallocate up front | ||
1861 | * if this fails, abort | ||
1862 | */ | ||
1863 | |||
1864 | ret = -ENOMEM; | ||
1865 | mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL); | ||
1866 | if (!mres) { | ||
1867 | mlog_errno(ret); | ||
1868 | goto leave; | ||
1869 | } | ||
1870 | |||
1871 | mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, | ||
1872 | GFP_KERNEL); | ||
1873 | if (!mle) { | ||
1874 | mlog_errno(ret); | ||
1875 | goto leave; | ||
1876 | } | ||
1877 | ret = 0; | ||
1878 | |||
1879 | /* | ||
1880 | * find a node to migrate the lockres to | ||
1881 | */ | ||
1882 | |||
1883 | mlog(0, "picking a migration node\n"); | ||
1884 | spin_lock(&dlm->spinlock); | ||
1885 | /* pick a new node */ | ||
1886 | if (target >= O2NM_MAX_NODES || | ||
1887 | !test_bit(target, dlm->domain_map)) { | ||
1888 | target = dlm_pick_migration_target(dlm, res); | ||
1889 | } | ||
1890 | mlog(0, "node %u chosen for migration\n", target); | ||
1891 | |||
1892 | if (target >= O2NM_MAX_NODES || | ||
1893 | !test_bit(target, dlm->domain_map)) { | ||
1894 | /* target chosen is not alive */ | ||
1895 | ret = -EINVAL; | ||
1896 | } | ||
1897 | |||
1898 | if (ret) { | ||
1899 | spin_unlock(&dlm->spinlock); | ||
1900 | goto fail; | ||
1901 | } | ||
1902 | |||
1903 | mlog(0, "continuing with target = %u\n", target); | ||
1904 | |||
1905 | /* | ||
1906 | * clear any existing master requests and | ||
1907 | * add the migration mle to the list | ||
1908 | */ | ||
1909 | spin_lock(&dlm->master_lock); | ||
1910 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, | ||
1911 | namelen, target, dlm->node_num); | ||
1912 | spin_unlock(&dlm->master_lock); | ||
1913 | spin_unlock(&dlm->spinlock); | ||
1914 | |||
1915 | if (ret == -EEXIST) { | ||
1916 | mlog(0, "another process is already migrating it\n"); | ||
1917 | goto fail; | ||
1918 | } | ||
1919 | mle_added = 1; | ||
1920 | |||
1921 | /* | ||
1922 | * set the MIGRATING flag and flush asts | ||
1923 | * if we fail after this we need to re-dirty the lockres | ||
1924 | */ | ||
1925 | if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { | ||
1926 | mlog(ML_ERROR, "tried to migrate %.*s to %u, but " | ||
1927 | "the target went down.\n", res->lockname.len, | ||
1928 | res->lockname.name, target); | ||
1929 | spin_lock(&res->spinlock); | ||
1930 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
1931 | spin_unlock(&res->spinlock); | ||
1932 | ret = -EINVAL; | ||
1933 | } | ||
1934 | |||
1935 | fail: | ||
1936 | if (oldmle) { | ||
1937 | /* master is known, detach if not already detached */ | ||
1938 | dlm_mle_detach_hb_events(dlm, oldmle); | ||
1939 | dlm_put_mle(oldmle); | ||
1940 | } | ||
1941 | |||
1942 | if (ret < 0) { | ||
1943 | if (mle_added) { | ||
1944 | dlm_mle_detach_hb_events(dlm, mle); | ||
1945 | dlm_put_mle(mle); | ||
1946 | } else if (mle) { | ||
1947 | kmem_cache_free(dlm_mle_cache, mle); | ||
1948 | } | ||
1949 | goto leave; | ||
1950 | } | ||
1951 | |||
1952 | /* | ||
1953 | * at this point, we have a migration target, an mle | ||
1954 | * in the master list, and the MIGRATING flag set on | ||
1955 | * the lockres | ||
1956 | */ | ||
1957 | |||
1958 | |||
1959 | /* get an extra reference on the mle. | ||
1960 | * otherwise the assert_master from the new | ||
1961 | * master will destroy this. | ||
1962 | * also, make sure that all callers of dlm_get_mle | ||
1963 | * take both dlm->spinlock and dlm->master_lock */ | ||
1964 | spin_lock(&dlm->spinlock); | ||
1965 | spin_lock(&dlm->master_lock); | ||
1966 | dlm_get_mle(mle); | ||
1967 | spin_unlock(&dlm->master_lock); | ||
1968 | spin_unlock(&dlm->spinlock); | ||
1969 | |||
1970 | /* notify new node and send all lock state */ | ||
1971 | /* call send_one_lockres with migration flag. | ||
1972 | * this serves as notice to the target node that a | ||
1973 | * migration is starting. */ | ||
1974 | ret = dlm_send_one_lockres(dlm, res, mres, target, | ||
1975 | DLM_MRES_MIGRATION); | ||
1976 | |||
1977 | if (ret < 0) { | ||
1978 | mlog(0, "migration to node %u failed with %d\n", | ||
1979 | target, ret); | ||
1980 | /* migration failed, detach and clean up mle */ | ||
1981 | dlm_mle_detach_hb_events(dlm, mle); | ||
1982 | dlm_put_mle(mle); | ||
1983 | dlm_put_mle(mle); | ||
1984 | goto leave; | ||
1985 | } | ||
1986 | |||
1987 | /* at this point, the target sends a message to all nodes, | ||
1988 | * (using dlm_do_migrate_request). this node is skipped since | ||
1989 | * we had to put an mle in the list to begin the process. this | ||
1990 | * node now waits for target to do an assert master. this node | ||
1991 | * will be the last one notified, ensuring that the migration | ||
1992 | * is complete everywhere. if the target dies while this is | ||
1993 | * going on, some nodes could potentially see the target as the | ||
1994 | * master, so it is important that my recovery finds the migration | ||
1995 | * mle and sets the master to UNKNOWN. */ | ||
1996 | |||
1997 | |||
1998 | /* wait for new node to assert master */ | ||
1999 | while (1) { | ||
2000 | ret = wait_event_interruptible_timeout(mle->wq, | ||
2001 | (atomic_read(&mle->woken) == 1), | ||
2002 | msecs_to_jiffies(5000)); | ||
2003 | |||
2004 | if (ret >= 0) { | ||
2005 | if (atomic_read(&mle->woken) == 1 || | ||
2006 | res->owner == target) | ||
2007 | break; | ||
2008 | |||
2009 | mlog(0, "timed out during migration\n"); | ||
2010 | } | ||
2011 | if (ret == -ERESTARTSYS) { | ||
2012 | /* migration failed, detach and clean up mle */ | ||
2013 | dlm_mle_detach_hb_events(dlm, mle); | ||
2014 | dlm_put_mle(mle); | ||
2015 | dlm_put_mle(mle); | ||
2016 | goto leave; | ||
2017 | } | ||
2018 | /* TODO: if node died: stop, clean up, return error */ | ||
2019 | } | ||
2020 | |||
2021 | /* all done, set the owner, clear the flag */ | ||
2022 | spin_lock(&res->spinlock); | ||
2023 | dlm_set_lockres_owner(dlm, res, target); | ||
2024 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
2025 | dlm_remove_nonlocal_locks(dlm, res); | ||
2026 | spin_unlock(&res->spinlock); | ||
2027 | wake_up(&res->wq); | ||
2028 | |||
2029 | /* master is known, detach if not already detached */ | ||
2030 | dlm_mle_detach_hb_events(dlm, mle); | ||
2031 | dlm_put_mle(mle); | ||
2032 | ret = 0; | ||
2033 | |||
2034 | dlm_lockres_calc_usage(dlm, res); | ||
2035 | |||
2036 | leave: | ||
2037 | /* re-dirty the lockres if we failed */ | ||
2038 | if (ret < 0) | ||
2039 | dlm_kick_thread(dlm, res); | ||
2040 | |||
2041 | /* TODO: cleanup */ | ||
2042 | if (mres) | ||
2043 | free_page((unsigned long)mres); | ||
2044 | |||
2045 | dlm_put(dlm); | ||
2046 | |||
2047 | mlog(0, "returning %d\n", ret); | ||
2048 | return ret; | ||
2049 | } | ||
2050 | EXPORT_SYMBOL_GPL(dlm_migrate_lockres); | ||
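Since dlm_migrate_lockres() is exported, callers outside this file can invoke it directly. A minimal sketch of such a caller (hypothetical, not part of this patch): passing any target at or above O2NM_MAX_NODES fails the validation above, which makes the function choose a live node via dlm_pick_migration_target() on its own.

	/* hypothetical caller: push a lockres off this node and let the
	 * dlm pick the destination.  an out-of-range target triggers
	 * dlm_pick_migration_target() inside dlm_migrate_lockres(). */
	static int example_migrate_away(struct dlm_ctxt *dlm,
					struct dlm_lock_resource *res)
	{
		int ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
		if (ret == -ENOTEMPTY)
			mlog(0, "%.*s still has local locks, not migrated\n",
			     res->lockname.len, res->lockname.name);
		return ret;
	}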
2051 | |||
2052 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
2053 | { | ||
2054 | int ret; | ||
2055 | spin_lock(&dlm->ast_lock); | ||
2056 | spin_lock(&lock->spinlock); | ||
2057 | ret = (list_empty(&lock->bast_list) && !lock->bast_pending); | ||
2058 | spin_unlock(&lock->spinlock); | ||
2059 | spin_unlock(&dlm->ast_lock); | ||
2060 | return ret; | ||
2061 | } | ||
2062 | |||
2063 | static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, | ||
2064 | struct dlm_lock_resource *res, | ||
2065 | u8 mig_target) | ||
2066 | { | ||
2067 | int can_proceed; | ||
2068 | spin_lock(&res->spinlock); | ||
2069 | can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); | ||
2070 | spin_unlock(&res->spinlock); | ||
2071 | |||
2072 | /* target has died, so make the caller break out of the | ||
2073 | * wait_event, but caller must recheck the domain_map */ | ||
2074 | spin_lock(&dlm->spinlock); | ||
2075 | if (!test_bit(mig_target, dlm->domain_map)) | ||
2076 | can_proceed = 1; | ||
2077 | spin_unlock(&dlm->spinlock); | ||
2078 | return can_proceed; | ||
2079 | } | ||
2080 | |||
2081 | int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | ||
2082 | { | ||
2083 | int ret; | ||
2084 | spin_lock(&res->spinlock); | ||
2085 | ret = !!(res->state & DLM_LOCK_RES_DIRTY); | ||
2086 | spin_unlock(&res->spinlock); | ||
2087 | return ret; | ||
2088 | } | ||
2089 | |||
2090 | |||
2091 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | ||
2092 | struct dlm_lock_resource *res, | ||
2093 | u8 target) | ||
2094 | { | ||
2095 | int ret = 0; | ||
2096 | |||
2097 | mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", | ||
2098 | res->lockname.len, res->lockname.name, dlm->node_num, | ||
2099 | target); | ||
2100 | /* need to set MIGRATING flag on lockres. this is done by | ||
2101 | * ensuring that all asts have been flushed for this lockres. */ | ||
2102 | spin_lock(&res->spinlock); | ||
2103 | BUG_ON(res->migration_pending); | ||
2104 | res->migration_pending = 1; | ||
2105 | /* strategy is to reserve an extra ast then release | ||
2106 | * it below, letting the release do all of the work */ | ||
2107 | __dlm_lockres_reserve_ast(res); | ||
2108 | spin_unlock(&res->spinlock); | ||
2109 | |||
2110 | /* now flush all the pending asts... hang out for a bit */ | ||
2111 | dlm_kick_thread(dlm, res); | ||
2112 | wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); | ||
2113 | dlm_lockres_release_ast(dlm, res); | ||
2114 | |||
2115 | mlog(0, "about to wait on migration_wq, dirty=%s\n", | ||
2116 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | ||
2117 | /* if the extra ref we just put was the final one, this | ||
2118 | * will pass thru immediately. otherwise, we need to wait | ||
2119 | * for the last ast to finish. */ | ||
2120 | again: | ||
2121 | ret = wait_event_interruptible_timeout(dlm->migration_wq, | ||
2122 | dlm_migration_can_proceed(dlm, res, target), | ||
2123 | msecs_to_jiffies(1000)); | ||
2124 | if (ret < 0) { | ||
2125 | mlog(0, "woken again: migrating? %s, dead? %s\n", | ||
2126 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | ||
2127 | test_bit(target, dlm->domain_map) ? "no":"yes"); | ||
2128 | } else { | ||
2129 | mlog(0, "all is well: migrating? %s, dead? %s\n", | ||
2130 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | ||
2131 | test_bit(target, dlm->domain_map) ? "no":"yes"); | ||
2132 | } | ||
2133 | if (!dlm_migration_can_proceed(dlm, res, target)) { | ||
2134 | mlog(0, "trying again...\n"); | ||
2135 | goto again; | ||
2136 | } | ||
2137 | |||
2138 | /* did the target go down or die? */ | ||
2139 | spin_lock(&dlm->spinlock); | ||
2140 | if (!test_bit(target, dlm->domain_map)) { | ||
2141 | mlog(ML_ERROR, "aha. migration target %u just went down\n", | ||
2142 | target); | ||
2143 | ret = -EHOSTDOWN; | ||
2144 | } | ||
2145 | spin_unlock(&dlm->spinlock); | ||
2146 | |||
2147 | /* | ||
2148 | * at this point: | ||
2149 | * | ||
2150 | * o the DLM_LOCK_RES_MIGRATING flag is set | ||
2151 | * o there are no pending asts on this lockres | ||
2152 | * o all processes trying to reserve an ast on this | ||
2153 | * lockres must wait for the MIGRATING flag to clear | ||
2154 | */ | ||
2155 | return ret; | ||
2156 | } | ||
2157 | |||
2158 | /* last step in the migration process. | ||
2159 | * original master calls this to free all of the dlm_lock | ||
2160 | * structures that used to be for other nodes. */ | ||
2161 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | ||
2162 | struct dlm_lock_resource *res) | ||
2163 | { | ||
2164 | struct list_head *iter, *iter2; | ||
2165 | struct list_head *queue = &res->granted; | ||
2166 | int i; | ||
2167 | struct dlm_lock *lock; | ||
2168 | |||
2169 | assert_spin_locked(&res->spinlock); | ||
2170 | |||
2171 | BUG_ON(res->owner == dlm->node_num); | ||
2172 | |||
2173 | for (i=0; i<3; i++) { | ||
2174 | list_for_each_safe(iter, iter2, queue) { | ||
2175 | lock = list_entry (iter, struct dlm_lock, list); | ||
2176 | if (lock->ml.node != dlm->node_num) { | ||
2177 | mlog(0, "putting lock for node %u\n", | ||
2178 | lock->ml.node); | ||
2179 | /* be extra careful */ | ||
2180 | BUG_ON(!list_empty(&lock->ast_list)); | ||
2181 | BUG_ON(!list_empty(&lock->bast_list)); | ||
2182 | BUG_ON(lock->ast_pending); | ||
2183 | BUG_ON(lock->bast_pending); | ||
2184 | list_del_init(&lock->list); | ||
2185 | dlm_lock_put(lock); | ||
2186 | } | ||
2187 | } | ||
2188 | queue++; | ||
2189 | } | ||
2190 | } | ||
2191 | |||
2192 | /* for now this is not too intelligent. we will | ||
2193 | * need stats to make this do the right thing. | ||
2194 | * this just finds the first lock on one of the | ||
2195 | * queues and uses that node as the target. */ | ||
2196 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, | ||
2197 | struct dlm_lock_resource *res) | ||
2198 | { | ||
2199 | int i; | ||
2200 | struct list_head *queue = &res->granted; | ||
2201 | struct list_head *iter; | ||
2202 | struct dlm_lock *lock; | ||
2203 | int nodenum; | ||
2204 | |||
2205 | assert_spin_locked(&dlm->spinlock); | ||
2206 | |||
2207 | spin_lock(&res->spinlock); | ||
2208 | for (i=0; i<3; i++) { | ||
2209 | list_for_each(iter, queue) { | ||
2210 | /* up to the caller to make sure this node | ||
2211 | * is alive */ | ||
2212 | lock = list_entry (iter, struct dlm_lock, list); | ||
2213 | if (lock->ml.node != dlm->node_num) { | ||
2214 | spin_unlock(&res->spinlock); | ||
2215 | return lock->ml.node; | ||
2216 | } | ||
2217 | } | ||
2218 | queue++; | ||
2219 | } | ||
2220 | spin_unlock(&res->spinlock); | ||
2221 | mlog(0, "have not found a suitable target yet! checking domain map\n"); | ||
2222 | |||
2223 | /* ok now we're getting desperate. pick anyone alive. */ | ||
2224 | nodenum = -1; | ||
2225 | while (1) { | ||
2226 | nodenum = find_next_bit(dlm->domain_map, | ||
2227 | O2NM_MAX_NODES, nodenum+1); | ||
2228 | mlog(0, "found %d in domain map\n", nodenum); | ||
2229 | if (nodenum >= O2NM_MAX_NODES) | ||
2230 | break; | ||
2231 | if (nodenum != dlm->node_num) { | ||
2232 | mlog(0, "picking %d\n", nodenum); | ||
2233 | return nodenum; | ||
2234 | } | ||
2235 | } | ||
2236 | |||
2237 | mlog(0, "giving up. no master to migrate to\n"); | ||
2238 | return DLM_LOCK_RES_OWNER_UNKNOWN; | ||
2239 | } | ||
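For example, if the granted queue holds a lock from node 2 ahead of one from this node, node 2 is returned immediately; if every queued lock is local, the fallback scan returns the first live node other than this one, and only a single-node domain ends in DLM_LOCK_RES_OWNER_UNKNOWN.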
2240 | |||
2241 | |||
2242 | |||
2243 | /* this is called by the new master once all lockres | ||
2244 | * data has been received */ | ||
2245 | static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | ||
2246 | struct dlm_lock_resource *res, | ||
2247 | u8 master, u8 new_master, | ||
2248 | struct dlm_node_iter *iter) | ||
2249 | { | ||
2250 | struct dlm_migrate_request migrate; | ||
2251 | int ret, status = 0; | ||
2252 | int nodenum; | ||
2253 | |||
2254 | memset(&migrate, 0, sizeof(migrate)); | ||
2255 | migrate.namelen = res->lockname.len; | ||
2256 | memcpy(migrate.name, res->lockname.name, migrate.namelen); | ||
2257 | migrate.new_master = new_master; | ||
2258 | migrate.master = master; | ||
2259 | |||
2260 | ret = 0; | ||
2261 | |||
2262 | /* send message to all nodes, except the master and myself */ | ||
2263 | while ((nodenum = dlm_node_iter_next(iter)) >= 0) { | ||
2264 | if (nodenum == master || | ||
2265 | nodenum == new_master) | ||
2266 | continue; | ||
2267 | |||
2268 | ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, | ||
2269 | &migrate, sizeof(migrate), nodenum, | ||
2270 | &status); | ||
2271 | if (ret < 0) | ||
2272 | mlog_errno(ret); | ||
2273 | else if (status < 0) { | ||
2274 | mlog(0, "migrate request (node %u) returned %d!\n", | ||
2275 | nodenum, status); | ||
2276 | ret = status; | ||
2277 | } | ||
2278 | } | ||
2279 | |||
2280 | if (ret < 0) | ||
2281 | mlog_errno(ret); | ||
2282 | |||
2283 | mlog(0, "returning ret=%d\n", ret); | ||
2284 | return ret; | ||
2285 | } | ||
2286 | |||
2287 | |||
2288 | /* if there is an existing mle for this lockres, we now know who the master is | ||
2289 | * (the one who sent us *this* message), so we can clear it up right away. | ||
2290 | * since the process that put the mle on the list still has a reference to it, | ||
2291 | * we can unhash it now, set the master and wake the process. as a result, | ||
2292 | * we will have no mle in the list to start with. now we can add an mle for | ||
2293 | * the migration and this should be the only one found for those scanning the | ||
2294 | * list. */ | ||
2295 | int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data) | ||
2296 | { | ||
2297 | struct dlm_ctxt *dlm = data; | ||
2298 | struct dlm_lock_resource *res = NULL; | ||
2299 | struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; | ||
2300 | struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; | ||
2301 | const char *name; | ||
2302 | unsigned int namelen; | ||
2303 | int ret = 0; | ||
2304 | |||
2305 | if (!dlm_grab(dlm)) | ||
2306 | return -EINVAL; | ||
2307 | |||
2308 | name = migrate->name; | ||
2309 | namelen = migrate->namelen; | ||
2310 | |||
2311 | /* preallocate.. if this fails, abort */ | ||
2312 | mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, | ||
2313 | GFP_KERNEL); | ||
2314 | |||
2315 | if (!mle) { | ||
2316 | ret = -ENOMEM; | ||
2317 | goto leave; | ||
2318 | } | ||
2319 | |||
2320 | /* check for pre-existing lock */ | ||
2321 | spin_lock(&dlm->spinlock); | ||
2322 | res = __dlm_lookup_lockres(dlm, name, namelen); | ||
2323 | spin_lock(&dlm->master_lock); | ||
2324 | |||
2325 | if (res) { | ||
2326 | spin_lock(&res->spinlock); | ||
2327 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
2328 | /* if all is working ok, this can only mean that we got | ||
2329 | * a migrate request from a node that we now see as | ||
2330 | * dead. what can we do here? drop it to the floor? */ | ||
2331 | spin_unlock(&res->spinlock); | ||
2332 | mlog(ML_ERROR, "Got a migrate request, but the " | ||
2333 | "lockres is marked as recovering!"); | ||
2334 | kmem_cache_free(dlm_mle_cache, mle); | ||
2335 | ret = -EINVAL; /* need a better solution */ | ||
2336 | goto unlock; | ||
2337 | } | ||
2338 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
2339 | spin_unlock(&res->spinlock); | ||
2340 | } | ||
2341 | |||
2342 | /* ignore status. only nonzero status would BUG. */ | ||
2343 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, | ||
2344 | name, namelen, | ||
2345 | migrate->new_master, | ||
2346 | migrate->master); | ||
2347 | |||
2348 | unlock: | ||
2349 | spin_unlock(&dlm->master_lock); | ||
2350 | spin_unlock(&dlm->spinlock); | ||
2351 | |||
2352 | if (oldmle) { | ||
2353 | /* master is known, detach if not already detached */ | ||
2354 | dlm_mle_detach_hb_events(dlm, oldmle); | ||
2355 | dlm_put_mle(oldmle); | ||
2356 | } | ||
2357 | |||
2358 | if (res) | ||
2359 | dlm_lockres_put(res); | ||
2360 | leave: | ||
2361 | dlm_put(dlm); | ||
2362 | return ret; | ||
2363 | } | ||
2364 | |||
2365 | /* must be holding dlm->spinlock and dlm->master_lock | ||
2366 | * when adding a migration mle, we can clear any other mles | ||
2367 | * in the master list because we know with certainty that | ||
2368 | * the master is "master". so we remove any old mle from | ||
2369 | * the list after setting its master field, and then add | ||
2370 | * the new migration mle. this way we hold to the rule | ||
2371 | * of having only one mle for a given lock name at all times. */ | ||
2372 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | ||
2373 | struct dlm_lock_resource *res, | ||
2374 | struct dlm_master_list_entry *mle, | ||
2375 | struct dlm_master_list_entry **oldmle, | ||
2376 | const char *name, unsigned int namelen, | ||
2377 | u8 new_master, u8 master) | ||
2378 | { | ||
2379 | int found; | ||
2380 | int ret = 0; | ||
2381 | |||
2382 | *oldmle = NULL; | ||
2383 | |||
2384 | mlog_entry_void(); | ||
2385 | |||
2386 | assert_spin_locked(&dlm->spinlock); | ||
2387 | assert_spin_locked(&dlm->master_lock); | ||
2388 | |||
2389 | /* caller is responsible for any ref taken here on oldmle */ | ||
2390 | found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); | ||
2391 | if (found) { | ||
2392 | struct dlm_master_list_entry *tmp = *oldmle; | ||
2393 | spin_lock(&tmp->spinlock); | ||
2394 | if (tmp->type == DLM_MLE_MIGRATION) { | ||
2395 | if (master == dlm->node_num) { | ||
2396 | /* ah another process raced me to it */ | ||
2397 | mlog(0, "tried to migrate %.*s, but some " | ||
2398 | "process beat me to it\n", | ||
2399 | namelen, name); | ||
2400 | ret = -EEXIST; | ||
2401 | } else { | ||
2402 | /* bad. 2 NODES are trying to migrate! */ | ||
2403 | mlog(ML_ERROR, "migration error mle: " | ||
2404 | "master=%u new_master=%u // request: " | ||
2405 | "master=%u new_master=%u // " | ||
2406 | "lockres=%.*s\n", | ||
2407 | tmp->master, tmp->new_master, | ||
2408 | master, new_master, | ||
2409 | namelen, name); | ||
2410 | BUG(); | ||
2411 | } | ||
2412 | } else { | ||
2413 | /* this is essentially what assert_master does */ | ||
2414 | tmp->master = master; | ||
2415 | atomic_set(&tmp->woken, 1); | ||
2416 | wake_up(&tmp->wq); | ||
2417 | /* remove it from the list so that only one | ||
2418 | * mle will be found */ | ||
2419 | list_del_init(&tmp->list); | ||
2420 | } | ||
2421 | spin_unlock(&tmp->spinlock); | ||
2422 | } | ||
2423 | |||
2424 | /* now add a migration mle to the tail of the list */ | ||
2425 | dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); | ||
2426 | mle->new_master = new_master; | ||
2427 | mle->master = master; | ||
2428 | /* do this for consistency with other mle types */ | ||
2429 | set_bit(new_master, mle->maybe_map); | ||
2430 | list_add(&mle->list, &dlm->master_list); | ||
2431 | |||
2432 | return ret; | ||
2433 | } | ||
2434 | |||
2435 | |||
2436 | void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) | ||
2437 | { | ||
2438 | struct list_head *iter, *iter2; | ||
2439 | struct dlm_master_list_entry *mle; | ||
2440 | struct dlm_lock_resource *res; | ||
2441 | |||
2442 | mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); | ||
2443 | top: | ||
2444 | assert_spin_locked(&dlm->spinlock); | ||
2445 | |||
2446 | /* clean the master list */ | ||
2447 | spin_lock(&dlm->master_lock); | ||
2448 | list_for_each_safe(iter, iter2, &dlm->master_list) { | ||
2449 | mle = list_entry(iter, struct dlm_master_list_entry, list); | ||
2450 | |||
2451 | BUG_ON(mle->type != DLM_MLE_BLOCK && | ||
2452 | mle->type != DLM_MLE_MASTER && | ||
2453 | mle->type != DLM_MLE_MIGRATION); | ||
2454 | |||
2455 | /* MASTER mles are initiated locally. the waiting | ||
2456 | * process will notice the node map change | ||
2457 | * shortly. let that happen as normal. */ | ||
2458 | if (mle->type == DLM_MLE_MASTER) | ||
2459 | continue; | ||
2460 | |||
2461 | |||
2462 | /* BLOCK mles are initiated by other nodes. | ||
2463 | * need to clean up if the dead node would have | ||
2464 | * been the master. */ | ||
2465 | if (mle->type == DLM_MLE_BLOCK) { | ||
2466 | int bit; | ||
2467 | |||
2468 | spin_lock(&mle->spinlock); | ||
2469 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | ||
2470 | if (bit != dead_node) { | ||
2471 | mlog(0, "mle found, but dead node %u would " | ||
2472 | "not have been master\n", dead_node); | ||
2473 | spin_unlock(&mle->spinlock); | ||
2474 | } else { | ||
2475 | /* must drop the refcount by one since the | ||
2476 | * assert_master will never arrive. this | ||
2477 | * may result in the mle being unlinked and | ||
2478 | * freed, but there may still be a process | ||
2479 | * waiting in the dlmlock path which is fine. */ | ||
2480 | mlog(ML_ERROR, "node %u was expected master\n", | ||
2481 | dead_node); | ||
2482 | atomic_set(&mle->woken, 1); | ||
2483 | spin_unlock(&mle->spinlock); | ||
2484 | wake_up(&mle->wq); | ||
2485 | /* final put will take care of list removal */ | ||
2486 | __dlm_put_mle(mle); | ||
2487 | } | ||
2488 | continue; | ||
2489 | } | ||
2490 | |||
2491 | /* everything else is a MIGRATION mle */ | ||
2492 | |||
2493 | /* the rule for MIGRATION mles is that the master | ||
2494 | * becomes UNKNOWN if *either* the original or | ||
2495 | * the new master dies. all UNKNOWN lockreses | ||
2496 | * are sent to whichever node becomes the recovery | ||
2497 | * master. the new master is responsible for | ||
2498 | * determining if there is still a master for | ||
2499 | * this lockres, or if he needs to take over | ||
2500 | * mastery. either way, this node should expect | ||
2501 | * another message to resolve this. */ | ||
2502 | if (mle->master != dead_node && | ||
2503 | mle->new_master != dead_node) | ||
2504 | continue; | ||
2505 | |||
2506 | /* if we have reached this point, this mle needs to | ||
2507 | * be removed from the list and freed. */ | ||
2508 | |||
2509 | /* remove from the list early. NOTE: unlinking | ||
2510 | * list_head while in list_for_each_safe */ | ||
2511 | spin_lock(&mle->spinlock); | ||
2512 | list_del_init(&mle->list); | ||
2513 | atomic_set(&mle->woken, 1); | ||
2514 | spin_unlock(&mle->spinlock); | ||
2515 | wake_up(&mle->wq); | ||
2516 | |||
2517 | mlog(0, "node %u died during migration from " | ||
2518 | "%u to %u!\n", dead_node, | ||
2519 | mle->master, mle->new_master); | ||
2520 | /* if there is a lockres associated with this | ||
2521 | * mle, find it and set its owner to UNKNOWN */ | ||
2522 | res = __dlm_lookup_lockres(dlm, mle->u.name.name, | ||
2523 | mle->u.name.len); | ||
2524 | if (res) { | ||
2525 | /* unfortunately if we hit this rare case, our | ||
2526 | * lock ordering is messed. we need to drop | ||
2527 | * the master lock so that we can take the | ||
2528 | * lockres lock, meaning that we will have to | ||
2529 | * restart from the head of list. */ | ||
2530 | spin_unlock(&dlm->master_lock); | ||
2531 | |||
2532 | /* move lockres onto recovery list */ | ||
2533 | spin_lock(&res->spinlock); | ||
2534 | dlm_set_lockres_owner(dlm, res, | ||
2535 | DLM_LOCK_RES_OWNER_UNKNOWN); | ||
2536 | dlm_move_lockres_to_recovery_list(dlm, res); | ||
2537 | spin_unlock(&res->spinlock); | ||
2538 | dlm_lockres_put(res); | ||
2539 | |||
2540 | /* dump the mle */ | ||
2541 | spin_lock(&dlm->master_lock); | ||
2542 | __dlm_put_mle(mle); | ||
2543 | spin_unlock(&dlm->master_lock); | ||
2544 | |||
2545 | /* restart */ | ||
2546 | goto top; | ||
2547 | } | ||
2548 | |||
2549 | /* this may be the last reference */ | ||
2550 | __dlm_put_mle(mle); | ||
2551 | } | ||
2552 | spin_unlock(&dlm->master_lock); | ||
2553 | } | ||
2554 | |||
2555 | |||
2556 | int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
2557 | u8 old_master) | ||
2558 | { | ||
2559 | struct dlm_node_iter iter; | ||
2560 | int ret = 0; | ||
2561 | |||
2562 | spin_lock(&dlm->spinlock); | ||
2563 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
2564 | clear_bit(old_master, iter.node_map); | ||
2565 | clear_bit(dlm->node_num, iter.node_map); | ||
2566 | spin_unlock(&dlm->spinlock); | ||
2567 | |||
2568 | mlog(0, "now time to do a migrate request to other nodes\n"); | ||
2569 | ret = dlm_do_migrate_request(dlm, res, old_master, | ||
2570 | dlm->node_num, &iter); | ||
2571 | if (ret < 0) { | ||
2572 | mlog_errno(ret); | ||
2573 | goto leave; | ||
2574 | } | ||
2575 | |||
2576 | mlog(0, "doing assert master of %.*s to all except the original node\n", | ||
2577 | res->lockname.len, res->lockname.name); | ||
2578 | /* this call now finishes out the nodemap | ||
2579 | * even if one or more nodes die */ | ||
2580 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
2581 | res->lockname.len, iter.node_map, | ||
2582 | DLM_ASSERT_MASTER_FINISH_MIGRATION); | ||
2583 | if (ret < 0) { | ||
2584 | /* no longer need to retry. all living nodes contacted. */ | ||
2585 | mlog_errno(ret); | ||
2586 | ret = 0; | ||
2587 | } | ||
2588 | |||
2589 | memset(iter.node_map, 0, sizeof(iter.node_map)); | ||
2590 | set_bit(old_master, iter.node_map); | ||
2591 | mlog(0, "doing assert master of %.*s back to %u\n", | ||
2592 | res->lockname.len, res->lockname.name, old_master); | ||
2593 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
2594 | res->lockname.len, iter.node_map, | ||
2595 | DLM_ASSERT_MASTER_FINISH_MIGRATION); | ||
2596 | if (ret < 0) { | ||
2597 | mlog(0, "assert master to original master failed " | ||
2598 | "with %d.\n", ret); | ||
2599 | /* the only nonzero status here would be because of | ||
2600 | * a dead original node. we're done. */ | ||
2601 | ret = 0; | ||
2602 | } | ||
2603 | |||
2604 | /* all done, set the owner, clear the flag */ | ||
2605 | spin_lock(&res->spinlock); | ||
2606 | dlm_set_lockres_owner(dlm, res, dlm->node_num); | ||
2607 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
2608 | spin_unlock(&res->spinlock); | ||
2609 | /* re-dirty it on the new master */ | ||
2610 | dlm_kick_thread(dlm, res); | ||
2611 | wake_up(&res->wq); | ||
2612 | leave: | ||
2613 | return ret; | ||
2614 | } | ||
2615 | |||
2616 | /* | ||
2617 | * LOCKRES AST REFCOUNT | ||
2618 | * this is integral to migration | ||
2619 | */ | ||
2620 | |||
2621 | /* for future intent to call an ast, reserve one ahead of time. | ||
2622 | * this should be called only after waiting on the lockres | ||
2623 | * with dlm_wait_on_lockres, and while still holding the | ||
2624 | * spinlock after the call. */ | ||
2625 | void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) | ||
2626 | { | ||
2627 | assert_spin_locked(&res->spinlock); | ||
2628 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
2629 | __dlm_print_one_lock_resource(res); | ||
2630 | } | ||
2631 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | ||
2632 | |||
2633 | atomic_inc(&res->asts_reserved); | ||
2634 | } | ||
2635 | |||
2636 | /* | ||
2637 | * used to drop the reserved ast, either because it went unused, | ||
2638 | * or because the ast/bast was actually called. | ||
2639 | * | ||
2640 | * also, if there is a pending migration on this lockres, | ||
2641 | * and this was the last pending ast on the lockres, | ||
2642 | * atomically set the MIGRATING flag before we drop the lock. | ||
2643 | * this is how we ensure that migration can proceed with no | ||
2644 | * asts in progress. note that it is ok if the state of the | ||
2645 | * queues is such that a lock should be granted in the future | ||
2646 | * or that a bast should be fired, because the new master will | ||
2647 | * shuffle the lists on this lockres as soon as it is migrated. | ||
2648 | */ | ||
2649 | void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | ||
2650 | struct dlm_lock_resource *res) | ||
2651 | { | ||
2652 | if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) | ||
2653 | return; | ||
2654 | |||
2655 | if (!res->migration_pending) { | ||
2656 | spin_unlock(&res->spinlock); | ||
2657 | return; | ||
2658 | } | ||
2659 | |||
2660 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | ||
2661 | res->migration_pending = 0; | ||
2662 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
2663 | spin_unlock(&res->spinlock); | ||
2664 | wake_up(&res->wq); | ||
2665 | wake_up(&dlm->migration_wq); | ||
2666 | } | ||
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c new file mode 100644 index 000000000000..0c8eb1093f00 --- /dev/null +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -0,0 +1,2132 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmrecovery.c | ||
5 | * | ||
6 | * recovery stuff | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/timer.h> | ||
41 | #include <linux/kthread.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | #include "dlmdomain.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); | ||
56 | |||
57 | static int dlm_recovery_thread(void *data); | ||
58 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | ||
59 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); | ||
60 | static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); | ||
61 | static int dlm_do_recovery(struct dlm_ctxt *dlm); | ||
62 | |||
63 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); | ||
64 | static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); | ||
65 | static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); | ||
66 | static int dlm_request_all_locks(struct dlm_ctxt *dlm, | ||
67 | u8 request_from, u8 dead_node); | ||
68 | static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); | ||
69 | |||
70 | static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); | ||
71 | static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, | ||
72 | const char *lockname, int namelen, | ||
73 | int total_locks, u64 cookie, | ||
74 | u8 flags, u8 master); | ||
75 | static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | ||
76 | struct dlm_migratable_lockres *mres, | ||
77 | u8 send_to, | ||
78 | struct dlm_lock_resource *res, | ||
79 | int total_locks); | ||
80 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, | ||
81 | struct dlm_lock_resource *res, | ||
82 | u8 *real_master); | ||
83 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | ||
84 | struct dlm_lock_resource *res, | ||
85 | struct dlm_migratable_lockres *mres); | ||
86 | static int dlm_do_master_requery(struct dlm_ctxt *dlm, | ||
87 | struct dlm_lock_resource *res, | ||
88 | u8 nodenum, u8 *real_master); | ||
89 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); | ||
90 | static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, | ||
91 | u8 dead_node, u8 send_to); | ||
92 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); | ||
93 | static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, | ||
94 | struct list_head *list, u8 dead_node); | ||
95 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | ||
96 | u8 dead_node, u8 new_master); | ||
97 | static void dlm_reco_ast(void *astdata); | ||
98 | static void dlm_reco_bast(void *astdata, int blocked_type); | ||
99 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); | ||
100 | static void dlm_request_all_locks_worker(struct dlm_work_item *item, | ||
101 | void *data); | ||
102 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); | ||
103 | |||
104 | static u64 dlm_get_next_mig_cookie(void); | ||
105 | |||
106 | static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED; | ||
107 | static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED; | ||
108 | static u64 dlm_mig_cookie = 1; | ||
109 | |||
110 | static u64 dlm_get_next_mig_cookie(void) | ||
111 | { | ||
112 | u64 c; | ||
113 | spin_lock(&dlm_mig_cookie_lock); | ||
114 | c = dlm_mig_cookie; | ||
115 | if (dlm_mig_cookie == (~0ULL)) | ||
116 | dlm_mig_cookie = 1; | ||
117 | else | ||
118 | dlm_mig_cookie++; | ||
119 | spin_unlock(&dlm_mig_cookie_lock); | ||
120 | return c; | ||
121 | } | ||
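The cookie sequence therefore runs 1, 2, ..., ~0ULL and wraps back to 1; zero is never handed out, presumably so a zero cookie can mean "unset" (an assumption, the patch does not say).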
122 | |||
123 | static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) | ||
124 | { | ||
125 | spin_lock(&dlm->spinlock); | ||
126 | clear_bit(dlm->reco.dead_node, dlm->recovery_map); | ||
127 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
128 | dlm->reco.new_master = O2NM_INVALID_NODE_NUM; | ||
129 | spin_unlock(&dlm->spinlock); | ||
130 | } | ||
131 | |||
132 | /* Worker function used during recovery. */ | ||
133 | void dlm_dispatch_work(void *data) | ||
134 | { | ||
135 | struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; | ||
136 | LIST_HEAD(tmp_list); | ||
137 | struct list_head *iter, *iter2; | ||
138 | struct dlm_work_item *item; | ||
139 | dlm_workfunc_t *workfunc; | ||
140 | |||
141 | spin_lock(&dlm->work_lock); | ||
142 | list_splice_init(&dlm->work_list, &tmp_list); | ||
143 | spin_unlock(&dlm->work_lock); | ||
144 | |||
145 | list_for_each_safe(iter, iter2, &tmp_list) { | ||
146 | item = list_entry(iter, struct dlm_work_item, list); | ||
147 | workfunc = item->func; | ||
148 | list_del_init(&item->list); | ||
149 | |||
150 | /* already have ref on dlm to avoid having | ||
151 | * it disappear. just double-check. */ | ||
152 | BUG_ON(item->dlm != dlm); | ||
153 | |||
154 | /* this is allowed to sleep and | ||
155 | * call network stuff */ | ||
156 | workfunc(item, item->data); | ||
157 | |||
158 | dlm_put(dlm); | ||
159 | kfree(item); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /* | ||
164 | * RECOVERY THREAD | ||
165 | */ | ||
166 | |||
167 | static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) | ||
168 | { | ||
169 | /* wake the recovery thread | ||
170 | * this will wake the reco thread in one of three places | ||
171 | * 1) sleeping with no recovery happening | ||
172 | * 2) sleeping with recovery mastered elsewhere | ||
173 | * 3) recovery mastered here, waiting on reco data */ | ||
174 | |||
175 | wake_up(&dlm->dlm_reco_thread_wq); | ||
176 | } | ||
177 | |||
178 | /* Launch the recovery thread */ | ||
179 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) | ||
180 | { | ||
181 | mlog(0, "starting dlm recovery thread...\n"); | ||
182 | |||
183 | dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, | ||
184 | "dlm_reco_thread"); | ||
185 | if (IS_ERR(dlm->dlm_reco_thread_task)) { | ||
186 | mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); | ||
187 | dlm->dlm_reco_thread_task = NULL; | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) | ||
195 | { | ||
196 | if (dlm->dlm_reco_thread_task) { | ||
197 | mlog(0, "waiting for dlm recovery thread to exit\n"); | ||
198 | kthread_stop(dlm->dlm_reco_thread_task); | ||
199 | dlm->dlm_reco_thread_task = NULL; | ||
200 | } | ||
201 | } | ||
202 | |||
203 | |||
204 | |||
205 | /* | ||
206 | * this is lame, but here's how recovery works... | ||
207 | * 1) all recovery threads cluster wide will work on recovering | ||
208 | * ONE node at a time | ||
209 | * 2) negotiate who will take over all the locks for the dead node. | ||
210 | * that's right... ALL the locks. | ||
211 | * 3) once a new master is chosen, everyone scans all locks | ||
212 | * and moves aside those mastered by the dead guy | ||
213 | * 4) each of these locks should be locked until recovery is done | ||
214 | * 5) the new master collects up all of secondary lock queue info | ||
215 | * one lock at a time, forcing each node to communicate back | ||
216 | * before continuing | ||
217 | * 6) each secondary lock queue responds with the full known lock info | ||
218 | * 7) once the new master has run all its locks, it sends an ALLDONE! | ||
219 | * message to everyone | ||
220 | * 8) upon receiving this message, the secondary queue node unlocks | ||
221 | * and responds to the ALLDONE | ||
222 | * 9) once the new master gets responses from everyone, he unlocks | ||
223 | * everything and recovery for this dead node is done | ||
224 | *10) go back to 2) while there are still dead nodes | ||
225 | * | ||
226 | */ | ||
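Relating the steps above to the functions declared earlier in this file: step 2 is dlm_pick_recovery_master(), steps 5-6 are driven by dlm_request_all_locks() with each node reporting completion via dlm_send_all_done_msg(), and the ALLDONE of step 7 appears here as the finalize message (dlm_send_finalize_reco_message()). This mapping is inferred from the declarations, not spelled out by the patch.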
227 | |||
228 | |||
229 | #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) | ||
230 | |||
231 | static int dlm_recovery_thread(void *data) | ||
232 | { | ||
233 | int status; | ||
234 | struct dlm_ctxt *dlm = data; | ||
235 | unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); | ||
236 | |||
237 | mlog(0, "dlm thread running for %s...\n", dlm->name); | ||
238 | |||
239 | while (!kthread_should_stop()) { | ||
240 | if (dlm_joined(dlm)) { | ||
241 | status = dlm_do_recovery(dlm); | ||
242 | if (status == -EAGAIN) { | ||
243 | /* do not sleep, recheck immediately. */ | ||
244 | continue; | ||
245 | } | ||
246 | if (status < 0) | ||
247 | mlog_errno(status); | ||
248 | } | ||
249 | |||
250 | wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, | ||
251 | kthread_should_stop(), | ||
252 | timeout); | ||
253 | } | ||
254 | |||
255 | mlog(0, "quitting DLM recovery thread\n"); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | /* callers of the top-level api calls (dlmlock/dlmunlock) should | ||
260 | * block on the dlm->reco.event when recovery is in progress. | ||
261 | * the dlm recovery thread will set this state when it begins | ||
262 | * recovering a dead node (as the new master or not) and clear | ||
263 | * the state and wake as soon as all affected lock resources have | ||
264 | * been marked with the RECOVERY flag */ | ||
265 | static int dlm_in_recovery(struct dlm_ctxt *dlm) | ||
266 | { | ||
267 | int in_recovery; | ||
268 | spin_lock(&dlm->spinlock); | ||
269 | in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | ||
270 | spin_unlock(&dlm->spinlock); | ||
271 | return in_recovery; | ||
272 | } | ||
273 | |||
274 | |||
275 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm) | ||
276 | { | ||
277 | wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); | ||
278 | } | ||
279 | |||
280 | static void dlm_begin_recovery(struct dlm_ctxt *dlm) | ||
281 | { | ||
282 | spin_lock(&dlm->spinlock); | ||
283 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | ||
284 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; | ||
285 | spin_unlock(&dlm->spinlock); | ||
286 | } | ||
287 | |||
288 | static void dlm_end_recovery(struct dlm_ctxt *dlm) | ||
289 | { | ||
290 | spin_lock(&dlm->spinlock); | ||
291 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); | ||
292 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; | ||
293 | spin_unlock(&dlm->spinlock); | ||
294 | wake_up(&dlm->reco.event); | ||
295 | } | ||
296 | |||
297 | static int dlm_do_recovery(struct dlm_ctxt *dlm) | ||
298 | { | ||
299 | int status = 0; | ||
300 | |||
301 | spin_lock(&dlm->spinlock); | ||
302 | |||
303 | /* check to see if the new master has died */ | ||
304 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && | ||
305 | test_bit(dlm->reco.new_master, dlm->recovery_map)) { | ||
306 | mlog(0, "new master %u died while recovering %u!\n", | ||
307 | dlm->reco.new_master, dlm->reco.dead_node); | ||
308 | /* unset the new_master, leave dead_node */ | ||
309 | dlm->reco.new_master = O2NM_INVALID_NODE_NUM; | ||
310 | } | ||
311 | |||
312 | /* select a target to recover */ | ||
313 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | ||
314 | int bit; | ||
315 | |||
316 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | ||
317 | if (bit >= O2NM_MAX_NODES || bit < 0) | ||
318 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
319 | else | ||
320 | dlm->reco.dead_node = bit; | ||
321 | } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { | ||
322 | /* BUG? */ | ||
323 | mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", | ||
324 | dlm->reco.dead_node); | ||
325 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
326 | } | ||
327 | |||
328 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | ||
329 | // mlog(0, "nothing to recover! sleeping now!\n"); | ||
330 | spin_unlock(&dlm->spinlock); | ||
331 | /* return to main thread loop and sleep. */ | ||
332 | return 0; | ||
333 | } | ||
334 | mlog(0, "recovery thread found node %u in the recovery map!\n", | ||
335 | dlm->reco.dead_node); | ||
336 | spin_unlock(&dlm->spinlock); | ||
337 | |||
338 | /* take write barrier */ | ||
339 | /* (stops the list reshuffling thread, proxy ast handling) */ | ||
340 | dlm_begin_recovery(dlm); | ||
341 | |||
342 | if (dlm->reco.new_master == dlm->node_num) | ||
343 | goto master_here; | ||
344 | |||
345 | if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { | ||
346 | /* choose a new master */ | ||
347 | if (!dlm_pick_recovery_master(dlm)) { | ||
348 | /* already notified everyone. go. */ | ||
349 | dlm->reco.new_master = dlm->node_num; | ||
350 | goto master_here; | ||
351 | } | ||
352 | mlog(0, "another node will master this recovery session.\n"); | ||
353 | } | ||
354 | mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n", | ||
355 | dlm->name, dlm->reco.new_master, | ||
356 | dlm->node_num, dlm->reco.dead_node); | ||
357 | |||
358 | /* it is safe to start everything back up here | ||
359 | * because all of the dead node's lock resources | ||
360 | * have been marked as in-recovery */ | ||
361 | dlm_end_recovery(dlm); | ||
362 | |||
363 | /* sleep out in main dlm_recovery_thread loop. */ | ||
364 | return 0; | ||
365 | |||
366 | master_here: | ||
367 | mlog(0, "mastering recovery of %s:%u here(this=%u)!\n", | ||
368 | dlm->name, dlm->reco.dead_node, dlm->node_num); | ||
369 | |||
370 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); | ||
371 | if (status < 0) { | ||
372 | mlog(ML_ERROR, "error %d remastering locks for node %u, " | ||
373 | "retrying.\n", status, dlm->reco.dead_node); | ||
374 | } else { | ||
375 | /* success! see if any other nodes need recovery */ | ||
376 | dlm_reset_recovery(dlm); | ||
377 | } | ||
378 | dlm_end_recovery(dlm); | ||
379 | |||
380 | /* continue and look for another dead node */ | ||
381 | return -EAGAIN; | ||
382 | } | ||
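Note the -EAGAIN: the thread loop above treats it as "recheck immediately", so after remastering one dead node the recovery thread goes straight back to scanning the recovery map for the next one without sleeping.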
383 | |||
384 | static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | ||
385 | { | ||
386 | int status = 0; | ||
387 | struct dlm_reco_node_data *ndata; | ||
388 | struct list_head *iter; | ||
389 | int all_nodes_done; | ||
390 | int destroy = 0; | ||
391 | int pass = 0; | ||
392 | |||
393 | status = dlm_init_recovery_area(dlm, dead_node); | ||
394 | if (status < 0) | ||
395 | goto leave; | ||
396 | |||
397 | /* safe to access the node data list without a lock, since this | ||
398 | * process is the only one to change the list */ | ||
399 | list_for_each(iter, &dlm->reco.node_data) { | ||
400 | ndata = list_entry (iter, struct dlm_reco_node_data, list); | ||
401 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); | ||
402 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; | ||
403 | |||
404 | mlog(0, "requesting lock info from node %u\n", | ||
405 | ndata->node_num); | ||
406 | |||
407 | if (ndata->node_num == dlm->node_num) { | ||
408 | ndata->state = DLM_RECO_NODE_DATA_DONE; | ||
409 | continue; | ||
410 | } | ||
411 | |||
412 | status = dlm_request_all_locks(dlm, ndata->node_num, dead_node); | ||
413 | if (status < 0) { | ||
414 | mlog_errno(status); | ||
415 | if (dlm_is_host_down(status)) | ||
416 | ndata->state = DLM_RECO_NODE_DATA_DEAD; | ||
417 | else { | ||
418 | destroy = 1; | ||
419 | goto leave; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | switch (ndata->state) { | ||
424 | case DLM_RECO_NODE_DATA_INIT: | ||
425 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | ||
426 | case DLM_RECO_NODE_DATA_REQUESTED: | ||
427 | BUG(); | ||
428 | break; | ||
429 | case DLM_RECO_NODE_DATA_DEAD: | ||
430 | mlog(0, "node %u died after requesting " | ||
431 | "recovery info for node %u\n", | ||
432 | ndata->node_num, dead_node); | ||
433 | // start all over | ||
434 | destroy = 1; | ||
435 | status = -EAGAIN; | ||
436 | goto leave; | ||
437 | case DLM_RECO_NODE_DATA_REQUESTING: | ||
438 | ndata->state = DLM_RECO_NODE_DATA_REQUESTED; | ||
439 | mlog(0, "now receiving recovery data from " | ||
440 | "node %u for dead node %u\n", | ||
441 | ndata->node_num, dead_node); | ||
442 | break; | ||
443 | case DLM_RECO_NODE_DATA_RECEIVING: | ||
444 | mlog(0, "already receiving recovery data from " | ||
445 | "node %u for dead node %u\n", | ||
446 | ndata->node_num, dead_node); | ||
447 | break; | ||
448 | case DLM_RECO_NODE_DATA_DONE: | ||
449 | mlog(0, "already DONE receiving recovery data " | ||
450 | "from node %u for dead node %u\n", | ||
451 | ndata->node_num, dead_node); | ||
452 | break; | ||
453 | } | ||
454 | } | ||
455 | |||
456 | mlog(0, "done requesting all lock info\n"); | ||
457 | |||
458 | /* nodes should be sending reco data now | ||
459 | * just need to wait */ | ||
460 | |||
461 | while (1) { | ||
462 | /* check all the nodes now to see if we are | ||
463 | * done, or if anyone died */ | ||
464 | all_nodes_done = 1; | ||
465 | spin_lock(&dlm_reco_state_lock); | ||
466 | list_for_each(iter, &dlm->reco.node_data) { | ||
467 | ndata = list_entry(iter, struct dlm_reco_node_data, list); | ||
468 | |||
469 | mlog(0, "checking recovery state of node %u\n", | ||
470 | ndata->node_num); | ||
471 | switch (ndata->state) { | ||
472 | case DLM_RECO_NODE_DATA_INIT: | ||
473 | case DLM_RECO_NODE_DATA_REQUESTING: | ||
474 | mlog(ML_ERROR, "bad ndata state for " | ||
475 | "node %u: state=%d\n", | ||
476 | ndata->node_num, ndata->state); | ||
477 | BUG(); | ||
478 | break; | ||
479 | case DLM_RECO_NODE_DATA_DEAD: | ||
480 | mlog(0, "node %u died after " | ||
481 | "requesting recovery info for " | ||
482 | "node %u\n", ndata->node_num, | ||
483 | dead_node); | ||
484 | spin_unlock(&dlm_reco_state_lock); | ||
485 | /* start all over */ | ||
486 | destroy = 1; | ||
487 | status = -EAGAIN; | ||
488 | goto leave; | ||
489 | case DLM_RECO_NODE_DATA_RECEIVING: | ||
490 | case DLM_RECO_NODE_DATA_REQUESTED: | ||
491 | all_nodes_done = 0; | ||
492 | break; | ||
493 | case DLM_RECO_NODE_DATA_DONE: | ||
494 | break; | ||
495 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | ||
496 | break; | ||
497 | } | ||
498 | } | ||
499 | spin_unlock(&dlm_reco_state_lock); | ||
500 | |||
501 | mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, | ||
502 | all_nodes_done ? "yes" : "no"); | ||
503 | if (all_nodes_done) { | ||
504 | int ret; | ||
505 | |||
506 | /* all nodes are now in DLM_RECO_NODE_DATA_DONE state | ||
507 | * just send a finalize message to everyone and | ||
508 | * clean up */ | ||
509 | mlog(0, "all nodes are done! send finalize\n"); | ||
510 | ret = dlm_send_finalize_reco_message(dlm); | ||
511 | if (ret < 0) | ||
512 | mlog_errno(ret); | ||
513 | |||
514 | spin_lock(&dlm->spinlock); | ||
515 | dlm_finish_local_lockres_recovery(dlm, dead_node, | ||
516 | dlm->node_num); | ||
517 | spin_unlock(&dlm->spinlock); | ||
518 | mlog(0, "should be done with recovery!\n"); | ||
519 | |||
520 | mlog(0, "finishing recovery of %s at %lu, " | ||
521 | "dead=%u, this=%u, new=%u\n", dlm->name, | ||
522 | jiffies, dlm->reco.dead_node, | ||
523 | dlm->node_num, dlm->reco.new_master); | ||
524 | destroy = 1; | ||
525 | status = ret; | ||
526 | /* rescan everything marked dirty along the way */ | ||
527 | dlm_kick_thread(dlm, NULL); | ||
528 | break; | ||
529 | } | ||
530 | /* wait to be signalled, with periodic timeout | ||
531 | * to check for node death */ | ||
532 | wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, | ||
533 | kthread_should_stop(), | ||
534 | msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); | ||
535 | |||
536 | } | ||
537 | |||
538 | leave: | ||
539 | if (destroy) | ||
540 | dlm_destroy_recovery_area(dlm, dead_node); | ||
541 | |||
542 | mlog_exit(status); | ||
543 | return status; | ||
544 | } | ||
545 | |||
546 | static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | ||
547 | { | ||
548 | int num = 0; | ||
549 | struct dlm_reco_node_data *ndata; | ||
550 | |||
551 | spin_lock(&dlm->spinlock); | ||
552 | memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); | ||
553 | /* nodes can only be removed (by dying) after dropping | ||
554 | * this lock, and death will be trapped later, so this should do */ | ||
555 | spin_unlock(&dlm->spinlock); | ||
556 | |||
557 | while (1) { | ||
558 | num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num); | ||
559 | if (num >= O2NM_MAX_NODES) { | ||
560 | break; | ||
561 | } | ||
562 | BUG_ON(num == dead_node); | ||
563 | |||
564 | ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL); | ||
565 | if (!ndata) { | ||
566 | dlm_destroy_recovery_area(dlm, dead_node); | ||
567 | return -ENOMEM; | ||
568 | } | ||
569 | ndata->node_num = num; | ||
570 | ndata->state = DLM_RECO_NODE_DATA_INIT; | ||
571 | spin_lock(&dlm_reco_state_lock); | ||
572 | list_add_tail(&ndata->list, &dlm->reco.node_data); | ||
573 | spin_unlock(&dlm_reco_state_lock); | ||
574 | num++; | ||
575 | } | ||
576 | |||
577 | return 0; | ||
578 | } | ||
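
The while(1)/find_next_bit walk above is the standard way to visit every set bit in a node bitmap; a stripped-down sketch of just that idiom:

	/* visit each node present in 'map'; find_next_bit() returns
	 * O2NM_MAX_NODES when no further bits are set */
	static void visit_node_map_sketch(unsigned long *map)
	{
		int num = 0;

		while ((num = find_next_bit(map, O2NM_MAX_NODES, num))
		       < O2NM_MAX_NODES) {
			/* ... act on node 'num' ... */
			num++;	/* step past the bit just found */
		}
	}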
579 | |||
580 | static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | ||
581 | { | ||
582 | struct list_head *iter, *iter2; | ||
583 | struct dlm_reco_node_data *ndata; | ||
584 | LIST_HEAD(tmplist); | ||
585 | |||
586 | spin_lock(&dlm_reco_state_lock); | ||
587 | list_splice_init(&dlm->reco.node_data, &tmplist); | ||
588 | spin_unlock(&dlm_reco_state_lock); | ||
589 | |||
590 | list_for_each_safe(iter, iter2, &tmplist) { | ||
591 | ndata = list_entry (iter, struct dlm_reco_node_data, list); | ||
592 | list_del_init(&ndata->list); | ||
593 | kfree(ndata); | ||
594 | } | ||
595 | } | ||
596 | |||
597 | static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | ||
598 | u8 dead_node) | ||
599 | { | ||
600 | struct dlm_lock_request lr; | ||
601 | int ret; | ||
602 | |||
603 | mlog(0, "\n"); | ||
604 | |||
605 | |||
606 | mlog(0, "dlm_request_all_locks: dead node is %u, sending request " | ||
607 | "to %u\n", dead_node, request_from); | ||
608 | |||
609 | memset(&lr, 0, sizeof(lr)); | ||
610 | lr.node_idx = dlm->node_num; | ||
611 | lr.dead_node = dead_node; | ||
612 | |||
613 | /* send message */ | ||
615 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, | ||
616 | &lr, sizeof(lr), request_from, NULL); | ||
617 | |||
618 | /* negative status is handled by caller */ | ||
619 | if (ret < 0) | ||
620 | mlog_errno(ret); | ||
621 | |||
622 | /* return from here, then sleep until all received or error */ | ||
623 | return ret; | ||
624 | } | ||
627 | |||
628 | int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data) | ||
629 | { | ||
630 | struct dlm_ctxt *dlm = data; | ||
631 | struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; | ||
632 | char *buf = NULL; | ||
633 | struct dlm_work_item *item = NULL; | ||
634 | |||
635 | if (!dlm_grab(dlm)) | ||
636 | return -EINVAL; | ||
637 | |||
638 | BUG_ON(lr->dead_node != dlm->reco.dead_node); | ||
639 | |||
640 | item = kcalloc(1, sizeof(*item), GFP_KERNEL); | ||
641 | if (!item) { | ||
642 | dlm_put(dlm); | ||
643 | return -ENOMEM; | ||
644 | } | ||
645 | |||
646 | /* this will get freed by dlm_request_all_locks_worker */ | ||
647 | buf = (char *) __get_free_page(GFP_KERNEL); | ||
648 | if (!buf) { | ||
649 | kfree(item); | ||
650 | dlm_put(dlm); | ||
651 | return -ENOMEM; | ||
652 | } | ||
653 | |||
654 | /* queue up work for dlm_request_all_locks_worker */ | ||
655 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
656 | dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); | ||
657 | item->u.ral.reco_master = lr->node_idx; | ||
658 | item->u.ral.dead_node = lr->dead_node; | ||
659 | spin_lock(&dlm->work_lock); | ||
660 | list_add_tail(&item->list, &dlm->work_list); | ||
661 | spin_unlock(&dlm->work_lock); | ||
662 | schedule_work(&dlm->dispatched_work); | ||
663 | |||
664 | dlm_put(dlm); | ||
665 | return 0; | ||
666 | } | ||
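
Handlers like the one above run in the network receive path, so anything that may block is packaged into a dlm_work_item and deferred to a workqueue. A hedged sketch of that dispatch idiom, factored out of the handler (the helper name is invented; dlm_workfunc_t is assumed to match what dlm_init_work_item() expects):

	static int dlm_queue_work_sketch(struct dlm_ctxt *dlm,
					 dlm_workfunc_t *func, void *wdata)
	{
		struct dlm_work_item *item;

		item = kcalloc(1, sizeof(*item), GFP_KERNEL);
		if (!item)
			return -ENOMEM;

		dlm_grab(dlm);	/* extra ref, dropped when the item runs */
		dlm_init_work_item(dlm, item, func, wdata);
		spin_lock(&dlm->work_lock);
		list_add_tail(&item->list, &dlm->work_list);
		spin_unlock(&dlm->work_lock);
		schedule_work(&dlm->dispatched_work);
		return 0;
	}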
667 | |||
668 | static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) | ||
669 | { | ||
670 | struct dlm_migratable_lockres *mres; | ||
671 | struct dlm_lock_resource *res; | ||
672 | struct dlm_ctxt *dlm; | ||
673 | LIST_HEAD(resources); | ||
674 | struct list_head *iter; | ||
675 | int ret; | ||
676 | u8 dead_node, reco_master; | ||
677 | |||
678 | dlm = item->dlm; | ||
679 | dead_node = item->u.ral.dead_node; | ||
680 | reco_master = item->u.ral.reco_master; | ||
681 | BUG_ON(dead_node != dlm->reco.dead_node); | ||
682 | BUG_ON(reco_master != dlm->reco.new_master); | ||
683 | |||
684 | mres = (struct dlm_migratable_lockres *)data; | ||
685 | |||
686 | /* lock resources should have already been moved to the | ||
687 | * dlm->reco.resources list. now move items from that list | ||
688 | * to a temp list if the dead owner matches. note that the | ||
689 | * whole cluster recovers only one node at a time, so we | ||
690 | * can safely move UNKNOWN lock resources for each recovery | ||
691 | * session. */ | ||
692 | dlm_move_reco_locks_to_list(dlm, &resources, dead_node); | ||
693 | |||
694 | /* now we can begin blasting lockreses without the dlm lock */ | ||
695 | list_for_each(iter, &resources) { | ||
696 | res = list_entry(iter, struct dlm_lock_resource, recovering); | ||
697 | ret = dlm_send_one_lockres(dlm, res, mres, reco_master, | ||
698 | DLM_MRES_RECOVERY); | ||
699 | if (ret < 0) | ||
700 | mlog_errno(ret); | ||
701 | } | ||
702 | |||
703 | /* move the resources back to the list */ | ||
704 | spin_lock(&dlm->spinlock); | ||
705 | list_splice_init(&resources, &dlm->reco.resources); | ||
706 | spin_unlock(&dlm->spinlock); | ||
707 | |||
708 | ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); | ||
709 | if (ret < 0) | ||
710 | mlog_errno(ret); | ||
711 | |||
712 | free_page((unsigned long)data); | ||
713 | } | ||
714 | |||
715 | |||
716 | static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) | ||
717 | { | ||
718 | int ret, tmpret; | ||
719 | struct dlm_reco_data_done done_msg; | ||
720 | |||
721 | memset(&done_msg, 0, sizeof(done_msg)); | ||
722 | done_msg.node_idx = dlm->node_num; | ||
723 | done_msg.dead_node = dead_node; | ||
724 | mlog(0, "sending DATA DONE message to %u, " | ||
725 | "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, | ||
726 | done_msg.dead_node); | ||
727 | |||
728 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, | ||
729 | sizeof(done_msg), send_to, &tmpret); | ||
730 | /* negative status is ignored by the caller */ | ||
731 | if (ret >= 0) | ||
732 | ret = tmpret; | ||
733 | return ret; | ||
734 | } | ||
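
Note the two status levels above: o2net_send_message() returns the local transport result, while tmpret receives whatever the remote handler returned. The fold at the end is an idiom used throughout this file; as a standalone sketch (helper name invented):

	/* send 'buf' and collapse transport and handler status into one code */
	static int send_and_fold_sketch(struct dlm_ctxt *dlm, u32 msg_type,
					void *buf, u32 len, u8 target)
	{
		int ret, status = 0;

		ret = o2net_send_message(msg_type, dlm->key, buf, len,
					 target, &status);
		if (ret >= 0)
			ret = status;	/* delivered: report the remote result */
		return ret;
	}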
735 | |||
736 | |||
737 | int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data) | ||
738 | { | ||
739 | struct dlm_ctxt *dlm = data; | ||
740 | struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; | ||
741 | struct list_head *iter; | ||
742 | struct dlm_reco_node_data *ndata = NULL; | ||
743 | int ret = -EINVAL; | ||
744 | |||
745 | if (!dlm_grab(dlm)) | ||
746 | return -EINVAL; | ||
747 | |||
748 | mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " | ||
749 | "node_idx=%u, this node=%u\n", done->dead_node, | ||
750 | dlm->reco.dead_node, done->node_idx, dlm->node_num); | ||
751 | BUG_ON(done->dead_node != dlm->reco.dead_node); | ||
752 | |||
753 | spin_lock(&dlm_reco_state_lock); | ||
754 | list_for_each(iter, &dlm->reco.node_data) { | ||
755 | ndata = list_entry(iter, struct dlm_reco_node_data, list); | ||
756 | if (ndata->node_num != done->node_idx) | ||
757 | continue; | ||
758 | |||
759 | switch (ndata->state) { | ||
760 | case DLM_RECO_NODE_DATA_INIT: | ||
761 | case DLM_RECO_NODE_DATA_DEAD: | ||
762 | case DLM_RECO_NODE_DATA_DONE: | ||
763 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | ||
764 | mlog(ML_ERROR, "bad ndata state for node %u:" | ||
765 | " state=%d\n", ndata->node_num, | ||
766 | ndata->state); | ||
767 | BUG(); | ||
768 | break; | ||
769 | case DLM_RECO_NODE_DATA_RECEIVING: | ||
770 | case DLM_RECO_NODE_DATA_REQUESTED: | ||
771 | case DLM_RECO_NODE_DATA_REQUESTING: | ||
772 | mlog(0, "node %u is DONE sending " | ||
773 | "recovery data!\n", | ||
774 | ndata->node_num); | ||
775 | |||
776 | ndata->state = DLM_RECO_NODE_DATA_DONE; | ||
777 | ret = 0; | ||
778 | break; | ||
779 | } | ||
780 | } | ||
781 | spin_unlock(&dlm_reco_state_lock); | ||
782 | |||
783 | /* wake the recovery thread, some node is done */ | ||
784 | if (!ret) | ||
785 | dlm_kick_recovery_thread(dlm); | ||
786 | |||
787 | if (ret < 0) | ||
788 | mlog(ML_ERROR, "failed to find recovery node data for node " | ||
789 | "%u\n", done->node_idx); | ||
790 | dlm_put(dlm); | ||
791 | |||
792 | mlog(0, "leaving reco data done handler, ret=%d\n", ret); | ||
793 | return ret; | ||
794 | } | ||
795 | |||
796 | static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, | ||
797 | struct list_head *list, | ||
798 | u8 dead_node) | ||
799 | { | ||
800 | struct dlm_lock_resource *res; | ||
801 | struct list_head *iter, *iter2; | ||
802 | |||
803 | spin_lock(&dlm->spinlock); | ||
804 | list_for_each_safe(iter, iter2, &dlm->reco.resources) { | ||
805 | res = list_entry(iter, struct dlm_lock_resource, recovering); | ||
806 | if (dlm_is_recovery_lock(res->lockname.name, | ||
807 | res->lockname.len)) | ||
808 | continue; | ||
809 | if (res->owner == dead_node) { | ||
810 | mlog(0, "found lockres owned by dead node while " | ||
811 | "doing recovery for node %u. sending it.\n", | ||
812 | dead_node); | ||
813 | list_del_init(&res->recovering); | ||
814 | list_add_tail(&res->recovering, list); | ||
815 | } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
816 | mlog(0, "found UNKNOWN owner while doing recovery " | ||
817 | "for node %u. sending it.\n", dead_node); | ||
818 | list_del_init(&res->recovering); | ||
819 | list_add_tail(&res->recovering, list); | ||
820 | } | ||
821 | } | ||
822 | spin_unlock(&dlm->spinlock); | ||
823 | } | ||
824 | |||
825 | static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) | ||
826 | { | ||
827 | int total_locks = 0; | ||
828 | struct list_head *iter, *queue = &res->granted; | ||
829 | int i; | ||
830 | |||
831 | for (i=0; i<3; i++) { | ||
832 | list_for_each(iter, queue) | ||
833 | total_locks++; | ||
834 | queue++; | ||
835 | } | ||
836 | return total_locks; | ||
837 | } | ||
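
The queue++ walk above (and dlm_list_num_to_pointer() later in this file) only works because the three lock queues are declared as consecutive list_heads inside struct dlm_lock_resource. A sketch that makes the layout assumption explicit (helper name invented):

	/* assumes granted/converting/blocked are adjacent members, indexed
	 * by DLM_GRANTED_LIST (0) .. DLM_BLOCKED_LIST (2) */
	static struct list_head *nth_queue_sketch(struct dlm_lock_resource *res,
						  int idx)
	{
		BUG_ON(idx < DLM_GRANTED_LIST || idx > DLM_BLOCKED_LIST);
		return &res->granted + idx;	/* steps one list_head per index */
	}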
838 | |||
839 | |||
840 | static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | ||
841 | struct dlm_migratable_lockres *mres, | ||
842 | u8 send_to, | ||
843 | struct dlm_lock_resource *res, | ||
844 | int total_locks) | ||
845 | { | ||
846 | u64 mig_cookie = be64_to_cpu(mres->mig_cookie); | ||
847 | int mres_total_locks = be32_to_cpu(mres->total_locks); | ||
848 | int sz, ret = 0, status = 0; | ||
849 | u8 orig_flags = mres->flags, | ||
850 | orig_master = mres->master; | ||
851 | |||
852 | BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); | ||
853 | if (!mres->num_locks) | ||
854 | return 0; | ||
855 | |||
856 | sz = sizeof(struct dlm_migratable_lockres) + | ||
857 | (mres->num_locks * sizeof(struct dlm_migratable_lock)); | ||
858 | |||
859 | /* add an all-done flag if we reached the last lock */ | ||
861 | BUG_ON(total_locks > mres_total_locks); | ||
862 | if (total_locks == mres_total_locks) | ||
863 | mres->flags |= DLM_MRES_ALL_DONE; | ||
864 | |||
865 | /* send it */ | ||
866 | ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, | ||
867 | sz, send_to, &status); | ||
868 | if (ret < 0) { | ||
869 | /* XXX: negative status is not handled. | ||
870 | * this will end up killing this node. */ | ||
871 | mlog_errno(ret); | ||
872 | } else { | ||
873 | /* might get an -ENOMEM back here */ | ||
874 | ret = status; | ||
875 | if (ret < 0) { | ||
876 | mlog_errno(ret); | ||
877 | |||
878 | if (ret == -EFAULT) { | ||
879 | mlog(ML_ERROR, "node %u told me to kill " | ||
880 | "myself!\n", send_to); | ||
881 | BUG(); | ||
882 | } | ||
883 | } | ||
884 | } | ||
885 | |||
886 | /* zero and reinit the message buffer */ | ||
887 | dlm_init_migratable_lockres(mres, res->lockname.name, | ||
888 | res->lockname.len, mres_total_locks, | ||
889 | mig_cookie, orig_flags, orig_master); | ||
890 | return ret; | ||
891 | } | ||
892 | |||
893 | static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, | ||
894 | const char *lockname, int namelen, | ||
895 | int total_locks, u64 cookie, | ||
896 | u8 flags, u8 master) | ||
897 | { | ||
898 | /* mres here is one full page */ | ||
899 | memset(mres, 0, PAGE_SIZE); | ||
900 | mres->lockname_len = namelen; | ||
901 | memcpy(mres->lockname, lockname, namelen); | ||
902 | mres->num_locks = 0; | ||
903 | mres->total_locks = cpu_to_be32(total_locks); | ||
904 | mres->mig_cookie = cpu_to_be64(cookie); | ||
905 | mres->flags = flags; | ||
906 | mres->master = master; | ||
907 | } | ||
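
Since mres always occupies exactly one page, the whole wire format is bounded by PAGE_SIZE: a fixed header followed by up to DLM_MAX_MIGRATABLE_LOCKS fixed-size lock entries. A sketch of the sizing arithmetic that dlm_send_mig_lockres_msg() relies on (the real constant is defined in dlmcommon.h):

	static inline size_t mres_wire_bytes_sketch(u8 num_locks)
	{
		return sizeof(struct dlm_migratable_lockres) +
		       num_locks * sizeof(struct dlm_migratable_lock);
	}
	/* invariant assumed by the one-page buffer:
	 * mres_wire_bytes_sketch(DLM_MAX_MIGRATABLE_LOCKS) <= PAGE_SIZE */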
908 | |||
909 | |||
910 | /* returns 1 if this lock fills the network structure, | ||
911 | * 0 otherwise */ | ||
912 | static int dlm_add_lock_to_array(struct dlm_lock *lock, | ||
913 | struct dlm_migratable_lockres *mres, int queue) | ||
914 | { | ||
915 | struct dlm_migratable_lock *ml; | ||
916 | int lock_num = mres->num_locks; | ||
917 | |||
918 | ml = &(mres->ml[lock_num]); | ||
919 | ml->cookie = lock->ml.cookie; | ||
920 | ml->type = lock->ml.type; | ||
921 | ml->convert_type = lock->ml.convert_type; | ||
922 | ml->highest_blocked = lock->ml.highest_blocked; | ||
923 | ml->list = queue; | ||
924 | if (lock->lksb) { | ||
925 | ml->flags = lock->lksb->flags; | ||
926 | /* send our current lvb */ | ||
927 | if (ml->type == LKM_EXMODE || | ||
928 | ml->type == LKM_PRMODE) { | ||
929 | /* if it is already set, this had better be a PR | ||
930 | * and it has to match */ | ||
931 | if (mres->lvb[0] && (ml->type == LKM_EXMODE || | ||
932 | memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { | ||
933 | mlog(ML_ERROR, "mismatched lvbs!\n"); | ||
934 | __dlm_print_one_lock_resource(lock->lockres); | ||
935 | BUG(); | ||
936 | } | ||
937 | memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); | ||
938 | } | ||
939 | } | ||
940 | ml->node = lock->ml.node; | ||
941 | mres->num_locks++; | ||
942 | /* we reached the max, send this network message */ | ||
943 | if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) | ||
944 | return 1; | ||
945 | return 0; | ||
946 | } | ||
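
The lvb check above encodes an invariant worth stating on its own: once mres->lvb has been populated, a further lock may contribute an lvb only if it is a PR holder carrying an identical copy; a second EX, or a mismatched PR copy, means corrupted state. Restated as a hedged predicate (helper name invented):

	/* nonzero if 'lock' may merge its lvb into an mres that already has one */
	static int lvb_merge_ok_sketch(struct dlm_migratable_lockres *mres,
				       struct dlm_lock *lock)
	{
		if (!mres->lvb[0])
			return 1;	/* first lvb in always wins */
		if (lock->ml.type != LKM_PRMODE)
			return 0;	/* a second EX holder is a bug */
		return !memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
	}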
947 | |||
948 | |||
949 | int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
950 | struct dlm_migratable_lockres *mres, | ||
951 | u8 send_to, u8 flags) | ||
952 | { | ||
953 | struct list_head *queue, *iter; | ||
954 | int total_locks, i; | ||
955 | u64 mig_cookie = 0; | ||
956 | struct dlm_lock *lock; | ||
957 | int ret = 0; | ||
958 | |||
959 | BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); | ||
960 | |||
961 | mlog(0, "sending to %u\n", send_to); | ||
962 | |||
963 | total_locks = dlm_num_locks_in_lockres(res); | ||
964 | if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { | ||
965 | /* rare, but possible */ | ||
966 | mlog(0, "argh. lockres has %d locks. this will " | ||
967 | "require more than one network packet to " | ||
968 | "migrate\n", total_locks); | ||
969 | mig_cookie = dlm_get_next_mig_cookie(); | ||
970 | } | ||
971 | |||
972 | dlm_init_migratable_lockres(mres, res->lockname.name, | ||
973 | res->lockname.len, total_locks, | ||
974 | mig_cookie, flags, res->owner); | ||
975 | |||
976 | total_locks = 0; | ||
977 | for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { | ||
978 | queue = dlm_list_idx_to_ptr(res, i); | ||
979 | list_for_each(iter, queue) { | ||
980 | lock = list_entry(iter, struct dlm_lock, list); | ||
981 | |||
982 | /* add another lock. */ | ||
983 | total_locks++; | ||
984 | if (!dlm_add_lock_to_array(lock, mres, i)) | ||
985 | continue; | ||
986 | |||
987 | /* this filled the lock message, | ||
988 | * we must send it immediately. */ | ||
989 | ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, | ||
990 | res, total_locks); | ||
991 | if (ret < 0) { | ||
992 | /* TODO */ | ||
993 | mlog(ML_ERROR, "dlm_send_mig_lockres_msg " | ||
994 | "returned %d, TODO\n", ret); | ||
995 | BUG(); | ||
996 | } | ||
997 | } | ||
998 | } | ||
999 | /* flush any remaining locks */ | ||
1000 | ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); | ||
1001 | if (ret < 0) { | ||
1002 | /* TODO */ | ||
1003 | mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, " | ||
1004 | "TODO\n", ret); | ||
1005 | BUG(); | ||
1006 | } | ||
1007 | return ret; | ||
1008 | } | ||
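
The shape of the loop above is a plain batch-and-flush: append entries until the page-sized array fills, flush mid-stream, then flush the tail so the final packet carries DLM_MRES_ALL_DONE. The generic pattern, stripped of dlm specifics (a hedged sketch, not the actual helper):

	/* 'flush' sends the buffered entries and resets the buffer,
	 * mirroring dlm_send_mig_lockres_msg() */
	static int batch_and_flush_sketch(int nr_items, int capacity,
					  int (*flush)(int sent_so_far))
	{
		int i, filled = 0, sent = 0, ret;

		for (i = 0; i < nr_items; i++) {
			sent++;
			if (++filled < capacity)
				continue;
			ret = flush(sent);	/* buffer full: mid-stream send */
			if (ret < 0)
				return ret;
			filled = 0;
		}
		return flush(sent);	/* tail send, flags the last packet */
	}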
1009 | |||
1010 | |||
1011 | |||
1012 | /* | ||
1013 | * this message will contain no more than one page worth of | ||
1014 | * recovery data, and it will work on only one lockres. | ||
1015 | * there may be many locks in this page, and we may need to wait | ||
1016 | * for additional packets to complete all the locks (rare, but | ||
1017 | * possible). | ||
1018 | */ | ||
1019 | /* | ||
1020 | * NOTE: the allocation error cases here are scary | ||
1021 | * we really cannot afford to fail an alloc in recovery | ||
1022 | * do we spin? returning an error only delays the problem really | ||
1023 | */ | ||
1024 | |||
1025 | int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1026 | { | ||
1027 | struct dlm_ctxt *dlm = data; | ||
1028 | struct dlm_migratable_lockres *mres = | ||
1029 | (struct dlm_migratable_lockres *)msg->buf; | ||
1030 | int ret = 0; | ||
1031 | u8 real_master; | ||
1032 | char *buf = NULL; | ||
1033 | struct dlm_work_item *item = NULL; | ||
1034 | struct dlm_lock_resource *res = NULL; | ||
1035 | |||
1036 | if (!dlm_grab(dlm)) | ||
1037 | return -EINVAL; | ||
1038 | |||
1039 | BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); | ||
1040 | |||
1041 | real_master = mres->master; | ||
1042 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1043 | /* cannot migrate a lockres with no master */ | ||
1044 | BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); | ||
1045 | } | ||
1046 | |||
1047 | mlog(0, "%s message received from node %u\n", | ||
1048 | (mres->flags & DLM_MRES_RECOVERY) ? | ||
1049 | "recovery" : "migration", mres->master); | ||
1050 | if (mres->flags & DLM_MRES_ALL_DONE) | ||
1051 | mlog(0, "all done flag. all lockres data received!\n"); | ||
1052 | |||
1053 | ret = -ENOMEM; | ||
1054 | buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL); | ||
1055 | item = kcalloc(1, sizeof(*item), GFP_KERNEL); | ||
1056 | if (!buf || !item) | ||
1057 | goto leave; | ||
1058 | |||
1059 | /* lookup the lock to see if we have a secondary queue for this | ||
1060 | * already... just add the locks in and this will have its owner | ||
1061 | * and RECOVERY flag changed when it completes. */ | ||
1062 | res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); | ||
1063 | if (res) { | ||
1064 | /* this will get a ref on res */ | ||
1065 | /* mark it as recovering/migrating and hash it */ | ||
1066 | spin_lock(&res->spinlock); | ||
1067 | if (mres->flags & DLM_MRES_RECOVERY) { | ||
1068 | res->state |= DLM_LOCK_RES_RECOVERING; | ||
1069 | } else { | ||
1070 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
1071 | /* this is at least the second | ||
1072 | * lockres message */ | ||
1073 | mlog(0, "lock %.*s is already migrating\n", | ||
1074 | mres->lockname_len, | ||
1075 | mres->lockname); | ||
1076 | } else if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1077 | /* caller should BUG */ | ||
1078 | mlog(ML_ERROR, "node is attempting to migrate " | ||
1079 | "lock %.*s, but marked as recovering!\n", | ||
1080 | mres->lockname_len, mres->lockname); | ||
1081 | ret = -EFAULT; | ||
1082 | spin_unlock(&res->spinlock); | ||
1083 | goto leave; | ||
1084 | } | ||
1085 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
1086 | } | ||
1087 | spin_unlock(&res->spinlock); | ||
1088 | } else { | ||
1089 | /* need to allocate, just like if it was | ||
1090 | * mastered here normally */ | ||
1091 | res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); | ||
1092 | if (!res) | ||
1093 | goto leave; | ||
1094 | |||
1095 | /* to match the ref that we would have gotten if | ||
1096 | * dlm_lookup_lockres had succeeded */ | ||
1097 | dlm_lockres_get(res); | ||
1098 | |||
1099 | /* mark it as recovering/migrating and hash it */ | ||
1100 | if (mres->flags & DLM_MRES_RECOVERY) | ||
1101 | res->state |= DLM_LOCK_RES_RECOVERING; | ||
1102 | else | ||
1103 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
1104 | |||
1105 | spin_lock(&dlm->spinlock); | ||
1106 | __dlm_insert_lockres(dlm, res); | ||
1107 | spin_unlock(&dlm->spinlock); | ||
1108 | |||
1109 | /* now that the new lockres is inserted, | ||
1110 | * make it usable by other processes */ | ||
1111 | spin_lock(&res->spinlock); | ||
1112 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
1113 | spin_unlock(&res->spinlock); | ||
1114 | |||
1115 | /* add an extra ref for just-allocated lockres | ||
1116 | * otherwise the lockres will be purged immediately */ | ||
1117 | dlm_lockres_get(res); | ||
1118 | |||
1119 | } | ||
1120 | |||
1121 | /* at this point we have allocated everything we need, | ||
1122 | * and we have a hashed lockres with an extra ref and | ||
1123 | * the proper res->state flags. */ | ||
1124 | ret = 0; | ||
1125 | if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1126 | /* migration cannot have an unknown master */ | ||
1127 | BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); | ||
1128 | mlog(0, "recovery has passed me a lockres with an " | ||
1129 | "unknown owner.. will need to requery: " | ||
1130 | "%.*s\n", mres->lockname_len, mres->lockname); | ||
1131 | } else { | ||
1132 | spin_lock(&res->spinlock); | ||
1133 | dlm_change_lockres_owner(dlm, res, dlm->node_num); | ||
1134 | spin_unlock(&res->spinlock); | ||
1135 | } | ||
1136 | |||
1137 | /* queue up work for dlm_mig_lockres_worker */ | ||
1138 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
1139 | memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ | ||
1140 | dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); | ||
1141 | item->u.ml.lockres = res; /* already have a ref */ | ||
1142 | item->u.ml.real_master = real_master; | ||
1143 | spin_lock(&dlm->work_lock); | ||
1144 | list_add_tail(&item->list, &dlm->work_list); | ||
1145 | spin_unlock(&dlm->work_lock); | ||
1146 | schedule_work(&dlm->dispatched_work); | ||
1147 | |||
1148 | leave: | ||
1149 | dlm_put(dlm); | ||
1150 | if (ret < 0) { | ||
1151 | kfree(buf); /* kfree(NULL) is a no-op */ | ||
1152 | kfree(item); | ||
1153 | } | ||
1156 | |||
1157 | mlog_exit(ret); | ||
1158 | return ret; | ||
1159 | } | ||
1160 | |||
1161 | |||
1162 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) | ||
1163 | { | ||
1164 | struct dlm_ctxt *dlm; | ||
1165 | struct dlm_migratable_lockres *mres; | ||
1166 | int ret = 0; | ||
1167 | struct dlm_lock_resource *res; | ||
1168 | u8 real_master; | ||
1169 | |||
1170 | dlm = item->dlm; | ||
1171 | mres = (struct dlm_migratable_lockres *)data; | ||
1172 | |||
1173 | res = item->u.ml.lockres; | ||
1174 | real_master = item->u.ml.real_master; | ||
1175 | |||
1176 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1177 | /* this case is super-rare. only occurs if | ||
1178 | * node death happens during migration. */ | ||
1179 | again: | ||
1180 | ret = dlm_lockres_master_requery(dlm, res, &real_master); | ||
1181 | if (ret < 0) { | ||
1182 | mlog(0, "dlm_lockres_master_requery failure: %d\n", | ||
1183 | ret); | ||
1184 | goto again; | ||
1185 | } | ||
1186 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1187 | mlog(0, "lockres %.*s not claimed. " | ||
1188 | "this node will take it.\n", | ||
1189 | res->lockname.len, res->lockname.name); | ||
1190 | } else { | ||
1191 | mlog(0, "master needs to respond to sender " | ||
1192 | "that node %u still owns %.*s\n", | ||
1193 | real_master, res->lockname.len, | ||
1194 | res->lockname.name); | ||
1195 | /* cannot touch this lockres */ | ||
1196 | goto leave; | ||
1197 | } | ||
1198 | } | ||
1199 | |||
1200 | ret = dlm_process_recovery_data(dlm, res, mres); | ||
1201 | if (ret < 0) | ||
1202 | mlog(0, "dlm_process_recovery_data returned %d\n", ret); | ||
1203 | else | ||
1204 | mlog(0, "dlm_process_recovery_data succeeded\n"); | ||
1205 | |||
1206 | if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == | ||
1207 | (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { | ||
1208 | ret = dlm_finish_migration(dlm, res, mres->master); | ||
1209 | if (ret < 0) | ||
1210 | mlog_errno(ret); | ||
1211 | } | ||
1212 | |||
1213 | leave: | ||
1214 | kfree(data); | ||
1215 | mlog_exit(ret); | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | |||
1220 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, | ||
1221 | struct dlm_lock_resource *res, | ||
1222 | u8 *real_master) | ||
1223 | { | ||
1224 | struct dlm_node_iter iter; | ||
1225 | int nodenum; | ||
1226 | int ret = 0; | ||
1227 | |||
1228 | *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1229 | |||
1230 | /* we only reach here if one of the two nodes in a | ||
1231 | * migration died while the migration was in progress. | ||
1232 | * at this point we need to requery the master. we | ||
1233 | * know that the new_master got as far as creating | ||
1234 | * an mle on at least one node, but we do not know | ||
1235 | * if any nodes had actually cleared the mle and set | ||
1236 | * the master to the new_master. the old master | ||
1237 | * is supposed to set the owner to UNKNOWN in the | ||
1238 | * event of a new_master death, so the only possible | ||
1239 | * responses that we can get from nodes here are | ||
1240 | * that the master is new_master, or that the master | ||
1241 | * is UNKNOWN. | ||
1242 | * if all nodes come back with UNKNOWN then we know | ||
1243 | * the lock needs remastering here. | ||
1244 | * if any node comes back with a valid master, check | ||
1245 | * to see if that master is the one that we are | ||
1246 | * recovering. if so, then the new_master died and | ||
1247 | * we need to remaster this lock. if not, then the | ||
1248 | * new_master survived and that node will respond to | ||
1249 | * other nodes about the owner. | ||
1250 | * if there is an owner, this node needs to dump this | ||
1251 | * lockres and alert the sender that this lockres | ||
1252 | * was rejected. */ | ||
1253 | spin_lock(&dlm->spinlock); | ||
1254 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
1255 | spin_unlock(&dlm->spinlock); | ||
1256 | |||
1257 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
1258 | /* do not send to self */ | ||
1259 | if (nodenum == dlm->node_num) | ||
1260 | continue; | ||
1261 | ret = dlm_do_master_requery(dlm, res, nodenum, real_master); | ||
1262 | if (ret < 0) { | ||
1263 | mlog_errno(ret); | ||
1264 | BUG(); | ||
1265 | /* TODO: need to figure a way to restart this */ | ||
1266 | } | ||
1267 | if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1268 | mlog(0, "lock master is %u\n", *real_master); | ||
1269 | break; | ||
1270 | } | ||
1271 | } | ||
1272 | return ret; | ||
1273 | } | ||
1274 | |||
1275 | |||
1276 | static int dlm_do_master_requery(struct dlm_ctxt *dlm, | ||
1277 | struct dlm_lock_resource *res, | ||
1278 | u8 nodenum, u8 *real_master) | ||
1279 | { | ||
1280 | int ret = -EINVAL; | ||
1281 | struct dlm_master_requery req; | ||
1282 | int status = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1283 | |||
1284 | memset(&req, 0, sizeof(req)); | ||
1285 | req.node_idx = dlm->node_num; | ||
1286 | req.namelen = res->lockname.len; | ||
1287 | memcpy(req.name, res->lockname.name, res->lockname.len); | ||
1288 | |||
1289 | ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, | ||
1290 | &req, sizeof(req), nodenum, &status); | ||
1291 | /* XXX: negative status not handled properly here. */ | ||
1292 | if (ret < 0) | ||
1293 | mlog_errno(ret); | ||
1294 | else { | ||
1295 | BUG_ON(status < 0); | ||
1296 | BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); | ||
1297 | *real_master = (u8) (status & 0xff); | ||
1298 | mlog(0, "node %u responded to master requery with %u\n", | ||
1299 | nodenum, *real_master); | ||
1300 | ret = 0; | ||
1301 | } | ||
1302 | return ret; | ||
1303 | } | ||
1304 | |||
1305 | |||
1306 | /* this handler cannot return an error, so unless the | ||
1307 | * sending or receiving of the message itself failed, | ||
1308 | * the owner it reports can be trusted */ | ||
1309 | int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1310 | { | ||
1311 | struct dlm_ctxt *dlm = data; | ||
1312 | struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; | ||
1313 | struct dlm_lock_resource *res = NULL; | ||
1314 | int master = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1315 | u32 flags = DLM_ASSERT_MASTER_REQUERY; | ||
1316 | |||
1317 | if (!dlm_grab(dlm)) { | ||
1318 | /* since the domain has gone away on this | ||
1319 | * node, the proper response is UNKNOWN */ | ||
1320 | return master; | ||
1321 | } | ||
1322 | |||
1323 | spin_lock(&dlm->spinlock); | ||
1324 | res = __dlm_lookup_lockres(dlm, req->name, req->namelen); | ||
1325 | if (res) { | ||
1326 | spin_lock(&res->spinlock); | ||
1327 | master = res->owner; | ||
1328 | if (master == dlm->node_num) { | ||
1329 | int ret = dlm_dispatch_assert_master(dlm, res, | ||
1330 | 0, 0, flags); | ||
1331 | if (ret < 0) { | ||
1332 | mlog_errno(ret); | ||
1333 | /* retry!? */ | ||
1334 | BUG(); | ||
1335 | } | ||
1336 | } | ||
1337 | spin_unlock(&res->spinlock); | ||
1338 | } | ||
1339 | spin_unlock(&dlm->spinlock); | ||
1340 | |||
1341 | dlm_put(dlm); | ||
1342 | return master; | ||
1343 | } | ||
1344 | |||
1345 | static inline struct list_head * | ||
1346 | dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) | ||
1347 | { | ||
1348 | struct list_head *ret; | ||
1349 | BUG_ON(list_num < 0); | ||
1350 | BUG_ON(list_num > 2); | ||
1351 | ret = &(res->granted); | ||
1352 | ret += list_num; | ||
1353 | return ret; | ||
1354 | } | ||
1355 | /* TODO: do ast flush business | ||
1356 | * TODO: do MIGRATING and RECOVERING spinning | ||
1357 | */ | ||
1358 | |||
1359 | /* | ||
1360 | * NOTE about in-flight requests during migration: | ||
1361 | * | ||
1362 | * Before attempting the migrate, the master has marked the lockres as | ||
1363 | * MIGRATING and then flushed all of its pending ASTS. So any in-flight | ||
1364 | * requests either got queued before the MIGRATING flag got set, in which | ||
1365 | * case the lock data will reflect the change and a return message is on | ||
1366 | * the way, or the request failed to get in before MIGRATING got set. In | ||
1367 | * this case, the caller will be told to spin and wait for the MIGRATING | ||
1368 | * flag to be dropped, then recheck the master. | ||
1369 | * This holds true for the convert, cancel and unlock cases, and since lvb | ||
1370 | * updates are tied to these same messages, it applies to lvb updates as | ||
1371 | * well. For the lock case, there is no way a lock can be on the master | ||
1372 | * queue and not be on the secondary queue since the lock is always added | ||
1373 | * locally first. This means that the new target node will never be sent | ||
1374 | * a lock that he doesn't already have on the list. | ||
1375 | * In total, this means that the local lock is correct and should not be | ||
1376 | * updated to match the one sent by the master. Any messages sent back | ||
1377 | * from the master before the MIGRATING flag will bring the lock properly | ||
1378 | * up-to-date, and the change will be ordered properly for the waiter. | ||
1379 | * We will *not* attempt to modify the lock underneath the waiter. | ||
1380 | */ | ||
1381 | |||
1382 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | ||
1383 | struct dlm_lock_resource *res, | ||
1384 | struct dlm_migratable_lockres *mres) | ||
1385 | { | ||
1386 | struct dlm_migratable_lock *ml; | ||
1387 | struct list_head *queue; | ||
1388 | struct dlm_lock *newlock = NULL; | ||
1389 | struct dlm_lockstatus *lksb = NULL; | ||
1390 | int ret = 0; | ||
1391 | int i; | ||
1392 | struct list_head *iter; | ||
1393 | struct dlm_lock *lock = NULL; | ||
1394 | |||
1395 | mlog(0, "running %d locks for this lockres\n", mres->num_locks); | ||
1396 | for (i=0; i<mres->num_locks; i++) { | ||
1397 | ml = &(mres->ml[i]); | ||
1398 | BUG_ON(ml->highest_blocked != LKM_IVMODE); | ||
1399 | newlock = NULL; | ||
1400 | lksb = NULL; | ||
1401 | |||
1402 | queue = dlm_list_num_to_pointer(res, ml->list); | ||
1403 | |||
1404 | /* if the lock is for the local node it needs to | ||
1405 | * be moved to the proper location within the queue. | ||
1406 | * do not allocate a new lock structure. */ | ||
1407 | if (ml->node == dlm->node_num) { | ||
1408 | /* MIGRATION ONLY! */ | ||
1409 | BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); | ||
1410 | |||
1411 | spin_lock(&res->spinlock); | ||
1412 | list_for_each(iter, queue) { | ||
1413 | lock = list_entry(iter, struct dlm_lock, list); | ||
1414 | if (lock->ml.cookie != ml->cookie) | ||
1415 | lock = NULL; | ||
1416 | else | ||
1417 | break; | ||
1418 | } | ||
1419 | |||
1420 | /* lock is always created locally first, and | ||
1421 | * destroyed locally last. it must be on the list */ | ||
1422 | if (!lock) { | ||
1423 | mlog(ML_ERROR, "could not find local lock " | ||
1424 | "with cookie %"MLFu64"!\n", | ||
1425 | ml->cookie); | ||
1426 | BUG(); | ||
1427 | } | ||
1428 | BUG_ON(lock->ml.node != ml->node); | ||
1429 | |||
1430 | /* see NOTE above about why we do not update | ||
1431 | * to match the master here */ | ||
1432 | |||
1433 | /* move the lock to its proper place */ | ||
1434 | /* do not alter lock refcount. switching lists. */ | ||
1435 | list_del_init(&lock->list); | ||
1436 | list_add_tail(&lock->list, queue); | ||
1437 | spin_unlock(&res->spinlock); | ||
1438 | |||
1439 | mlog(0, "just reordered a local lock!\n"); | ||
1440 | continue; | ||
1441 | } | ||
1442 | |||
1443 | /* lock is for another node. */ | ||
1444 | newlock = dlm_new_lock(ml->type, ml->node, | ||
1445 | be64_to_cpu(ml->cookie), NULL); | ||
1446 | if (!newlock) { | ||
1447 | ret = -ENOMEM; | ||
1448 | goto leave; | ||
1449 | } | ||
1450 | lksb = newlock->lksb; | ||
1451 | dlm_lock_attach_lockres(newlock, res); | ||
1452 | |||
1453 | if (ml->convert_type != LKM_IVMODE) { | ||
1454 | BUG_ON(queue != &res->converting); | ||
1455 | newlock->ml.convert_type = ml->convert_type; | ||
1456 | } | ||
1457 | lksb->flags |= (ml->flags & | ||
1458 | (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | ||
1459 | |||
1460 | if (mres->lvb[0]) { | ||
1461 | if (lksb->flags & DLM_LKSB_PUT_LVB) { | ||
1462 | /* other node was trying to update | ||
1463 | * lvb when node died. recreate the | ||
1464 | * lksb with the updated lvb. */ | ||
1465 | memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); | ||
1466 | } else { | ||
1467 | /* otherwise, the node is sending its | ||
1468 | * most recent valid lvb info */ | ||
1469 | BUG_ON(ml->type != LKM_EXMODE && | ||
1470 | ml->type != LKM_PRMODE); | ||
1471 | if (res->lvb[0] && (ml->type == LKM_EXMODE || | ||
1472 | memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { | ||
1473 | mlog(ML_ERROR, "received bad lvb!\n"); | ||
1474 | __dlm_print_one_lock_resource(res); | ||
1475 | BUG(); | ||
1476 | } | ||
1477 | memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); | ||
1478 | } | ||
1479 | } | ||
1480 | |||
1481 | |||
1482 | /* NOTE: | ||
1483 | * wrt lock queue ordering and recovery: | ||
1484 | * 1. order of locks on granted queue is | ||
1485 | * meaningless. | ||
1486 | * 2. order of locks on converting queue is | ||
1487 | * LOST with the node death. sorry charlie. | ||
1488 | * 3. order of locks on the blocked queue is | ||
1489 | * also LOST. | ||
1490 | * order of locks does not affect integrity, it | ||
1491 | * just means that a lock request may get pushed | ||
1492 | * back in line as a result of the node death. | ||
1493 | * also note that for a given node the lock order | ||
1494 | * for its secondary queue locks is preserved | ||
1495 | * relative to each other, but clearly *not* | ||
1496 | * preserved relative to locks from other nodes. | ||
1497 | */ | ||
1498 | spin_lock(&res->spinlock); | ||
1499 | dlm_lock_get(newlock); | ||
1500 | list_add_tail(&newlock->list, queue); | ||
1501 | spin_unlock(&res->spinlock); | ||
1502 | } | ||
1503 | mlog(0, "done running all the locks\n"); | ||
1504 | |||
1505 | leave: | ||
1506 | if (ret < 0) { | ||
1507 | mlog_errno(ret); | ||
1508 | if (newlock) | ||
1509 | dlm_lock_put(newlock); | ||
1510 | } | ||
1511 | |||
1512 | mlog_exit(ret); | ||
1513 | return ret; | ||
1514 | } | ||
1515 | |||
1516 | void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, | ||
1517 | struct dlm_lock_resource *res) | ||
1518 | { | ||
1519 | int i; | ||
1520 | struct list_head *queue, *iter, *iter2; | ||
1521 | struct dlm_lock *lock; | ||
1522 | |||
1523 | res->state |= DLM_LOCK_RES_RECOVERING; | ||
1524 | if (!list_empty(&res->recovering)) | ||
1525 | list_del_init(&res->recovering); | ||
1526 | list_add_tail(&res->recovering, &dlm->reco.resources); | ||
1527 | |||
1528 | /* find any pending locks and put them back on proper list */ | ||
1529 | for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { | ||
1530 | queue = dlm_list_idx_to_ptr(res, i); | ||
1531 | list_for_each_safe(iter, iter2, queue) { | ||
1532 | lock = list_entry(iter, struct dlm_lock, list); | ||
1533 | dlm_lock_get(lock); | ||
1534 | if (lock->convert_pending) { | ||
1535 | /* move converting lock back to granted */ | ||
1536 | BUG_ON(i != DLM_CONVERTING_LIST); | ||
1537 | mlog(0, "node died with convert pending " | ||
1538 | "on %.*s. move back to granted list.\n", | ||
1539 | res->lockname.len, res->lockname.name); | ||
1540 | dlm_revert_pending_convert(res, lock); | ||
1541 | lock->convert_pending = 0; | ||
1542 | } else if (lock->lock_pending) { | ||
1543 | /* remove pending lock requests completely */ | ||
1544 | BUG_ON(i != DLM_BLOCKED_LIST); | ||
1545 | mlog(0, "node died with lock pending " | ||
1546 | "on %.*s. remove from blocked list and skip.\n", | ||
1547 | res->lockname.len, res->lockname.name); | ||
1548 | /* lock will be floating until ref in | ||
1549 | * dlmlock_remote is freed after the network | ||
1550 | * call returns. ok for it to not be on any | ||
1551 | * list since no ast can be called | ||
1552 | * (the master is dead). */ | ||
1553 | dlm_revert_pending_lock(res, lock); | ||
1554 | lock->lock_pending = 0; | ||
1555 | } else if (lock->unlock_pending) { | ||
1556 | /* if an unlock was in progress, treat as | ||
1557 | * if this had completed successfully | ||
1558 | * before sending this lock state to the | ||
1559 | * new master. note that the dlm_unlock | ||
1560 | * call is still responsible for calling | ||
1561 | * the unlockast. that will happen after | ||
1562 | * the network call times out. for now, | ||
1563 | * just move lists to prepare the new | ||
1564 | * recovery master. */ | ||
1565 | BUG_ON(i != DLM_GRANTED_LIST); | ||
1566 | mlog(0, "node died with unlock pending " | ||
1567 | "on %.*s. remove from blocked list and skip.\n", | ||
1568 | res->lockname.len, res->lockname.name); | ||
1569 | dlm_commit_pending_unlock(res, lock); | ||
1570 | lock->unlock_pending = 0; | ||
1571 | } else if (lock->cancel_pending) { | ||
1572 | /* if a cancel was in progress, treat as | ||
1573 | * if this had completed successfully | ||
1574 | * before sending this lock state to the | ||
1575 | * new master */ | ||
1576 | BUG_ON(i != DLM_CONVERTING_LIST); | ||
1577 | mlog(0, "node died with cancel pending " | ||
1578 | "on %.*s. move back to granted list.\n", | ||
1579 | res->lockname.len, res->lockname.name); | ||
1580 | dlm_commit_pending_cancel(res, lock); | ||
1581 | lock->cancel_pending = 0; | ||
1582 | } | ||
1583 | dlm_lock_put(lock); | ||
1584 | } | ||
1585 | } | ||
1586 | } | ||
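
The four pending-operation branches above reduce to a small disposition table, summarized here exactly as implemented:

	/*
	 *	pending op	found on queue		disposition
	 *	----------	--------------		-----------
	 *	convert		converting		revert to granted
	 *	lock		blocked			remove entirely
	 *	unlock		granted			commit as completed
	 *	cancel		converting		commit, back to granted
	 */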
1587 | |||
1588 | |||
1589 | |||
1590 | /* removes all recovered locks from the recovery list. | ||
1591 | * sets the res->owner to the new master. | ||
1592 | * unsets the RECOVERY flag and wakes waiters. */ | ||
1593 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | ||
1594 | u8 dead_node, u8 new_master) | ||
1595 | { | ||
1596 | int i; | ||
1597 | struct list_head *iter, *iter2, *bucket; | ||
1598 | struct dlm_lock_resource *res; | ||
1599 | |||
1600 | mlog_entry_void(); | ||
1601 | |||
1602 | assert_spin_locked(&dlm->spinlock); | ||
1603 | |||
1604 | list_for_each_safe(iter, iter2, &dlm->reco.resources) { | ||
1605 | res = list_entry(iter, struct dlm_lock_resource, recovering); | ||
1606 | if (res->owner == dead_node) { | ||
1607 | list_del_init(&res->recovering); | ||
1608 | spin_lock(&res->spinlock); | ||
1609 | dlm_change_lockres_owner(dlm, res, new_master); | ||
1610 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
1611 | __dlm_dirty_lockres(dlm, res); | ||
1612 | spin_unlock(&res->spinlock); | ||
1613 | wake_up(&res->wq); | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | /* this will become unnecessary eventually, but | ||
1618 | * for now we need to run the whole hash, clear | ||
1619 | * the RECOVERING state and set the owner | ||
1620 | * if necessary */ | ||
1621 | for (i=0; i<DLM_HASH_SIZE; i++) { | ||
1622 | bucket = &(dlm->resources[i]); | ||
1623 | list_for_each(iter, bucket) { | ||
1624 | res = list_entry(iter, struct dlm_lock_resource, list); | ||
1625 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1626 | if (res->owner == dead_node) { | ||
1627 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
1628 | "was not on recovering list, but " | ||
1629 | "clearing state anyway\n", | ||
1630 | dlm->node_num, res->lockname.len, | ||
1631 | res->lockname.name, new_master); | ||
1632 | } else if (res->owner == dlm->node_num) { | ||
1633 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
1634 | "was not on recovering list, " | ||
1635 | "owner is THIS node, clearing\n", | ||
1636 | dlm->node_num, res->lockname.len, | ||
1637 | res->lockname.name, new_master); | ||
1638 | } else | ||
1639 | continue; | ||
1640 | |||
1641 | spin_lock(&res->spinlock); | ||
1642 | dlm_change_lockres_owner(dlm, res, new_master); | ||
1643 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
1644 | __dlm_dirty_lockres(dlm, res); | ||
1645 | spin_unlock(&res->spinlock); | ||
1646 | wake_up(&res->wq); | ||
1647 | } | ||
1648 | } | ||
1649 | } | ||
1650 | } | ||
1651 | |||
1652 | static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) | ||
1653 | { | ||
1654 | if (local) { | ||
1655 | if (lock->ml.type != LKM_EXMODE && | ||
1656 | lock->ml.type != LKM_PRMODE) | ||
1657 | return 1; | ||
1658 | } else if (lock->ml.type == LKM_EXMODE) | ||
1659 | return 1; | ||
1660 | return 0; | ||
1661 | } | ||
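
dlm_lvb_needs_invalidation() is easier to audit as a truth table, derived directly from the code above:

	/*
	 *	local?	lock type	invalidate lvb?
	 *	------	---------	---------------
	 *	yes	EX or PR	no  - our own grant keeps it valid
	 *	yes	anything else	yes - no readable local copy held
	 *	no	EX		yes - dead holder may have written it
	 *	no	PR or NL	no
	 */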
1662 | |||
1663 | static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, | ||
1664 | struct dlm_lock_resource *res, u8 dead_node) | ||
1665 | { | ||
1666 | struct list_head *iter, *queue; | ||
1667 | struct dlm_lock *lock; | ||
1668 | int blank_lvb = 0, local = 0; | ||
1669 | int i; | ||
1670 | u8 search_node; | ||
1671 | |||
1672 | assert_spin_locked(&dlm->spinlock); | ||
1673 | assert_spin_locked(&res->spinlock); | ||
1674 | |||
1675 | if (res->owner == dlm->node_num) { | ||
1676 | /* if this node owned the lockres, and if the dead node | ||
1677 | * had an EX when it died, blank out the lvb */ | ||
1678 | search_node = dead_node; | ||
1679 | } else { | ||
1680 | /* if this is a secondary lockres, and we had no EX or PR | ||
1681 | * locks granted, we can no longer trust the lvb */ | ||
1682 | search_node = dlm->node_num; | ||
1683 | local = 1; /* check local state for valid lvb */ | ||
1684 | } | ||
1685 | |||
1686 | for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { | ||
1687 | queue = dlm_list_idx_to_ptr(res, i); | ||
1688 | list_for_each(iter, queue) { | ||
1689 | lock = list_entry(iter, struct dlm_lock, list); | ||
1690 | if (lock->ml.node == search_node) { | ||
1691 | if (dlm_lvb_needs_invalidation(lock, local)) { | ||
1692 | /* zero the lksb lvb and lockres lvb */ | ||
1693 | blank_lvb = 1; | ||
1694 | memset(lock->lksb->lvb, 0, DLM_LVB_LEN); | ||
1695 | } | ||
1696 | } | ||
1697 | } | ||
1698 | } | ||
1699 | |||
1700 | if (blank_lvb) { | ||
1701 | mlog(0, "clearing %.*s lvb, dead node %u had EX\n", | ||
1702 | res->lockname.len, res->lockname.name, dead_node); | ||
1703 | memset(res->lvb, 0, DLM_LVB_LEN); | ||
1704 | } | ||
1705 | } | ||
1706 | |||
1707 | static void dlm_free_dead_locks(struct dlm_ctxt *dlm, | ||
1708 | struct dlm_lock_resource *res, u8 dead_node) | ||
1709 | { | ||
1710 | struct list_head *iter, *tmpiter; | ||
1711 | struct dlm_lock *lock; | ||
1712 | |||
1713 | /* this node is the lockres master: | ||
1714 | * 1) remove any stale locks for the dead node | ||
1715 | * 2) if the dead node had an EX when he died, blank out the lvb | ||
1716 | */ | ||
1717 | assert_spin_locked(&dlm->spinlock); | ||
1718 | assert_spin_locked(&res->spinlock); | ||
1719 | |||
1720 | /* TODO: check pending_asts, pending_basts here */ | ||
1721 | list_for_each_safe(iter, tmpiter, &res->granted) { | ||
1722 | lock = list_entry(iter, struct dlm_lock, list); | ||
1723 | if (lock->ml.node == dead_node) { | ||
1724 | list_del_init(&lock->list); | ||
1725 | dlm_lock_put(lock); | ||
1726 | } | ||
1727 | } | ||
1728 | list_for_each_safe(iter, tmpiter, &res->converting) { | ||
1730 | lock = list_entry(iter, struct dlm_lock, list); | ||
1730 | if (lock->ml.node == dead_node) { | ||
1731 | list_del_init(&lock->list); | ||
1732 | dlm_lock_put(lock); | ||
1733 | } | ||
1734 | } | ||
1735 | list_for_each_safe(iter, tmpiter, &res->blocked) { | ||
1737 | lock = list_entry(iter, struct dlm_lock, list); | ||
1737 | if (lock->ml.node == dead_node) { | ||
1738 | list_del_init(&lock->list); | ||
1739 | dlm_lock_put(lock); | ||
1740 | } | ||
1741 | } | ||
1742 | |||
1743 | /* do not kick thread yet */ | ||
1744 | __dlm_dirty_lockres(dlm, res); | ||
1745 | } | ||
1746 | |||
1747 | /* if this node is the recovery master, and there are no | ||
1748 | * locks for a given lockres owned by this node that are in | ||
1749 | * either PR or EX mode, zero out the lvb before requesting. */ | ||
1750 | |||
1754 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | ||
1755 | { | ||
1756 | struct list_head *iter; | ||
1757 | struct dlm_lock_resource *res; | ||
1758 | int i; | ||
1759 | struct list_head *bucket; | ||
1760 | |||
1762 | /* purge any stale mles */ | ||
1763 | dlm_clean_master_list(dlm, dead_node); | ||
1764 | |||
1765 | /* | ||
1766 | * now clean up all lock resources. there are two rules: | ||
1767 | * | ||
1768 | * 1) if the dead node was the master, move the lockres | ||
1769 | * to the recovering list. set the RECOVERING flag. | ||
1770 | * this lockres needs to be cleaned up before it can | ||
1771 | * be used further. | ||
1772 | * | ||
1773 | * 2) if this node was the master, remove all locks from | ||
1774 | * each of the lockres queues that were owned by the | ||
1775 | * dead node. once recovery finishes, the dlm thread | ||
1776 | * can be kicked again to see if any ASTs or BASTs | ||
1777 | * need to be fired as a result. | ||
1778 | */ | ||
1779 | for (i=0; i<DLM_HASH_SIZE; i++) { | ||
1780 | bucket = &(dlm->resources[i]); | ||
1781 | list_for_each(iter, bucket) { | ||
1782 | res = list_entry(iter, struct dlm_lock_resource, list); | ||
1783 | if (dlm_is_recovery_lock(res->lockname.name, | ||
1784 | res->lockname.len)) | ||
1785 | continue; | ||
1786 | |||
1787 | spin_lock(&res->spinlock); | ||
1788 | /* zero the lvb if necessary */ | ||
1789 | dlm_revalidate_lvb(dlm, res, dead_node); | ||
1790 | if (res->owner == dead_node) | ||
1791 | dlm_move_lockres_to_recovery_list(dlm, res); | ||
1792 | else if (res->owner == dlm->node_num) { | ||
1793 | dlm_free_dead_locks(dlm, res, dead_node); | ||
1794 | __dlm_lockres_calc_usage(dlm, res); | ||
1795 | } | ||
1796 | spin_unlock(&res->spinlock); | ||
1797 | } | ||
1798 | } | ||
1799 | |||
1800 | } | ||
1801 | |||
1802 | static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) | ||
1803 | { | ||
1804 | assert_spin_locked(&dlm->spinlock); | ||
1805 | |||
1806 | /* check to see if the node is already considered dead */ | ||
1807 | if (!test_bit(idx, dlm->live_nodes_map)) { | ||
1808 | mlog(0, "for domain %s, node %d is already dead. " | ||
1809 | "another node likely did recovery already.\n", | ||
1810 | dlm->name, idx); | ||
1811 | return; | ||
1812 | } | ||
1813 | |||
1814 | /* check to see if we do not care about this node */ | ||
1815 | if (!test_bit(idx, dlm->domain_map)) { | ||
1816 | /* This also catches the case that we get a node down | ||
1817 | * but haven't joined the domain yet. */ | ||
1818 | mlog(0, "node %u already removed from domain!\n", idx); | ||
1819 | return; | ||
1820 | } | ||
1821 | |||
1822 | clear_bit(idx, dlm->live_nodes_map); | ||
1823 | |||
1824 | /* Clean up join state on node death. */ | ||
1825 | if (dlm->joining_node == idx) { | ||
1826 | mlog(0, "Clearing join state for node %u\n", idx); | ||
1827 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
1828 | } | ||
1829 | |||
1830 | /* make sure local cleanup occurs before the heartbeat events */ | ||
1831 | if (!test_bit(idx, dlm->recovery_map)) | ||
1832 | dlm_do_local_recovery_cleanup(dlm, idx); | ||
1833 | |||
1834 | /* notify anything attached to the heartbeat events */ | ||
1835 | dlm_hb_event_notify_attached(dlm, idx, 0); | ||
1836 | |||
1837 | mlog(0, "node %u being removed from domain map!\n", idx); | ||
1838 | clear_bit(idx, dlm->domain_map); | ||
1839 | /* wake up migration waiters if a node goes down. | ||
1840 | * perhaps later we can genericize this for other waiters. */ | ||
1841 | wake_up(&dlm->migration_wq); | ||
1842 | |||
1843 | if (test_bit(idx, dlm->recovery_map)) | ||
1844 | mlog(0, "domain %s, node %u already added " | ||
1845 | "to recovery map!\n", dlm->name, idx); | ||
1846 | else | ||
1847 | set_bit(idx, dlm->recovery_map); | ||
1848 | } | ||
1849 | |||
1850 | void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) | ||
1851 | { | ||
1852 | struct dlm_ctxt *dlm = data; | ||
1853 | |||
1854 | if (!dlm_grab(dlm)) | ||
1855 | return; | ||
1856 | |||
1857 | spin_lock(&dlm->spinlock); | ||
1858 | __dlm_hb_node_down(dlm, idx); | ||
1859 | spin_unlock(&dlm->spinlock); | ||
1860 | |||
1861 | dlm_put(dlm); | ||
1862 | } | ||
1863 | |||
1864 | void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) | ||
1865 | { | ||
1866 | struct dlm_ctxt *dlm = data; | ||
1867 | |||
1868 | if (!dlm_grab(dlm)) | ||
1869 | return; | ||
1870 | |||
1871 | spin_lock(&dlm->spinlock); | ||
1872 | |||
1873 | set_bit(idx, dlm->live_nodes_map); | ||
1874 | |||
1875 | /* notify any mles attached to the heartbeat events */ | ||
1876 | dlm_hb_event_notify_attached(dlm, idx, 1); | ||
1877 | |||
1878 | spin_unlock(&dlm->spinlock); | ||
1879 | |||
1880 | dlm_put(dlm); | ||
1881 | } | ||
1882 | |||
1883 | static void dlm_reco_ast(void *astdata) | ||
1884 | { | ||
1885 | struct dlm_ctxt *dlm = astdata; | ||
1886 | mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", | ||
1887 | dlm->node_num, dlm->name); | ||
1888 | } | ||
1889 | static void dlm_reco_bast(void *astdata, int blocked_type) | ||
1890 | { | ||
1891 | struct dlm_ctxt *dlm = astdata; | ||
1892 | mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", | ||
1893 | dlm->node_num, dlm->name); | ||
1894 | } | ||
1895 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) | ||
1896 | { | ||
1897 | mlog(0, "unlockast for recovery lock fired!\n"); | ||
1898 | } | ||
1899 | |||
1900 | |||
1901 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) | ||
1902 | { | ||
1903 | enum dlm_status ret; | ||
1904 | struct dlm_lockstatus lksb; | ||
1905 | int status = -EINVAL; | ||
1906 | |||
1907 | mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", | ||
1908 | dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); | ||
1909 | retry: | ||
1910 | memset(&lksb, 0, sizeof(lksb)); | ||
1911 | |||
1912 | ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, | ||
1913 | DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast); | ||
1914 | |||
1915 | if (ret == DLM_NORMAL) { | ||
1916 | mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", | ||
1917 | dlm->name, dlm->node_num); | ||
1918 | /* I am master, send message to all nodes saying | ||
1919 | * that I am beginning a recovery session */ | ||
1920 | status = dlm_send_begin_reco_message(dlm, | ||
1921 | dlm->reco.dead_node); | ||
1922 | |||
1923 | /* recovery lock is a special case. ast will not get fired, | ||
1924 | * so just go ahead and unlock it. */ | ||
1925 | ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); | ||
1926 | if (ret != DLM_NORMAL) { | ||
1927 | /* this would really suck. this could only happen | ||
1928 | * if there was a network error during the unlock | ||
1929 | * because of node death. this means the unlock | ||
1930 | * is actually "done" and the lock structure is | ||
1931 | * even freed. we can continue, but only | ||
1932 | * because this specific lock name is special. */ | ||
1933 | mlog(0, "dlmunlock returned %d\n", ret); | ||
1934 | } | ||
1935 | |||
1936 | if (status < 0) { | ||
1937 | mlog(0, "failed to send recovery message. " | ||
1938 | "must retry with new node map.\n"); | ||
1939 | goto retry; | ||
1940 | } | ||
1941 | } else if (ret == DLM_NOTQUEUED) { | ||
1942 | mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", | ||
1943 | dlm->name, dlm->node_num); | ||
1944 | /* another node is master. wait on | ||
1945 | * reco.new_master != O2NM_INVALID_NODE_NUM */ | ||
1946 | status = -EEXIST; | ||
1947 | } | ||
1948 | |||
1949 | return status; | ||
1950 | } | ||
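
The LKM_NOQUEUE|LKM_RECOVERY request above is the whole election: every live node races for the $RECOVERY lock, exactly one gets DLM_NORMAL and masters the recovery, and the rest get DLM_NOTQUEUED. A hedged sketch of how a caller consumes the two outcomes (name and structure illustrative only):

	static int run_reco_election_sketch(struct dlm_ctxt *dlm)
	{
		int ret = dlm_pick_recovery_master(dlm);

		if (!ret)
			return 1;	/* won the NOQUEUE race: master it */
		if (ret == -EEXIST)
			return 0;	/* lost: wait for reco.new_master to
					 * be set by the winner's begin-reco
					 * broadcast */
		return ret;		/* unexpected failure */
	}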
1951 | |||
1952 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) | ||
1953 | { | ||
1954 | struct dlm_begin_reco br; | ||
1955 | int ret = 0; | ||
1956 | struct dlm_node_iter iter; | ||
1957 | int nodenum; | ||
1958 | int status; | ||
1959 | |||
1960 | mlog_entry("%u\n", dead_node); | ||
1961 | |||
1962 | mlog(0, "dead node is %u\n", dead_node); | ||
1963 | |||
1964 | spin_lock(&dlm->spinlock); | ||
1965 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
1966 | spin_unlock(&dlm->spinlock); | ||
1967 | |||
1968 | clear_bit(dead_node, iter.node_map); | ||
1969 | |||
1970 | memset(&br, 0, sizeof(br)); | ||
1971 | br.node_idx = dlm->node_num; | ||
1972 | br.dead_node = dead_node; | ||
1973 | |||
1974 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
1975 | ret = 0; | ||
1976 | if (nodenum == dead_node) { | ||
1977 | mlog(0, "not sending begin reco to dead node " | ||
1978 | "%u\n", dead_node); | ||
1979 | continue; | ||
1980 | } | ||
1981 | if (nodenum == dlm->node_num) { | ||
1982 | mlog(0, "not sending begin reco to self\n"); | ||
1983 | continue; | ||
1984 | } | ||
1985 | |||
1986 | ret = -EINVAL; | ||
1987 | mlog(0, "attempting to send begin reco msg to %d\n", | ||
1988 | nodenum); | ||
1989 | ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, | ||
1990 | &br, sizeof(br), nodenum, &status); | ||
1991 | /* negative status is handled ok by caller here */ | ||
1992 | if (ret >= 0) | ||
1993 | ret = status; | ||
1994 | if (ret < 0) { | ||
1995 | struct dlm_lock_resource *res; | ||
1996 | mlog_errno(ret); | ||
1997 | mlog(ML_ERROR, "begin reco of dlm %s to node %u " | ||
1998 | " returned %d\n", dlm->name, nodenum, ret); | ||
1999 | res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, | ||
2000 | DLM_RECOVERY_LOCK_NAME_LEN); | ||
2001 | if (res) { | ||
2002 | dlm_print_one_lock_resource(res); | ||
2003 | dlm_lockres_put(res); | ||
2004 | } else { | ||
2005 | mlog(ML_ERROR, "recovery lock not found\n"); | ||
2006 | } | ||
2007 | break; | ||
2008 | } | ||
2009 | } | ||
2010 | |||
2011 | return ret; | ||
2012 | } | ||
2013 | |||
2014 | int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) | ||
2015 | { | ||
2016 | struct dlm_ctxt *dlm = data; | ||
2017 | struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; | ||
2018 | |||
2019 | /* ok to return 0, domain has gone away */ | ||
2020 | if (!dlm_grab(dlm)) | ||
2021 | return 0; | ||
2022 | |||
2023 | mlog(0, "node %u wants to recover node %u\n", | ||
2024 | br->node_idx, br->dead_node); | ||
2025 | |||
2026 | dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); | ||
2027 | |||
2028 | spin_lock(&dlm->spinlock); | ||
2029 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { | ||
2030 | mlog(0, "new_master already set to %u!\n", | ||
2031 | dlm->reco.new_master); | ||
2032 | } | ||
2033 | if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { | ||
2034 | mlog(0, "dead_node already set to %u!\n", | ||
2035 | dlm->reco.dead_node); | ||
2036 | } | ||
2037 | dlm->reco.new_master = br->node_idx; | ||
2038 | dlm->reco.dead_node = br->dead_node; | ||
2039 | if (!test_bit(br->dead_node, dlm->recovery_map)) { | ||
2040 | mlog(ML_ERROR, "recovery master %u sees %u as dead, but this " | ||
2041 | "node has not yet. marking %u as dead\n", | ||
2042 | br->node_idx, br->dead_node, br->dead_node); | ||
2043 | __dlm_hb_node_down(dlm, br->dead_node); | ||
2044 | } | ||
2045 | spin_unlock(&dlm->spinlock); | ||
2046 | |||
2047 | dlm_kick_recovery_thread(dlm); | ||
2048 | dlm_put(dlm); | ||
2049 | return 0; | ||
2050 | } | ||
2051 | |||
2052 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) | ||
2053 | { | ||
2054 | int ret = 0; | ||
2055 | struct dlm_finalize_reco fr; | ||
2056 | struct dlm_node_iter iter; | ||
2057 | int nodenum; | ||
2058 | int status; | ||
2059 | |||
2060 | mlog(0, "finishing recovery for node %s:%u\n", | ||
2061 | dlm->name, dlm->reco.dead_node); | ||
2062 | |||
2063 | spin_lock(&dlm->spinlock); | ||
2064 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
2065 | spin_unlock(&dlm->spinlock); | ||
2066 | |||
2067 | memset(&fr, 0, sizeof(fr)); | ||
2068 | fr.node_idx = dlm->node_num; | ||
2069 | fr.dead_node = dlm->reco.dead_node; | ||
2070 | |||
2071 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
2072 | if (nodenum == dlm->node_num) | ||
2073 | continue; | ||
2074 | ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, | ||
2075 | &fr, sizeof(fr), nodenum, &status); | ||
2076 | if (ret >= 0) { | ||
2077 | ret = status; | ||
2078 | if (dlm_is_host_down(ret)) { | ||
2079 | /* this has no effect on this recovery | ||
2080 | * session, so set the status to zero to | ||
2081 | * finish out the last recovery */ | ||
2082 | mlog(ML_ERROR, "node %u went down after this " | ||
2083 | "node finished recovery.\n", nodenum); | ||
2084 | ret = 0; | ||
2085 | } | ||
2086 | } | ||
2087 | if (ret < 0) { | ||
2088 | mlog_errno(ret); | ||
2089 | break; | ||
2090 | } | ||
2091 | } | ||
2092 | |||
2093 | return ret; | ||
2094 | } | ||
2095 | |||
2096 | int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data) | ||
2097 | { | ||
2098 | struct dlm_ctxt *dlm = data; | ||
2099 | struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; | ||
2100 | |||
2101 | /* ok to return 0, domain has gone away */ | ||
2102 | if (!dlm_grab(dlm)) | ||
2103 | return 0; | ||
2104 | |||
2105 | mlog(0, "node %u finalizing recovery of node %u\n", | ||
2106 | fr->node_idx, fr->dead_node); | ||
2107 | |||
2108 | spin_lock(&dlm->spinlock); | ||
2109 | |||
2110 | if (dlm->reco.new_master != fr->node_idx) { | ||
2111 | mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " | ||
2112 | "%u is supposed to be the new master, dead=%u\n", | ||
2113 | fr->node_idx, dlm->reco.new_master, fr->dead_node); | ||
2114 | BUG(); | ||
2115 | } | ||
2116 | if (dlm->reco.dead_node != fr->dead_node) { | ||
2117 | mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " | ||
2118 | "node %u, but node %u is supposed to be dead\n", | ||
2119 | fr->node_idx, fr->dead_node, dlm->reco.dead_node); | ||
2120 | BUG(); | ||
2121 | } | ||
2122 | |||
2123 | dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); | ||
2124 | |||
2125 | spin_unlock(&dlm->spinlock); | ||
2126 | |||
2127 | dlm_reset_recovery(dlm); | ||
2128 | |||
2129 | dlm_kick_recovery_thread(dlm); | ||
2130 | dlm_put(dlm); | ||
2131 | return 0; | ||
2132 | } | ||
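The recovery message flow above is easy to lose in the detail: one node wins the recovery lock in dlm_pick_recovery_master(), broadcasts DLM_BEGIN_RECO_MSG so every peer records the (new_master, dead_node) pair, and after recovery completes the master broadcasts DLM_FINALIZE_RECO_MSG, which must match the recorded pair before the state is reset. Below is a minimal standalone sketch of that handshake -- plain C for illustration, not kernel code; INVALID_NODE stands in for O2NM_INVALID_NODE_NUM and the assert()s model the BUG() checks in dlm_finalize_reco_handler().

/* Standalone sketch (plain C, not kernel code) of the begin/finalize
 * recovery handshake above.  Field names mirror struct dlm_begin_reco
 * and struct dlm_finalize_reco. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_NODE 255		/* stand-in for O2NM_INVALID_NODE_NUM */

struct reco_state {
	uint8_t new_master;		/* node that won the recovery lock */
	uint8_t dead_node;		/* node being recovered */
};

/* DLM_BEGIN_RECO_MSG: record who is recovering whom (overwrites any
 * stale values, as the handler above does after logging them). */
static void begin_reco(struct reco_state *r, uint8_t from, uint8_t dead)
{
	r->new_master = from;
	r->dead_node = dead;
}

/* DLM_FINALIZE_RECO_MSG: must match the earlier begin message; the
 * asserts model the BUG() checks in dlm_finalize_reco_handler(). */
static void finalize_reco(struct reco_state *r, uint8_t from, uint8_t dead)
{
	assert(r->new_master == from);
	assert(r->dead_node == dead);
	r->new_master = INVALID_NODE;	/* dlm_reset_recovery() equivalent */
	r->dead_node = INVALID_NODE;
}

int main(void)
{
	struct reco_state r = { INVALID_NODE, INVALID_NODE };
	begin_reco(&r, 3, 7);		/* node 3 recovers dead node 7 */
	finalize_reco(&r, 3, 7);
	printf("recovery round complete\n");
	return 0;
}

Note that, as in dlm_begin_reco_handler() above, a begin message can arrive while stale reco state is still set; the sketch simply overwrites it, mirroring the log-and-overwrite behavior rather than treating it as fatal.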
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c new file mode 100644 index 000000000000..92cd5cd66db8 --- /dev/null +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -0,0 +1,695 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmthread.c | ||
5 | * | ||
6 | * standalone DLM module | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/timer.h> | ||
41 | #include <linux/kthread.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | #include "dlmdomain.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD) | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | extern spinlock_t dlm_domain_lock; | ||
56 | extern struct list_head dlm_domains; | ||
57 | |||
58 | static int dlm_thread(void *data); | ||
59 | |||
60 | static void dlm_flush_asts(struct dlm_ctxt *dlm); | ||
61 | |||
62 | #define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num) | ||
63 | |||
64 | /* will exit holding res->spinlock, but may drop it inside this function */ | ||
65 | /* waits until the given flags are cleared in res->state */ | ||
66 | void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags) | ||
67 | { | ||
68 | DECLARE_WAITQUEUE(wait, current); | ||
69 | |||
70 | assert_spin_locked(&res->spinlock); | ||
71 | |||
72 | add_wait_queue(&res->wq, &wait); | ||
73 | repeat: | ||
74 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
75 | if (res->state & flags) { | ||
76 | spin_unlock(&res->spinlock); | ||
77 | schedule(); | ||
78 | spin_lock(&res->spinlock); | ||
79 | goto repeat; | ||
80 | } | ||
81 | remove_wait_queue(&res->wq, &wait); | ||
82 | current->state = TASK_RUNNING; | ||
83 | } | ||
84 | |||
85 | |||
86 | static int __dlm_lockres_unused(struct dlm_lock_resource *res) | ||
87 | { | ||
88 | if (list_empty(&res->granted) && | ||
89 | list_empty(&res->converting) && | ||
90 | list_empty(&res->blocked) && | ||
91 | list_empty(&res->dirty)) | ||
92 | return 1; | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | |||
97 | /* Call whenever you may have added or deleted something from one of | ||
98 | * the lockres queues. This will figure out whether the lockres belongs | ||
99 | * on the purge list or not and does the appropriate thing. */ | ||
100 | void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
101 | struct dlm_lock_resource *res) | ||
102 | { | ||
103 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
104 | |||
105 | assert_spin_locked(&dlm->spinlock); | ||
106 | assert_spin_locked(&res->spinlock); | ||
107 | |||
108 | if (__dlm_lockres_unused(res)){ | ||
109 | if (list_empty(&res->purge)) { | ||
110 | mlog(0, "putting lockres %.*s from purge list\n", | ||
111 | res->lockname.len, res->lockname.name); | ||
112 | |||
113 | res->last_used = jiffies; | ||
114 | list_add_tail(&res->purge, &dlm->purge_list); | ||
115 | dlm->purge_count++; | ||
116 | } | ||
117 | } else if (!list_empty(&res->purge)) { | ||
118 | mlog(0, "removing lockres %.*s from purge list\n", | ||
119 | res->lockname.len, res->lockname.name); | ||
120 | |||
121 | list_del_init(&res->purge); | ||
122 | dlm->purge_count--; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
127 | struct dlm_lock_resource *res) | ||
128 | { | ||
129 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
130 | spin_lock(&dlm->spinlock); | ||
131 | spin_lock(&res->spinlock); | ||
132 | |||
133 | __dlm_lockres_calc_usage(dlm, res); | ||
134 | |||
135 | spin_unlock(&res->spinlock); | ||
136 | spin_unlock(&dlm->spinlock); | ||
137 | } | ||
138 | |||
139 | /* TODO: Eventual API: Called with the dlm spinlock held, may drop it | ||
140 | * to do migration, but will re-acquire before exit. */ | ||
141 | void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres) | ||
142 | { | ||
143 | int master; | ||
144 | int ret; | ||
145 | |||
146 | spin_lock(&lockres->spinlock); | ||
147 | master = lockres->owner == dlm->node_num; | ||
148 | spin_unlock(&lockres->spinlock); | ||
149 | |||
150 | mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len, | ||
151 | lockres->lockname.name, master); | ||
152 | |||
153 | /* Non-master is the easy case -- no migration required, just | ||
154 | * quit. */ | ||
155 | if (!master) | ||
156 | goto finish; | ||
157 | |||
158 | /* Wheee! Migrate lockres here! */ | ||
159 | spin_unlock(&dlm->spinlock); | ||
160 | again: | ||
161 | |||
162 | ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES); | ||
163 | if (ret == -ENOTEMPTY) { | ||
164 | mlog(ML_ERROR, "lockres %.*s still has local locks!\n", | ||
165 | lockres->lockname.len, lockres->lockname.name); | ||
166 | |||
167 | BUG(); | ||
168 | } else if (ret < 0) { | ||
169 | mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n", | ||
170 | lockres->lockname.len, lockres->lockname.name); | ||
171 | goto again; | ||
172 | } | ||
173 | |||
174 | spin_lock(&dlm->spinlock); | ||
175 | |||
176 | finish: | ||
177 | if (!list_empty(&lockres->purge)) { | ||
178 | list_del_init(&lockres->purge); | ||
179 | dlm->purge_count--; | ||
180 | } | ||
181 | __dlm_unhash_lockres(lockres); | ||
182 | } | ||
183 | |||
184 | static void dlm_run_purge_list(struct dlm_ctxt *dlm, | ||
185 | int purge_now) | ||
186 | { | ||
187 | unsigned int run_max, unused; | ||
188 | unsigned long purge_jiffies; | ||
189 | struct dlm_lock_resource *lockres; | ||
190 | |||
191 | spin_lock(&dlm->spinlock); | ||
192 | run_max = dlm->purge_count; | ||
193 | |||
194 | while(run_max && !list_empty(&dlm->purge_list)) { | ||
195 | run_max--; | ||
196 | |||
197 | lockres = list_entry(dlm->purge_list.next, | ||
198 | struct dlm_lock_resource, purge); | ||
199 | |||
200 | /* Status of the lockres *might* change so double | ||
201 | * check. If the lockres is unused, holding the dlm | ||
202 | * spinlock will prevent people from getting any more | ||
203 | * refs on it -- there's no need to keep the lockres | ||
204 | * spinlock. */ | ||
205 | spin_lock(&lockres->spinlock); | ||
206 | unused = __dlm_lockres_unused(lockres); | ||
207 | spin_unlock(&lockres->spinlock); | ||
208 | |||
209 | if (!unused) | ||
210 | continue; | ||
211 | |||
212 | purge_jiffies = lockres->last_used + | ||
213 | msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); | ||
214 | |||
215 | /* Make sure that we want to be processing this guy at | ||
216 | * this time. */ | ||
217 | if (!purge_now && time_after(purge_jiffies, jiffies)) { | ||
218 | /* Since resources are added to the purge list | ||
219 | * in tail order, we can stop at the first | ||
220 | * unpurgeable resource -- anything added after | ||
221 | * it will have a greater last_used value */ | ||
222 | break; | ||
223 | } | ||
224 | |||
225 | list_del_init(&lockres->purge); | ||
226 | dlm->purge_count--; | ||
227 | |||
228 | /* This may drop and reacquire the dlm spinlock if it | ||
229 | * has to do migration. */ | ||
230 | mlog(0, "calling dlm_purge_lockres!\n"); | ||
231 | dlm_purge_lockres(dlm, lockres); | ||
232 | mlog(0, "DONE calling dlm_purge_lockres!\n"); | ||
233 | |||
234 | /* Avoid adding any scheduling latencies */ | ||
235 | cond_resched_lock(&dlm->spinlock); | ||
236 | } | ||
237 | |||
238 | spin_unlock(&dlm->spinlock); | ||
239 | } | ||
240 | |||
241 | static void dlm_shuffle_lists(struct dlm_ctxt *dlm, | ||
242 | struct dlm_lock_resource *res) | ||
243 | { | ||
244 | struct dlm_lock *lock, *target; | ||
245 | struct list_head *iter; | ||
246 | struct list_head *head; | ||
247 | int can_grant = 1; | ||
248 | |||
249 | //mlog(0, "res->lockname.len=%d\n", res->lockname.len); | ||
250 | //mlog(0, "res->lockname.name=%p\n", res->lockname.name); | ||
251 | //mlog(0, "shuffle res %.*s\n", res->lockname.len, | ||
252 | // res->lockname.name); | ||
253 | |||
254 | /* because this function is called with the lockres | ||
255 | * spinlock held, and because we know that it is not migrating/ | ||
256 | * recovering/in-progress, it is fine to reserve asts and | ||
257 | * basts right before queueing them all throughout */ | ||
258 | assert_spin_locked(&res->spinlock); | ||
259 | BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| | ||
260 | DLM_LOCK_RES_RECOVERING| | ||
261 | DLM_LOCK_RES_IN_PROGRESS))); | ||
262 | |||
263 | converting: | ||
264 | if (list_empty(&res->converting)) | ||
265 | goto blocked; | ||
266 | mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, | ||
267 | res->lockname.name); | ||
268 | |||
269 | target = list_entry(res->converting.next, struct dlm_lock, list); | ||
270 | if (target->ml.convert_type == LKM_IVMODE) { | ||
271 | mlog(ML_ERROR, "%.*s: converting a lock with no " | ||
272 | "convert_type!\n", res->lockname.len, res->lockname.name); | ||
273 | BUG(); | ||
274 | } | ||
275 | head = &res->granted; | ||
276 | list_for_each(iter, head) { | ||
277 | lock = list_entry(iter, struct dlm_lock, list); | ||
278 | if (lock == target) | ||
279 | continue; | ||
280 | if (!dlm_lock_compatible(lock->ml.type, | ||
281 | target->ml.convert_type)) { | ||
282 | can_grant = 0; | ||
283 | /* queue the BAST if not already */ | ||
284 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
285 | __dlm_lockres_reserve_ast(res); | ||
286 | dlm_queue_bast(dlm, lock); | ||
287 | } | ||
288 | /* update the highest_blocked if needed */ | ||
289 | if (lock->ml.highest_blocked < target->ml.convert_type) | ||
290 | lock->ml.highest_blocked = | ||
291 | target->ml.convert_type; | ||
292 | } | ||
293 | } | ||
294 | head = &res->converting; | ||
295 | list_for_each(iter, head) { | ||
296 | lock = list_entry(iter, struct dlm_lock, list); | ||
297 | if (lock == target) | ||
298 | continue; | ||
299 | if (!dlm_lock_compatible(lock->ml.type, | ||
300 | target->ml.convert_type)) { | ||
301 | can_grant = 0; | ||
302 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
303 | __dlm_lockres_reserve_ast(res); | ||
304 | dlm_queue_bast(dlm, lock); | ||
305 | } | ||
306 | if (lock->ml.highest_blocked < target->ml.convert_type) | ||
307 | lock->ml.highest_blocked = | ||
308 | target->ml.convert_type; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | /* we can convert the lock */ | ||
313 | if (can_grant) { | ||
314 | spin_lock(&target->spinlock); | ||
315 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); | ||
316 | |||
317 | mlog(0, "calling ast for converting lock: %.*s, have: %d, " | ||
318 | "granting: %d, node: %u\n", res->lockname.len, | ||
319 | res->lockname.name, target->ml.type, | ||
320 | target->ml.convert_type, target->ml.node); | ||
321 | |||
322 | target->ml.type = target->ml.convert_type; | ||
323 | target->ml.convert_type = LKM_IVMODE; | ||
324 | list_del_init(&target->list); | ||
325 | list_add_tail(&target->list, &res->granted); | ||
326 | |||
327 | BUG_ON(!target->lksb); | ||
328 | target->lksb->status = DLM_NORMAL; | ||
329 | |||
330 | spin_unlock(&target->spinlock); | ||
331 | |||
332 | __dlm_lockres_reserve_ast(res); | ||
333 | dlm_queue_ast(dlm, target); | ||
334 | /* go back and check for more */ | ||
335 | goto converting; | ||
336 | } | ||
337 | |||
338 | blocked: | ||
339 | if (list_empty(&res->blocked)) | ||
340 | goto leave; | ||
341 | target = list_entry(res->blocked.next, struct dlm_lock, list); | ||
342 | |||
343 | head = &res->granted; | ||
344 | list_for_each(iter, head) { | ||
345 | lock = list_entry(iter, struct dlm_lock, list); | ||
346 | if (lock == target) | ||
347 | continue; | ||
348 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { | ||
349 | can_grant = 0; | ||
350 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
351 | __dlm_lockres_reserve_ast(res); | ||
352 | dlm_queue_bast(dlm, lock); | ||
353 | } | ||
354 | if (lock->ml.highest_blocked < target->ml.type) | ||
355 | lock->ml.highest_blocked = target->ml.type; | ||
356 | } | ||
357 | } | ||
358 | |||
359 | head = &res->converting; | ||
360 | list_for_each(iter, head) { | ||
361 | lock = list_entry(iter, struct dlm_lock, list); | ||
362 | if (lock == target) | ||
363 | continue; | ||
364 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { | ||
365 | can_grant = 0; | ||
366 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
367 | __dlm_lockres_reserve_ast(res); | ||
368 | dlm_queue_bast(dlm, lock); | ||
369 | } | ||
370 | if (lock->ml.highest_blocked < target->ml.type) | ||
371 | lock->ml.highest_blocked = target->ml.type; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | /* we can grant the blocked lock (only | ||
376 | * possible if converting list empty) */ | ||
377 | if (can_grant) { | ||
378 | spin_lock(&target->spinlock); | ||
379 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); | ||
380 | |||
381 | mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " | ||
382 | "node: %u\n", res->lockname.len, res->lockname.name, | ||
383 | target->ml.type, target->ml.node); | ||
384 | |||
385 | // target->ml.type is already correct | ||
386 | list_del_init(&target->list); | ||
387 | list_add_tail(&target->list, &res->granted); | ||
388 | |||
389 | BUG_ON(!target->lksb); | ||
390 | target->lksb->status = DLM_NORMAL; | ||
391 | |||
392 | spin_unlock(&target->spinlock); | ||
393 | |||
394 | __dlm_lockres_reserve_ast(res); | ||
395 | dlm_queue_ast(dlm, target); | ||
396 | /* go back and check for more */ | ||
397 | goto converting; | ||
398 | } | ||
399 | |||
400 | leave: | ||
401 | return; | ||
402 | } | ||
403 | |||
404 | /* must have NO locks when calling this with res != NULL */ | ||
405 | void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | ||
406 | { | ||
407 | mlog_entry("dlm=%p, res=%p\n", dlm, res); | ||
408 | if (res) { | ||
409 | spin_lock(&dlm->spinlock); | ||
410 | spin_lock(&res->spinlock); | ||
411 | __dlm_dirty_lockres(dlm, res); | ||
412 | spin_unlock(&res->spinlock); | ||
413 | spin_unlock(&dlm->spinlock); | ||
414 | } | ||
415 | wake_up(&dlm->dlm_thread_wq); | ||
416 | } | ||
417 | |||
418 | void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | ||
419 | { | ||
420 | mlog_entry("dlm=%p, res=%p\n", dlm, res); | ||
421 | |||
422 | assert_spin_locked(&dlm->spinlock); | ||
423 | assert_spin_locked(&res->spinlock); | ||
424 | |||
425 | /* don't shuffle secondary queues */ | ||
426 | if ((res->owner == dlm->node_num) && | ||
427 | !(res->state & DLM_LOCK_RES_DIRTY)) { | ||
428 | list_add_tail(&res->dirty, &dlm->dirty_list); | ||
429 | res->state |= DLM_LOCK_RES_DIRTY; | ||
430 | } | ||
431 | } | ||
432 | |||
433 | |||
434 | /* Launch the dlm thread for this domain */ | ||
435 | int dlm_launch_thread(struct dlm_ctxt *dlm) | ||
436 | { | ||
437 | mlog(0, "starting dlm thread...\n"); | ||
438 | |||
439 | dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); | ||
440 | if (IS_ERR(dlm->dlm_thread_task)) { | ||
441 | mlog_errno(PTR_ERR(dlm->dlm_thread_task)); | ||
442 | dlm->dlm_thread_task = NULL; | ||
443 | return -EINVAL; | ||
444 | } | ||
445 | |||
446 | return 0; | ||
447 | } | ||
448 | |||
449 | void dlm_complete_thread(struct dlm_ctxt *dlm) | ||
450 | { | ||
451 | if (dlm->dlm_thread_task) { | ||
452 | mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); | ||
453 | kthread_stop(dlm->dlm_thread_task); | ||
454 | dlm->dlm_thread_task = NULL; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | static int dlm_dirty_list_empty(struct dlm_ctxt *dlm) | ||
459 | { | ||
460 | int empty; | ||
461 | |||
462 | spin_lock(&dlm->spinlock); | ||
463 | empty = list_empty(&dlm->dirty_list); | ||
464 | spin_unlock(&dlm->spinlock); | ||
465 | |||
466 | return empty; | ||
467 | } | ||
468 | |||
469 | static void dlm_flush_asts(struct dlm_ctxt *dlm) | ||
470 | { | ||
471 | int ret; | ||
472 | struct dlm_lock *lock; | ||
473 | struct dlm_lock_resource *res; | ||
474 | u8 hi; | ||
475 | |||
476 | spin_lock(&dlm->ast_lock); | ||
477 | while (!list_empty(&dlm->pending_asts)) { | ||
478 | lock = list_entry(dlm->pending_asts.next, | ||
479 | struct dlm_lock, ast_list); | ||
480 | /* get an extra ref on lock */ | ||
481 | dlm_lock_get(lock); | ||
482 | res = lock->lockres; | ||
483 | mlog(0, "delivering an ast for this lockres\n"); | ||
484 | |||
485 | BUG_ON(!lock->ast_pending); | ||
486 | |||
487 | /* remove from list (including ref) */ | ||
488 | list_del_init(&lock->ast_list); | ||
489 | dlm_lock_put(lock); | ||
490 | spin_unlock(&dlm->ast_lock); | ||
491 | |||
492 | if (lock->ml.node != dlm->node_num) { | ||
493 | ret = dlm_do_remote_ast(dlm, res, lock); | ||
494 | if (ret < 0) | ||
495 | mlog_errno(ret); | ||
496 | } else | ||
497 | dlm_do_local_ast(dlm, res, lock); | ||
498 | |||
499 | spin_lock(&dlm->ast_lock); | ||
500 | |||
501 | /* possible that another ast was queued while | ||
502 | * we were delivering the last one */ | ||
503 | if (!list_empty(&lock->ast_list)) { | ||
504 | mlog(0, "aha another ast got queued while " | ||
505 | "we were finishing the last one. will " | ||
506 | "keep the ast_pending flag set.\n"); | ||
507 | } else | ||
508 | lock->ast_pending = 0; | ||
509 | |||
510 | /* drop the extra ref. | ||
511 | * this may drop it completely. */ | ||
512 | dlm_lock_put(lock); | ||
513 | dlm_lockres_release_ast(dlm, res); | ||
514 | } | ||
515 | |||
516 | while (!list_empty(&dlm->pending_basts)) { | ||
517 | lock = list_entry(dlm->pending_basts.next, | ||
518 | struct dlm_lock, bast_list); | ||
519 | /* get an extra ref on lock */ | ||
520 | dlm_lock_get(lock); | ||
521 | res = lock->lockres; | ||
522 | |||
523 | BUG_ON(!lock->bast_pending); | ||
524 | |||
525 | /* get the highest blocked lock, and reset */ | ||
526 | spin_lock(&lock->spinlock); | ||
527 | BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE); | ||
528 | hi = lock->ml.highest_blocked; | ||
529 | lock->ml.highest_blocked = LKM_IVMODE; | ||
530 | spin_unlock(&lock->spinlock); | ||
531 | |||
532 | /* remove from list (including ref) */ | ||
533 | list_del_init(&lock->bast_list); | ||
534 | dlm_lock_put(lock); | ||
535 | spin_unlock(&dlm->ast_lock); | ||
536 | |||
537 | mlog(0, "delivering a bast for this lockres " | ||
538 | "(blocked = %d\n", hi); | ||
539 | |||
540 | if (lock->ml.node != dlm->node_num) { | ||
541 | ret = dlm_send_proxy_bast(dlm, res, lock, hi); | ||
542 | if (ret < 0) | ||
543 | mlog_errno(ret); | ||
544 | } else | ||
545 | dlm_do_local_bast(dlm, res, lock, hi); | ||
546 | |||
547 | spin_lock(&dlm->ast_lock); | ||
548 | |||
549 | /* possible that another bast was queued while | ||
550 | * we were delivering the last one */ | ||
551 | if (!list_empty(&lock->bast_list)) { | ||
552 | mlog(0, "aha another bast got queued while " | ||
553 | "we were finishing the last one. will " | ||
554 | "keep the bast_pending flag set.\n"); | ||
555 | } else | ||
556 | lock->bast_pending = 0; | ||
557 | |||
558 | /* drop the extra ref. | ||
559 | * this may drop it completely. */ | ||
560 | dlm_lock_put(lock); | ||
561 | dlm_lockres_release_ast(dlm, res); | ||
562 | } | ||
563 | wake_up(&dlm->ast_wq); | ||
564 | spin_unlock(&dlm->ast_lock); | ||
565 | } | ||
566 | |||
567 | |||
568 | #define DLM_THREAD_TIMEOUT_MS (4 * 1000) | ||
569 | #define DLM_THREAD_MAX_DIRTY 100 | ||
570 | #define DLM_THREAD_MAX_ASTS 10 | ||
571 | |||
572 | static int dlm_thread(void *data) | ||
573 | { | ||
574 | struct dlm_lock_resource *res; | ||
575 | struct dlm_ctxt *dlm = data; | ||
576 | unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS); | ||
577 | |||
578 | mlog(0, "dlm thread running for %s...\n", dlm->name); | ||
579 | |||
580 | while (!kthread_should_stop()) { | ||
581 | int n = DLM_THREAD_MAX_DIRTY; | ||
582 | |||
583 | /* dlm_shutting_down is very point-in-time, but that | ||
584 | * doesn't matter as we'll just loop back around if we | ||
585 | * get false on the leading edge of a state | ||
586 | * transition. */ | ||
587 | dlm_run_purge_list(dlm, dlm_shutting_down(dlm)); | ||
588 | |||
589 | /* We really don't want to hold dlm->spinlock while | ||
590 | * calling dlm_shuffle_lists on each lockres that | ||
591 | * needs to have its queues adjusted and AST/BASTs | ||
592 | * run. So let's pull each entry off the dirty_list | ||
593 | * and drop dlm->spinlock ASAP. Once off the list, | ||
594 | * res->spinlock needs to be taken again to protect | ||
595 | * the queues while calling dlm_shuffle_lists. */ | ||
596 | spin_lock(&dlm->spinlock); | ||
597 | while (!list_empty(&dlm->dirty_list)) { | ||
598 | int delay = 0; | ||
599 | res = list_entry(dlm->dirty_list.next, | ||
600 | struct dlm_lock_resource, dirty); | ||
601 | |||
602 | /* peel a lockres off, remove it from the list, | ||
603 | * unset the dirty flag and drop the dlm lock */ | ||
604 | BUG_ON(!res); | ||
605 | dlm_lockres_get(res); | ||
606 | |||
607 | spin_lock(&res->spinlock); | ||
608 | res->state &= ~DLM_LOCK_RES_DIRTY; | ||
609 | list_del_init(&res->dirty); | ||
610 | spin_unlock(&res->spinlock); | ||
611 | spin_unlock(&dlm->spinlock); | ||
612 | |||
613 | /* lockres can be re-dirtied/re-added to the | ||
614 | * dirty_list in this gap, but that is ok */ | ||
615 | |||
616 | spin_lock(&res->spinlock); | ||
617 | if (res->owner != dlm->node_num) { | ||
618 | __dlm_print_one_lock_resource(res); | ||
619 | mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n", | ||
620 | res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no", | ||
621 | res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no", | ||
622 | res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no", | ||
623 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | ||
624 | } | ||
625 | BUG_ON(res->owner != dlm->node_num); | ||
626 | |||
627 | /* it is now ok to move lockreses in these states | ||
628 | * to the dirty list, assuming that they will only be | ||
629 | * dirty for a short while. */ | ||
630 | if (res->state & (DLM_LOCK_RES_IN_PROGRESS | | ||
631 | DLM_LOCK_RES_MIGRATING | | ||
632 | DLM_LOCK_RES_RECOVERING)) { | ||
633 | /* move it to the tail and keep going */ | ||
634 | spin_unlock(&res->spinlock); | ||
635 | mlog(0, "delaying list shuffling for in-" | ||
636 | "progress lockres %.*s, state=%d\n", | ||
637 | res->lockname.len, res->lockname.name, | ||
638 | res->state); | ||
639 | delay = 1; | ||
640 | goto in_progress; | ||
641 | } | ||
642 | |||
643 | /* at this point the lockres is not migrating/ | ||
644 | * recovering/in-progress. we have the lockres | ||
645 | * spinlock and do NOT have the dlm lock. | ||
646 | * safe to reserve/queue asts and run the lists. */ | ||
647 | |||
648 | mlog(0, "calling dlm_shuffle_lists with dlm=%p, " | ||
649 | "res=%p\n", dlm, res); | ||
650 | |||
651 | /* called while holding lockres lock */ | ||
652 | dlm_shuffle_lists(dlm, res); | ||
653 | spin_unlock(&res->spinlock); | ||
654 | |||
655 | dlm_lockres_calc_usage(dlm, res); | ||
656 | |||
657 | in_progress: | ||
658 | |||
659 | spin_lock(&dlm->spinlock); | ||
660 | /* if the lock was in-progress, stick | ||
661 | * it on the back of the list */ | ||
662 | if (delay) { | ||
663 | spin_lock(&res->spinlock); | ||
664 | list_add_tail(&res->dirty, &dlm->dirty_list); | ||
665 | res->state |= DLM_LOCK_RES_DIRTY; | ||
666 | spin_unlock(&res->spinlock); | ||
667 | } | ||
668 | dlm_lockres_put(res); | ||
669 | |||
670 | /* unlikely, but we may need to give time to | ||
671 | * other tasks */ | ||
672 | if (!--n) { | ||
673 | mlog(0, "throttling dlm_thread\n"); | ||
674 | break; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | spin_unlock(&dlm->spinlock); | ||
679 | dlm_flush_asts(dlm); | ||
680 | |||
681 | /* yield and continue right away if there is more work to do */ | ||
682 | if (!n) { | ||
683 | yield(); | ||
684 | continue; | ||
685 | } | ||
686 | |||
687 | wait_event_interruptible_timeout(dlm->dlm_thread_wq, | ||
688 | !dlm_dirty_list_empty(dlm) || | ||
689 | kthread_should_stop(), | ||
690 | timeout); | ||
691 | } | ||
692 | |||
693 | mlog(0, "quitting DLM thread\n"); | ||
694 | return 0; | ||
695 | } | ||
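To summarize the control flow of dlm_thread() above: each pass runs the purge list, drains up to DLM_THREAD_MAX_DIRTY entries from the dirty list (shuffling each lockres with the dlm spinlock dropped), flushes pending ASTs/BASTs, then either yields (if it hit the throttle) or sleeps on dlm_thread_wq. A rough standalone sketch of that loop shape follows; the helpers are placeholders for the real kernel calls and the dirty backlog is faked with a counter.

/* Standalone sketch (plain C, not kernel code) of the dlm_thread()
 * loop structure above. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DIRTY 100			/* mirrors DLM_THREAD_MAX_DIRTY */

static bool should_stop;
static int dirty_pending = 250;		/* pretend dirty_list backlog */

static void run_purge_list(void) { }	/* dlm_run_purge_list() stand-in */
static void flush_asts(void) { }	/* dlm_flush_asts() stand-in */

/* stands in for peeling one lockres off dirty_list + dlm_shuffle_lists() */
static bool shuffle_one(void)
{
	if (dirty_pending == 0)
		return false;
	dirty_pending--;
	return true;
}

static void thread_loop(void)
{
	while (!should_stop) {
		int n = MAX_DIRTY;

		run_purge_list();
		while (shuffle_one())
			if (!--n)
				break;	/* throttle: stop after 100 */
		flush_asts();
		if (!n)
			continue;	/* yield() and go right back around */
		if (dirty_pending == 0)
			should_stop = true;	/* would sleep on dlm_thread_wq */
	}
}

int main(void)
{
	thread_loop();
	printf("done\n");
	return 0;
}

The point of the n counter is fairness: after MAX_DIRTY lockreses the thread flushes ASTs and yields the CPU instead of monopolizing it, then immediately resumes if dirty work remains.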
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c new file mode 100644 index 000000000000..cec2ce1cd318 --- /dev/null +++ b/fs/ocfs2/dlm/dlmunlock.c | |||
@@ -0,0 +1,672 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmunlock.c | ||
5 | * | ||
6 | * underlying calls for unlocking locks | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | |||
43 | #include "cluster/heartbeat.h" | ||
44 | #include "cluster/nodemanager.h" | ||
45 | #include "cluster/tcp.h" | ||
46 | |||
47 | #include "dlmapi.h" | ||
48 | #include "dlmcommon.h" | ||
49 | |||
50 | #define MLOG_MASK_PREFIX ML_DLM | ||
51 | #include "cluster/masklog.h" | ||
52 | |||
53 | #define DLM_UNLOCK_FREE_LOCK 0x00000001 | ||
54 | #define DLM_UNLOCK_CALL_AST 0x00000002 | ||
55 | #define DLM_UNLOCK_REMOVE_LOCK 0x00000004 | ||
56 | #define DLM_UNLOCK_REGRANT_LOCK 0x00000008 | ||
57 | #define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010 | ||
58 | |||
59 | |||
60 | static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, | ||
61 | struct dlm_lock_resource *res, | ||
62 | struct dlm_lock *lock, | ||
63 | struct dlm_lockstatus *lksb, | ||
64 | int *actions); | ||
65 | static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, | ||
66 | struct dlm_lock_resource *res, | ||
67 | struct dlm_lock *lock, | ||
68 | struct dlm_lockstatus *lksb, | ||
69 | int *actions); | ||
70 | |||
71 | static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | ||
72 | struct dlm_lock_resource *res, | ||
73 | struct dlm_lock *lock, | ||
74 | struct dlm_lockstatus *lksb, | ||
75 | int flags, | ||
76 | u8 owner); | ||
77 | |||
78 | |||
79 | /* | ||
80 | * according to the spec: | ||
81 | * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf | ||
82 | * | ||
83 | * flags & LKM_CANCEL != 0: must be converting or blocked | ||
84 | * flags & LKM_CANCEL == 0: must be granted | ||
85 | * | ||
86 | * So to unlock a converting lock, you must first cancel the | ||
87 | * convert (passing LKM_CANCEL in flags), then call the unlock | ||
88 | * again (with no LKM_CANCEL in flags). | ||
89 | */ | ||
90 | |||
91 | |||
92 | /* | ||
93 | * locking: | ||
94 | * caller needs: none | ||
95 | * taken: res->spinlock and lock->spinlock taken and dropped | ||
96 | * held on exit: none | ||
97 | * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network | ||
98 | * all callers should have taken an extra ref on lock coming in | ||
99 | */ | ||
100 | static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, | ||
101 | struct dlm_lock_resource *res, | ||
102 | struct dlm_lock *lock, | ||
103 | struct dlm_lockstatus *lksb, | ||
104 | int flags, int *call_ast, | ||
105 | int master_node) | ||
106 | { | ||
107 | enum dlm_status status; | ||
108 | int actions = 0; | ||
109 | int in_use; | ||
110 | u8 owner; | ||
111 | |||
112 | mlog(0, "master_node = %d, valblk = %d\n", master_node, | ||
113 | flags & LKM_VALBLK); | ||
114 | |||
115 | if (master_node) | ||
116 | BUG_ON(res->owner != dlm->node_num); | ||
117 | else | ||
118 | BUG_ON(res->owner == dlm->node_num); | ||
119 | |||
120 | spin_lock(&dlm->spinlock); | ||
121 | /* We want to be sure that we're not freeing a lock | ||
122 | * that still has ASTs pending... */ | ||
123 | in_use = !list_empty(&lock->ast_list); | ||
124 | spin_unlock(&dlm->spinlock); | ||
125 | if (in_use) { | ||
126 | mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " | ||
127 | "while waiting for an ast!", res->lockname.len, | ||
128 | res->lockname.name); | ||
129 | return DLM_BADPARAM; | ||
130 | } | ||
131 | |||
132 | spin_lock(&res->spinlock); | ||
133 | if (res->state & DLM_LOCK_RES_IN_PROGRESS) { | ||
134 | if (master_node) { | ||
135 | mlog(ML_ERROR, "lockres in progress!\n"); | ||
136 | spin_unlock(&res->spinlock); | ||
137 | return DLM_FORWARD; | ||
138 | } | ||
139 | /* ok for this to sleep if not in a network handler */ | ||
140 | __dlm_wait_on_lockres(res); | ||
141 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
142 | } | ||
143 | spin_lock(&lock->spinlock); | ||
144 | |||
145 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
146 | status = DLM_RECOVERING; | ||
147 | goto leave; | ||
148 | } | ||
149 | |||
150 | |||
151 | /* see above for what the spec says about | ||
152 | * LKM_CANCEL and the lock queue state */ | ||
153 | if (flags & LKM_CANCEL) | ||
154 | status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions); | ||
155 | else | ||
156 | status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); | ||
157 | |||
158 | if (status != DLM_NORMAL) | ||
159 | goto leave; | ||
160 | |||
161 | /* By now this has been masked out of cancel requests. */ | ||
162 | if (flags & LKM_VALBLK) { | ||
163 | /* make the final update to the lvb */ | ||
164 | if (master_node) | ||
165 | memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); | ||
166 | else | ||
167 | flags |= LKM_PUT_LVB; /* let the send function | ||
168 | * handle it. */ | ||
169 | } | ||
170 | |||
171 | if (!master_node) { | ||
172 | owner = res->owner; | ||
173 | /* drop locks and send message */ | ||
174 | if (flags & LKM_CANCEL) | ||
175 | lock->cancel_pending = 1; | ||
176 | else | ||
177 | lock->unlock_pending = 1; | ||
178 | spin_unlock(&lock->spinlock); | ||
179 | spin_unlock(&res->spinlock); | ||
180 | status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, | ||
181 | flags, owner); | ||
182 | spin_lock(&res->spinlock); | ||
183 | spin_lock(&lock->spinlock); | ||
184 | /* if the master told us the lock was already granted, | ||
185 | * let the ast handle all of these actions */ | ||
186 | if (status == DLM_NORMAL && | ||
187 | lksb->status == DLM_CANCELGRANT) { | ||
188 | actions &= ~(DLM_UNLOCK_REMOVE_LOCK| | ||
189 | DLM_UNLOCK_REGRANT_LOCK| | ||
190 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); | ||
191 | } | ||
192 | if (flags & LKM_CANCEL) | ||
193 | lock->cancel_pending = 0; | ||
194 | else | ||
195 | lock->unlock_pending = 0; | ||
196 | |||
197 | } | ||
198 | |||
199 | /* get an extra ref on lock. if we are just switching | ||
200 | * lists here, we don't want the lock to go away. */ | ||
201 | dlm_lock_get(lock); | ||
202 | |||
203 | if (actions & DLM_UNLOCK_REMOVE_LOCK) { | ||
204 | list_del_init(&lock->list); | ||
205 | dlm_lock_put(lock); | ||
206 | } | ||
207 | if (actions & DLM_UNLOCK_REGRANT_LOCK) { | ||
208 | dlm_lock_get(lock); | ||
209 | list_add_tail(&lock->list, &res->granted); | ||
210 | } | ||
211 | if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) { | ||
212 | mlog(0, "clearing convert_type at %smaster node\n", | ||
213 | master_node ? "" : "non-"); | ||
214 | lock->ml.convert_type = LKM_IVMODE; | ||
215 | } | ||
216 | |||
217 | /* remove the extra ref on lock */ | ||
218 | dlm_lock_put(lock); | ||
219 | |||
220 | leave: | ||
221 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
222 | if (!dlm_lock_on_list(&res->converting, lock)) | ||
223 | BUG_ON(lock->ml.convert_type != LKM_IVMODE); | ||
224 | else | ||
225 | BUG_ON(lock->ml.convert_type == LKM_IVMODE); | ||
226 | spin_unlock(&lock->spinlock); | ||
227 | spin_unlock(&res->spinlock); | ||
228 | wake_up(&res->wq); | ||
229 | |||
230 | /* let the caller's final dlm_lock_put handle the actual kfree */ | ||
231 | if (actions & DLM_UNLOCK_FREE_LOCK) { | ||
232 | /* this should always be coupled with list removal */ | ||
233 | BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); | ||
234 | mlog(0, "lock %"MLFu64" should be gone now! refs=%d\n", | ||
235 | lock->ml.cookie, atomic_read(&lock->lock_refs.refcount)-1); | ||
236 | dlm_lock_put(lock); | ||
237 | } | ||
238 | if (actions & DLM_UNLOCK_CALL_AST) | ||
239 | *call_ast = 1; | ||
240 | |||
241 | /* if cancel or unlock succeeded, lvb work is done */ | ||
242 | if (status == DLM_NORMAL) | ||
243 | lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); | ||
244 | |||
245 | return status; | ||
246 | } | ||
247 | |||
248 | void dlm_commit_pending_unlock(struct dlm_lock_resource *res, | ||
249 | struct dlm_lock *lock) | ||
250 | { | ||
251 | /* leave DLM_LKSB_PUT_LVB on the lksb so any final | ||
252 | * update of the lvb will be sent to the new master */ | ||
253 | list_del_init(&lock->list); | ||
254 | } | ||
255 | |||
256 | void dlm_commit_pending_cancel(struct dlm_lock_resource *res, | ||
257 | struct dlm_lock *lock) | ||
258 | { | ||
259 | list_del_init(&lock->list); | ||
260 | list_add_tail(&lock->list, &res->granted); | ||
261 | lock->ml.convert_type = LKM_IVMODE; | ||
262 | } | ||
263 | |||
264 | |||
265 | static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm, | ||
266 | struct dlm_lock_resource *res, | ||
267 | struct dlm_lock *lock, | ||
268 | struct dlm_lockstatus *lksb, | ||
269 | int flags, | ||
270 | int *call_ast) | ||
271 | { | ||
272 | return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1); | ||
273 | } | ||
274 | |||
275 | static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm, | ||
276 | struct dlm_lock_resource *res, | ||
277 | struct dlm_lock *lock, | ||
278 | struct dlm_lockstatus *lksb, | ||
279 | int flags, int *call_ast) | ||
280 | { | ||
281 | return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * locking: | ||
286 | * caller needs: none | ||
287 | * taken: none | ||
288 | * held on exit: none | ||
289 | * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network | ||
290 | */ | ||
291 | static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | ||
292 | struct dlm_lock_resource *res, | ||
293 | struct dlm_lock *lock, | ||
294 | struct dlm_lockstatus *lksb, | ||
295 | int flags, | ||
296 | u8 owner) | ||
297 | { | ||
298 | struct dlm_unlock_lock unlock; | ||
299 | int tmpret; | ||
300 | enum dlm_status ret; | ||
301 | int status = 0; | ||
302 | struct kvec vec[2]; | ||
303 | size_t veclen = 1; | ||
304 | |||
305 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
306 | |||
307 | memset(&unlock, 0, sizeof(unlock)); | ||
308 | unlock.node_idx = dlm->node_num; | ||
309 | unlock.flags = cpu_to_be32(flags); | ||
310 | unlock.cookie = lock->ml.cookie; | ||
311 | unlock.namelen = res->lockname.len; | ||
312 | memcpy(unlock.name, res->lockname.name, unlock.namelen); | ||
313 | |||
314 | vec[0].iov_len = sizeof(struct dlm_unlock_lock); | ||
315 | vec[0].iov_base = &unlock; | ||
316 | |||
317 | if (flags & LKM_PUT_LVB) { | ||
318 | /* extra data to send if we are updating lvb */ | ||
319 | vec[1].iov_len = DLM_LVB_LEN; | ||
320 | vec[1].iov_base = lock->lksb->lvb; | ||
321 | veclen++; | ||
322 | } | ||
323 | |||
324 | tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key, | ||
325 | vec, veclen, owner, &status); | ||
326 | if (tmpret >= 0) { | ||
327 | // successfully sent and received | ||
328 | if (status == DLM_CANCELGRANT) | ||
329 | ret = DLM_NORMAL; | ||
330 | else if (status == DLM_FORWARD) { | ||
331 | mlog(0, "master was in-progress. retry\n"); | ||
332 | ret = DLM_FORWARD; | ||
333 | } else | ||
334 | ret = status; | ||
335 | lksb->status = status; | ||
336 | } else { | ||
337 | mlog_errno(tmpret); | ||
338 | if (dlm_is_host_down(tmpret)) { | ||
339 | /* NOTE: this seems strange, but it is what we want. | ||
340 | * when the master goes down during a cancel or | ||
341 | * unlock, the recovery code completes the operation | ||
342 | * as if the master had not died, then passes the | ||
343 | * updated state to the recovery master. this thread | ||
344 | * just needs to finish out the operation and call | ||
345 | * the unlockast. */ | ||
346 | ret = DLM_NORMAL; | ||
347 | } else { | ||
348 | /* something bad. this will BUG in ocfs2 */ | ||
349 | ret = dlm_err_to_dlm_status(tmpret); | ||
350 | } | ||
351 | lksb->status = ret; | ||
352 | } | ||
353 | |||
354 | return ret; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * locking: | ||
359 | * caller needs: none | ||
360 | * taken: takes and drops res->spinlock | ||
361 | * held on exit: none | ||
362 | * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, | ||
363 | * return value from dlmunlock_master | ||
364 | */ | ||
365 | int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) | ||
366 | { | ||
367 | struct dlm_ctxt *dlm = data; | ||
368 | struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; | ||
369 | struct dlm_lock_resource *res = NULL; | ||
370 | struct list_head *iter; | ||
371 | struct dlm_lock *lock = NULL; | ||
372 | enum dlm_status status = DLM_NORMAL; | ||
373 | int found = 0, i; | ||
374 | struct dlm_lockstatus *lksb = NULL; | ||
375 | int ignore; | ||
376 | u32 flags; | ||
377 | struct list_head *queue; | ||
378 | |||
379 | flags = be32_to_cpu(unlock->flags); | ||
380 | |||
381 | if (flags & LKM_GET_LVB) { | ||
382 | mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n"); | ||
383 | return DLM_BADARGS; | ||
384 | } | ||
385 | |||
386 | if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) { | ||
387 | mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL " | ||
388 | "request!\n"); | ||
389 | return DLM_BADARGS; | ||
390 | } | ||
391 | |||
392 | if (unlock->namelen > DLM_LOCKID_NAME_MAX) { | ||
393 | mlog(ML_ERROR, "Invalid name length in unlock handler!\n"); | ||
394 | return DLM_IVBUFLEN; | ||
395 | } | ||
396 | |||
397 | if (!dlm_grab(dlm)) | ||
398 | return DLM_REJECTED; | ||
399 | |||
400 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
401 | "Domain %s not fully joined!\n", dlm->name); | ||
402 | |||
403 | mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none"); | ||
404 | |||
405 | res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen); | ||
406 | if (!res) { | ||
407 | /* We assume here that a missing lock resource simply means | ||
408 | * it was migrated away and destroyed before the other | ||
409 | * node could detect it. */ | ||
410 | mlog(0, "returning DLM_FORWARD -- res no longer exists\n"); | ||
411 | status = DLM_FORWARD; | ||
412 | goto not_found; | ||
413 | } | ||
414 | |||
415 | queue=&res->granted; | ||
416 | found = 0; | ||
417 | spin_lock(&res->spinlock); | ||
418 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
419 | spin_unlock(&res->spinlock); | ||
420 | mlog(0, "returning DLM_RECOVERING\n"); | ||
421 | status = DLM_RECOVERING; | ||
422 | goto leave; | ||
423 | } | ||
424 | |||
425 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
426 | spin_unlock(&res->spinlock); | ||
427 | mlog(0, "returning DLM_MIGRATING\n"); | ||
428 | status = DLM_MIGRATING; | ||
429 | goto leave; | ||
430 | } | ||
431 | |||
432 | if (res->owner != dlm->node_num) { | ||
433 | spin_unlock(&res->spinlock); | ||
434 | mlog(0, "returning DLM_FORWARD -- not master\n"); | ||
435 | status = DLM_FORWARD; | ||
436 | goto leave; | ||
437 | } | ||
438 | |||
439 | for (i = 0; i < 3; i++) { | ||
440 | list_for_each(iter, queue) { | ||
441 | lock = list_entry(iter, struct dlm_lock, list); | ||
442 | if (lock->ml.cookie == unlock->cookie && | ||
443 | lock->ml.node == unlock->node_idx) { | ||
444 | dlm_lock_get(lock); | ||
445 | found = 1; | ||
446 | break; | ||
447 | } | ||
448 | } | ||
449 | if (found) | ||
450 | break; | ||
451 | /* scan granted -> converting -> blocked queues */ | ||
452 | queue++; | ||
453 | } | ||
454 | spin_unlock(&res->spinlock); | ||
455 | if (!found) { | ||
456 | status = DLM_IVLOCKID; | ||
457 | goto not_found; | ||
458 | } | ||
459 | |||
460 | /* lock was found on queue */ | ||
461 | lksb = lock->lksb; | ||
462 | /* unlockast only called on originating node */ | ||
463 | if (flags & LKM_PUT_LVB) { | ||
464 | lksb->flags |= DLM_LKSB_PUT_LVB; | ||
465 | memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN); | ||
466 | } | ||
467 | |||
468 | /* if this is in-progress, propagate the DLM_FORWARD | ||
469 | * all the way back out */ | ||
470 | status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore); | ||
471 | if (status == DLM_FORWARD) | ||
472 | mlog(0, "lockres is in progress\n"); | ||
473 | |||
474 | if (flags & LKM_PUT_LVB) | ||
475 | lksb->flags &= ~DLM_LKSB_PUT_LVB; | ||
476 | |||
477 | dlm_lockres_calc_usage(dlm, res); | ||
478 | dlm_kick_thread(dlm, res); | ||
479 | |||
480 | not_found: | ||
481 | if (!found) | ||
482 | mlog(ML_ERROR, "failed to find lock to unlock! " | ||
483 | "cookie=%"MLFu64"\n", | ||
484 | unlock->cookie); | ||
485 | else { | ||
486 | /* send the lksb->status back to the other node */ | ||
487 | status = lksb->status; | ||
488 | dlm_lock_put(lock); | ||
489 | } | ||
490 | |||
491 | leave: | ||
492 | if (res) | ||
493 | dlm_lockres_put(res); | ||
494 | |||
495 | dlm_put(dlm); | ||
496 | |||
497 | return status; | ||
498 | } | ||
499 | |||
500 | |||
501 | static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, | ||
502 | struct dlm_lock_resource *res, | ||
503 | struct dlm_lock *lock, | ||
504 | struct dlm_lockstatus *lksb, | ||
505 | int *actions) | ||
506 | { | ||
507 | enum dlm_status status; | ||
508 | |||
509 | if (dlm_lock_on_list(&res->blocked, lock)) { | ||
510 | /* cancel this outright */ | ||
511 | lksb->status = DLM_NORMAL; | ||
512 | status = DLM_NORMAL; | ||
513 | *actions = (DLM_UNLOCK_CALL_AST | | ||
514 | DLM_UNLOCK_REMOVE_LOCK); | ||
515 | } else if (dlm_lock_on_list(&res->converting, lock)) { | ||
516 | /* cancel the request, put back on granted */ | ||
517 | lksb->status = DLM_NORMAL; | ||
518 | status = DLM_NORMAL; | ||
519 | *actions = (DLM_UNLOCK_CALL_AST | | ||
520 | DLM_UNLOCK_REMOVE_LOCK | | ||
521 | DLM_UNLOCK_REGRANT_LOCK | | ||
522 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); | ||
523 | } else if (dlm_lock_on_list(&res->granted, lock)) { | ||
524 | /* too late, already granted. DLM_CANCELGRANT */ | ||
525 | lksb->status = DLM_CANCELGRANT; | ||
526 | status = DLM_NORMAL; | ||
527 | *actions = DLM_UNLOCK_CALL_AST; | ||
528 | } else { | ||
529 | mlog(ML_ERROR, "lock to cancel is not on any list!\n"); | ||
530 | lksb->status = DLM_IVLOCKID; | ||
531 | status = DLM_IVLOCKID; | ||
532 | *actions = 0; | ||
533 | } | ||
534 | return status; | ||
535 | } | ||
536 | |||
537 | static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, | ||
538 | struct dlm_lock_resource *res, | ||
539 | struct dlm_lock *lock, | ||
540 | struct dlm_lockstatus *lksb, | ||
541 | int *actions) | ||
542 | { | ||
543 | enum dlm_status status; | ||
544 | |||
545 | /* unlock request */ | ||
546 | if (!dlm_lock_on_list(&res->granted, lock)) { | ||
547 | lksb->status = DLM_DENIED; | ||
548 | status = DLM_DENIED; | ||
549 | dlm_error(status); | ||
550 | *actions = 0; | ||
551 | } else { | ||
552 | /* unlock granted lock */ | ||
553 | lksb->status = DLM_NORMAL; | ||
554 | status = DLM_NORMAL; | ||
555 | *actions = (DLM_UNLOCK_FREE_LOCK | | ||
556 | DLM_UNLOCK_CALL_AST | | ||
557 | DLM_UNLOCK_REMOVE_LOCK); | ||
558 | } | ||
559 | return status; | ||
560 | } | ||
561 | |||
562 | /* there seems to be no point in doing this async | ||
563 | * since (even for the remote case) there is really | ||
564 | * no work to queue up... so just do it and fire the | ||
565 | * unlockast by hand when done... */ | ||
566 | enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, | ||
567 | int flags, dlm_astunlockfunc_t *unlockast, void *data) | ||
568 | { | ||
569 | enum dlm_status status; | ||
570 | struct dlm_lock_resource *res; | ||
571 | struct dlm_lock *lock = NULL; | ||
572 | int call_ast, is_master; | ||
573 | |||
574 | mlog_entry_void(); | ||
575 | |||
576 | if (!lksb) { | ||
577 | dlm_error(DLM_BADARGS); | ||
578 | return DLM_BADARGS; | ||
579 | } | ||
580 | |||
581 | if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) { | ||
582 | dlm_error(DLM_BADPARAM); | ||
583 | return DLM_BADPARAM; | ||
584 | } | ||
585 | |||
586 | if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) { | ||
587 | mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n"); | ||
588 | flags &= ~LKM_VALBLK; | ||
589 | } | ||
590 | |||
591 | if (!lksb->lockid || !lksb->lockid->lockres) { | ||
592 | dlm_error(DLM_BADPARAM); | ||
593 | return DLM_BADPARAM; | ||
594 | } | ||
595 | |||
596 | lock = lksb->lockid; | ||
597 | BUG_ON(!lock); | ||
598 | dlm_lock_get(lock); | ||
599 | |||
600 | res = lock->lockres; | ||
601 | BUG_ON(!res); | ||
602 | dlm_lockres_get(res); | ||
603 | retry: | ||
604 | call_ast = 0; | ||
605 | /* need to retry up here because owner may have changed */ | ||
606 | mlog(0, "lock=%p res=%p\n", lock, res); | ||
607 | |||
608 | spin_lock(&res->spinlock); | ||
609 | is_master = (res->owner == dlm->node_num); | ||
610 | spin_unlock(&res->spinlock); | ||
611 | |||
612 | if (is_master) { | ||
613 | status = dlmunlock_master(dlm, res, lock, lksb, flags, | ||
614 | &call_ast); | ||
615 | mlog(0, "done calling dlmunlock_master: returned %d, " | ||
616 | "call_ast is %d\n", status, call_ast); | ||
617 | } else { | ||
618 | status = dlmunlock_remote(dlm, res, lock, lksb, flags, | ||
619 | &call_ast); | ||
620 | mlog(0, "done calling dlmunlock_remote: returned %d, " | ||
621 | "call_ast is %d\n", status, call_ast); | ||
622 | } | ||
623 | |||
624 | if (status == DLM_RECOVERING || | ||
625 | status == DLM_MIGRATING || | ||
626 | status == DLM_FORWARD) { | ||
627 | /* We want to go away for a tiny bit to allow recovery | ||
628 | * / migration to complete on this resource. I don't | ||
629 | * know of any wait queue we could sleep on as this | ||
630 | * may be happening on another node. Perhaps the | ||
631 | * proper solution is to queue up requests on the | ||
632 | * other end? */ | ||
633 | |||
634 | /* do we want to yield(); ?? */ | ||
635 | msleep(50); | ||
636 | |||
637 | mlog(0, "retrying unlock due to pending recovery/" | ||
638 | "migration/in-progress\n"); | ||
639 | goto retry; | ||
640 | } | ||
641 | |||
642 | if (call_ast) { | ||
643 | mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); | ||
644 | if (is_master) { | ||
645 | /* it is possible that there is one last bast | ||
646 | * pending. make sure it is flushed, then | ||
647 | * call the unlockast. | ||
648 | * not an issue if this lock is mastered remotely, | ||
649 | * since this lock has been removed from the | ||
650 | * lockres queues and cannot be found. */ | ||
651 | dlm_kick_thread(dlm, NULL); | ||
652 | wait_event(dlm->ast_wq, | ||
653 | dlm_lock_basts_flushed(dlm, lock)); | ||
654 | } | ||
655 | (*unlockast)(data, lksb->status); | ||
656 | } | ||
657 | |||
658 | if (status == DLM_NORMAL) { | ||
659 | mlog(0, "kicking the thread\n"); | ||
660 | dlm_kick_thread(dlm, res); | ||
661 | } else | ||
662 | dlm_error(status); | ||
663 | |||
664 | dlm_lockres_calc_usage(dlm, res); | ||
665 | dlm_lockres_put(res); | ||
666 | dlm_lock_put(lock); | ||
667 | |||
668 | mlog(0, "returning status=%d!\n", status); | ||
669 | return status; | ||
670 | } | ||
671 | EXPORT_SYMBOL_GPL(dlmunlock); | ||
672 | |||
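Per the spec comment near the top of this file, a caller cannot directly unlock a converting lock: it must first cancel the convert with LKM_CANCEL, then issue a plain unlock. What a cancel actually does depends on which queue the lock is found on, as implemented by dlm_get_cancel_actions() above. A standalone sketch of that decision table follows (plain C for illustration; the enum and strings are ours, only the queue-to-outcome mapping comes from the code).

/* Standalone sketch (plain C, not kernel code) of the cancel decision
 * table in dlm_get_cancel_actions() above. */
#include <stdio.h>

enum queue { BLOCKED, CONVERTING, GRANTED, NOT_FOUND };

static const char *cancel_outcome(enum queue q)
{
	switch (q) {
	case BLOCKED:
		return "cancelled outright: remove lock, call ast";
	case CONVERTING:
		return "convert cancelled: remove, regrant, clear convert_type";
	case GRANTED:
		return "too late, already granted: DLM_CANCELGRANT, ast only";
	default:
		return "not on any list: DLM_IVLOCKID, no actions";
	}
}

int main(void)
{
	printf("%s\n", cancel_outcome(CONVERTING));
	return 0;
}

The DLM_CANCELGRANT case is also why dlmunlock_common() strips the remove/regrant/clear-convert actions when the master reports the lock was already granted: the pending AST will handle those instead.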
diff --git a/fs/ocfs2/dlm/dlmver.c b/fs/ocfs2/dlm/dlmver.c new file mode 100644 index 000000000000..7ef2653f8f41 --- /dev/null +++ b/fs/ocfs2/dlm/dlmver.c | |||
@@ -0,0 +1,42 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmver.c | ||
5 | * | ||
6 | * version string | ||
7 | * | ||
8 | * Copyright (C) 2002, 2005 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/kernel.h> | ||
28 | |||
29 | #include "dlmver.h" | ||
30 | |||
31 | #define DLM_BUILD_VERSION "1.3.3" | ||
32 | |||
33 | #define VERSION_STR "OCFS2 DLM " DLM_BUILD_VERSION | ||
34 | |||
35 | void dlm_print_version(void) | ||
36 | { | ||
37 | printk(KERN_INFO "%s\n", VERSION_STR); | ||
38 | } | ||
39 | |||
40 | MODULE_DESCRIPTION(VERSION_STR); | ||
41 | |||
42 | MODULE_VERSION(DLM_BUILD_VERSION); | ||
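For completeness, a hypothetical self-contained model of the version-string assembly above, with printf standing in for printk(KERN_INFO ...); the real call site of dlm_print_version() lives elsewhere in the module and is not part of this hunk.

/* Standalone sketch (plain C, not kernel code). */
#include <stdio.h>

#define DLM_BUILD_VERSION "1.3.3"
#define VERSION_STR "OCFS2 DLM " DLM_BUILD_VERSION

static void dlm_print_version(void)
{
	printf("%s\n", VERSION_STR);
}

int main(void)
{
	dlm_print_version();
	return 0;
}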
diff --git a/fs/ocfs2/dlm/dlmver.h b/fs/ocfs2/dlm/dlmver.h new file mode 100644 index 000000000000..f674aee77a16 --- /dev/null +++ b/fs/ocfs2/dlm/dlmver.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmver.h | ||
5 | * | ||
6 | * Function prototypes | ||
7 | * | ||
8 | * Copyright (C) 2005 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #ifndef DLM_VER_H | ||
27 | #define DLM_VER_H | ||
28 | |||
29 | void dlm_print_version(void); | ||
30 | |||
31 | #endif /* DLM_VER_H */ | ||