Diffstat (limited to 'fs/ocfs2/dlm')
-rw-r--r--  fs/ocfs2/dlm/Makefile          8
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h        214
-rw-r--r--  fs/ocfs2/dlm/dlmast.c        466
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h     884
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c    530
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.h     35
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c      246
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.h       30
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c    1469
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.h      36
-rw-r--r--  fs/ocfs2/dlm/dlmfs.c         640
-rw-r--r--  fs/ocfs2/dlm/dlmfsver.c       42
-rw-r--r--  fs/ocfs2/dlm/dlmfsver.h       31
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c       676
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c    2664
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c  2132
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c     692
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c     672
-rw-r--r--  fs/ocfs2/dlm/dlmver.c         42
-rw-r--r--  fs/ocfs2/dlm/dlmver.h         31
-rw-r--r--  fs/ocfs2/dlm/userdlm.c       658
-rw-r--r--  fs/ocfs2/dlm/userdlm.h       111
22 files changed, 12309 insertions, 0 deletions
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
new file mode 100644
index 000000000000..ce3f7c29d270
--- /dev/null
+++ b/fs/ocfs2/dlm/Makefile
@@ -0,0 +1,8 @@
EXTRA_CFLAGS += -Ifs/ocfs2

obj-$(CONFIG_OCFS2_FS) += ocfs2_dlm.o ocfs2_dlmfs.o

ocfs2_dlm-objs := dlmdomain.o dlmdebug.o dlmthread.o dlmrecovery.o \
	dlmmaster.o dlmast.o dlmconvert.o dlmlock.o dlmunlock.o dlmver.o

ocfs2_dlmfs-objs := userdlm.o dlmfs.o dlmfsver.o
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
new file mode 100644
index 000000000000..53652f51c0e1
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -0,0 +1,214 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmapi.h
 *
 * externally exported dlm interfaces
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#ifndef DLMAPI_H
#define DLMAPI_H

struct dlm_lock;
struct dlm_ctxt;

/* NOTE: changes made to this enum should be reflected in dlmdebug.c */
enum dlm_status {
	DLM_NORMAL = 0,           /*  0: request in progress */
	DLM_GRANTED,              /*  1: request granted */
	DLM_DENIED,               /*  2: request denied */
	DLM_DENIED_NOLOCKS,       /*  3: request denied, out of system resources */
	DLM_WORKING,              /*  4: async request in progress */
	DLM_BLOCKED,              /*  5: lock request blocked */
	DLM_BLOCKED_ORPHAN,       /*  6: lock request blocked by an orphan lock */
	DLM_DENIED_GRACE_PERIOD,  /*  7: topological change in progress */
	DLM_SYSERR,               /*  8: system error */
	DLM_NOSUPPORT,            /*  9: unsupported */
	DLM_CANCELGRANT,          /* 10: can't cancel convert: already granted */
	DLM_IVLOCKID,             /* 11: bad lockid */
	DLM_SYNC,                 /* 12: synchronous request granted */
	DLM_BADTYPE,              /* 13: bad resource type */
	DLM_BADRESOURCE,          /* 14: bad resource handle */
	DLM_MAXHANDLES,           /* 15: no more resource handles */
	DLM_NOCLINFO,             /* 16: can't contact cluster manager */
	DLM_NOLOCKMGR,            /* 17: can't contact lock manager */
	DLM_NOPURGED,             /* 18: can't contact purge daemon */
	DLM_BADARGS,              /* 19: bad api args */
	DLM_VOID,                 /* 20: no status */
	DLM_NOTQUEUED,            /* 21: NOQUEUE was specified and request failed */
	DLM_IVBUFLEN,             /* 22: invalid resource name length */
	DLM_CVTUNGRANT,           /* 23: attempted to convert ungranted lock */
	DLM_BADPARAM,             /* 24: invalid lock mode specified */
	DLM_VALNOTVALID,          /* 25: value block has been invalidated */
	DLM_REJECTED,             /* 26: request rejected, unrecognized client */
	DLM_ABORT,                /* 27: blocked lock request cancelled */
	DLM_CANCEL,               /* 28: conversion request cancelled */
	DLM_IVRESHANDLE,          /* 29: invalid resource handle */
	DLM_DEADLOCK,             /* 30: deadlock recovery refused this request */
	DLM_DENIED_NOASTS,        /* 31: failed to allocate AST */
	DLM_FORWARD,              /* 32: request must wait for primary's response */
	DLM_TIMEOUT,              /* 33: timeout value for lock has expired */
	DLM_IVGROUPID,            /* 34: invalid group specification */
	DLM_VERS_CONFLICT,        /* 35: version conflicts prevent request handling */
	DLM_BAD_DEVICE_PATH,      /* 36: locks device does not exist or path wrong */
	DLM_NO_DEVICE_PERMISSION, /* 37: client has insufficient perms for device */
	DLM_NO_CONTROL_DEVICE,    /* 38: cannot set options on opened device */

	DLM_RECOVERING,           /* 39: extension, allows caller to fail a lock
				     request if it is being recovered */
	DLM_MIGRATING,            /* 40: extension, allows caller to fail a lock
				     request if it is being migrated */
	DLM_MAXSTATS,             /* 41: upper limit for return code validation */
};

/* for pretty-printing dlm_status error messages */
const char *dlm_errmsg(enum dlm_status err);
/* for pretty-printing dlm_status error names */
const char *dlm_errname(enum dlm_status err);

/* Eventually the DLM will use standard errno values, but in the
 * meantime this lets us track dlm errors as they bubble up.  When we
 * bring its error reporting into line with the rest of the stack,
 * these can just be replaced with calls to mlog_errno. */
#define dlm_error(st) do {						\
	if ((st) != DLM_RECOVERING &&					\
	    (st) != DLM_MIGRATING &&					\
	    (st) != DLM_FORWARD)					\
		mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st)));	\
} while (0)
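
/* Minimal usage sketch (illustrative only; the lock name, flags and
 * callbacks below are hypothetical, not part of the exported API):
 *
 *	status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE,
 *			 "my_lock", my_ast, &lksb, my_bast);
 *	if (status != DLM_NORMAL)
 *		dlm_error(status);  // quiet for RECOVERING/MIGRATING/FORWARD
 */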

#define DLM_LKSB_UNUSED1       0x01
#define DLM_LKSB_PUT_LVB       0x02
#define DLM_LKSB_GET_LVB       0x04
#define DLM_LKSB_UNUSED2       0x08
#define DLM_LKSB_UNUSED3       0x10
#define DLM_LKSB_UNUSED4       0x20
#define DLM_LKSB_UNUSED5       0x40
#define DLM_LKSB_UNUSED6       0x80

#define DLM_LVB_LEN  64

/* Callers are only allowed access to the lvb and status members of
 * this struct. */
struct dlm_lockstatus {
	enum dlm_status status;
	u32 flags;
	struct dlm_lock *lockid;
	char lvb[DLM_LVB_LEN];
};

/* Valid lock modes. */
#define LKM_IVMODE      (-1)    /* invalid mode */
#define LKM_NLMODE      0       /* null lock */
#define LKM_CRMODE      1       /* concurrent read (unsupported) */
#define LKM_CWMODE      2       /* concurrent write (unsupported) */
#define LKM_PRMODE      3       /* protected read */
#define LKM_PWMODE      4       /* protected write (unsupported) */
#define LKM_EXMODE      5       /* exclusive */
#define LKM_MAXMODE     5
#define LKM_MODEMASK    0xff

/* Flags passed to dlmlock and dlmunlock:
 * reserved: flags used by the "real" dlm
 * only a few are supported by this dlm
 * (U) = unsupported by ocfs2 dlm */
#define LKM_ORPHAN       0x00000010  /* this lock is orphanable (U) */
#define LKM_PARENTABLE   0x00000020  /* this lock was orphaned (U) */
#define LKM_BLOCK        0x00000040  /* blocking lock request (U) */
#define LKM_LOCAL        0x00000080  /* local lock request */
#define LKM_VALBLK       0x00000100  /* lock value block request */
#define LKM_NOQUEUE      0x00000200  /* non-blocking request */
#define LKM_CONVERT      0x00000400  /* conversion request */
#define LKM_NODLCKWT     0x00000800  /* this lock won't deadlock (U) */
#define LKM_UNLOCK       0x00001000  /* deallocate this lock */
#define LKM_CANCEL       0x00002000  /* cancel conversion request */
#define LKM_DEQALL       0x00004000  /* remove all locks held by proc (U) */
#define LKM_INVVALBLK    0x00008000  /* invalidate lock value block */
#define LKM_SYNCSTS      0x00010000  /* return synchronous status if possible (U) */
#define LKM_TIMEOUT      0x00020000  /* lock request contains timeout (U) */
#define LKM_SNGLDLCK     0x00040000  /* request can self-deadlock (U) */
#define LKM_FINDLOCAL    0x00080000  /* find local lock request (U) */
#define LKM_PROC_OWNED   0x00100000  /* owned by process, not group (U) */
#define LKM_XID          0x00200000  /* use transaction id for deadlock (U) */
#define LKM_XID_CONFLICT 0x00400000  /* do not allow lock inheritance (U) */
#define LKM_FORCE        0x00800000  /* force unlock flag */
#define LKM_REVVALBLK    0x01000000  /* temporary solution: re-validate
					lock value block (U) */
/* unused */
#define LKM_UNUSED1      0x00000001  /* unused */
#define LKM_UNUSED2      0x00000002  /* unused */
#define LKM_UNUSED3      0x00000004  /* unused */
#define LKM_UNUSED4      0x00000008  /* unused */
#define LKM_UNUSED5      0x02000000  /* unused */
#define LKM_UNUSED6      0x04000000  /* unused */
#define LKM_UNUSED7      0x08000000  /* unused */

/* ocfs2 extensions: internal only
 * should never be used by caller */
#define LKM_MIGRATION    0x10000000  /* extension: lockres is to be migrated
					to another node */
#define LKM_PUT_LVB      0x20000000  /* extension: lvb is being passed;
					should be applied to lockres */
#define LKM_GET_LVB      0x40000000  /* extension: lvb should be copied
					from lockres when lock is granted */
#define LKM_RECOVERY     0x80000000  /* extension: flag for recovery lock
					used to avoid recovery rwsem */


typedef void (dlm_astlockfunc_t)(void *);
typedef void (dlm_bastlockfunc_t)(void *, int);
typedef void (dlm_astunlockfunc_t)(void *, enum dlm_status);

enum dlm_status dlmlock(struct dlm_ctxt *dlm,
			int mode,
			struct dlm_lockstatus *lksb,
			int flags,
			const char *name,
			dlm_astlockfunc_t *ast,
			void *data,
			dlm_bastlockfunc_t *bast);

enum dlm_status dlmunlock(struct dlm_ctxt *dlm,
			  struct dlm_lockstatus *lksb,
			  int flags,
			  dlm_astunlockfunc_t *unlockast,
			  void *data);

struct dlm_ctxt * dlm_register_domain(const char *domain, u32 key);

void dlm_unregister_domain(struct dlm_ctxt *dlm);

void dlm_print_one_lock(struct dlm_lock *lockid);

typedef void (dlm_eviction_func)(int, void *);
struct dlm_eviction_cb {
	struct list_head ec_item;
	dlm_eviction_func *ec_func;
	void *ec_data;
};
void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
			   dlm_eviction_func *f,
			   void *data);
void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
			      struct dlm_eviction_cb *cb);
void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb);

#endif /* DLMAPI_H */
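
Taken together, dlmapi.h is the entire consumer-facing surface: register a domain, optionally hook node-eviction notification, then drive locks through dlmlock()/dlmunlock() with AST/BAST callbacks. The sketch below strings those calls together in one plausible order; the domain name, key, lock name and callback bodies are hypothetical, and it assumes dlm_register_domain() reports failure via an ERR_PTR-style return, as the in-kernel callers treat it.

	#include <linux/err.h>
	#include <linux/string.h>
	#include "dlmapi.h"

	static void example_ast(void *astdata)
	{
		struct dlm_lockstatus *lksb = astdata;
		/* request completed; result in lksb->status, data in lksb->lvb */
	}

	static void example_bast(void *astdata, int blocked_type)
	{
		/* another node wants a conflicting mode; release soon */
	}

	static void example_unlockast(void *astdata, enum dlm_status st)
	{
		/* unlock completed with status st */
	}

	static void example_eviction(int node_num, void *data)
	{
		/* node_num has left (or been evicted from) the domain */
	}

	static int example(void)
	{
		struct dlm_ctxt *dlm;
		struct dlm_eviction_cb ecb;
		static struct dlm_lockstatus lksb;	/* must outlive the lock */
		enum dlm_status status;

		dlm = dlm_register_domain("exampledomain", 0xabcd1234);
		if (IS_ERR(dlm))
			return PTR_ERR(dlm);

		dlm_setup_eviction_cb(&ecb, example_eviction, NULL);
		dlm_register_eviction_cb(dlm, &ecb);

		memset(&lksb, 0, sizeof(lksb));
		status = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE | LKM_VALBLK,
				 "example_lock", example_ast, &lksb, example_bast);
		if (status == DLM_NOTQUEUED) {
			/* LKM_NOQUEUE and another node holds a conflicting mode */
		} else if (status != DLM_NORMAL) {
			dlm_error(status);
		}

		/* ... use the lock once example_ast() fires; later drop it,
		 * flushing our lock value block back out with LKM_VALBLK ... */
		status = dlmunlock(dlm, &lksb, LKM_VALBLK, example_unlockast, &lksb);
		if (status != DLM_NORMAL)
			dlm_error(status);

		dlm_unregister_eviction_cb(&ecb);
		dlm_unregister_domain(dlm);
		return 0;
	}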
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
new file mode 100644
index 000000000000..8d17d28ef91c
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -0,0 +1,466 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmast.c
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"
#include "cluster/endian.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	assert_spin_locked(&dlm->ast_lock);
	assert_spin_locked(&lock->spinlock);

	if (lock->ml.highest_blocked == LKM_IVMODE)
		return 0;
	BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

	if (lock->bast_pending &&
	    list_empty(&lock->bast_list))
		/* old bast already sent, ok */
		return 0;

	if (lock->ml.type == LKM_EXMODE)
		/* EX blocks anything left, any bast still valid */
		return 0;
	else if (lock->ml.type == LKM_NLMODE)
		/* NL blocks nothing, no reason to send any bast, cancel it */
		return 1;
	else if (lock->ml.highest_blocked != LKM_EXMODE)
		/* PR only blocks EX */
		return 1;

	return 0;
}

static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);

	assert_spin_locked(&dlm->ast_lock);
	if (!list_empty(&lock->ast_list)) {
		mlog(ML_ERROR, "ast list not empty!!  pending=%d, newlevel=%d\n",
		     lock->ast_pending, lock->ml.type);
		BUG();
	}
	BUG_ON(!list_empty(&lock->ast_list));
	if (lock->ast_pending)
		mlog(0, "lock has an ast getting flushed right now\n");

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);

	/* check to see if this ast obsoletes the bast */
	if (dlm_should_cancel_bast(dlm, lock)) {
		struct dlm_lock_resource *res = lock->lockres;
		mlog(0, "%s: cancelling bast for %.*s\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		lock->bast_pending = 0;
		list_del_init(&lock->bast_list);
		lock->ml.highest_blocked = LKM_IVMODE;
		/* removing lock from list, remove a ref.  guaranteed
		 * this won't be the last ref because of the get above,
		 * so res->spinlock will not be taken here */
		dlm_lock_put(lock);
		/* free up the reserved bast that we are cancelling.
		 * guaranteed that this will not be the last reserved
		 * ast because *both* an ast and a bast were reserved
		 * to get to this point.  the res->spinlock will not be
		 * taken here */
		dlm_lockres_release_ast(dlm, res);
	}
	list_add_tail(&lock->ast_list, &dlm->pending_asts);
	lock->ast_pending = 1;
	spin_unlock(&lock->spinlock);
}

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_ast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}


static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);
	assert_spin_locked(&dlm->ast_lock);

	BUG_ON(!list_empty(&lock->bast_list));
	if (lock->bast_pending)
		mlog(0, "lock has a bast getting flushed right now\n");

	/* putting lock on list, add a ref */
	dlm_lock_get(lock);
	spin_lock(&lock->spinlock);
	list_add_tail(&lock->bast_list, &dlm->pending_basts);
	lock->bast_pending = 1;
	spin_unlock(&lock->spinlock);
}

void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	mlog_entry_void();

	BUG_ON(!dlm);
	BUG_ON(!lock);

	spin_lock(&dlm->ast_lock);
	__dlm_queue_bast(dlm, lock);
	spin_unlock(&dlm->ast_lock);
}

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock)
{
	struct dlm_lockstatus *lksb = lock->lksb;
	BUG_ON(!lksb);

	/* only updates if this node masters the lockres */
	if (res->owner == dlm->node_num) {

		spin_lock(&res->spinlock);
		/* check the lksb flags for the direction */
		if (lksb->flags & DLM_LKSB_GET_LVB) {
			mlog(0, "getting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
		} else if (lksb->flags & DLM_LKSB_PUT_LVB) {
			mlog(0, "setting lvb from lockres for %s node\n",
			     lock->ml.node == dlm->node_num ? "master" :
			     "remote");
			memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
		}
		spin_unlock(&res->spinlock);
	}

	/* reset any lvb flags on the lksb */
	lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}

void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	dlm_astlockfunc_t *fn;
	struct dlm_lockstatus *lksb;

	mlog_entry_void();

	lksb = lock->lksb;
	fn = lock->ast;
	BUG_ON(lock->ml.node != dlm->node_num);

	dlm_update_lvb(dlm, res, lock);
	(*fn)(lock->astdata);
}


int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		      struct dlm_lock *lock)
{
	int ret;
	struct dlm_lockstatus *lksb;
	int lksbflags;

	mlog_entry_void();

	lksb = lock->lksb;
	BUG_ON(lock->ml.node == dlm->node_num);

	lksbflags = lksb->flags;
	dlm_update_lvb(dlm, res, lock);

	/* lock request came from another node
	 * go do the ast over there */
	ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
	return ret;
}

void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
		       struct dlm_lock *lock, int blocked_type)
{
	dlm_bastlockfunc_t *fn = lock->bast;

	mlog_entry_void();
	BUG_ON(lock->ml.node != dlm->node_num);

	(*fn)(lock->astdata, blocked_type);
}



int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
{
	int ret;
	unsigned int locklen;
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock *lock = NULL;
	struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
	char *name;
	struct list_head *iter, *head = NULL;
	u64 cookie;
	u32 flags;

	if (!dlm_grab(dlm)) {
		dlm_error(DLM_REJECTED);
		return DLM_REJECTED;
	}

	mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
			"Domain %s not fully joined!\n", dlm->name);

	name = past->name;
	locklen = past->namelen;
	cookie = be64_to_cpu(past->cookie);
	flags = be32_to_cpu(past->flags);

	if (locklen > DLM_LOCKID_NAME_MAX) {
		ret = DLM_IVBUFLEN;
		mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n");
		goto leave;
	}

	if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
	    (LKM_PUT_LVB|LKM_GET_LVB)) {
		mlog(ML_ERROR, "both PUT and GET lvb specified\n");
		ret = DLM_BADARGS;
		goto leave;
	}

	mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
	     (flags & LKM_GET_LVB ? "get lvb" : "none"));

	mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

	if (past->type != DLM_AST &&
	    past->type != DLM_BAST) {
		mlog(ML_ERROR, "Unknown ast type! %d, cookie=%"MLFu64", "
		     "name=%.*s\n", past->type, cookie, locklen, name);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	res = dlm_lookup_lockres(dlm, name, locklen);
	if (!res) {
		mlog(ML_ERROR, "got %sast for unknown lockres! "
		     "cookie=%"MLFu64", name=%.*s, namelen=%u\n",
		     past->type == DLM_AST ? "" : "b",
		     cookie, locklen, name, locklen);
		ret = DLM_IVLOCKID;
		goto leave;
	}

	/* cannot get a proxy ast message if this node owns it */
	BUG_ON(res->owner == dlm->node_num);

	mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);

	spin_lock(&res->spinlock);
	if (res->state & DLM_LOCK_RES_RECOVERING) {
		mlog(0, "responding with DLM_RECOVERING!\n");
		ret = DLM_RECOVERING;
		goto unlock_out;
	}
	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "responding with DLM_MIGRATING!\n");
		ret = DLM_MIGRATING;
		goto unlock_out;
	}
	/* try convert queue for both ast/bast */
	head = &res->converting;
	lock = NULL;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (be64_to_cpu(lock->ml.cookie) == cookie)
			goto do_ast;
	}

	/* if not on convert, try blocked for ast, granted for bast */
	if (past->type == DLM_AST)
		head = &res->blocked;
	else
		head = &res->granted;

	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (be64_to_cpu(lock->ml.cookie) == cookie)
			goto do_ast;
	}

	mlog(ML_ERROR, "got %sast for unknown lock! cookie=%"MLFu64", "
	     "name=%.*s, namelen=%u\n",
	     past->type == DLM_AST ? "" : "b", cookie, locklen, name, locklen);

	ret = DLM_NORMAL;
unlock_out:
	spin_unlock(&res->spinlock);
	goto leave;

do_ast:
	ret = DLM_NORMAL;
	if (past->type == DLM_AST) {
		/* do not alter lock refcount.  switching lists. */
		list_del_init(&lock->list);
		list_add_tail(&lock->list, &res->granted);
		mlog(0, "ast: adding to granted list... type=%d, "
		     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
		if (lock->ml.convert_type != LKM_IVMODE) {
			lock->ml.type = lock->ml.convert_type;
			lock->ml.convert_type = LKM_IVMODE;
		} else {
			// should already be there....
		}

		lock->lksb->status = DLM_NORMAL;

		/* if we requested the lvb, fetch it into our lksb now */
		if (flags & LKM_GET_LVB) {
			BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
			memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
		}
	}
	spin_unlock(&res->spinlock);

	if (past->type == DLM_AST)
		dlm_do_local_ast(dlm, res, lock);
	else
		dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:

	if (res)
		dlm_lockres_put(res);

	dlm_put(dlm);
	return ret;
}



int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			   struct dlm_lock *lock, int msg_type,
			   int blocked_type, int flags)
{
	int ret = 0;
	struct dlm_proxy_ast past;
	struct kvec vec[2];
	size_t veclen = 1;
	int status;

	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
		   res->lockname.len, res->lockname.name, lock->ml.node,
		   msg_type, blocked_type);

	memset(&past, 0, sizeof(struct dlm_proxy_ast));
	past.node_idx = dlm->node_num;
	past.type = msg_type;
	past.blocked_type = blocked_type;
	past.namelen = res->lockname.len;
	memcpy(past.name, res->lockname.name, past.namelen);
	past.cookie = lock->ml.cookie;

	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
	vec[0].iov_base = &past;
	if (flags & DLM_LKSB_GET_LVB) {
		mlog(0, "returning requested LVB data\n");
		be32_add_cpu(&past.flags, LKM_GET_LVB);
		vec[1].iov_len = DLM_LVB_LEN;
		vec[1].iov_base = lock->lksb->lvb;
		veclen++;
	}

	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
				     lock->ml.node, &status);
	if (ret < 0)
		mlog_errno(ret);
	else {
		if (status == DLM_RECOVERING) {
			mlog(ML_ERROR, "sent AST to node %u, it thinks this "
			     "node is dead!\n", lock->ml.node);
			BUG();
		} else if (status == DLM_MIGRATING) {
			mlog(ML_ERROR, "sent AST to node %u, it returned "
			     "DLM_MIGRATING!\n", lock->ml.node);
			BUG();
		} else if (status != DLM_NORMAL) {
			mlog(ML_ERROR, "AST to node %u returned %d!\n",
			     lock->ml.node, status);
			/* ignore it */
		}
		ret = 0;
	}
	return ret;
}
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
new file mode 100644
index 000000000000..3fecba0a6023
--- /dev/null
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -0,0 +1,884 @@
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI     (0xf000000)
#define DLM_HB_NODE_UP_PRI       (0x8000000)

#define DLM_LOCKID_NAME_MAX    32

#define DLM_DOMAIN_NAME_MAX_LEN        255
#define DLM_LOCK_RES_OWNER_UNKNOWN     O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL    5   // flush everything every 5 passes
#define DLM_THREAD_MS                  200 // flush at least every 200 ms

#define DLM_HASH_BITS     7
#define DLM_HASH_SIZE     (1 << DLM_HASH_BITS)
#define DLM_HASH_MASK     (DLM_HASH_SIZE - 1)

enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST,
	DLM_ASTUNLOCK
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME       "$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN   9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE  0x0001

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head received;
	struct list_head node_data;
	u8  new_master;
	u8  dead_node;
	u16 state;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED,
	DLM_CTXT_IN_SHUTDOWN,
	DLM_CTXT_LEAVING,
};

struct dlm_ctxt
{
	struct list_head list;
	struct list_head *resources;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	char *name;
	u8 node_num;
	u32 key;
	u8  joining_node;
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct list_head master_list;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t local_resources;
	atomic_t remote_resources;
	atomic_t unknown_resources;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;
};

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(void *data);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};


struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
	} u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;  /* must have already done a dlm_grab on this! */
}
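
/* Sketch of typical use (illustrative; assumes the caller already holds
 * a domain reference via dlm_grab(), and the allocation flags and work
 * function are hypothetical):
 *
 *	struct dlm_work_item *item = kmalloc(sizeof(*item), GFP_NOFS);
 *	if (item) {
 *		dlm_init_work_item(dlm, item, my_work_func, NULL);
 *		item->u.ral.reco_master = reco_master;
 *		item->u.ral.dead_node = dead_node;
 *		spin_lock(&dlm->work_lock);
 *		list_add_tail(&item->list, &dlm->work_list);
 *		spin_unlock(&dlm->work_lock);
 *		schedule_work(&dlm->dispatched_work);
 *	}
 */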


static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED             0x00000001
#define DLM_LOCK_RES_RECOVERING           0x00000002
#define DLM_LOCK_RES_READY                0x00000004
#define DLM_LOCK_RES_DIRTY                0x00000008
#define DLM_LOCK_RES_IN_PROGRESS          0x00000010
#define DLM_LOCK_RES_MIGRATING            0x00000020

#define DLM_PURGE_INTERVAL_MS   (8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct list_head list;
	struct kref refs;

	/* please keep these next 3 in this order
	 * some funcs want to iterate over all lists */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;

	struct list_head dirty;
	struct list_head recovering; // dlm_recovery_ctxt.resources list

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	struct list_head purge;
	unsigned long    last_used;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8  owner;              // node which owns the lock resource, or unknown
	u16 state;
	struct qstr lockname;
	char lvb[DLM_LVB_LEN];
};

struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;  // 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;
	s8 convert_type;
	s8 highest_blocked;
	u8 node;
};  // 16 bytes

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};


#define DLM_LKSB_UNUSED1           0x01
#define DLM_LKSB_PUT_LVB           0x02
#define DLM_LKSB_GET_LVB           0x04
#define DLM_LKSB_UNUSED2           0x08
#define DLM_LKSB_UNUSED3           0x10
#define DLM_LKSB_UNUSED4           0x20
#define DLM_LKSB_UNUSED5           0x40
#define DLM_LKSB_UNUSED6           0x80


enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST,
	DLM_BLOCKED_LIST
};

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;
	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;
	else
		BUG();
	return ret;
}




struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};


enum {
	DLM_MASTER_REQUEST_MSG    = 500,
	DLM_UNUSED_MSG1,          /* 501 */
	DLM_ASSERT_MASTER_MSG,    /* 502 */
	DLM_CREATE_LOCK_MSG,      /* 503 */
	DLM_CONVERT_LOCK_MSG,     /* 504 */
	DLM_PROXY_AST_MSG,        /* 505 */
	DLM_UNLOCK_LOCK_MSG,      /* 506 */
	DLM_UNUSED_MSG2,          /* 507 */
	DLM_MIGRATE_REQUEST_MSG,  /* 508 */
	DLM_MIG_LOCKRES_MSG,      /* 509 */
	DLM_QUERY_JOIN_MSG,       /* 510 */
	DLM_ASSERT_JOINED_MSG,    /* 511 */
	DLM_CANCEL_JOIN_MSG,      /* 512 */
	DLM_EXIT_DOMAIN_MSG,      /* 513 */
	DLM_MASTER_REQUERY_MSG,   /* 514 */
	DLM_LOCK_REQUEST_MSG,     /* 515 */
	DLM_RECO_DATA_DONE_MSG,   /* 516 */
	DLM_BEGIN_RECO_MSG,       /* 517 */
	DLM_FINALIZE_RECO_MSG     /* 518 */
};

struct dlm_reco_node_data
{
	int state;
	u8 node_num;
	struct list_head list;
};

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING,
	DLM_RECO_NODE_DATA_REQUESTED,
	DLM_RECO_NODE_DATA_RECEIVING,
	DLM_RECO_NODE_DATA_DONE,
	DLM_RECO_NODE_DATA_FINALIZE_SENT,
};


enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES,
	DLM_MASTER_RESP_MAYBE,
	DLM_MASTER_RESP_ERROR
};


struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_MASTER_MLE_CLEANUP      0x00000001
#define DLM_ASSERT_MASTER_REQUERY          0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION 0x00000004
struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY   0x01
#define DLM_MRES_MIGRATION  0x02
#define DLM_MRES_ALL_DONE   0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS   240

struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;       // locks sent in this structure
	u8 flags;
	__be32 total_locks; // locks to be sent for this migration cookie
	__be64 mig_cookie;  // cookie for this lockres migration
			    // or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN  \
	(sizeof(struct dlm_migratable_lockres) + \
	 (sizeof(struct dlm_migratable_lock) * \
	  DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED   (NET_MAX_PAYLOAD_BYTES - \
				    DLM_MIG_LOCKRES_MAX_LEN)
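
/* The arithmetic above could also be pinned down at compile time; a
 * sketch (assuming BUILD_BUG_ON from <linux/kernel.h>):
 *
 *	BUILD_BUG_ON(DLM_MIG_LOCKRES_MAX_LEN > NET_MAX_PAYLOAD_BYTES);
 *
 * i.e. 112 + 240 * 16 = 3952 bytes, leaving the 128 reserved bytes
 * inside the ~4080-byte payload.
 */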
479 | |||
480 | struct dlm_create_lock | ||
481 | { | ||
482 | __be64 cookie; | ||
483 | |||
484 | __be32 flags; | ||
485 | u8 pad1; | ||
486 | u8 node_idx; | ||
487 | s8 requested_type; | ||
488 | u8 namelen; | ||
489 | |||
490 | u8 name[O2NM_MAX_NAME_LEN]; | ||
491 | }; | ||
492 | |||
493 | struct dlm_convert_lock | ||
494 | { | ||
495 | __be64 cookie; | ||
496 | |||
497 | __be32 flags; | ||
498 | u8 pad1; | ||
499 | u8 node_idx; | ||
500 | s8 requested_type; | ||
501 | u8 namelen; | ||
502 | |||
503 | u8 name[O2NM_MAX_NAME_LEN]; | ||
504 | |||
505 | s8 lvb[0]; | ||
506 | }; | ||
507 | #define DLM_CONVERT_LOCK_MAX_LEN (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN) | ||
508 | |||
509 | struct dlm_unlock_lock | ||
510 | { | ||
511 | __be64 cookie; | ||
512 | |||
513 | __be32 flags; | ||
514 | __be16 pad1; | ||
515 | u8 node_idx; | ||
516 | u8 namelen; | ||
517 | |||
518 | u8 name[O2NM_MAX_NAME_LEN]; | ||
519 | |||
520 | s8 lvb[0]; | ||
521 | }; | ||
522 | #define DLM_UNLOCK_LOCK_MAX_LEN (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN) | ||
523 | |||
524 | struct dlm_proxy_ast | ||
525 | { | ||
526 | __be64 cookie; | ||
527 | |||
528 | __be32 flags; | ||
529 | u8 node_idx; | ||
530 | u8 type; | ||
531 | u8 blocked_type; | ||
532 | u8 namelen; | ||
533 | |||
534 | u8 name[O2NM_MAX_NAME_LEN]; | ||
535 | |||
536 | s8 lvb[0]; | ||
537 | }; | ||
538 | #define DLM_PROXY_AST_MAX_LEN (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN) | ||
539 | |||
540 | #define DLM_MOD_KEY (0x666c6172) | ||
541 | enum dlm_query_join_response { | ||
542 | JOIN_DISALLOW = 0, | ||
543 | JOIN_OK, | ||
544 | JOIN_OK_NO_MAP, | ||
545 | }; | ||
546 | |||
547 | struct dlm_lock_request | ||
548 | { | ||
549 | u8 node_idx; | ||
550 | u8 dead_node; | ||
551 | __be16 pad1; | ||
552 | __be32 pad2; | ||
553 | }; | ||
554 | |||
555 | struct dlm_reco_data_done | ||
556 | { | ||
557 | u8 node_idx; | ||
558 | u8 dead_node; | ||
559 | __be16 pad1; | ||
560 | __be32 pad2; | ||
561 | |||
562 | /* unused for now */ | ||
563 | /* eventually we can use this to attempt | ||
564 | * lvb recovery based on each node's info */ | ||
565 | u8 reco_lvb[DLM_LVB_LEN]; | ||
566 | }; | ||
567 | |||
568 | struct dlm_begin_reco | ||
569 | { | ||
570 | u8 node_idx; | ||
571 | u8 dead_node; | ||
572 | __be16 pad1; | ||
573 | __be32 pad2; | ||
574 | }; | ||
575 | |||
576 | |||
577 | struct dlm_query_join_request | ||
578 | { | ||
579 | u8 node_idx; | ||
580 | u8 pad1[2]; | ||
581 | u8 name_len; | ||
582 | u8 domain[O2NM_MAX_NAME_LEN]; | ||
583 | }; | ||
584 | |||
585 | struct dlm_assert_joined | ||
586 | { | ||
587 | u8 node_idx; | ||
588 | u8 pad1[2]; | ||
589 | u8 name_len; | ||
590 | u8 domain[O2NM_MAX_NAME_LEN]; | ||
591 | }; | ||
592 | |||
593 | struct dlm_cancel_join | ||
594 | { | ||
595 | u8 node_idx; | ||
596 | u8 pad1[2]; | ||
597 | u8 name_len; | ||
598 | u8 domain[O2NM_MAX_NAME_LEN]; | ||
599 | }; | ||
600 | |||
601 | struct dlm_exit_domain | ||
602 | { | ||
603 | u8 node_idx; | ||
604 | u8 pad1[3]; | ||
605 | }; | ||
606 | |||
607 | struct dlm_finalize_reco | ||
608 | { | ||
609 | u8 node_idx; | ||
610 | u8 dead_node; | ||
611 | __be16 pad1; | ||
612 | __be32 pad2; | ||
613 | }; | ||
614 | |||
615 | static inline enum dlm_status | ||
616 | __dlm_lockres_state_to_status(struct dlm_lock_resource *res) | ||
617 | { | ||
618 | enum dlm_status status = DLM_NORMAL; | ||
619 | |||
620 | assert_spin_locked(&res->spinlock); | ||
621 | |||
622 | if (res->state & DLM_LOCK_RES_RECOVERING) | ||
623 | status = DLM_RECOVERING; | ||
624 | else if (res->state & DLM_LOCK_RES_MIGRATING) | ||
625 | status = DLM_MIGRATING; | ||
626 | else if (res->state & DLM_LOCK_RES_IN_PROGRESS) | ||
627 | status = DLM_FORWARD; | ||
628 | |||
629 | return status; | ||
630 | } | ||
631 | |||
632 | struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie, | ||
633 | struct dlm_lockstatus *lksb); | ||
634 | void dlm_lock_get(struct dlm_lock *lock); | ||
635 | void dlm_lock_put(struct dlm_lock *lock); | ||
636 | |||
637 | void dlm_lock_attach_lockres(struct dlm_lock *lock, | ||
638 | struct dlm_lock_resource *res); | ||
639 | |||
640 | int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data); | ||
641 | int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data); | ||
642 | int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data); | ||
643 | |||
644 | void dlm_revert_pending_convert(struct dlm_lock_resource *res, | ||
645 | struct dlm_lock *lock); | ||
646 | void dlm_revert_pending_lock(struct dlm_lock_resource *res, | ||
647 | struct dlm_lock *lock); | ||
648 | |||
649 | int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data); | ||
650 | void dlm_commit_pending_cancel(struct dlm_lock_resource *res, | ||
651 | struct dlm_lock *lock); | ||
652 | void dlm_commit_pending_unlock(struct dlm_lock_resource *res, | ||
653 | struct dlm_lock *lock); | ||
654 | |||
655 | int dlm_launch_thread(struct dlm_ctxt *dlm); | ||
656 | void dlm_complete_thread(struct dlm_ctxt *dlm); | ||
657 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); | ||
658 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | ||
659 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm); | ||
660 | |||
661 | void dlm_put(struct dlm_ctxt *dlm); | ||
662 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); | ||
663 | int dlm_domain_fully_joined(struct dlm_ctxt *dlm); | ||
664 | |||
665 | void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
666 | struct dlm_lock_resource *res); | ||
667 | void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
668 | struct dlm_lock_resource *res); | ||
669 | void dlm_purge_lockres(struct dlm_ctxt *dlm, | ||
670 | struct dlm_lock_resource *lockres); | ||
671 | void dlm_lockres_get(struct dlm_lock_resource *res); | ||
672 | void dlm_lockres_put(struct dlm_lock_resource *res); | ||
673 | void __dlm_unhash_lockres(struct dlm_lock_resource *res); | ||
674 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | ||
675 | struct dlm_lock_resource *res); | ||
676 | struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
677 | const char *name, | ||
678 | unsigned int len); | ||
679 | struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
680 | const char *name, | ||
681 | unsigned int len); | ||
682 | |||
683 | int dlm_is_host_down(int errno); | ||
684 | void dlm_change_lockres_owner(struct dlm_ctxt *dlm, | ||
685 | struct dlm_lock_resource *res, | ||
686 | u8 owner); | ||
687 | struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, | ||
688 | const char *lockid, | ||
689 | int flags); | ||
690 | struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | ||
691 | const char *name, | ||
692 | unsigned int namelen); | ||
693 | |||
694 | void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
695 | void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
696 | void dlm_do_local_ast(struct dlm_ctxt *dlm, | ||
697 | struct dlm_lock_resource *res, | ||
698 | struct dlm_lock *lock); | ||
699 | int dlm_do_remote_ast(struct dlm_ctxt *dlm, | ||
700 | struct dlm_lock_resource *res, | ||
701 | struct dlm_lock *lock); | ||
702 | void dlm_do_local_bast(struct dlm_ctxt *dlm, | ||
703 | struct dlm_lock_resource *res, | ||
704 | struct dlm_lock *lock, | ||
705 | int blocked_type); | ||
706 | int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, | ||
707 | struct dlm_lock_resource *res, | ||
708 | struct dlm_lock *lock, | ||
709 | int msg_type, | ||
710 | int blocked_type, int flags); | ||
711 | static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm, | ||
712 | struct dlm_lock_resource *res, | ||
713 | struct dlm_lock *lock, | ||
714 | int blocked_type) | ||
715 | { | ||
716 | return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST, | ||
717 | blocked_type, 0); | ||
718 | } | ||
719 | |||
720 | static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm, | ||
721 | struct dlm_lock_resource *res, | ||
722 | struct dlm_lock *lock, | ||
723 | int flags) | ||
724 | { | ||
725 | return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST, | ||
726 | 0, flags); | ||
727 | } | ||
728 | |||
729 | void dlm_print_one_lock_resource(struct dlm_lock_resource *res); | ||
730 | void __dlm_print_one_lock_resource(struct dlm_lock_resource *res); | ||
731 | |||
732 | u8 dlm_nm_this_node(struct dlm_ctxt *dlm); | ||
733 | void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); | ||
734 | void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); | ||
735 | |||
736 | |||
737 | int dlm_nm_init(struct dlm_ctxt *dlm); | ||
738 | int dlm_heartbeat_init(struct dlm_ctxt *dlm); | ||
739 | void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data); | ||
740 | void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data); | ||
741 | |||
742 | int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); | ||
743 | int dlm_migrate_lockres(struct dlm_ctxt *dlm, | ||
744 | struct dlm_lock_resource *res, | ||
745 | u8 target); | ||
746 | int dlm_finish_migration(struct dlm_ctxt *dlm, | ||
747 | struct dlm_lock_resource *res, | ||
748 | u8 old_master); | ||
749 | void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | ||
750 | struct dlm_lock_resource *res); | ||
751 | void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res); | ||
752 | |||
753 | int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data); | ||
754 | int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data); | ||
755 | int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data); | ||
756 | int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data); | ||
757 | int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data); | ||
758 | int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data); | ||
759 | int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data); | ||
760 | int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data); | ||
761 | int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data); | ||
762 | |||
763 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, | ||
764 | struct dlm_lock_resource *res, | ||
765 | int ignore_higher, | ||
766 | u8 request_from, | ||
767 | u32 flags); | ||
768 | |||
769 | |||
770 | int dlm_send_one_lockres(struct dlm_ctxt *dlm, | ||
771 | struct dlm_lock_resource *res, | ||
772 | struct dlm_migratable_lockres *mres, | ||
773 | u8 send_to, | ||
774 | u8 flags); | ||
775 | void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, | ||
776 | struct dlm_lock_resource *res); | ||
777 | |||
778 | /* will exit holding res->spinlock, but may drop in function */ | ||
779 | void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags); | ||
780 | void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags); | ||
781 | |||
782 | /* will exit holding res->spinlock, but may drop in function */ | ||
783 | static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res) | ||
784 | { | ||
785 | __dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS| | ||
786 | DLM_LOCK_RES_RECOVERING| | ||
787 | DLM_LOCK_RES_MIGRATING)); | ||
788 | } | ||
789 | |||
790 | |||
791 | int dlm_init_mle_cache(void); | ||
792 | void dlm_destroy_mle_cache(void); | ||
793 | void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up); | ||
794 | void dlm_clean_master_list(struct dlm_ctxt *dlm, | ||
795 | u8 dead_node); | ||
796 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock); | ||
797 | |||
798 | |||
799 | static inline const char * dlm_lock_mode_name(int mode) | ||
800 | { | ||
801 | switch (mode) { | ||
802 | case LKM_EXMODE: | ||
803 | return "EX"; | ||
804 | case LKM_PRMODE: | ||
805 | return "PR"; | ||
806 | case LKM_NLMODE: | ||
807 | return "NL"; | ||
808 | } | ||
809 | return "UNKNOWN"; | ||
810 | } | ||
811 | |||
812 | |||
813 | static inline int dlm_lock_compatible(int existing, int request) | ||
814 | { | ||
815 | /* NO_LOCK compatible with all */ | ||
816 | if (request == LKM_NLMODE || | ||
817 | existing == LKM_NLMODE) | ||
818 | return 1; | ||
819 | |||
820 | /* EX incompatible with all non-NO_LOCK */ | ||
821 | if (request == LKM_EXMODE) | ||
822 | return 0; | ||
823 | |||
824 | /* request must be PR, which is compatible with PR */ | ||
825 | if (existing == LKM_PRMODE) | ||
826 | return 1; | ||
827 | |||
828 | return 0; | ||
829 | } | ||
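
The three checks above encode the classic NL/PR/EX compatibility matrix. A minimal standalone sketch follows; the numeric mode values and the names NLMODE/PRMODE/EXMODE are illustrative assumptions, not copied from dlmapi.h.

    #include <assert.h>

    enum { NLMODE = 0, PRMODE = 3, EXMODE = 5 };    /* assumed ordering: NL < PR < EX */

    static int compatible(int existing, int request)
    {
            if (request == NLMODE || existing == NLMODE)
                    return 1;                   /* NL pairs with anything */
            if (request == EXMODE)
                    return 0;                   /* EX pairs only with NL */
            return existing == PRMODE;          /* PR pairs with PR (and NL) */
    }

    int main(void)
    {
            assert(compatible(PRMODE, PRMODE));     /* shared readers coexist */
            assert(!compatible(PRMODE, EXMODE));    /* a writer excludes readers */
            assert(!compatible(EXMODE, PRMODE));
            assert(compatible(EXMODE, NLMODE));     /* NL is always grantable */
            return 0;
    }
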
830 | |||
831 | static inline int dlm_lock_on_list(struct list_head *head, | ||
832 | struct dlm_lock *lock) | ||
833 | { | ||
834 | struct list_head *iter; | ||
835 | struct dlm_lock *tmplock; | ||
836 | |||
837 | list_for_each(iter, head) { | ||
838 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
839 | if (tmplock == lock) | ||
840 | return 1; | ||
841 | } | ||
842 | return 0; | ||
843 | } | ||
844 | |||
845 | |||
846 | static inline enum dlm_status dlm_err_to_dlm_status(int err) | ||
847 | { | ||
848 | enum dlm_status ret; | ||
849 | if (err == -ENOMEM) | ||
850 | ret = DLM_SYSERR; | ||
851 | else if (err == -ETIMEDOUT || o2net_link_down(err, NULL)) | ||
852 | ret = DLM_NOLOCKMGR; | ||
853 | else if (err == -EINVAL) | ||
854 | ret = DLM_BADPARAM; | ||
855 | else if (err == -ENAMETOOLONG) | ||
856 | ret = DLM_IVBUFLEN; | ||
857 | else | ||
858 | ret = DLM_BADARGS; | ||
859 | return ret; | ||
860 | } | ||
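
For reference, a hypothetical call site; the mapping is deliberately lossy, and any errno it does not recognize collapses to DLM_BADARGS.

    enum dlm_status st;

    st = dlm_err_to_dlm_status(-ENOMEM);    /* DLM_SYSERR */
    st = dlm_err_to_dlm_status(-EINVAL);    /* DLM_BADPARAM */
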
861 | |||
862 | |||
863 | static inline void dlm_node_iter_init(unsigned long *map, | ||
864 | struct dlm_node_iter *iter) | ||
865 | { | ||
866 | memcpy(iter->node_map, map, sizeof(iter->node_map)); | ||
867 | iter->curnode = -1; | ||
868 | } | ||
869 | |||
870 | static inline int dlm_node_iter_next(struct dlm_node_iter *iter) | ||
871 | { | ||
872 | int bit; | ||
873 | bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1); | ||
874 | if (bit >= O2NM_MAX_NODES) { | ||
875 | iter->curnode = O2NM_MAX_NODES; | ||
876 | return -ENOENT; | ||
877 | } | ||
878 | iter->curnode = bit; | ||
879 | return bit; | ||
880 | } | ||
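
Together these two helpers implement snapshot-based iteration over a node bitmap. A usage sketch with a hypothetical caller: because dlm_node_iter_init() copies the map, the walk stays stable even if the live map changes underneath.

    struct dlm_node_iter iter;
    int node;

    spin_lock(&dlm->spinlock);
    dlm_node_iter_init(dlm->domain_map, &iter);     /* snapshot the map */
    spin_unlock(&dlm->spinlock);

    while ((node = dlm_node_iter_next(&iter)) >= 0) {
            /* visits each set bit in ascending order */
            mlog(0, "visiting node %d\n", node);
    }
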
881 | |||
882 | |||
883 | |||
884 | #endif /* DLMCOMMON_H */ | ||
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c new file mode 100644 index 000000000000..6001b22a997d --- /dev/null +++ b/fs/ocfs2/dlm/dlmconvert.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmconvert.c | ||
5 | * | ||
6 | * underlying calls for lock conversion | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | |||
42 | |||
43 | #include "cluster/heartbeat.h" | ||
44 | #include "cluster/nodemanager.h" | ||
45 | #include "cluster/tcp.h" | ||
46 | |||
47 | #include "dlmapi.h" | ||
48 | #include "dlmcommon.h" | ||
49 | |||
50 | #include "dlmconvert.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX ML_DLM | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | /* NOTE: __dlmconvert_master is the only function in here that | ||
56 | * needs a spinlock held on entry (res->spinlock) and it is the | ||
57 | * only one that holds a lock on exit (res->spinlock). | ||
58 | * All other functions in here need no locks and drop all of | ||
59 | * the locks that they acquire. */ | ||
60 | static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | ||
61 | struct dlm_lock_resource *res, | ||
62 | struct dlm_lock *lock, int flags, | ||
63 | int type, int *call_ast, | ||
64 | int *kick_thread); | ||
65 | static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, | ||
66 | struct dlm_lock_resource *res, | ||
67 | struct dlm_lock *lock, int flags, int type); | ||
68 | |||
69 | /* | ||
70 | * this is only called directly by dlmlock(), and only when the | ||
71 | * local node is the owner of the lockres | ||
72 | * locking: | ||
73 | * caller needs: none | ||
74 | * taken: takes and drops res->spinlock | ||
75 | * held on exit: none | ||
76 | * returns: see __dlmconvert_master | ||
77 | */ | ||
78 | enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, | ||
79 | struct dlm_lock_resource *res, | ||
80 | struct dlm_lock *lock, int flags, int type) | ||
81 | { | ||
82 | int call_ast = 0, kick_thread = 0; | ||
83 | enum dlm_status status; | ||
84 | |||
85 | spin_lock(&res->spinlock); | ||
86 | /* we are not in a network handler, this is fine */ | ||
87 | __dlm_wait_on_lockres(res); | ||
88 | __dlm_lockres_reserve_ast(res); | ||
89 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
90 | |||
91 | status = __dlmconvert_master(dlm, res, lock, flags, type, | ||
92 | &call_ast, &kick_thread); | ||
93 | |||
94 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
95 | spin_unlock(&res->spinlock); | ||
96 | wake_up(&res->wq); | ||
97 | if (status != DLM_NORMAL && status != DLM_NOTQUEUED) | ||
98 | dlm_error(status); | ||
99 | |||
100 | /* either queue the ast or release it */ | ||
101 | if (call_ast) | ||
102 | dlm_queue_ast(dlm, lock); | ||
103 | else | ||
104 | dlm_lockres_release_ast(dlm, res); | ||
105 | |||
106 | if (kick_thread) | ||
107 | dlm_kick_thread(dlm, res); | ||
108 | |||
109 | return status; | ||
110 | } | ||
111 | |||
112 | /* performs lock conversion at the lockres master site | ||
113 | * locking: | ||
114 | * caller needs: res->spinlock | ||
115 | * taken: takes and drops lock->spinlock | ||
116 | * held on exit: res->spinlock | ||
117 | * returns: DLM_NORMAL, DLM_NOTQUEUED, DLM_DENIED | ||
118 | * call_ast: whether ast should be called for this lock | ||
119 | * kick_thread: whether dlm_kick_thread should be called | ||
120 | */ | ||
121 | static enum dlm_status __dlmconvert_master(struct dlm_ctxt *dlm, | ||
122 | struct dlm_lock_resource *res, | ||
123 | struct dlm_lock *lock, int flags, | ||
124 | int type, int *call_ast, | ||
125 | int *kick_thread) | ||
126 | { | ||
127 | enum dlm_status status = DLM_NORMAL; | ||
128 | struct list_head *iter; | ||
129 | struct dlm_lock *tmplock=NULL; | ||
130 | |||
131 | assert_spin_locked(&res->spinlock); | ||
132 | |||
133 | mlog_entry("type=%d, convert_type=%d, new convert_type=%d\n", | ||
134 | lock->ml.type, lock->ml.convert_type, type); | ||
135 | |||
136 | spin_lock(&lock->spinlock); | ||
137 | |||
138 | /* already converting? */ | ||
139 | if (lock->ml.convert_type != LKM_IVMODE) { | ||
140 | mlog(ML_ERROR, "attempted to convert a lock with a lock " | ||
141 | "conversion pending\n"); | ||
142 | status = DLM_DENIED; | ||
143 | goto unlock_exit; | ||
144 | } | ||
145 | |||
146 | /* must be on grant queue to convert */ | ||
147 | if (!dlm_lock_on_list(&res->granted, lock)) { | ||
148 | mlog(ML_ERROR, "attempted to convert a lock not on grant " | ||
149 | "queue\n"); | ||
150 | status = DLM_DENIED; | ||
151 | goto unlock_exit; | ||
152 | } | ||
153 | |||
154 | if (flags & LKM_VALBLK) { | ||
155 | switch (lock->ml.type) { | ||
156 | case LKM_EXMODE: | ||
157 | /* EX + LKM_VALBLK + convert == set lvb */ | ||
158 | mlog(0, "will set lvb: converting %s->%s\n", | ||
159 | dlm_lock_mode_name(lock->ml.type), | ||
160 | dlm_lock_mode_name(type)); | ||
161 | lock->lksb->flags |= DLM_LKSB_PUT_LVB; | ||
162 | break; | ||
163 | case LKM_PRMODE: | ||
164 | case LKM_NLMODE: | ||
165 | /* refetch if new level is not NL */ | ||
166 | if (type > LKM_NLMODE) { | ||
167 | mlog(0, "will fetch new value into " | ||
168 | "lvb: converting %s->%s\n", | ||
169 | dlm_lock_mode_name(lock->ml.type), | ||
170 | dlm_lock_mode_name(type)); | ||
171 | lock->lksb->flags |= DLM_LKSB_GET_LVB; | ||
172 | } else { | ||
173 | mlog(0, "will NOT fetch new value " | ||
174 | "into lvb: converting %s->%s\n", | ||
175 | dlm_lock_mode_name(lock->ml.type), | ||
176 | dlm_lock_mode_name(type)); | ||
177 | flags &= ~(LKM_VALBLK); | ||
178 | } | ||
179 | break; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | |||
184 | /* in-place downconvert? */ | ||
185 | if (type <= lock->ml.type) | ||
186 | goto grant; | ||
187 | |||
188 | /* upconvert from here on */ | ||
189 | status = DLM_NORMAL; | ||
190 | list_for_each(iter, &res->granted) { | ||
191 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
192 | if (tmplock == lock) | ||
193 | continue; | ||
194 | if (!dlm_lock_compatible(tmplock->ml.type, type)) | ||
195 | goto switch_queues; | ||
196 | } | ||
197 | |||
198 | list_for_each(iter, &res->converting) { | ||
199 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
200 | if (!dlm_lock_compatible(tmplock->ml.type, type)) | ||
201 | goto switch_queues; | ||
202 | /* existing conversion requests take precedence */ | ||
203 | if (!dlm_lock_compatible(tmplock->ml.convert_type, type)) | ||
204 | goto switch_queues; | ||
205 | } | ||
206 | |||
207 | /* fall thru to grant */ | ||
208 | |||
209 | grant: | ||
210 | mlog(0, "res %.*s, granting %s lock\n", res->lockname.len, | ||
211 | res->lockname.name, dlm_lock_mode_name(type)); | ||
212 | /* immediately grant the new lock type */ | ||
213 | lock->lksb->status = DLM_NORMAL; | ||
214 | if (lock->ml.node == dlm->node_num) | ||
215 | mlog(0, "doing in-place convert for nonlocal lock\n"); | ||
216 | lock->ml.type = type; | ||
217 | status = DLM_NORMAL; | ||
218 | *call_ast = 1; | ||
219 | goto unlock_exit; | ||
220 | |||
221 | switch_queues: | ||
222 | if (flags & LKM_NOQUEUE) { | ||
223 | mlog(0, "failed to convert NOQUEUE lock %.*s from " | ||
224 | "%d to %d...\n", res->lockname.len, res->lockname.name, | ||
225 | lock->ml.type, type); | ||
226 | status = DLM_NOTQUEUED; | ||
227 | goto unlock_exit; | ||
228 | } | ||
229 | mlog(0, "res %.*s, queueing...\n", res->lockname.len, | ||
230 | res->lockname.name); | ||
231 | |||
232 | lock->ml.convert_type = type; | ||
233 | /* do not alter lock refcount. switching lists. */ | ||
234 | list_del_init(&lock->list); | ||
235 | list_add_tail(&lock->list, &res->converting); | ||
236 | |||
237 | unlock_exit: | ||
238 | spin_unlock(&lock->spinlock); | ||
239 | if (status == DLM_DENIED) { | ||
240 | __dlm_print_one_lock_resource(res); | ||
241 | } | ||
242 | if (status == DLM_NORMAL) | ||
243 | *kick_thread = 1; | ||
244 | return status; | ||
245 | } | ||
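
One non-obvious part of the routine above is the LKM_VALBLK handling, which reduces to a three-way decision. A sketch in the style of this file; the enum and function names are illustrative, not the kernel's.

    enum lvb_action { LVB_NONE, LVB_PUT, LVB_GET };

    static enum lvb_action lvb_action_for(int cur_mode, int new_mode)
    {
            if (cur_mode == LKM_EXMODE)
                    return LVB_PUT;         /* EX holder writes its lvb out */
            if (new_mode > LKM_NLMODE)
                    return LVB_GET;         /* refetch on any grant above NL */
            return LVB_NONE;                /* dropping to NL: nothing to do */
    }
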
246 | |||
247 | void dlm_revert_pending_convert(struct dlm_lock_resource *res, | ||
248 | struct dlm_lock *lock) | ||
249 | { | ||
250 | /* do not alter lock refcount. switching lists. */ | ||
251 | list_del_init(&lock->list); | ||
252 | list_add_tail(&lock->list, &res->granted); | ||
253 | lock->ml.convert_type = LKM_IVMODE; | ||
254 | lock->lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); | ||
255 | } | ||
256 | |||
257 | /* messages the master site to do lock conversion | ||
258 | * locking: | ||
259 | * caller needs: none | ||
260 | * taken: takes and drops res->spinlock, uses DLM_LOCK_RES_IN_PROGRESS | ||
261 | * held on exit: none | ||
262 | * returns: DLM_NORMAL, DLM_RECOVERING, status from remote node | ||
263 | */ | ||
264 | enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, | ||
265 | struct dlm_lock_resource *res, | ||
266 | struct dlm_lock *lock, int flags, int type) | ||
267 | { | ||
268 | enum dlm_status status; | ||
269 | |||
270 | mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type, | ||
271 | lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS); | ||
272 | |||
273 | spin_lock(&res->spinlock); | ||
274 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
275 | mlog(0, "bailing out early since res is RECOVERING " | ||
276 | "on secondary queue\n"); | ||
277 | /* __dlm_print_one_lock_resource(res); */ | ||
278 | status = DLM_RECOVERING; | ||
279 | goto bail; | ||
280 | } | ||
281 | /* will exit this call with spinlock held */ | ||
282 | __dlm_wait_on_lockres(res); | ||
283 | |||
284 | if (lock->ml.convert_type != LKM_IVMODE) { | ||
285 | __dlm_print_one_lock_resource(res); | ||
286 | mlog(ML_ERROR, "converting a remote lock that is already " | ||
287 | "converting! (cookie=%"MLFu64", conv=%d)\n", | ||
288 | lock->ml.cookie, lock->ml.convert_type); | ||
289 | status = DLM_DENIED; | ||
290 | goto bail; | ||
291 | } | ||
292 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
293 | /* move lock to local convert queue */ | ||
294 | /* do not alter lock refcount. switching lists. */ | ||
295 | list_del_init(&lock->list); | ||
296 | list_add_tail(&lock->list, &res->converting); | ||
297 | lock->convert_pending = 1; | ||
298 | lock->ml.convert_type = type; | ||
299 | |||
300 | if (flags & LKM_VALBLK) { | ||
301 | if (lock->ml.type == LKM_EXMODE) { | ||
302 | flags |= LKM_PUT_LVB; | ||
303 | lock->lksb->flags |= DLM_LKSB_PUT_LVB; | ||
304 | } else { | ||
305 | if (lock->ml.convert_type == LKM_NLMODE) | ||
306 | flags &= ~LKM_VALBLK; | ||
307 | else { | ||
308 | flags |= LKM_GET_LVB; | ||
309 | lock->lksb->flags |= DLM_LKSB_GET_LVB; | ||
310 | } | ||
311 | } | ||
312 | } | ||
313 | spin_unlock(&res->spinlock); | ||
314 | |||
315 | /* no locks held here. | ||
316 | * need to wait for a reply as to whether it got queued or not. */ | ||
317 | status = dlm_send_remote_convert_request(dlm, res, lock, flags, type); | ||
318 | |||
319 | spin_lock(&res->spinlock); | ||
320 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
321 | lock->convert_pending = 0; | ||
322 | /* if it failed, move it back to granted queue */ | ||
323 | if (status != DLM_NORMAL) { | ||
324 | if (status != DLM_NOTQUEUED) | ||
325 | dlm_error(status); | ||
326 | dlm_revert_pending_convert(res, lock); | ||
327 | } | ||
328 | bail: | ||
329 | spin_unlock(&res->spinlock); | ||
330 | |||
331 | /* TODO: should this be a wake_one? */ | ||
332 | /* wake up any IN_PROGRESS waiters */ | ||
333 | wake_up(&res->wq); | ||
334 | |||
335 | return status; | ||
336 | } | ||
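
dlmconvert_remote() is an instance of optimistic queueing: the lock is moved to the converting list before the network round trip and moved back only on failure. A minimal sketch of the pattern; both function names below are hypothetical.

    extern enum dlm_status send_convert_somehow(struct dlm_lock *lock);  /* hypothetical */

    static enum dlm_status queue_then_send(struct dlm_lock_resource *res,
                                           struct dlm_lock *lock)
    {
            enum dlm_status status;

            list_move_tail(&lock->list, &res->converting);      /* optimistic move */
            status = send_convert_somehow(lock);                /* may fail */
            if (status != DLM_NORMAL)
                    list_move_tail(&lock->list, &res->granted); /* roll back */
            return status;
    }
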
337 | |||
338 | /* sends DLM_CONVERT_LOCK_MSG to master site | ||
339 | * locking: | ||
340 | * caller needs: none | ||
341 | * taken: none | ||
342 | * held on exit: none | ||
343 | * returns: DLM_NOLOCKMGR, status from remote node | ||
344 | */ | ||
345 | static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm, | ||
346 | struct dlm_lock_resource *res, | ||
347 | struct dlm_lock *lock, int flags, int type) | ||
348 | { | ||
349 | struct dlm_convert_lock convert; | ||
350 | int tmpret; | ||
351 | enum dlm_status ret; | ||
352 | int status = 0; | ||
353 | struct kvec vec[2]; | ||
354 | size_t veclen = 1; | ||
355 | |||
356 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
357 | |||
358 | memset(&convert, 0, sizeof(struct dlm_convert_lock)); | ||
359 | convert.node_idx = dlm->node_num; | ||
360 | convert.requested_type = type; | ||
361 | convert.cookie = lock->ml.cookie; | ||
362 | convert.namelen = res->lockname.len; | ||
363 | convert.flags = cpu_to_be32(flags); | ||
364 | memcpy(convert.name, res->lockname.name, convert.namelen); | ||
365 | |||
366 | vec[0].iov_len = sizeof(struct dlm_convert_lock); | ||
367 | vec[0].iov_base = &convert; | ||
368 | |||
369 | if (flags & LKM_PUT_LVB) { | ||
370 | /* extra data to send if we are updating lvb */ | ||
371 | vec[1].iov_len = DLM_LVB_LEN; | ||
372 | vec[1].iov_base = lock->lksb->lvb; | ||
373 | veclen++; | ||
374 | } | ||
375 | |||
376 | tmpret = o2net_send_message_vec(DLM_CONVERT_LOCK_MSG, dlm->key, | ||
377 | vec, veclen, res->owner, &status); | ||
378 | if (tmpret >= 0) { | ||
379 | /* successfully sent and received */ | ||
380 | ret = status; /* this is already a dlm_status */ | ||
381 | if (ret == DLM_RECOVERING) { | ||
382 | mlog(0, "node %u returned DLM_RECOVERING from convert " | ||
383 | "message!\n", res->owner); | ||
384 | } else if (ret == DLM_MIGRATING) { | ||
385 | mlog(0, "node %u returned DLM_MIGRATING from convert " | ||
386 | "message!\n", res->owner); | ||
387 | } else if (ret == DLM_FORWARD) { | ||
388 | mlog(0, "node %u returned DLM_FORWARD from convert " | ||
389 | "message!\n", res->owner); | ||
390 | } else if (ret != DLM_NORMAL && ret != DLM_NOTQUEUED) | ||
391 | dlm_error(ret); | ||
392 | } else { | ||
393 | mlog_errno(tmpret); | ||
394 | if (dlm_is_host_down(tmpret)) { | ||
395 | ret = DLM_RECOVERING; | ||
396 | mlog(0, "node %u died so returning DLM_RECOVERING " | ||
397 | "from convert message!\n", res->owner); | ||
398 | } else { | ||
399 | ret = dlm_err_to_dlm_status(tmpret); | ||
400 | } | ||
401 | } | ||
402 | |||
403 | return ret; | ||
404 | } | ||
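
The kvec assembly above is the usual idiom for a fixed header plus an optional trailing payload: the lvb bytes cross the wire only when LKM_PUT_LVB is set. A condensed sketch, where 'hdr' and 'lvb' are hypothetical locals:

    struct kvec vec[2];
    size_t veclen = 1;

    vec[0].iov_len  = sizeof(hdr);          /* fixed header, always sent */
    vec[0].iov_base = &hdr;
    if (flags & LKM_PUT_LVB) {
            vec[1].iov_len  = DLM_LVB_LEN;  /* optional trailing payload */
            vec[1].iov_base = lvb;
            veclen++;
    }
    /* then: o2net_send_message_vec(type, key, vec, veclen, target, &status) */
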
405 | |||
406 | /* handler for DLM_CONVERT_LOCK_MSG on master site | ||
407 | * locking: | ||
408 | * caller needs: none | ||
409 | * taken: takes and drops res->spinlock | ||
410 | * held on exit: none | ||
411 | * returns: DLM_NORMAL, DLM_IVLOCKID, DLM_BADARGS, | ||
412 | * status from __dlmconvert_master | ||
413 | */ | ||
414 | int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data) | ||
415 | { | ||
416 | struct dlm_ctxt *dlm = data; | ||
417 | struct dlm_convert_lock *cnv = (struct dlm_convert_lock *)msg->buf; | ||
418 | struct dlm_lock_resource *res = NULL; | ||
419 | struct list_head *iter; | ||
420 | struct dlm_lock *lock = NULL; | ||
421 | struct dlm_lockstatus *lksb; | ||
422 | enum dlm_status status = DLM_NORMAL; | ||
423 | u32 flags; | ||
424 | int call_ast = 0, kick_thread = 0, ast_reserved = 0; | ||
425 | |||
426 | if (!dlm_grab(dlm)) { | ||
427 | dlm_error(DLM_REJECTED); | ||
428 | return DLM_REJECTED; | ||
429 | } | ||
430 | |||
431 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
432 | "Domain %s not fully joined!\n", dlm->name); | ||
433 | |||
434 | if (cnv->namelen > DLM_LOCKID_NAME_MAX) { | ||
435 | status = DLM_IVBUFLEN; | ||
436 | dlm_error(status); | ||
437 | goto leave; | ||
438 | } | ||
439 | |||
440 | flags = be32_to_cpu(cnv->flags); | ||
441 | |||
442 | if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) == | ||
443 | (LKM_PUT_LVB|LKM_GET_LVB)) { | ||
444 | mlog(ML_ERROR, "both PUT and GET lvb specified\n"); | ||
445 | status = DLM_BADARGS; | ||
446 | goto leave; | ||
447 | } | ||
448 | |||
449 | mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : | ||
450 | (flags & LKM_GET_LVB ? "get lvb" : "none")); | ||
451 | |||
452 | status = DLM_IVLOCKID; | ||
453 | res = dlm_lookup_lockres(dlm, cnv->name, cnv->namelen); | ||
454 | if (!res) { | ||
455 | dlm_error(status); | ||
456 | goto leave; | ||
457 | } | ||
458 | |||
459 | spin_lock(&res->spinlock); | ||
460 | list_for_each(iter, &res->granted) { | ||
461 | lock = list_entry(iter, struct dlm_lock, list); | ||
462 | if (lock->ml.cookie == cnv->cookie && | ||
463 | lock->ml.node == cnv->node_idx) { | ||
464 | dlm_lock_get(lock); | ||
465 | break; | ||
466 | } | ||
467 | lock = NULL; | ||
468 | } | ||
469 | spin_unlock(&res->spinlock); | ||
470 | if (!lock) { | ||
471 | status = DLM_IVLOCKID; | ||
472 | dlm_error(status); | ||
473 | goto leave; | ||
474 | } | ||
475 | |||
476 | /* found the lock */ | ||
477 | lksb = lock->lksb; | ||
478 | |||
479 | /* see if caller needed to get/put lvb */ | ||
480 | if (flags & LKM_PUT_LVB) { | ||
481 | BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | ||
482 | lksb->flags |= DLM_LKSB_PUT_LVB; | ||
483 | memcpy(&lksb->lvb[0], &cnv->lvb[0], DLM_LVB_LEN); | ||
484 | } else if (flags & LKM_GET_LVB) { | ||
485 | BUG_ON(lksb->flags & (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | ||
486 | lksb->flags |= DLM_LKSB_GET_LVB; | ||
487 | } | ||
488 | |||
489 | spin_lock(&res->spinlock); | ||
490 | status = __dlm_lockres_state_to_status(res); | ||
491 | if (status == DLM_NORMAL) { | ||
492 | __dlm_lockres_reserve_ast(res); | ||
493 | ast_reserved = 1; | ||
494 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
494 | status = __dlmconvert_master(dlm, res, lock, flags, | ||
495 | cnv->requested_type, | ||
496 | &call_ast, &kick_thread); | ||
497 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
498 | } | ||
499 | spin_unlock(&res->spinlock); | ||
500 | |||
501 | if (status != DLM_NORMAL) { | ||
502 | if (status != DLM_NOTQUEUED) | ||
503 | dlm_error(status); | ||
504 | lksb->flags &= ~(DLM_LKSB_GET_LVB|DLM_LKSB_PUT_LVB); | ||
505 | } | ||
506 | |||
507 | leave: | ||
508 | if (!lock) | ||
509 | mlog(ML_ERROR, "did not find lock to convert on grant queue! " | ||
510 | "cookie=%"MLFu64"\n", | ||
511 | cnv->cookie); | ||
512 | else | ||
513 | dlm_lock_put(lock); | ||
514 | |||
515 | /* either queue the ast or release it, if one was reserved */ | ||
516 | if (call_ast) | ||
517 | dlm_queue_ast(dlm, lock); | ||
518 | else if (ast_reserved) | ||
519 | dlm_lockres_release_ast(dlm, res); | ||
520 | |||
521 | if (kick_thread) | ||
522 | dlm_kick_thread(dlm, res); | ||
523 | |||
524 | if (res) | ||
525 | dlm_lockres_put(res); | ||
526 | |||
527 | dlm_put(dlm); | ||
528 | |||
529 | return status; | ||
530 | } | ||
diff --git a/fs/ocfs2/dlm/dlmconvert.h b/fs/ocfs2/dlm/dlmconvert.h new file mode 100644 index 000000000000..b2e3677df878 --- /dev/null +++ b/fs/ocfs2/dlm/dlmconvert.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmconvert.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 021110-1307, USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMCONVERT_H | ||
26 | #define DLMCONVERT_H | ||
27 | |||
28 | enum dlm_status dlmconvert_master(struct dlm_ctxt *dlm, | ||
29 | struct dlm_lock_resource *res, | ||
30 | struct dlm_lock *lock, int flags, int type); | ||
31 | enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm, | ||
32 | struct dlm_lock_resource *res, | ||
33 | struct dlm_lock *lock, int flags, int type); | ||
34 | |||
35 | #endif | ||
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c new file mode 100644 index 000000000000..f339fe27975a --- /dev/null +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -0,0 +1,246 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdebug.c | ||
5 | * | ||
6 | * debug functionality for the dlm | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include <linux/highmem.h> | ||
30 | #include <linux/utsname.h> | ||
31 | #include <linux/sysctl.h> | ||
32 | #include <linux/spinlock.h> | ||
33 | |||
34 | #include "cluster/heartbeat.h" | ||
35 | #include "cluster/nodemanager.h" | ||
36 | #include "cluster/tcp.h" | ||
37 | |||
38 | #include "dlmapi.h" | ||
39 | #include "dlmcommon.h" | ||
40 | #include "dlmdebug.h" | ||
41 | |||
42 | #include "dlmdomain.h" | ||
43 | #include "dlmdebug.h" | ||
44 | |||
45 | #define MLOG_MASK_PREFIX ML_DLM | ||
46 | #include "cluster/masklog.h" | ||
47 | |||
48 | void dlm_print_one_lock_resource(struct dlm_lock_resource *res) | ||
49 | { | ||
50 | mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n", | ||
51 | res->lockname.len, res->lockname.name, | ||
52 | res->owner, res->state); | ||
53 | spin_lock(&res->spinlock); | ||
54 | __dlm_print_one_lock_resource(res); | ||
55 | spin_unlock(&res->spinlock); | ||
56 | } | ||
57 | |||
58 | static void dlm_print_lock_queue(const char *name, struct list_head *queue) | ||
59 | { | ||
60 | struct list_head *iter; | ||
61 | struct dlm_lock *lock; | ||
62 | |||
63 | mlog(ML_NOTICE, " %s queue: \n", name); | ||
64 | list_for_each(iter, queue) { | ||
65 | lock = list_entry(iter, struct dlm_lock, list); | ||
66 | spin_lock(&lock->spinlock); | ||
67 | mlog(ML_NOTICE, " type=%d, conv=%d, node=%u, " | ||
68 | "cookie=%"MLFu64", ast=(empty=%c,pend=%c), bast=(empty=%c,pend=%c)\n", | ||
69 | lock->ml.type, lock->ml.convert_type, lock->ml.node, lock->ml.cookie, | ||
70 | list_empty(&lock->ast_list) ? 'y' : 'n', | ||
71 | lock->ast_pending ? 'y' : 'n', | ||
72 | list_empty(&lock->bast_list) ? 'y' : 'n', | ||
73 | lock->bast_pending ? 'y' : 'n'); | ||
74 | spin_unlock(&lock->spinlock); | ||
75 | } | ||
76 | } | ||
77 | |||
78 | void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) | ||
79 | { | ||
80 | assert_spin_locked(&res->spinlock); | ||
81 | |||
82 | mlog(ML_NOTICE, "lockres: %.*s, owner=%u, state=%u\n", | ||
83 | res->lockname.len, res->lockname.name, | ||
84 | res->owner, res->state); | ||
85 | mlog(ML_NOTICE, " last used: %lu, on purge list: %s\n", | ||
86 | res->last_used, list_empty(&res->purge) ? "no" : "yes"); | ||
87 | dlm_print_lock_queue("granted", &res->granted); | ||
88 | dlm_print_lock_queue("converting", &res->converting); | ||
89 | dlm_print_lock_queue("blocked", &res->blocked); | ||
90 | } | ||
110 | |||
111 | void dlm_print_one_lock(struct dlm_lock *lockid) | ||
112 | { | ||
113 | dlm_print_one_lock_resource(lockid->lockres); | ||
114 | } | ||
115 | EXPORT_SYMBOL_GPL(dlm_print_one_lock); | ||
116 | |||
117 | void dlm_dump_lock_resources(struct dlm_ctxt *dlm) | ||
118 | { | ||
119 | struct dlm_lock_resource *res; | ||
120 | struct list_head *iter; | ||
121 | struct list_head *bucket; | ||
122 | int i; | ||
123 | |||
124 | mlog(ML_NOTICE, "struct dlm_ctxt: %s, node=%u, key=%u\n", | ||
125 | dlm->name, dlm->node_num, dlm->key); | ||
126 | if (!dlm || !dlm->name) { | ||
127 | mlog(ML_ERROR, "dlm=%p\n", dlm); | ||
128 | return; | ||
129 | } | ||
130 | |||
131 | spin_lock(&dlm->spinlock); | ||
132 | for (i=0; i<DLM_HASH_SIZE; i++) { | ||
133 | bucket = &(dlm->resources[i]); | ||
134 | list_for_each(iter, bucket) { | ||
135 | res = list_entry(iter, struct dlm_lock_resource, list); | ||
136 | dlm_print_one_lock_resource(res); | ||
137 | } | ||
138 | } | ||
139 | spin_unlock(&dlm->spinlock); | ||
140 | } | ||
141 | |||
142 | static const char *dlm_errnames[] = { | ||
143 | [DLM_NORMAL] = "DLM_NORMAL", | ||
144 | [DLM_GRANTED] = "DLM_GRANTED", | ||
145 | [DLM_DENIED] = "DLM_DENIED", | ||
146 | [DLM_DENIED_NOLOCKS] = "DLM_DENIED_NOLOCKS", | ||
147 | [DLM_WORKING] = "DLM_WORKING", | ||
148 | [DLM_BLOCKED] = "DLM_BLOCKED", | ||
149 | [DLM_BLOCKED_ORPHAN] = "DLM_BLOCKED_ORPHAN", | ||
150 | [DLM_DENIED_GRACE_PERIOD] = "DLM_DENIED_GRACE_PERIOD", | ||
151 | [DLM_SYSERR] = "DLM_SYSERR", | ||
152 | [DLM_NOSUPPORT] = "DLM_NOSUPPORT", | ||
153 | [DLM_CANCELGRANT] = "DLM_CANCELGRANT", | ||
154 | [DLM_IVLOCKID] = "DLM_IVLOCKID", | ||
155 | [DLM_SYNC] = "DLM_SYNC", | ||
156 | [DLM_BADTYPE] = "DLM_BADTYPE", | ||
157 | [DLM_BADRESOURCE] = "DLM_BADRESOURCE", | ||
158 | [DLM_MAXHANDLES] = "DLM_MAXHANDLES", | ||
159 | [DLM_NOCLINFO] = "DLM_NOCLINFO", | ||
160 | [DLM_NOLOCKMGR] = "DLM_NOLOCKMGR", | ||
161 | [DLM_NOPURGED] = "DLM_NOPURGED", | ||
162 | [DLM_BADARGS] = "DLM_BADARGS", | ||
163 | [DLM_VOID] = "DLM_VOID", | ||
164 | [DLM_NOTQUEUED] = "DLM_NOTQUEUED", | ||
165 | [DLM_IVBUFLEN] = "DLM_IVBUFLEN", | ||
166 | [DLM_CVTUNGRANT] = "DLM_CVTUNGRANT", | ||
167 | [DLM_BADPARAM] = "DLM_BADPARAM", | ||
168 | [DLM_VALNOTVALID] = "DLM_VALNOTVALID", | ||
169 | [DLM_REJECTED] = "DLM_REJECTED", | ||
170 | [DLM_ABORT] = "DLM_ABORT", | ||
171 | [DLM_CANCEL] = "DLM_CANCEL", | ||
172 | [DLM_IVRESHANDLE] = "DLM_IVRESHANDLE", | ||
173 | [DLM_DEADLOCK] = "DLM_DEADLOCK", | ||
174 | [DLM_DENIED_NOASTS] = "DLM_DENIED_NOASTS", | ||
175 | [DLM_FORWARD] = "DLM_FORWARD", | ||
176 | [DLM_TIMEOUT] = "DLM_TIMEOUT", | ||
177 | [DLM_IVGROUPID] = "DLM_IVGROUPID", | ||
178 | [DLM_VERS_CONFLICT] = "DLM_VERS_CONFLICT", | ||
179 | [DLM_BAD_DEVICE_PATH] = "DLM_BAD_DEVICE_PATH", | ||
180 | [DLM_NO_DEVICE_PERMISSION] = "DLM_NO_DEVICE_PERMISSION", | ||
181 | [DLM_NO_CONTROL_DEVICE] = "DLM_NO_CONTROL_DEVICE", | ||
182 | [DLM_RECOVERING] = "DLM_RECOVERING", | ||
183 | [DLM_MIGRATING] = "DLM_MIGRATING", | ||
184 | [DLM_MAXSTATS] = "DLM_MAXSTATS", | ||
185 | }; | ||
186 | |||
187 | static const char *dlm_errmsgs[] = { | ||
188 | [DLM_NORMAL] = "request in progress", | ||
189 | [DLM_GRANTED] = "request granted", | ||
190 | [DLM_DENIED] = "request denied", | ||
191 | [DLM_DENIED_NOLOCKS] = "request denied, out of system resources", | ||
192 | [DLM_WORKING] = "async request in progress", | ||
193 | [DLM_BLOCKED] = "lock request blocked", | ||
194 | [DLM_BLOCKED_ORPHAN] = "lock request blocked by an orphan lock", | ||
195 | [DLM_DENIED_GRACE_PERIOD] = "topological change in progress", | ||
196 | [DLM_SYSERR] = "system error", | ||
197 | [DLM_NOSUPPORT] = "unsupported", | ||
198 | [DLM_CANCELGRANT] = "can't cancel convert: already granted", | ||
199 | [DLM_IVLOCKID] = "bad lockid", | ||
200 | [DLM_SYNC] = "synchronous request granted", | ||
201 | [DLM_BADTYPE] = "bad resource type", | ||
202 | [DLM_BADRESOURCE] = "bad resource handle", | ||
203 | [DLM_MAXHANDLES] = "no more resource handles", | ||
204 | [DLM_NOCLINFO] = "can't contact cluster manager", | ||
205 | [DLM_NOLOCKMGR] = "can't contact lock manager", | ||
206 | [DLM_NOPURGED] = "can't contact purge daemon", | ||
207 | [DLM_BADARGS] = "bad api args", | ||
208 | [DLM_VOID] = "no status", | ||
209 | [DLM_NOTQUEUED] = "NOQUEUE was specified and request failed", | ||
210 | [DLM_IVBUFLEN] = "invalid resource name length", | ||
211 | [DLM_CVTUNGRANT] = "attempted to convert ungranted lock", | ||
212 | [DLM_BADPARAM] = "invalid lock mode specified", | ||
213 | [DLM_VALNOTVALID] = "value block has been invalidated", | ||
214 | [DLM_REJECTED] = "request rejected, unrecognized client", | ||
215 | [DLM_ABORT] = "blocked lock request cancelled", | ||
216 | [DLM_CANCEL] = "conversion request cancelled", | ||
217 | [DLM_IVRESHANDLE] = "invalid resource handle", | ||
218 | [DLM_DEADLOCK] = "deadlock recovery refused this request", | ||
219 | [DLM_DENIED_NOASTS] = "failed to allocate AST", | ||
220 | [DLM_FORWARD] = "request must wait for primary's response", | ||
221 | [DLM_TIMEOUT] = "timeout value for lock has expired", | ||
222 | [DLM_IVGROUPID] = "invalid group specification", | ||
223 | [DLM_VERS_CONFLICT] = "version conflicts prevent request handling", | ||
224 | [DLM_BAD_DEVICE_PATH] = "Locks device does not exist or path wrong", | ||
225 | [DLM_NO_DEVICE_PERMISSION] = "Client has insufficient perms for device", | ||
226 | [DLM_NO_CONTROL_DEVICE] = "Cannot set options on opened device", | ||
227 | [DLM_RECOVERING] = "lock resource being recovered", | ||
228 | [DLM_MIGRATING] = "lock resource being migrated", | ||
229 | [DLM_MAXSTATS] = "invalid error number", | ||
230 | }; | ||
231 | |||
232 | const char *dlm_errmsg(enum dlm_status err) | ||
233 | { | ||
234 | if (err >= DLM_MAXSTATS || err < 0) | ||
235 | return dlm_errmsgs[DLM_MAXSTATS]; | ||
236 | return dlm_errmsgs[err]; | ||
237 | } | ||
238 | EXPORT_SYMBOL_GPL(dlm_errmsg); | ||
239 | |||
240 | const char *dlm_errname(enum dlm_status err) | ||
241 | { | ||
242 | if (err >= DLM_MAXSTATS || err < 0) | ||
243 | return dlm_errnames[DLM_MAXSTATS]; | ||
244 | return dlm_errnames[err]; | ||
245 | } | ||
246 | EXPORT_SYMBOL_GPL(dlm_errname); | ||
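
A usage note with a hypothetical caller: both lookups are safe for any input, since out-of-range values collapse to the DLM_MAXSTATS sentinel entry.

    enum dlm_status st = DLM_NOTQUEUED;

    mlog(ML_ERROR, "convert failed: %s (%s)\n",
         dlm_errname(st), dlm_errmsg(st));
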
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h new file mode 100644 index 000000000000..6858510c3ccd --- /dev/null +++ b/fs/ocfs2/dlm/dlmdebug.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdebug.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 021110-1307, USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMDEBUG_H | ||
26 | #define DLMDEBUG_H | ||
27 | |||
28 | void dlm_dump_lock_resources(struct dlm_ctxt *dlm); | ||
29 | |||
30 | #endif | ||
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c new file mode 100644 index 000000000000..da3c22045f89 --- /dev/null +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -0,0 +1,1469 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdomain.c | ||
5 | * | ||
6 | * defines domain join / leave apis | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/utsname.h> | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/spinlock.h> | ||
34 | #include <linux/delay.h> | ||
35 | #include <linux/err.h> | ||
36 | |||
37 | #include "cluster/heartbeat.h" | ||
38 | #include "cluster/nodemanager.h" | ||
39 | #include "cluster/tcp.h" | ||
40 | |||
41 | #include "dlmapi.h" | ||
42 | #include "dlmcommon.h" | ||
43 | |||
44 | #include "dlmdebug.h" | ||
45 | #include "dlmdomain.h" | ||
46 | |||
47 | #include "dlmver.h" | ||
48 | |||
49 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN) | ||
50 | #include "cluster/masklog.h" | ||
51 | |||
52 | /* | ||
53 | * | ||
54 | * spinlock lock ordering: if multiple locks are needed, obey this ordering: | ||
55 | * dlm_domain_lock | ||
56 | * struct dlm_ctxt->spinlock | ||
57 | * struct dlm_lock_resource->spinlock | ||
58 | * struct dlm_ctxt->master_lock | ||
59 | * struct dlm_ctxt->ast_lock | ||
60 | * dlm_master_list_entry->spinlock | ||
61 | * dlm_lock->spinlock | ||
62 | * | ||
63 | */ | ||
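
For reference, a correctly nested acquisition under the ordering above, outermost first. The fragment is illustrative; dlm_mark_domain_leaving() later in this file takes the first two locks in exactly this order.

    spin_lock(&dlm_domain_lock);
    spin_lock(&dlm->spinlock);
    spin_lock(&res->spinlock);
    /* ... critical section ... */
    spin_unlock(&res->spinlock);
    spin_unlock(&dlm->spinlock);
    spin_unlock(&dlm_domain_lock);
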
64 | |||
65 | spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED; | ||
66 | LIST_HEAD(dlm_domains); | ||
67 | static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); | ||
68 | |||
69 | #define DLM_DOMAIN_BACKOFF_MS 200 | ||
70 | |||
71 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data); | ||
72 | static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data); | ||
73 | static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data); | ||
74 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data); | ||
75 | |||
76 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); | ||
77 | |||
78 | void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) | ||
79 | { | ||
80 | list_del_init(&lockres->list); | ||
81 | dlm_lockres_put(lockres); | ||
82 | } | ||
83 | |||
84 | void __dlm_insert_lockres(struct dlm_ctxt *dlm, | ||
85 | struct dlm_lock_resource *res) | ||
86 | { | ||
87 | struct list_head *bucket; | ||
88 | struct qstr *q; | ||
89 | |||
90 | assert_spin_locked(&dlm->spinlock); | ||
91 | |||
92 | q = &res->lockname; | ||
93 | q->hash = full_name_hash(q->name, q->len); | ||
94 | bucket = &(dlm->resources[q->hash & DLM_HASH_MASK]); | ||
95 | |||
96 | /* get a reference for our hashtable */ | ||
97 | dlm_lockres_get(res); | ||
98 | |||
99 | list_add_tail(&res->list, bucket); | ||
100 | } | ||
101 | |||
102 | struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
103 | const char *name, | ||
104 | unsigned int len) | ||
105 | { | ||
106 | unsigned int hash; | ||
107 | struct list_head *iter; | ||
108 | struct dlm_lock_resource *tmpres=NULL; | ||
109 | struct list_head *bucket; | ||
110 | |||
111 | mlog_entry("%.*s\n", len, name); | ||
112 | |||
113 | assert_spin_locked(&dlm->spinlock); | ||
114 | |||
115 | hash = full_name_hash(name, len); | ||
116 | |||
117 | bucket = &(dlm->resources[hash & DLM_HASH_MASK]); | ||
118 | |||
119 | /* check for pre-existing lock */ | ||
120 | list_for_each(iter, bucket) { | ||
121 | tmpres = list_entry(iter, struct dlm_lock_resource, list); | ||
122 | if (tmpres->lockname.len == len && | ||
123 | memcmp(tmpres->lockname.name, name, len) == 0) { | ||
124 | dlm_lockres_get(tmpres); | ||
125 | break; | ||
126 | } | ||
127 | |||
128 | tmpres = NULL; | ||
129 | } | ||
130 | return tmpres; | ||
131 | } | ||
132 | |||
133 | struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm, | ||
134 | const char *name, | ||
135 | unsigned int len) | ||
136 | { | ||
137 | struct dlm_lock_resource *res; | ||
138 | |||
139 | spin_lock(&dlm->spinlock); | ||
140 | res = __dlm_lookup_lockres(dlm, name, len); | ||
141 | spin_unlock(&dlm->spinlock); | ||
142 | return res; | ||
143 | } | ||
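
A usage sketch (hypothetical caller) of the reference discipline: the lookup takes a reference under dlm->spinlock, and the caller owns it until dlm_lockres_put().

    struct dlm_lock_resource *res;

    res = dlm_lookup_lockres(dlm, name, namelen);   /* ref held on success */
    if (res) {
            /* res cannot be freed underneath us here */
            dlm_lockres_put(res);                   /* drop our reference */
    }
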
144 | |||
145 | static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) | ||
146 | { | ||
147 | struct dlm_ctxt *tmp = NULL; | ||
148 | struct list_head *iter; | ||
149 | |||
150 | assert_spin_locked(&dlm_domain_lock); | ||
151 | |||
152 | /* tmp->name here is always NULL terminated, | ||
153 | * but domain may not be! */ | ||
154 | list_for_each(iter, &dlm_domains) { | ||
155 | tmp = list_entry (iter, struct dlm_ctxt, list); | ||
156 | if (strlen(tmp->name) == len && | ||
157 | memcmp(tmp->name, domain, len)==0) | ||
158 | break; | ||
159 | tmp = NULL; | ||
160 | } | ||
161 | |||
162 | return tmp; | ||
163 | } | ||
164 | |||
165 | /* For null terminated domain strings ONLY */ | ||
166 | static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) | ||
167 | { | ||
168 | assert_spin_locked(&dlm_domain_lock); | ||
169 | |||
170 | return __dlm_lookup_domain_full(domain, strlen(domain)); | ||
171 | } | ||
172 | |||
173 | |||
174 | /* returns true on one of two conditions: | ||
175 | * 1) the domain does not exist | ||
176 | * 2) the domain exists and its state is "joined" */ | ||
177 | static int dlm_wait_on_domain_helper(const char *domain) | ||
178 | { | ||
179 | int ret = 0; | ||
180 | struct dlm_ctxt *tmp = NULL; | ||
181 | |||
182 | spin_lock(&dlm_domain_lock); | ||
183 | |||
184 | tmp = __dlm_lookup_domain(domain); | ||
185 | if (!tmp) | ||
186 | ret = 1; | ||
187 | else if (tmp->dlm_state == DLM_CTXT_JOINED) | ||
188 | ret = 1; | ||
189 | |||
190 | spin_unlock(&dlm_domain_lock); | ||
191 | return ret; | ||
192 | } | ||
193 | |||
194 | static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm) | ||
195 | { | ||
196 | if (dlm->resources) | ||
197 | free_page((unsigned long) dlm->resources); | ||
198 | |||
199 | if (dlm->name) | ||
200 | kfree(dlm->name); | ||
201 | |||
202 | kfree(dlm); | ||
203 | } | ||
204 | |||
205 | /* A little strange - this function will be called while holding | ||
206 | * dlm_domain_lock and is expected to be holding it on the way out. We | ||
207 | * will however drop and reacquire it multiple times */ | ||
208 | static void dlm_ctxt_release(struct kref *kref) | ||
209 | { | ||
210 | struct dlm_ctxt *dlm; | ||
211 | |||
212 | dlm = container_of(kref, struct dlm_ctxt, dlm_refs); | ||
213 | |||
214 | BUG_ON(dlm->num_joins); | ||
215 | BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED); | ||
216 | |||
217 | /* we may still be in the list if we hit an error during join. */ | ||
218 | list_del_init(&dlm->list); | ||
219 | |||
220 | spin_unlock(&dlm_domain_lock); | ||
221 | |||
222 | mlog(0, "freeing memory from domain %s\n", dlm->name); | ||
223 | |||
224 | wake_up(&dlm_domain_events); | ||
225 | |||
226 | dlm_free_ctxt_mem(dlm); | ||
227 | |||
228 | spin_lock(&dlm_domain_lock); | ||
229 | } | ||
230 | |||
231 | void dlm_put(struct dlm_ctxt *dlm) | ||
232 | { | ||
233 | spin_lock(&dlm_domain_lock); | ||
234 | kref_put(&dlm->dlm_refs, dlm_ctxt_release); | ||
235 | spin_unlock(&dlm_domain_lock); | ||
236 | } | ||
237 | |||
238 | static void __dlm_get(struct dlm_ctxt *dlm) | ||
239 | { | ||
240 | kref_get(&dlm->dlm_refs); | ||
241 | } | ||
242 | |||
243 | /* given a questionable reference to a dlm object, gets a reference if | ||
244 | * it can find it in the list, otherwise returns NULL in which case | ||
245 | * you shouldn't trust your pointer. */ | ||
246 | struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm) | ||
247 | { | ||
248 | struct list_head *iter; | ||
249 | struct dlm_ctxt *target = NULL; | ||
250 | |||
251 | spin_lock(&dlm_domain_lock); | ||
252 | |||
253 | list_for_each(iter, &dlm_domains) { | ||
254 | target = list_entry (iter, struct dlm_ctxt, list); | ||
255 | |||
256 | if (target == dlm) { | ||
257 | __dlm_get(target); | ||
258 | break; | ||
259 | } | ||
260 | |||
261 | target = NULL; | ||
262 | } | ||
263 | |||
264 | spin_unlock(&dlm_domain_lock); | ||
265 | |||
266 | return target; | ||
267 | } | ||
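
The grab/put pairing deserves an illustration, since every message handler in this file relies on it: dlm_grab() validates a possibly-stale pointer against the global domain list and takes a reference in one step. A hypothetical handler:

    static int some_handler(struct o2net_msg *msg, u32 len, void *data)
    {
            struct dlm_ctxt *dlm = data;

            if (!dlm_grab(dlm))
                    return 0;       /* domain already gone: drop the message */
            /* ... dlm is safe to dereference here ... */
            dlm_put(dlm);
            return 0;
    }
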
268 | |||
269 | int dlm_domain_fully_joined(struct dlm_ctxt *dlm) | ||
270 | { | ||
271 | int ret; | ||
272 | |||
273 | spin_lock(&dlm_domain_lock); | ||
274 | ret = (dlm->dlm_state == DLM_CTXT_JOINED) || | ||
275 | (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN); | ||
276 | spin_unlock(&dlm_domain_lock); | ||
277 | |||
278 | return ret; | ||
279 | } | ||
280 | |||
281 | static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm) | ||
282 | { | ||
283 | dlm_unregister_domain_handlers(dlm); | ||
284 | dlm_complete_thread(dlm); | ||
285 | dlm_complete_recovery_thread(dlm); | ||
286 | |||
287 | /* We've left the domain. Now we can take ourselves out of the | ||
288 | * list and allow the kref stuff to help us free the | ||
289 | * memory. */ | ||
290 | spin_lock(&dlm_domain_lock); | ||
291 | list_del_init(&dlm->list); | ||
292 | spin_unlock(&dlm_domain_lock); | ||
293 | |||
294 | /* Wake up anyone waiting for us to remove this domain */ | ||
295 | wake_up(&dlm_domain_events); | ||
296 | } | ||
297 | |||
298 | static void dlm_migrate_all_locks(struct dlm_ctxt *dlm) | ||
299 | { | ||
300 | int i; | ||
301 | struct dlm_lock_resource *res; | ||
302 | |||
303 | mlog(0, "Migrating locks from domain %s\n", dlm->name); | ||
304 | restart: | ||
305 | spin_lock(&dlm->spinlock); | ||
306 | for (i=0; i<DLM_HASH_SIZE; i++) { | ||
307 | while (!list_empty(&dlm->resources[i])) { | ||
308 | res = list_entry(dlm->resources[i].next, | ||
309 | struct dlm_lock_resource, list); | ||
310 | /* need reference when manually grabbing lockres */ | ||
311 | dlm_lockres_get(res); | ||
312 | /* this should unhash the lockres | ||
313 | * and exit with dlm->spinlock */ | ||
314 | mlog(0, "purging res=%p\n", res); | ||
315 | if (dlm_lockres_is_dirty(dlm, res)) { | ||
316 | /* HACK! this should absolutely go. | ||
317 | * need to figure out why some empty | ||
318 | * lockreses are still marked dirty */ | ||
319 | mlog(ML_ERROR, "lockres %.*s dirty!\n", | ||
320 | res->lockname.len, res->lockname.name); | ||
321 | |||
322 | spin_unlock(&dlm->spinlock); | ||
323 | dlm_kick_thread(dlm, res); | ||
324 | wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); | ||
325 | dlm_lockres_put(res); | ||
326 | goto restart; | ||
327 | } | ||
328 | dlm_purge_lockres(dlm, res); | ||
329 | dlm_lockres_put(res); | ||
330 | } | ||
331 | } | ||
332 | spin_unlock(&dlm->spinlock); | ||
333 | |||
334 | mlog(0, "DONE Migrating locks from domain %s\n", dlm->name); | ||
335 | } | ||
336 | |||
337 | static int dlm_no_joining_node(struct dlm_ctxt *dlm) | ||
338 | { | ||
339 | int ret; | ||
340 | |||
341 | spin_lock(&dlm->spinlock); | ||
342 | ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN; | ||
343 | spin_unlock(&dlm->spinlock); | ||
344 | |||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm) | ||
349 | { | ||
350 | /* Yikes, a double spinlock! I need domain_lock for the dlm | ||
351 | * state and the dlm spinlock for join state... Sorry! */ | ||
352 | again: | ||
353 | spin_lock(&dlm_domain_lock); | ||
354 | spin_lock(&dlm->spinlock); | ||
355 | |||
356 | if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
357 | mlog(0, "Node %d is joining, we wait on it.\n", | ||
358 | dlm->joining_node); | ||
359 | spin_unlock(&dlm->spinlock); | ||
360 | spin_unlock(&dlm_domain_lock); | ||
361 | |||
362 | wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm)); | ||
363 | goto again; | ||
364 | } | ||
365 | |||
366 | dlm->dlm_state = DLM_CTXT_LEAVING; | ||
367 | spin_unlock(&dlm->spinlock); | ||
368 | spin_unlock(&dlm_domain_lock); | ||
369 | } | ||
370 | |||
371 | static void __dlm_print_nodes(struct dlm_ctxt *dlm) | ||
372 | { | ||
373 | int node = -1; | ||
374 | |||
375 | assert_spin_locked(&dlm->spinlock); | ||
376 | |||
377 | mlog(ML_NOTICE, "Nodes in my domain (\"%s\"):\n", dlm->name); | ||
378 | |||
379 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, | ||
380 | node + 1)) < O2NM_MAX_NODES) { | ||
381 | mlog(ML_NOTICE, " node %d\n", node); | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data) | ||
386 | { | ||
387 | struct dlm_ctxt *dlm = data; | ||
388 | unsigned int node; | ||
389 | struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; | ||
390 | |||
391 | mlog_entry("%p %u %p", msg, len, data); | ||
392 | |||
393 | if (!dlm_grab(dlm)) | ||
394 | return 0; | ||
395 | |||
396 | node = exit_msg->node_idx; | ||
397 | |||
398 | mlog(0, "Node %u leaves domain %s\n", node, dlm->name); | ||
399 | |||
400 | spin_lock(&dlm->spinlock); | ||
401 | clear_bit(node, dlm->domain_map); | ||
402 | __dlm_print_nodes(dlm); | ||
403 | |||
404 | /* notify anything attached to the heartbeat events */ | ||
405 | dlm_hb_event_notify_attached(dlm, node, 0); | ||
406 | |||
407 | spin_unlock(&dlm->spinlock); | ||
408 | |||
409 | dlm_put(dlm); | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, | ||
415 | unsigned int node) | ||
416 | { | ||
417 | int status; | ||
418 | struct dlm_exit_domain leave_msg; | ||
419 | |||
420 | mlog(0, "Asking node %u if we can leave the domain %s me = %u\n", | ||
421 | node, dlm->name, dlm->node_num); | ||
422 | |||
423 | memset(&leave_msg, 0, sizeof(leave_msg)); | ||
424 | leave_msg.node_idx = dlm->node_num; | ||
425 | |||
426 | status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key, | ||
427 | &leave_msg, sizeof(leave_msg), node, | ||
428 | NULL); | ||
429 | |||
430 | mlog(0, "status return %d from o2net_send_message\n", status); | ||
431 | |||
432 | return status; | ||
433 | } | ||
434 | |||
435 | |||
436 | static void dlm_leave_domain(struct dlm_ctxt *dlm) | ||
437 | { | ||
438 | int node, clear_node, status; | ||
439 | |||
440 | /* At this point we've migrated away all our locks and won't | ||
441 | * accept mastership of new ones. The dlm is responsible for | ||
442 | * almost nothing now. We make sure not to confuse any joining | ||
443 | * nodes and then commence shutdown procedure. */ | ||
444 | |||
445 | spin_lock(&dlm->spinlock); | ||
446 | /* Clear ourselves from the domain map */ | ||
447 | clear_bit(dlm->node_num, dlm->domain_map); | ||
448 | while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, | ||
449 | 0)) < O2NM_MAX_NODES) { | ||
450 | /* Drop the dlm spinlock. This is safe wrt the domain_map. | ||
451 | * -nodes cannot be added now as the | ||
452 | * query_join_handler knows to respond with OK_NO_MAP | ||
453 | * -we catch the right network errors if a node is | ||
454 | * removed from the map while we're sending him the | ||
455 | * exit message. */ | ||
456 | spin_unlock(&dlm->spinlock); | ||
457 | |||
458 | clear_node = 1; | ||
459 | |||
460 | status = dlm_send_one_domain_exit(dlm, node); | ||
461 | if (status < 0 && | ||
462 | status != -ENOPROTOOPT && | ||
463 | status != -ENOTCONN) { | ||
464 | mlog(ML_NOTICE, "Error %d sending domain exit message " | ||
465 | "to node %d\n", status, node); | ||
466 | |||
467 | /* Not sure what to do here but lets sleep for | ||
468 | * a bit in case this was a transient | ||
469 | * error... */ | ||
470 | msleep(DLM_DOMAIN_BACKOFF_MS); | ||
471 | clear_node = 0; | ||
472 | } | ||
473 | |||
474 | spin_lock(&dlm->spinlock); | ||
475 | /* If we're not clearing the node bit then we intend | ||
476 | * to loop back around to try again. */ | ||
477 | if (clear_node) | ||
478 | clear_bit(node, dlm->domain_map); | ||
479 | } | ||
480 | spin_unlock(&dlm->spinlock); | ||
481 | } | ||
482 | |||
483 | int dlm_joined(struct dlm_ctxt *dlm) | ||
484 | { | ||
485 | int ret = 0; | ||
486 | |||
487 | spin_lock(&dlm_domain_lock); | ||
488 | |||
489 | if (dlm->dlm_state == DLM_CTXT_JOINED) | ||
490 | ret = 1; | ||
491 | |||
492 | spin_unlock(&dlm_domain_lock); | ||
493 | |||
494 | return ret; | ||
495 | } | ||
496 | |||
497 | int dlm_shutting_down(struct dlm_ctxt *dlm) | ||
498 | { | ||
499 | int ret = 0; | ||
500 | |||
501 | spin_lock(&dlm_domain_lock); | ||
502 | |||
503 | if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN) | ||
504 | ret = 1; | ||
505 | |||
506 | spin_unlock(&dlm_domain_lock); | ||
507 | |||
508 | return ret; | ||
509 | } | ||
510 | |||
511 | void dlm_unregister_domain(struct dlm_ctxt *dlm) | ||
512 | { | ||
513 | int leave = 0; | ||
514 | |||
515 | spin_lock(&dlm_domain_lock); | ||
516 | BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED); | ||
517 | BUG_ON(!dlm->num_joins); | ||
518 | |||
519 | dlm->num_joins--; | ||
520 | if (!dlm->num_joins) { | ||
521 | /* We mark it "in shutdown" now so new register | ||
522 | * requests wait until we've completely left the | ||
523 | * domain. Don't use DLM_CTXT_LEAVING yet as we still | ||
524 | * want new domain joins to communicate with us at | ||
525 | * least until we've completed migration of our | ||
526 | * resources. */ | ||
527 | dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN; | ||
528 | leave = 1; | ||
529 | } | ||
530 | spin_unlock(&dlm_domain_lock); | ||
531 | |||
532 | if (leave) { | ||
533 | mlog(0, "shutting down domain %s\n", dlm->name); | ||
534 | |||
535 | /* We changed dlm state, notify the thread */ | ||
536 | dlm_kick_thread(dlm, NULL); | ||
537 | |||
538 | dlm_migrate_all_locks(dlm); | ||
539 | dlm_mark_domain_leaving(dlm); | ||
540 | dlm_leave_domain(dlm); | ||
541 | dlm_complete_dlm_shutdown(dlm); | ||
542 | } | ||
543 | dlm_put(dlm); | ||
544 | } | ||
545 | EXPORT_SYMBOL_GPL(dlm_unregister_domain); | ||
546 | |||
547 | static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data) | ||
548 | { | ||
549 | struct dlm_query_join_request *query; | ||
550 | enum dlm_query_join_response response; | ||
551 | struct dlm_ctxt *dlm = NULL; | ||
552 | |||
553 | query = (struct dlm_query_join_request *) msg->buf; | ||
554 | |||
555 | mlog(0, "node %u wants to join domain %s\n", query->node_idx, | ||
556 | query->domain); | ||
557 | |||
558 | /* | ||
559 | * If heartbeat doesn't consider the node live, tell it | ||
560 | * to back off and try again. This gives heartbeat a chance | ||
561 | * to catch up. | ||
562 | */ | ||
563 | if (!o2hb_check_node_heartbeating(query->node_idx)) { | ||
564 | mlog(0, "node %u is not in our live map yet\n", | ||
565 | query->node_idx); | ||
566 | |||
567 | response = JOIN_DISALLOW; | ||
568 | goto respond; | ||
569 | } | ||
570 | |||
571 | response = JOIN_OK_NO_MAP; | ||
572 | |||
573 | spin_lock(&dlm_domain_lock); | ||
574 | dlm = __dlm_lookup_domain_full(query->domain, query->name_len); | ||
575 | /* Once the dlm ctxt is marked as leaving then we don't want | ||
576 | * to be put in someone's domain map. */ | ||
577 | if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) { | ||
578 | spin_lock(&dlm->spinlock); | ||
579 | |||
580 | if (dlm->dlm_state == DLM_CTXT_NEW && | ||
581 | dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
582 | /* If this is a brand new context and we | ||
583 | * haven't started our join process yet, then | ||
584 | * the other node won the race. */ | ||
585 | response = JOIN_OK_NO_MAP; | ||
586 | } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
587 | /* Disallow parallel joins. */ | ||
588 | response = JOIN_DISALLOW; | ||
589 | } else { | ||
590 | /* Alright, we're fully a part of this domain, | ||
591 |  * so we keep some state as to who's joining | ||
592 |  * and indicate to him what needs to be fixed | ||
593 |  * up. */ | ||
594 | response = JOIN_OK; | ||
595 | __dlm_set_joining_node(dlm, query->node_idx); | ||
596 | } | ||
597 | |||
598 | spin_unlock(&dlm->spinlock); | ||
599 | } | ||
600 | spin_unlock(&dlm_domain_lock); | ||
601 | |||
602 | respond: | ||
603 | mlog(0, "We respond with %u\n", response); | ||
604 | |||
605 | return response; | ||
606 | } | ||
607 | |||
608 | static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data) | ||
609 | { | ||
610 | struct dlm_assert_joined *assert; | ||
611 | struct dlm_ctxt *dlm = NULL; | ||
612 | |||
613 | assert = (struct dlm_assert_joined *) msg->buf; | ||
614 | |||
615 | mlog(0, "node %u asserts join on domain %s\n", assert->node_idx, | ||
616 | assert->domain); | ||
617 | |||
618 | spin_lock(&dlm_domain_lock); | ||
619 | dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); | ||
620 | /* XXX should we consider no dlm ctxt an error? */ | ||
621 | if (dlm) { | ||
622 | spin_lock(&dlm->spinlock); | ||
623 | |||
624 | /* Alright, this node has officially joined our | ||
625 | * domain. Set him in the map and clean up our | ||
626 | * leftover join state. */ | ||
627 | BUG_ON(dlm->joining_node != assert->node_idx); | ||
628 | set_bit(assert->node_idx, dlm->domain_map); | ||
629 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
630 | |||
631 | __dlm_print_nodes(dlm); | ||
632 | |||
633 | /* notify anything attached to the heartbeat events */ | ||
634 | dlm_hb_event_notify_attached(dlm, assert->node_idx, 1); | ||
635 | |||
636 | spin_unlock(&dlm->spinlock); | ||
637 | } | ||
638 | spin_unlock(&dlm_domain_lock); | ||
639 | |||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data) | ||
644 | { | ||
645 | struct dlm_cancel_join *cancel; | ||
646 | struct dlm_ctxt *dlm = NULL; | ||
647 | |||
648 | cancel = (struct dlm_cancel_join *) msg->buf; | ||
649 | |||
650 | mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx, | ||
651 | cancel->domain); | ||
652 | |||
653 | spin_lock(&dlm_domain_lock); | ||
654 | dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len); | ||
655 | |||
656 | if (dlm) { | ||
657 | spin_lock(&dlm->spinlock); | ||
658 | |||
659 | /* Yikes, this guy wants to cancel his join. No | ||
660 |  * problem, we simply clean up our join state. */ | ||
661 | BUG_ON(dlm->joining_node != cancel->node_idx); | ||
662 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
663 | |||
664 | spin_unlock(&dlm->spinlock); | ||
665 | } | ||
666 | spin_unlock(&dlm_domain_lock); | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm, | ||
672 | unsigned int node) | ||
673 | { | ||
674 | int status; | ||
675 | struct dlm_cancel_join cancel_msg; | ||
676 | |||
677 | memset(&cancel_msg, 0, sizeof(cancel_msg)); | ||
678 | cancel_msg.node_idx = dlm->node_num; | ||
679 | cancel_msg.name_len = strlen(dlm->name); | ||
680 | memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len); | ||
681 | |||
682 | status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, | ||
683 | &cancel_msg, sizeof(cancel_msg), node, | ||
684 | NULL); | ||
685 | if (status < 0) { | ||
686 | mlog_errno(status); | ||
687 | goto bail; | ||
688 | } | ||
689 | |||
690 | bail: | ||
691 | return status; | ||
692 | } | ||
693 | |||
694 | /* map_size should be in bytes. */ | ||
695 | static int dlm_send_join_cancels(struct dlm_ctxt *dlm, | ||
696 | unsigned long *node_map, | ||
697 | unsigned int map_size) | ||
698 | { | ||
699 | int status, tmpstat; | ||
700 | unsigned int node; | ||
701 | |||
702 | if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) * | ||
703 | sizeof(unsigned long))) { | ||
704 | mlog(ML_ERROR, | ||
705 | "map_size %u != expected %zu bytes\n", map_size, | ||
706 | BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long)); | ||
707 | return -EINVAL; | ||
708 | } | ||
709 | |||
710 | status = 0; | ||
711 | node = -1; | ||
712 | while ((node = find_next_bit(node_map, O2NM_MAX_NODES, | ||
713 | node + 1)) < O2NM_MAX_NODES) { | ||
714 | if (node == dlm->node_num) | ||
715 | continue; | ||
716 | |||
717 | tmpstat = dlm_send_one_join_cancel(dlm, node); | ||
718 | if (tmpstat) { | ||
719 | mlog(ML_ERROR, "Error return %d cancelling join on " | ||
720 | "node %d\n", tmpstat, node); | ||
721 | if (!status) | ||
722 | status = tmpstat; | ||
723 | } | ||
724 | } | ||
725 | |||
726 | if (status) | ||
727 | mlog_errno(status); | ||
728 | return status; | ||
729 | } | ||
730 | |||
731 | static int dlm_request_join(struct dlm_ctxt *dlm, | ||
732 | int node, | ||
733 | enum dlm_query_join_response *response) | ||
734 | { | ||
735 | int status, retval; | ||
736 | struct dlm_query_join_request join_msg; | ||
737 | |||
738 | mlog(0, "querying node %d\n", node); | ||
739 | |||
740 | memset(&join_msg, 0, sizeof(join_msg)); | ||
741 | join_msg.node_idx = dlm->node_num; | ||
742 | join_msg.name_len = strlen(dlm->name); | ||
743 | memcpy(join_msg.domain, dlm->name, join_msg.name_len); | ||
744 | |||
745 | status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg, | ||
746 | sizeof(join_msg), node, &retval); | ||
747 | if (status < 0 && status != -ENOPROTOOPT) { | ||
748 | mlog_errno(status); | ||
749 | goto bail; | ||
750 | } | ||
751 | |||
752 | /* -ENOPROTOOPT from the net code means the other side isn't | ||
753 | listening for our message type -- that's fine, it means | ||
754 | his dlm isn't up, so we can consider him a 'yes' but not | ||
755 | joined into the domain. */ | ||
756 | if (status == -ENOPROTOOPT) { | ||
757 | status = 0; | ||
758 | *response = JOIN_OK_NO_MAP; | ||
759 | } else if (retval == JOIN_DISALLOW || | ||
760 | retval == JOIN_OK || | ||
761 | retval == JOIN_OK_NO_MAP) { | ||
762 | *response = retval; | ||
763 | } else { | ||
764 | status = -EINVAL; | ||
765 | mlog(ML_ERROR, "invalid response %d from node %u\n", retval, | ||
766 | node); | ||
767 | } | ||
768 | |||
769 | mlog(0, "status %d, node %d response is %d\n", status, node, | ||
770 | *response); | ||
771 | |||
772 | bail: | ||
773 | return status; | ||
774 | } | ||
775 | |||
776 | static int dlm_send_one_join_assert(struct dlm_ctxt *dlm, | ||
777 | unsigned int node) | ||
778 | { | ||
779 | int status; | ||
780 | struct dlm_assert_joined assert_msg; | ||
781 | |||
782 | mlog(0, "Sending join assert to node %u\n", node); | ||
783 | |||
784 | memset(&assert_msg, 0, sizeof(assert_msg)); | ||
785 | assert_msg.node_idx = dlm->node_num; | ||
786 | assert_msg.name_len = strlen(dlm->name); | ||
787 | memcpy(assert_msg.domain, dlm->name, assert_msg.name_len); | ||
788 | |||
789 | status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, | ||
790 | &assert_msg, sizeof(assert_msg), node, | ||
791 | NULL); | ||
792 | if (status < 0) | ||
793 | mlog_errno(status); | ||
794 | |||
795 | return status; | ||
796 | } | ||
797 | |||
798 | static void dlm_send_join_asserts(struct dlm_ctxt *dlm, | ||
799 | unsigned long *node_map) | ||
800 | { | ||
801 | int status, node, live; | ||
802 | |||
803 | status = 0; | ||
804 | node = -1; | ||
805 | while ((node = find_next_bit(node_map, O2NM_MAX_NODES, | ||
806 | node + 1)) < O2NM_MAX_NODES) { | ||
807 | if (node == dlm->node_num) | ||
808 | continue; | ||
809 | |||
810 | do { | ||
811 | /* It is very important that this message be | ||
812 | * received so we spin until either the node | ||
813 | * has died or it gets the message. */ | ||
814 | status = dlm_send_one_join_assert(dlm, node); | ||
815 | |||
816 | spin_lock(&dlm->spinlock); | ||
817 | live = test_bit(node, dlm->live_nodes_map); | ||
818 | spin_unlock(&dlm->spinlock); | ||
819 | |||
820 | if (status) { | ||
821 | mlog(ML_ERROR, "Error return %d asserting " | ||
822 | "join on node %d\n", status, node); | ||
823 | |||
824 | /* give us some time between errors... */ | ||
825 | if (live) | ||
826 | msleep(DLM_DOMAIN_BACKOFF_MS); | ||
827 | } | ||
828 | } while (status && live); | ||
829 | } | ||
830 | } | ||
831 | |||
832 | struct domain_join_ctxt { | ||
833 | unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
834 | unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
835 | }; | ||
836 | |||
837 | static int dlm_should_restart_join(struct dlm_ctxt *dlm, | ||
838 | struct domain_join_ctxt *ctxt, | ||
839 | enum dlm_query_join_response response) | ||
840 | { | ||
841 | int ret; | ||
842 | |||
843 | if (response == JOIN_DISALLOW) { | ||
844 | mlog(0, "Latest response of disallow -- should restart\n"); | ||
845 | return 1; | ||
846 | } | ||
847 | |||
848 | spin_lock(&dlm->spinlock); | ||
849 | /* For now, we restart the process if the node maps have | ||
850 | * changed at all */ | ||
851 | ret = memcmp(ctxt->live_map, dlm->live_nodes_map, | ||
852 | sizeof(dlm->live_nodes_map)); | ||
853 | spin_unlock(&dlm->spinlock); | ||
854 | |||
855 | if (ret) | ||
856 | mlog(0, "Node maps changed -- should restart\n"); | ||
857 | |||
858 | return ret; | ||
859 | } | ||
860 | |||
861 | static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) | ||
862 | { | ||
863 | int status = 0, tmpstat, node; | ||
864 | struct domain_join_ctxt *ctxt; | ||
865 | enum dlm_query_join_response response; | ||
866 | |||
867 | mlog_entry("%p", dlm); | ||
868 | |||
869 | ctxt = kcalloc(1, sizeof(*ctxt), GFP_KERNEL); | ||
870 | if (!ctxt) { | ||
871 | status = -ENOMEM; | ||
872 | mlog_errno(status); | ||
873 | goto bail; | ||
874 | } | ||
875 | |||
876 | /* group sem locking should work for us here -- we're already | ||
877 | * registered for heartbeat events so filling this should be | ||
878 | * atomic wrt getting those handlers called. */ | ||
879 | o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map)); | ||
880 | |||
881 | spin_lock(&dlm->spinlock); | ||
882 | memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map)); | ||
883 | |||
884 | __dlm_set_joining_node(dlm, dlm->node_num); | ||
885 | |||
886 | spin_unlock(&dlm->spinlock); | ||
887 | |||
888 | node = -1; | ||
889 | while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES, | ||
890 | node + 1)) < O2NM_MAX_NODES) { | ||
891 | if (node == dlm->node_num) | ||
892 | continue; | ||
893 | |||
894 | status = dlm_request_join(dlm, node, &response); | ||
895 | if (status < 0) { | ||
896 | mlog_errno(status); | ||
897 | goto bail; | ||
898 | } | ||
899 | |||
900 | /* Ok, either we got a response or the node doesn't have a | ||
901 | * dlm up. */ | ||
902 | if (response == JOIN_OK) | ||
903 | set_bit(node, ctxt->yes_resp_map); | ||
904 | |||
905 | if (dlm_should_restart_join(dlm, ctxt, response)) { | ||
906 | status = -EAGAIN; | ||
907 | goto bail; | ||
908 | } | ||
909 | } | ||
910 | |||
911 | mlog(0, "Yay, done querying nodes!\n"); | ||
912 | |||
913 | /* Yay, everyone agrees we can join the domain. My domain | ||
914 |  * comprises all nodes who were put in the | ||
915 |  * yes_resp_map. Copy that into our domain map and send a join | ||
916 |  * assert message to clean up everyone else's state. */ | ||
917 | spin_lock(&dlm->spinlock); | ||
918 | memcpy(dlm->domain_map, ctxt->yes_resp_map, | ||
919 | sizeof(ctxt->yes_resp_map)); | ||
920 | set_bit(dlm->node_num, dlm->domain_map); | ||
921 | spin_unlock(&dlm->spinlock); | ||
922 | |||
923 | dlm_send_join_asserts(dlm, ctxt->yes_resp_map); | ||
924 | |||
925 | /* Joined state *must* be set before the joining node | ||
926 | * information, otherwise the query_join handler may read no | ||
927 | * current joiner but a state of NEW and tell joining nodes | ||
928 | * we're not in the domain. */ | ||
929 | spin_lock(&dlm_domain_lock); | ||
930 | dlm->dlm_state = DLM_CTXT_JOINED; | ||
931 | dlm->num_joins++; | ||
932 | spin_unlock(&dlm_domain_lock); | ||
933 | |||
934 | bail: | ||
935 | spin_lock(&dlm->spinlock); | ||
936 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
937 | if (!status) | ||
938 | __dlm_print_nodes(dlm); | ||
939 | spin_unlock(&dlm->spinlock); | ||
940 | |||
941 | if (ctxt) { | ||
942 | /* Do we need to send a cancel message to any nodes? */ | ||
943 | if (status < 0) { | ||
944 | tmpstat = dlm_send_join_cancels(dlm, | ||
945 | ctxt->yes_resp_map, | ||
946 | sizeof(ctxt->yes_resp_map)); | ||
947 | if (tmpstat < 0) | ||
948 | mlog_errno(tmpstat); | ||
949 | } | ||
950 | kfree(ctxt); | ||
951 | } | ||
952 | |||
953 | mlog(0, "returning %d\n", status); | ||
954 | return status; | ||
955 | } | ||
956 | |||
957 | static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) | ||
958 | { | ||
959 | o2hb_unregister_callback(&dlm->dlm_hb_up); | ||
960 | o2hb_unregister_callback(&dlm->dlm_hb_down); | ||
961 | o2net_unregister_handler_list(&dlm->dlm_domain_handlers); | ||
962 | } | ||
963 | |||
964 | static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) | ||
965 | { | ||
966 | int status; | ||
967 | |||
968 | mlog(0, "registering handlers.\n"); | ||
969 | |||
970 | o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, | ||
971 | dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); | ||
972 | status = o2hb_register_callback(&dlm->dlm_hb_down); | ||
973 | if (status) | ||
974 | goto bail; | ||
975 | |||
976 | o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, | ||
977 | dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); | ||
978 | status = o2hb_register_callback(&dlm->dlm_hb_up); | ||
979 | if (status) | ||
980 | goto bail; | ||
981 | |||
982 | status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key, | ||
983 | sizeof(struct dlm_master_request), | ||
984 | dlm_master_request_handler, | ||
985 | dlm, &dlm->dlm_domain_handlers); | ||
986 | if (status) | ||
987 | goto bail; | ||
988 | |||
989 | status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key, | ||
990 | sizeof(struct dlm_assert_master), | ||
991 | dlm_assert_master_handler, | ||
992 | dlm, &dlm->dlm_domain_handlers); | ||
993 | if (status) | ||
994 | goto bail; | ||
995 | |||
996 | status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key, | ||
997 | sizeof(struct dlm_create_lock), | ||
998 | dlm_create_lock_handler, | ||
999 | dlm, &dlm->dlm_domain_handlers); | ||
1000 | if (status) | ||
1001 | goto bail; | ||
1002 | |||
1003 | status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key, | ||
1004 | DLM_CONVERT_LOCK_MAX_LEN, | ||
1005 | dlm_convert_lock_handler, | ||
1006 | dlm, &dlm->dlm_domain_handlers); | ||
1007 | if (status) | ||
1008 | goto bail; | ||
1009 | |||
1010 | status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key, | ||
1011 | DLM_UNLOCK_LOCK_MAX_LEN, | ||
1012 | dlm_unlock_lock_handler, | ||
1013 | dlm, &dlm->dlm_domain_handlers); | ||
1014 | if (status) | ||
1015 | goto bail; | ||
1016 | |||
1017 | status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key, | ||
1018 | DLM_PROXY_AST_MAX_LEN, | ||
1019 | dlm_proxy_ast_handler, | ||
1020 | dlm, &dlm->dlm_domain_handlers); | ||
1021 | if (status) | ||
1022 | goto bail; | ||
1023 | |||
1024 | status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key, | ||
1025 | sizeof(struct dlm_exit_domain), | ||
1026 | dlm_exit_domain_handler, | ||
1027 | dlm, &dlm->dlm_domain_handlers); | ||
1028 | if (status) | ||
1029 | goto bail; | ||
1030 | |||
1031 | status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key, | ||
1032 | sizeof(struct dlm_migrate_request), | ||
1033 | dlm_migrate_request_handler, | ||
1034 | dlm, &dlm->dlm_domain_handlers); | ||
1035 | if (status) | ||
1036 | goto bail; | ||
1037 | |||
1038 | status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key, | ||
1039 | DLM_MIG_LOCKRES_MAX_LEN, | ||
1040 | dlm_mig_lockres_handler, | ||
1041 | dlm, &dlm->dlm_domain_handlers); | ||
1042 | if (status) | ||
1043 | goto bail; | ||
1044 | |||
1045 | status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key, | ||
1046 | sizeof(struct dlm_master_requery), | ||
1047 | dlm_master_requery_handler, | ||
1048 | dlm, &dlm->dlm_domain_handlers); | ||
1049 | if (status) | ||
1050 | goto bail; | ||
1051 | |||
1052 | status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key, | ||
1053 | sizeof(struct dlm_lock_request), | ||
1054 | dlm_request_all_locks_handler, | ||
1055 | dlm, &dlm->dlm_domain_handlers); | ||
1056 | if (status) | ||
1057 | goto bail; | ||
1058 | |||
1059 | status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key, | ||
1060 | sizeof(struct dlm_reco_data_done), | ||
1061 | dlm_reco_data_done_handler, | ||
1062 | dlm, &dlm->dlm_domain_handlers); | ||
1063 | if (status) | ||
1064 | goto bail; | ||
1065 | |||
1066 | status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key, | ||
1067 | sizeof(struct dlm_begin_reco), | ||
1068 | dlm_begin_reco_handler, | ||
1069 | dlm, &dlm->dlm_domain_handlers); | ||
1070 | if (status) | ||
1071 | goto bail; | ||
1072 | |||
1073 | status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key, | ||
1074 | sizeof(struct dlm_finalize_reco), | ||
1075 | dlm_finalize_reco_handler, | ||
1076 | dlm, &dlm->dlm_domain_handlers); | ||
1077 | if (status) | ||
1078 | goto bail; | ||
1079 | |||
1080 | bail: | ||
1081 | if (status) | ||
1082 | dlm_unregister_domain_handlers(dlm); | ||
1083 | |||
1084 | return status; | ||
1085 | } | ||
1086 | |||
1087 | static int dlm_join_domain(struct dlm_ctxt *dlm) | ||
1088 | { | ||
1089 | int status; | ||
1090 | |||
1091 | BUG_ON(!dlm); | ||
1092 | |||
1093 | mlog(0, "Join domain %s\n", dlm->name); | ||
1094 | |||
1095 | status = dlm_register_domain_handlers(dlm); | ||
1096 | if (status) { | ||
1097 | mlog_errno(status); | ||
1098 | goto bail; | ||
1099 | } | ||
1100 | |||
1101 | status = dlm_launch_thread(dlm); | ||
1102 | if (status < 0) { | ||
1103 | mlog_errno(status); | ||
1104 | goto bail; | ||
1105 | } | ||
1106 | |||
1107 | status = dlm_launch_recovery_thread(dlm); | ||
1108 | if (status < 0) { | ||
1109 | mlog_errno(status); | ||
1110 | goto bail; | ||
1111 | } | ||
1112 | |||
1113 | do { | ||
1114 | unsigned int backoff; | ||
1115 | status = dlm_try_to_join_domain(dlm); | ||
1116 | |||
1117 | /* If we're racing another node to the join, then we | ||
1118 | * need to back off temporarily and let them | ||
1119 | * complete. */ | ||
1120 | if (status == -EAGAIN) { | ||
1121 | if (signal_pending(current)) { | ||
1122 | status = -ERESTARTSYS; | ||
1123 | goto bail; | ||
1124 | } | ||
1125 | |||
1126 | /* | ||
1127 | * <chip> After you! | ||
1128 | * <dale> No, after you! | ||
1129 | * <chip> I insist! | ||
1130 | * <dale> But you first! | ||
1131 | * ... | ||
1132 | */ | ||
1133 | backoff = (unsigned int)(jiffies & 0x3); | ||
1134 | backoff *= DLM_DOMAIN_BACKOFF_MS; | ||
1135 | mlog(0, "backoff %d\n", backoff); | ||
1136 | msleep(backoff); | ||
1137 | } | ||
1138 | } while (status == -EAGAIN); | ||
1139 | |||
1140 | if (status < 0) { | ||
1141 | mlog_errno(status); | ||
1142 | goto bail; | ||
1143 | } | ||
1144 | |||
1145 | status = 0; | ||
1146 | bail: | ||
1147 | wake_up(&dlm_domain_events); | ||
1148 | |||
1149 | if (status) { | ||
1150 | dlm_unregister_domain_handlers(dlm); | ||
1151 | dlm_complete_thread(dlm); | ||
1152 | dlm_complete_recovery_thread(dlm); | ||
1153 | } | ||
1154 | |||
1155 | return status; | ||
1156 | } | ||
1157 | |||
1158 | static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, | ||
1159 | u32 key) | ||
1160 | { | ||
1161 | int i; | ||
1162 | struct dlm_ctxt *dlm = NULL; | ||
1163 | |||
1164 | dlm = kcalloc(1, sizeof(*dlm), GFP_KERNEL); | ||
1165 | if (!dlm) { | ||
1166 | mlog_errno(-ENOMEM); | ||
1167 | goto leave; | ||
1168 | } | ||
1169 | |||
1170 | dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL); | ||
1171 | if (dlm->name == NULL) { | ||
1172 | mlog_errno(-ENOMEM); | ||
1173 | kfree(dlm); | ||
1174 | dlm = NULL; | ||
1175 | goto leave; | ||
1176 | } | ||
1177 | |||
1178 | dlm->resources = (struct list_head *) __get_free_page(GFP_KERNEL); | ||
1179 | if (!dlm->resources) { | ||
1180 | mlog_errno(-ENOMEM); | ||
1181 | kfree(dlm->name); | ||
1182 | kfree(dlm); | ||
1183 | dlm = NULL; | ||
1184 | goto leave; | ||
1185 | } | ||
1186 | memset(dlm->resources, 0, PAGE_SIZE); | ||
1187 | |||
1188 | for (i=0; i<DLM_HASH_SIZE; i++) | ||
1189 | INIT_LIST_HEAD(&dlm->resources[i]); | ||
1190 | |||
1191 | strcpy(dlm->name, domain); | ||
1192 | dlm->key = key; | ||
1193 | dlm->node_num = o2nm_this_node(); | ||
1194 | |||
1195 | spin_lock_init(&dlm->spinlock); | ||
1196 | spin_lock_init(&dlm->master_lock); | ||
1197 | spin_lock_init(&dlm->ast_lock); | ||
1198 | INIT_LIST_HEAD(&dlm->list); | ||
1199 | INIT_LIST_HEAD(&dlm->dirty_list); | ||
1200 | INIT_LIST_HEAD(&dlm->reco.resources); | ||
1201 | INIT_LIST_HEAD(&dlm->reco.received); | ||
1202 | INIT_LIST_HEAD(&dlm->reco.node_data); | ||
1203 | INIT_LIST_HEAD(&dlm->purge_list); | ||
1204 | INIT_LIST_HEAD(&dlm->dlm_domain_handlers); | ||
1205 | dlm->reco.state = 0; | ||
1206 | |||
1207 | INIT_LIST_HEAD(&dlm->pending_asts); | ||
1208 | INIT_LIST_HEAD(&dlm->pending_basts); | ||
1209 | |||
1210 | mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n", | ||
1211 | dlm->recovery_map, &(dlm->recovery_map[0])); | ||
1212 | |||
1213 | memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map)); | ||
1214 | memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map)); | ||
1215 | memset(dlm->domain_map, 0, sizeof(dlm->domain_map)); | ||
1216 | |||
1217 | dlm->dlm_thread_task = NULL; | ||
1218 | dlm->dlm_reco_thread_task = NULL; | ||
1219 | init_waitqueue_head(&dlm->dlm_thread_wq); | ||
1220 | init_waitqueue_head(&dlm->dlm_reco_thread_wq); | ||
1221 | init_waitqueue_head(&dlm->reco.event); | ||
1222 | init_waitqueue_head(&dlm->ast_wq); | ||
1223 | init_waitqueue_head(&dlm->migration_wq); | ||
1224 | INIT_LIST_HEAD(&dlm->master_list); | ||
1225 | INIT_LIST_HEAD(&dlm->mle_hb_events); | ||
1226 | |||
1227 | dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1228 | init_waitqueue_head(&dlm->dlm_join_events); | ||
1229 | |||
1230 | dlm->reco.new_master = O2NM_INVALID_NODE_NUM; | ||
1231 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
1232 | atomic_set(&dlm->local_resources, 0); | ||
1233 | atomic_set(&dlm->remote_resources, 0); | ||
1234 | atomic_set(&dlm->unknown_resources, 0); | ||
1235 | |||
1236 | spin_lock_init(&dlm->work_lock); | ||
1237 | INIT_LIST_HEAD(&dlm->work_list); | ||
1238 | INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work, dlm); | ||
1239 | |||
1240 | kref_init(&dlm->dlm_refs); | ||
1241 | dlm->dlm_state = DLM_CTXT_NEW; | ||
1242 | |||
1243 | INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks); | ||
1244 | |||
1245 | mlog(0, "context init: refcount %u\n", | ||
1246 | atomic_read(&dlm->dlm_refs.refcount)); | ||
1247 | |||
1248 | leave: | ||
1249 | return dlm; | ||
1250 | } | ||
1251 | |||
1252 | /* | ||
1253 | * dlm_register_domain: one-time setup per "domain" | ||
1254 | */ | ||
1255 | struct dlm_ctxt * dlm_register_domain(const char *domain, | ||
1256 | u32 key) | ||
1257 | { | ||
1258 | int ret; | ||
1259 | struct dlm_ctxt *dlm = NULL; | ||
1260 | struct dlm_ctxt *new_ctxt = NULL; | ||
1261 | |||
1262 | if (strlen(domain) > O2NM_MAX_NAME_LEN) { | ||
1263 | ret = -ENAMETOOLONG; | ||
1264 | mlog(ML_ERROR, "domain name length too long\n"); | ||
1265 | goto leave; | ||
1266 | } | ||
1267 | |||
1268 | if (!o2hb_check_local_node_heartbeating()) { | ||
1269 | mlog(ML_ERROR, "the local node has not been configured, or is " | ||
1270 | "not heartbeating\n"); | ||
1271 | ret = -EPROTO; | ||
1272 | goto leave; | ||
1273 | } | ||
1274 | |||
1275 | mlog(0, "register called for domain \"%s\"\n", domain); | ||
1276 | |||
1277 | retry: | ||
1278 | dlm = NULL; | ||
1279 | if (signal_pending(current)) { | ||
1280 | ret = -ERESTARTSYS; | ||
1281 | mlog_errno(ret); | ||
1282 | goto leave; | ||
1283 | } | ||
1284 | |||
1285 | spin_lock(&dlm_domain_lock); | ||
1286 | |||
1287 | dlm = __dlm_lookup_domain(domain); | ||
1288 | if (dlm) { | ||
1289 | if (dlm->dlm_state != DLM_CTXT_JOINED) { | ||
1290 | spin_unlock(&dlm_domain_lock); | ||
1291 | |||
1292 | mlog(0, "This ctxt is not joined yet!\n"); | ||
1293 | wait_event_interruptible(dlm_domain_events, | ||
1294 | dlm_wait_on_domain_helper( | ||
1295 | domain)); | ||
1296 | goto retry; | ||
1297 | } | ||
1298 | |||
1299 | __dlm_get(dlm); | ||
1300 | dlm->num_joins++; | ||
1301 | |||
1302 | spin_unlock(&dlm_domain_lock); | ||
1303 | |||
1304 | ret = 0; | ||
1305 | goto leave; | ||
1306 | } | ||
1307 | |||
1308 | /* doesn't exist */ | ||
1309 | if (!new_ctxt) { | ||
1310 | spin_unlock(&dlm_domain_lock); | ||
1311 | |||
1312 | new_ctxt = dlm_alloc_ctxt(domain, key); | ||
1313 | if (new_ctxt) | ||
1314 | goto retry; | ||
1315 | |||
1316 | ret = -ENOMEM; | ||
1317 | mlog_errno(ret); | ||
1318 | goto leave; | ||
1319 | } | ||
1320 | |||
1321 | /* a little variable switch-a-roo here... */ | ||
1322 | dlm = new_ctxt; | ||
1323 | new_ctxt = NULL; | ||
1324 | |||
1325 | /* add the new domain */ | ||
1326 | list_add_tail(&dlm->list, &dlm_domains); | ||
1327 | spin_unlock(&dlm_domain_lock); | ||
1328 | |||
1329 | ret = dlm_join_domain(dlm); | ||
1330 | if (ret) { | ||
1331 | mlog_errno(ret); | ||
1332 | dlm_put(dlm); | ||
1333 | goto leave; | ||
1334 | } | ||
1335 | |||
1336 | ret = 0; | ||
1337 | leave: | ||
1338 | if (new_ctxt) | ||
1339 | dlm_free_ctxt_mem(new_ctxt); | ||
1340 | |||
1341 | if (ret < 0) | ||
1342 | dlm = ERR_PTR(ret); | ||
1343 | |||
1344 | return dlm; | ||
1345 | } | ||
1346 | EXPORT_SYMBOL_GPL(dlm_register_domain); | ||
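
For orientation, dlm_register_domain() and dlm_unregister_domain() are meant to be paired per consumer; a minimal sketch, where the domain name, key macro, and wrapper names are all made up for illustration:

	/* Hypothetical consumer of the two exported calls above;
	 * "my_domain" and MY_KEY are illustrative values only. */
	static struct dlm_ctxt *my_dlm;

	static int my_attach_dlm(void)
	{
		my_dlm = dlm_register_domain("my_domain", MY_KEY);
		if (IS_ERR(my_dlm))
			return PTR_ERR(my_dlm);
		return 0;
	}

	static void my_detach_dlm(void)
	{
		/* drops one join reference; the last joiner out
		 * triggers lock migration and domain shutdown */
		dlm_unregister_domain(my_dlm);
	}

Multiple callers registering the same name share one dlm_ctxt; num_joins counts them, which is why unregister only tears the domain down when that count reaches zero.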
1347 | |||
1348 | static LIST_HEAD(dlm_join_handlers); | ||
1349 | |||
1350 | static void dlm_unregister_net_handlers(void) | ||
1351 | { | ||
1352 | o2net_unregister_handler_list(&dlm_join_handlers); | ||
1353 | } | ||
1354 | |||
1355 | static int dlm_register_net_handlers(void) | ||
1356 | { | ||
1357 | int status = 0; | ||
1358 | |||
1359 | status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, | ||
1360 | sizeof(struct dlm_query_join_request), | ||
1361 | dlm_query_join_handler, | ||
1362 | NULL, &dlm_join_handlers); | ||
1363 | if (status) | ||
1364 | goto bail; | ||
1365 | |||
1366 | status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY, | ||
1367 | sizeof(struct dlm_assert_joined), | ||
1368 | dlm_assert_joined_handler, | ||
1369 | NULL, &dlm_join_handlers); | ||
1370 | if (status) | ||
1371 | goto bail; | ||
1372 | |||
1373 | status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY, | ||
1374 | sizeof(struct dlm_cancel_join), | ||
1375 | dlm_cancel_join_handler, | ||
1376 | NULL, &dlm_join_handlers); | ||
1377 | |||
1378 | bail: | ||
1379 | if (status < 0) | ||
1380 | dlm_unregister_net_handlers(); | ||
1381 | |||
1382 | return status; | ||
1383 | } | ||
1384 | |||
1385 | /* Domain eviction callback handling. | ||
1386 | * | ||
1387 | * The file system requires notification of node death *before* the | ||
1388 |  * dlm completes its recovery work, otherwise it may be able to | ||
1389 |  * acquire locks on resources requiring recovery. Since the dlm can | ||
1390 |  * evict a node from its domain *before* heartbeat fires, a similar | ||
1391 | * mechanism is required. */ | ||
1392 | |||
1393 | /* Eviction is not expected to happen often, so a per-domain lock is | ||
1394 | * not necessary. Eviction callbacks are allowed to sleep for short | ||
1395 | * periods of time. */ | ||
1396 | static DECLARE_RWSEM(dlm_callback_sem); | ||
1397 | |||
1398 | void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, | ||
1399 | int node_num) | ||
1400 | { | ||
1401 | struct list_head *iter; | ||
1402 | struct dlm_eviction_cb *cb; | ||
1403 | |||
1404 | down_read(&dlm_callback_sem); | ||
1405 | list_for_each(iter, &dlm->dlm_eviction_callbacks) { | ||
1406 | cb = list_entry(iter, struct dlm_eviction_cb, ec_item); | ||
1407 | |||
1408 | cb->ec_func(node_num, cb->ec_data); | ||
1409 | } | ||
1410 | up_read(&dlm_callback_sem); | ||
1411 | } | ||
1412 | |||
1413 | void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb, | ||
1414 | dlm_eviction_func *f, | ||
1415 | void *data) | ||
1416 | { | ||
1417 | INIT_LIST_HEAD(&cb->ec_item); | ||
1418 | cb->ec_func = f; | ||
1419 | cb->ec_data = data; | ||
1420 | } | ||
1421 | EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb); | ||
1422 | |||
1423 | void dlm_register_eviction_cb(struct dlm_ctxt *dlm, | ||
1424 | struct dlm_eviction_cb *cb) | ||
1425 | { | ||
1426 | down_write(&dlm_callback_sem); | ||
1427 | list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks); | ||
1428 | up_write(&dlm_callback_sem); | ||
1429 | } | ||
1430 | EXPORT_SYMBOL_GPL(dlm_register_eviction_cb); | ||
1431 | |||
1432 | void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb) | ||
1433 | { | ||
1434 | down_write(&dlm_callback_sem); | ||
1435 | list_del_init(&cb->ec_item); | ||
1436 | up_write(&dlm_callback_sem); | ||
1437 | } | ||
1438 | EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb); | ||
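
The three exported helpers above compose as in this minimal sketch; the callback body and all "my_*" names are invented, and the callback signature (evicted node number plus the opaque data pointer) follows dlm_fire_domain_eviction_callbacks() above:

	static void my_evict_cb(int node_num, void *data)
	{
		/* runs under dlm_callback_sem; may sleep briefly,
		 * per the comment above, but should stay short */
		printk(KERN_INFO "node %d evicted from our domain\n",
		       node_num);
	}

	static struct dlm_eviction_cb my_eviction_cb;

	static void my_watch_domain(struct dlm_ctxt *dlm)
	{
		dlm_setup_eviction_cb(&my_eviction_cb, my_evict_cb, NULL);
		dlm_register_eviction_cb(dlm, &my_eviction_cb);
	}

	/* ...and before dropping the domain: */
	static void my_unwatch_domain(void)
	{
		dlm_unregister_eviction_cb(&my_eviction_cb);
	}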
1439 | |||
1440 | static int __init dlm_init(void) | ||
1441 | { | ||
1442 | int status; | ||
1443 | |||
1444 | dlm_print_version(); | ||
1445 | |||
1446 | status = dlm_init_mle_cache(); | ||
1447 | if (status) | ||
1448 | return -1; | ||
1449 | |||
1450 | status = dlm_register_net_handlers(); | ||
1451 | if (status) { | ||
1452 | dlm_destroy_mle_cache(); | ||
1453 | return -1; | ||
1454 | } | ||
1455 | |||
1456 | return 0; | ||
1457 | } | ||
1458 | |||
1459 | static void __exit dlm_exit (void) | ||
1460 | { | ||
1461 | dlm_unregister_net_handlers(); | ||
1462 | dlm_destroy_mle_cache(); | ||
1463 | } | ||
1464 | |||
1465 | MODULE_AUTHOR("Oracle"); | ||
1466 | MODULE_LICENSE("GPL"); | ||
1467 | |||
1468 | module_init(dlm_init); | ||
1469 | module_exit(dlm_exit); | ||
diff --git a/fs/ocfs2/dlm/dlmdomain.h b/fs/ocfs2/dlm/dlmdomain.h new file mode 100644 index 000000000000..2f7f60bfeb3b --- /dev/null +++ b/fs/ocfs2/dlm/dlmdomain.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmdomain.h | ||
5 | * | ||
6 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License as published by the Free Software Foundation; either | ||
11 | * version 2 of the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public | ||
19 | * License along with this program; if not, write to the | ||
20 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 021110-1307, USA. | ||
22 | * | ||
23 | */ | ||
24 | |||
25 | #ifndef DLMDOMAIN_H | ||
26 | #define DLMDOMAIN_H | ||
27 | |||
28 | extern spinlock_t dlm_domain_lock; | ||
29 | extern struct list_head dlm_domains; | ||
30 | |||
31 | int dlm_joined(struct dlm_ctxt *dlm); | ||
32 | int dlm_shutting_down(struct dlm_ctxt *dlm); | ||
33 | void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm, | ||
34 | int node_num); | ||
35 | |||
36 | #endif | ||
diff --git a/fs/ocfs2/dlm/dlmfs.c b/fs/ocfs2/dlm/dlmfs.c new file mode 100644 index 000000000000..dd2d24dc25e0 --- /dev/null +++ b/fs/ocfs2/dlm/dlmfs.c | |||
@@ -0,0 +1,640 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmfs.c | ||
5 | * | ||
6 | * Code which implements the kernel side of a minimal userspace | ||
7 | * interface to our DLM. This file handles the virtual file system | ||
8 | * used for communication with userspace. Credit should go to ramfs, | ||
9 | * which was a template for the fs side of this module. | ||
10 | * | ||
11 | * Copyright (C) 2003, 2004 Oracle. All rights reserved. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public | ||
15 | * License as published by the Free Software Foundation; either | ||
16 | * version 2 of the License, or (at your option) any later version. | ||
17 | * | ||
18 | * This program is distributed in the hope that it will be useful, | ||
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
21 | * General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public | ||
24 | * License along with this program; if not, write to the | ||
25 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
26 | * Boston, MA 021110-1307, USA. | ||
27 | */ | ||
28 | |||
29 | /* Simple VFS hooks based on: */ | ||
30 | /* | ||
31 | * Resizable simple ram filesystem for Linux. | ||
32 | * | ||
33 | * Copyright (C) 2000 Linus Torvalds. | ||
34 | * 2000 Transmeta Corp. | ||
35 | */ | ||
36 | |||
37 | #include <linux/module.h> | ||
38 | #include <linux/fs.h> | ||
39 | #include <linux/pagemap.h> | ||
40 | #include <linux/types.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/highmem.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/string.h> | ||
45 | #include <linux/smp_lock.h> | ||
46 | #include <linux/backing-dev.h> | ||
47 | |||
48 | #include <asm/uaccess.h> | ||
49 | |||
50 | |||
51 | #include "cluster/nodemanager.h" | ||
52 | #include "cluster/heartbeat.h" | ||
53 | #include "cluster/tcp.h" | ||
54 | |||
55 | #include "dlmapi.h" | ||
56 | |||
57 | #include "userdlm.h" | ||
58 | |||
59 | #include "dlmfsver.h" | ||
60 | |||
61 | #define MLOG_MASK_PREFIX ML_DLMFS | ||
62 | #include "cluster/masklog.h" | ||
63 | |||
64 | static struct super_operations dlmfs_ops; | ||
65 | static struct file_operations dlmfs_file_operations; | ||
66 | static struct inode_operations dlmfs_dir_inode_operations; | ||
67 | static struct inode_operations dlmfs_root_inode_operations; | ||
68 | static struct inode_operations dlmfs_file_inode_operations; | ||
69 | static kmem_cache_t *dlmfs_inode_cache; | ||
70 | |||
71 | struct workqueue_struct *user_dlm_worker; | ||
72 | |||
73 | /* | ||
74 | * decodes a set of open flags into a valid lock level and a set of flags. | ||
75 | * returns < 0 if we have invalid flags | ||
76 | * flags which mean something to us: | ||
77 | * O_RDONLY -> PRMODE level | ||
78 | * O_WRONLY -> EXMODE level | ||
79 | * | ||
80 | * O_NONBLOCK -> LKM_NOQUEUE | ||
81 | */ | ||
82 | static int dlmfs_decode_open_flags(int open_flags, | ||
83 | int *level, | ||
84 | int *flags) | ||
85 | { | ||
86 | if (open_flags & (O_WRONLY|O_RDWR)) | ||
87 | *level = LKM_EXMODE; | ||
88 | else | ||
89 | *level = LKM_PRMODE; | ||
90 | |||
91 | *flags = 0; | ||
92 | if (open_flags & O_NONBLOCK) | ||
93 | *flags |= LKM_NOQUEUE; | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static int dlmfs_file_open(struct inode *inode, | ||
99 | struct file *file) | ||
100 | { | ||
101 | int status, level, flags; | ||
102 | struct dlmfs_filp_private *fp = NULL; | ||
103 | struct dlmfs_inode_private *ip; | ||
104 | |||
105 | if (S_ISDIR(inode->i_mode)) | ||
106 | BUG(); | ||
107 | |||
108 | mlog(0, "open called on inode %lu, flags 0x%x\n", inode->i_ino, | ||
109 | file->f_flags); | ||
110 | |||
111 | status = dlmfs_decode_open_flags(file->f_flags, &level, &flags); | ||
112 | if (status < 0) | ||
113 | goto bail; | ||
114 | |||
115 | /* We don't want to honor O_APPEND at read/write time as it | ||
116 | * doesn't make sense for LVB writes. */ | ||
117 | file->f_flags &= ~O_APPEND; | ||
118 | |||
119 | fp = kmalloc(sizeof(*fp), GFP_KERNEL); | ||
120 | if (!fp) { | ||
121 | status = -ENOMEM; | ||
122 | goto bail; | ||
123 | } | ||
124 | fp->fp_lock_level = level; | ||
125 | |||
126 | ip = DLMFS_I(inode); | ||
127 | |||
128 | status = user_dlm_cluster_lock(&ip->ip_lockres, level, flags); | ||
129 | if (status < 0) { | ||
130 | /* this is a strange error to return here, but I want | ||
131 |  * userspace to be able to distinguish a | ||
132 |  * valid lock request from one that simply couldn't be | ||
133 |  * granted. */ | ||
134 | if (flags & LKM_NOQUEUE && status == -EAGAIN) | ||
135 | status = -ETXTBSY; | ||
136 | kfree(fp); | ||
137 | goto bail; | ||
138 | } | ||
139 | |||
140 | file->private_data = fp; | ||
141 | bail: | ||
142 | return status; | ||
143 | } | ||
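
Seen from userspace, the flag decoding plus the -EAGAIN-to-ETXTBSY mapping above behave roughly as in this sketch; the mount point and lock name are made up, and it assumes dlmfs is already mounted with a domain directory created:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* O_RDONLY -> shared (PR) lock, granted at open() */
		int fd = open("/dlm/mydomain/lock1",
			      O_RDONLY | O_CREAT, 0644);
		if (fd < 0)
			return 1;
		close(fd);	/* close drops the cluster lock */

		/* O_WRONLY|O_NONBLOCK -> exclusive (EX) trylock; a
		 * busy lock surfaces as ETXTBSY, per the code above */
		fd = open("/dlm/mydomain/lock1", O_WRONLY | O_NONBLOCK);
		if (fd < 0 && errno == ETXTBSY)
			printf("lock is busy\n");
		else if (fd >= 0)
			close(fd);
		return 0;
	}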
144 | |||
145 | static int dlmfs_file_release(struct inode *inode, | ||
146 | struct file *file) | ||
147 | { | ||
148 | int level, status; | ||
149 | struct dlmfs_inode_private *ip = DLMFS_I(inode); | ||
150 | struct dlmfs_filp_private *fp = | ||
151 | (struct dlmfs_filp_private *) file->private_data; | ||
152 | |||
153 | if (S_ISDIR(inode->i_mode)) | ||
154 | BUG(); | ||
155 | |||
156 | mlog(0, "close called on inode %lu\n", inode->i_ino); | ||
157 | |||
158 | status = 0; | ||
159 | if (fp) { | ||
160 | level = fp->fp_lock_level; | ||
161 | if (level != LKM_IVMODE) | ||
162 | user_dlm_cluster_unlock(&ip->ip_lockres, level); | ||
163 | |||
164 | kfree(fp); | ||
165 | file->private_data = NULL; | ||
166 | } | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static ssize_t dlmfs_file_read(struct file *filp, | ||
172 | char __user *buf, | ||
173 | size_t count, | ||
174 | loff_t *ppos) | ||
175 | { | ||
176 | int bytes_left; | ||
177 | ssize_t readlen; | ||
178 | char *lvb_buf; | ||
179 | struct inode *inode = filp->f_dentry->d_inode; | ||
180 | |||
181 | mlog(0, "inode %lu, count = %zu, *ppos = %llu\n", | ||
182 | inode->i_ino, count, *ppos); | ||
183 | |||
184 | if (*ppos >= i_size_read(inode)) | ||
185 | return 0; | ||
186 | |||
187 | if (!count) | ||
188 | return 0; | ||
189 | |||
190 | if (!access_ok(VERIFY_WRITE, buf, count)) | ||
191 | return -EFAULT; | ||
192 | |||
193 | /* don't read past the lvb */ | ||
194 | if ((count + *ppos) > i_size_read(inode)) | ||
195 | readlen = i_size_read(inode) - *ppos; | ||
196 | else | ||
197 | readlen = count; | ||
198 | |||
199 | lvb_buf = kmalloc(readlen, GFP_KERNEL); | ||
200 | if (!lvb_buf) | ||
201 | return -ENOMEM; | ||
202 | |||
203 | user_dlm_read_lvb(inode, lvb_buf, readlen); | ||
204 | bytes_left = __copy_to_user(buf, lvb_buf, readlen); | ||
205 | readlen -= bytes_left; | ||
206 | |||
207 | kfree(lvb_buf); | ||
208 | |||
209 | *ppos = *ppos + readlen; | ||
210 | |||
211 | mlog(0, "read %zd bytes\n", readlen); | ||
212 | return readlen; | ||
213 | } | ||
214 | |||
215 | static ssize_t dlmfs_file_write(struct file *filp, | ||
216 | const char __user *buf, | ||
217 | size_t count, | ||
218 | loff_t *ppos) | ||
219 | { | ||
220 | int bytes_left; | ||
221 | ssize_t writelen; | ||
222 | char *lvb_buf; | ||
223 | struct inode *inode = filp->f_dentry->d_inode; | ||
224 | |||
225 | mlog(0, "inode %lu, count = %zu, *ppos = %llu\n", | ||
226 | inode->i_ino, count, *ppos); | ||
227 | |||
228 | if (*ppos >= i_size_read(inode)) | ||
229 | return -ENOSPC; | ||
230 | |||
231 | if (!count) | ||
232 | return 0; | ||
233 | |||
234 | if (!access_ok(VERIFY_READ, buf, count)) | ||
235 | return -EFAULT; | ||
236 | |||
237 | /* don't write past the lvb */ | ||
238 | if ((count + *ppos) > i_size_read(inode)) | ||
239 | writelen = i_size_read(inode) - *ppos; | ||
240 | else | ||
241 | writelen = count; | ||
242 | |||
243 | lvb_buf = kmalloc(writelen, GFP_KERNEL); | ||
244 | if (!lvb_buf) | ||
245 | return -ENOMEM; | ||
246 | |||
247 | bytes_left = copy_from_user(lvb_buf, buf, writelen); | ||
248 | writelen -= bytes_left; | ||
249 | if (writelen) | ||
250 | user_dlm_write_lvb(inode, lvb_buf, writelen); | ||
251 | |||
252 | kfree(lvb_buf); | ||
253 | |||
254 | *ppos = *ppos + writelen; | ||
255 | mlog(0, "wrote %zd bytes\n", writelen); | ||
256 | return writelen; | ||
257 | } | ||
258 | |||
259 | static void dlmfs_init_once(void *foo, | ||
260 | kmem_cache_t *cachep, | ||
261 | unsigned long flags) | ||
262 | { | ||
263 | struct dlmfs_inode_private *ip = | ||
264 | (struct dlmfs_inode_private *) foo; | ||
265 | |||
266 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | ||
267 | SLAB_CTOR_CONSTRUCTOR) { | ||
268 | ip->ip_dlm = NULL; | ||
269 | ip->ip_parent = NULL; | ||
270 | |||
271 | inode_init_once(&ip->ip_vfs_inode); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static struct inode *dlmfs_alloc_inode(struct super_block *sb) | ||
276 | { | ||
277 | struct dlmfs_inode_private *ip; | ||
278 | |||
279 | ip = kmem_cache_alloc(dlmfs_inode_cache, SLAB_NOFS); | ||
280 | if (!ip) | ||
281 | return NULL; | ||
282 | |||
283 | return &ip->ip_vfs_inode; | ||
284 | } | ||
285 | |||
286 | static void dlmfs_destroy_inode(struct inode *inode) | ||
287 | { | ||
288 | kmem_cache_free(dlmfs_inode_cache, DLMFS_I(inode)); | ||
289 | } | ||
290 | |||
291 | static void dlmfs_clear_inode(struct inode *inode) | ||
292 | { | ||
293 | int status; | ||
294 | struct dlmfs_inode_private *ip; | ||
295 | |||
296 | if (!inode) | ||
297 | return; | ||
298 | |||
299 | mlog(0, "inode %lu\n", inode->i_ino); | ||
300 | |||
301 | ip = DLMFS_I(inode); | ||
302 | |||
303 | if (S_ISREG(inode->i_mode)) { | ||
304 | status = user_dlm_destroy_lock(&ip->ip_lockres); | ||
305 | if (status < 0) | ||
306 | mlog_errno(status); | ||
307 | iput(ip->ip_parent); | ||
308 | goto clear_fields; | ||
309 | } | ||
310 | |||
311 | mlog(0, "we're a directory, ip->ip_dlm = 0x%p\n", ip->ip_dlm); | ||
312 | /* we must be a directory. If required, let's unregister the | ||
313 | * dlm context now. */ | ||
314 | if (ip->ip_dlm) | ||
315 | user_dlm_unregister_context(ip->ip_dlm); | ||
316 | clear_fields: | ||
317 | ip->ip_parent = NULL; | ||
318 | ip->ip_dlm = NULL; | ||
319 | } | ||
320 | |||
321 | static struct backing_dev_info dlmfs_backing_dev_info = { | ||
322 | .ra_pages = 0, /* No readahead */ | ||
323 | .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, | ||
324 | }; | ||
325 | |||
326 | static struct inode *dlmfs_get_root_inode(struct super_block *sb) | ||
327 | { | ||
328 | struct inode *inode = new_inode(sb); | ||
329 | int mode = S_IFDIR | 0755; | ||
330 | struct dlmfs_inode_private *ip; | ||
331 | |||
332 | if (inode) { | ||
333 | ip = DLMFS_I(inode); | ||
334 | |||
335 | inode->i_mode = mode; | ||
336 | inode->i_uid = current->fsuid; | ||
337 | inode->i_gid = current->fsgid; | ||
338 | inode->i_blksize = PAGE_CACHE_SIZE; | ||
339 | inode->i_blocks = 0; | ||
340 | inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; | ||
341 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
342 | inode->i_nlink++; | ||
343 | |||
344 | inode->i_fop = &simple_dir_operations; | ||
345 | inode->i_op = &dlmfs_root_inode_operations; | ||
346 | } | ||
347 | |||
348 | return inode; | ||
349 | } | ||
350 | |||
351 | static struct inode *dlmfs_get_inode(struct inode *parent, | ||
352 | struct dentry *dentry, | ||
353 | int mode) | ||
354 | { | ||
355 | struct super_block *sb = parent->i_sb; | ||
356 | struct inode * inode = new_inode(sb); | ||
357 | struct dlmfs_inode_private *ip; | ||
358 | |||
359 | if (!inode) | ||
360 | return NULL; | ||
361 | |||
362 | inode->i_mode = mode; | ||
363 | inode->i_uid = current->fsuid; | ||
364 | inode->i_gid = current->fsgid; | ||
365 | inode->i_blksize = PAGE_CACHE_SIZE; | ||
366 | inode->i_blocks = 0; | ||
367 | inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; | ||
368 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
369 | |||
370 | ip = DLMFS_I(inode); | ||
371 | ip->ip_dlm = DLMFS_I(parent)->ip_dlm; | ||
372 | |||
373 | switch (mode & S_IFMT) { | ||
374 | default: | ||
375 | /* for now we don't support anything other than | ||
376 | * directories and regular files. */ | ||
377 | BUG(); | ||
378 | break; | ||
379 | case S_IFREG: | ||
380 | inode->i_op = &dlmfs_file_inode_operations; | ||
381 | inode->i_fop = &dlmfs_file_operations; | ||
382 | |||
383 | i_size_write(inode, DLM_LVB_LEN); | ||
384 | |||
385 | user_dlm_lock_res_init(&ip->ip_lockres, dentry); | ||
386 | |||
387 | /* released at clear_inode time, this ensures that we | ||
388 | * get to drop the dlm reference on each lock *before* | ||
389 | * we call the unregister code for releasing parent | ||
390 | * directories. */ | ||
391 | ip->ip_parent = igrab(parent); | ||
392 | BUG_ON(!ip->ip_parent); | ||
393 | break; | ||
394 | case S_IFDIR: | ||
395 | inode->i_op = &dlmfs_dir_inode_operations; | ||
396 | inode->i_fop = &simple_dir_operations; | ||
397 | |||
398 | /* directory inodes start off with i_nlink == | ||
399 | * 2 (for "." entry) */ | ||
400 | inode->i_nlink++; | ||
401 | break; | ||
402 | } | ||
403 | |||
404 | if (parent->i_mode & S_ISGID) { | ||
405 | inode->i_gid = parent->i_gid; | ||
406 | if (S_ISDIR(mode)) | ||
407 | inode->i_mode |= S_ISGID; | ||
408 | } | ||
409 | |||
410 | return inode; | ||
411 | } | ||
412 | |||
413 | /* | ||
414 |  * File creation. Allocate an inode, and we're done. | ||
415 | */ | ||
416 | /* SMP-safe */ | ||
417 | static int dlmfs_mkdir(struct inode * dir, | ||
418 | struct dentry * dentry, | ||
419 | int mode) | ||
420 | { | ||
421 | int status; | ||
422 | struct inode *inode = NULL; | ||
423 | struct qstr *domain = &dentry->d_name; | ||
424 | struct dlmfs_inode_private *ip; | ||
425 | struct dlm_ctxt *dlm; | ||
426 | |||
427 | mlog(0, "mkdir %.*s\n", domain->len, domain->name); | ||
428 | |||
429 | /* verify that we have a proper domain */ | ||
430 | if (domain->len >= O2NM_MAX_NAME_LEN) { | ||
431 | status = -EINVAL; | ||
432 | mlog(ML_ERROR, "invalid domain name for directory.\n"); | ||
433 | goto bail; | ||
434 | } | ||
435 | |||
436 | inode = dlmfs_get_inode(dir, dentry, mode | S_IFDIR); | ||
437 | if (!inode) { | ||
438 | status = -ENOMEM; | ||
439 | mlog_errno(status); | ||
440 | goto bail; | ||
441 | } | ||
442 | |||
443 | ip = DLMFS_I(inode); | ||
444 | |||
445 | dlm = user_dlm_register_context(domain); | ||
446 | if (IS_ERR(dlm)) { | ||
447 | status = PTR_ERR(dlm); | ||
448 | mlog(ML_ERROR, "Error %d could not register domain \"%.*s\"\n", | ||
449 | status, domain->len, domain->name); | ||
450 | goto bail; | ||
451 | } | ||
452 | ip->ip_dlm = dlm; | ||
453 | |||
454 | dir->i_nlink++; | ||
455 | d_instantiate(dentry, inode); | ||
456 | dget(dentry); /* Extra count - pin the dentry in core */ | ||
457 | |||
458 | status = 0; | ||
459 | bail: | ||
460 | if (status < 0) | ||
461 | iput(inode); | ||
462 | return status; | ||
463 | } | ||
464 | |||
465 | static int dlmfs_create(struct inode *dir, | ||
466 | struct dentry *dentry, | ||
467 | int mode, | ||
468 | struct nameidata *nd) | ||
469 | { | ||
470 | int status = 0; | ||
471 | struct inode *inode; | ||
472 | struct qstr *name = &dentry->d_name; | ||
473 | |||
474 | mlog(0, "create %.*s\n", name->len, name->name); | ||
475 | |||
476 | /* verify name is valid and doesn't contain any dlm reserved | ||
477 | * characters */ | ||
478 | if (name->len >= USER_DLM_LOCK_ID_MAX_LEN || | ||
479 | name->name[0] == '$') { | ||
480 | status = -EINVAL; | ||
481 | mlog(ML_ERROR, "invalid lock name, %.*s\n", name->len, | ||
482 | name->name); | ||
483 | goto bail; | ||
484 | } | ||
485 | |||
486 | inode = dlmfs_get_inode(dir, dentry, mode | S_IFREG); | ||
487 | if (!inode) { | ||
488 | status = -ENOMEM; | ||
489 | mlog_errno(status); | ||
490 | goto bail; | ||
491 | } | ||
492 | |||
493 | d_instantiate(dentry, inode); | ||
494 | dget(dentry); /* Extra count - pin the dentry in core */ | ||
495 | bail: | ||
496 | return status; | ||
497 | } | ||
498 | |||
499 | static int dlmfs_unlink(struct inode *dir, | ||
500 | struct dentry *dentry) | ||
501 | { | ||
502 | int status; | ||
503 | struct inode *inode = dentry->d_inode; | ||
504 | |||
505 | mlog(0, "unlink inode %lu\n", inode->i_ino); | ||
506 | |||
507 | /* if there are no current holders, and none that are waiting | ||
508 | * to acquire a lock, this basically destroys our lockres. */ | ||
509 | status = user_dlm_destroy_lock(&DLMFS_I(inode)->ip_lockres); | ||
510 | if (status < 0) { | ||
511 | mlog(ML_ERROR, "unlink %.*s, error %d from destroy\n", | ||
512 | dentry->d_name.len, dentry->d_name.name, status); | ||
513 | goto bail; | ||
514 | } | ||
515 | status = simple_unlink(dir, dentry); | ||
516 | bail: | ||
517 | return status; | ||
518 | } | ||
519 | |||
520 | static int dlmfs_fill_super(struct super_block * sb, | ||
521 | void * data, | ||
522 | int silent) | ||
523 | { | ||
524 | struct inode * inode; | ||
525 | struct dentry * root; | ||
526 | |||
527 | sb->s_maxbytes = MAX_LFS_FILESIZE; | ||
528 | sb->s_blocksize = PAGE_CACHE_SIZE; | ||
529 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | ||
530 | sb->s_magic = DLMFS_MAGIC; | ||
531 | sb->s_op = &dlmfs_ops; | ||
532 | inode = dlmfs_get_root_inode(sb); | ||
533 | if (!inode) | ||
534 | return -ENOMEM; | ||
535 | |||
536 | root = d_alloc_root(inode); | ||
537 | if (!root) { | ||
538 | iput(inode); | ||
539 | return -ENOMEM; | ||
540 | } | ||
541 | sb->s_root = root; | ||
542 | return 0; | ||
543 | } | ||
544 | |||
545 | static struct file_operations dlmfs_file_operations = { | ||
546 | .open = dlmfs_file_open, | ||
547 | .release = dlmfs_file_release, | ||
548 | .read = dlmfs_file_read, | ||
549 | .write = dlmfs_file_write, | ||
550 | }; | ||
551 | |||
552 | static struct inode_operations dlmfs_dir_inode_operations = { | ||
553 | .create = dlmfs_create, | ||
554 | .lookup = simple_lookup, | ||
555 | .unlink = dlmfs_unlink, | ||
556 | }; | ||
557 | |||
558 | /* this way we can restrict mkdir to only the toplevel of the fs. */ | ||
559 | static struct inode_operations dlmfs_root_inode_operations = { | ||
560 | .lookup = simple_lookup, | ||
561 | .mkdir = dlmfs_mkdir, | ||
562 | .rmdir = simple_rmdir, | ||
563 | }; | ||
564 | |||
565 | static struct super_operations dlmfs_ops = { | ||
566 | .statfs = simple_statfs, | ||
567 | .alloc_inode = dlmfs_alloc_inode, | ||
568 | .destroy_inode = dlmfs_destroy_inode, | ||
569 | .clear_inode = dlmfs_clear_inode, | ||
570 | .drop_inode = generic_delete_inode, | ||
571 | }; | ||
572 | |||
573 | static struct inode_operations dlmfs_file_inode_operations = { | ||
574 | .getattr = simple_getattr, | ||
575 | }; | ||
576 | |||
577 | static struct super_block *dlmfs_get_sb(struct file_system_type *fs_type, | ||
578 | int flags, const char *dev_name, void *data) | ||
579 | { | ||
580 | return get_sb_nodev(fs_type, flags, data, dlmfs_fill_super); | ||
581 | } | ||
582 | |||
583 | static struct file_system_type dlmfs_fs_type = { | ||
584 | .owner = THIS_MODULE, | ||
585 | .name = "ocfs2_dlmfs", | ||
586 | .get_sb = dlmfs_get_sb, | ||
587 | .kill_sb = kill_litter_super, | ||
588 | }; | ||
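
With the type registered as "ocfs2_dlmfs", bringing the interface up from userspace might look like this (a sketch; the mount point and domain name are invented):

	#include <sys/mount.h>
	#include <sys/stat.h>

	int setup_dlmfs(void)
	{
		/* equivalent to: mount -t ocfs2_dlmfs none /dlm */
		if (mount("none", "/dlm", "ocfs2_dlmfs", 0, NULL))
			return -1;

		/* each top-level directory is a dlm domain, per
		 * dlmfs_mkdir() above */
		return mkdir("/dlm/mydomain", 0755);
	}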
589 | |||
590 | static int __init init_dlmfs_fs(void) | ||
591 | { | ||
592 | int status; | ||
593 | int cleanup_inode = 0, cleanup_worker = 0; | ||
594 | |||
595 | dlmfs_print_version(); | ||
596 | |||
597 | dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache", | ||
598 | sizeof(struct dlmfs_inode_private), | ||
599 | 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, | ||
600 | dlmfs_init_once, NULL); | ||
601 | if (!dlmfs_inode_cache) | ||
602 | return -ENOMEM; | ||
603 | cleanup_inode = 1; | ||
604 | |||
605 | user_dlm_worker = create_singlethread_workqueue("user_dlm"); | ||
606 | if (!user_dlm_worker) { | ||
607 | status = -ENOMEM; | ||
608 | goto bail; | ||
609 | } | ||
610 | cleanup_worker = 1; | ||
611 | |||
612 | status = register_filesystem(&dlmfs_fs_type); | ||
613 | bail: | ||
614 | if (status) { | ||
615 | if (cleanup_inode) | ||
616 | kmem_cache_destroy(dlmfs_inode_cache); | ||
617 | if (cleanup_worker) | ||
618 | destroy_workqueue(user_dlm_worker); | ||
619 | } else | ||
620 | printk("OCFS2 User DLM kernel interface loaded\n"); | ||
621 | return status; | ||
622 | } | ||
623 | |||
624 | static void __exit exit_dlmfs_fs(void) | ||
625 | { | ||
626 | unregister_filesystem(&dlmfs_fs_type); | ||
627 | |||
628 | flush_workqueue(user_dlm_worker); | ||
629 | destroy_workqueue(user_dlm_worker); | ||
630 | |||
631 | if (kmem_cache_destroy(dlmfs_inode_cache)) | ||
632 | printk(KERN_INFO "dlmfs_inode_cache: not all structures " | ||
633 | "were freed\n"); | ||
634 | } | ||
635 | |||
636 | MODULE_AUTHOR("Oracle"); | ||
637 | MODULE_LICENSE("GPL"); | ||
638 | |||
639 | module_init(init_dlmfs_fs) | ||
640 | module_exit(exit_dlmfs_fs) | ||
diff --git a/fs/ocfs2/dlm/dlmfsver.c b/fs/ocfs2/dlm/dlmfsver.c new file mode 100644 index 000000000000..d2be3ad841f9 --- /dev/null +++ b/fs/ocfs2/dlm/dlmfsver.c | |||
@@ -0,0 +1,42 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmfsver.c | ||
5 | * | ||
6 | * version string | ||
7 | * | ||
8 | * Copyright (C) 2002, 2005 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/kernel.h> | ||
28 | |||
29 | #include "dlmfsver.h" | ||
30 | |||
31 | #define DLM_BUILD_VERSION "1.3.3" | ||
32 | |||
33 | #define VERSION_STR "OCFS2 DLMFS " DLM_BUILD_VERSION | ||
34 | |||
35 | void dlmfs_print_version(void) | ||
36 | { | ||
37 | printk(KERN_INFO "%s\n", VERSION_STR); | ||
38 | } | ||
39 | |||
40 | MODULE_DESCRIPTION(VERSION_STR); | ||
41 | |||
42 | MODULE_VERSION(DLM_BUILD_VERSION); | ||
diff --git a/fs/ocfs2/dlm/dlmfsver.h b/fs/ocfs2/dlm/dlmfsver.h new file mode 100644 index 000000000000..f35eadbed25c --- /dev/null +++ b/fs/ocfs2/dlm/dlmfsver.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 |  * dlmfsver.h | ||
5 | * | ||
6 | * Function prototypes | ||
7 | * | ||
8 | * Copyright (C) 2005 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #ifndef DLMFS_VER_H | ||
27 | #define DLMFS_VER_H | ||
28 | |||
29 | void dlmfs_print_version(void); | ||
30 | |||
31 | #endif /* DLMFS_VER_H */ | ||
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c new file mode 100644 index 000000000000..d1a0038557a3 --- /dev/null +++ b/fs/ocfs2/dlm/dlmlock.c | |||
@@ -0,0 +1,676 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmlock.c | ||
5 | * | ||
6 | * underlying calls for lock creation | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | |||
51 | #include "dlmconvert.h" | ||
52 | |||
53 | #define MLOG_MASK_PREFIX ML_DLM | ||
54 | #include "cluster/masklog.h" | ||
55 | |||
56 | static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED; | ||
57 | static u64 dlm_next_cookie = 1; | ||
58 | |||
59 | static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | ||
60 | struct dlm_lock_resource *res, | ||
61 | struct dlm_lock *lock, int flags); | ||
62 | static void dlm_init_lock(struct dlm_lock *newlock, int type, | ||
63 | u8 node, u64 cookie); | ||
64 | static void dlm_lock_release(struct kref *kref); | ||
65 | static void dlm_lock_detach_lockres(struct dlm_lock *lock); | ||
66 | |||
67 | /* Tell us whether we can grant a new lock request. | ||
68 | * locking: | ||
69 | * caller needs: res->spinlock | ||
70 | * taken: none | ||
71 | * held on exit: none | ||
72 | * returns: 1 if the lock can be granted, 0 otherwise. | ||
73 | */ | ||
74 | static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, | ||
75 | struct dlm_lock *lock) | ||
76 | { | ||
77 | struct list_head *iter; | ||
78 | struct dlm_lock *tmplock; | ||
79 | |||
80 | list_for_each(iter, &res->granted) { | ||
81 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
82 | |||
83 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | list_for_each(iter, &res->converting) { | ||
88 | tmplock = list_entry(iter, struct dlm_lock, list); | ||
89 | |||
90 | if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | return 1; | ||
95 | } | ||
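
dlm_lock_compatible() is declared in dlmcommon.h and is not shown in this patch hunk; below is a minimal sketch of the mode-compatibility test it presumably performs, assuming the usual NL/PR/EX semantics that dlmlock() later in this file enforces:

	/* sketch only -- assumed semantics of the dlmcommon.h helper,
	 * not copied from the actual source */
	static inline int dlm_lock_compatible(int existing, int request)
	{
		/* NO_LOCK is compatible with everything */
		if (request == LKM_NLMODE || existing == LKM_NLMODE)
			return 1;

		/* EX conflicts with any other granted or converting lock */
		if (request == LKM_EXMODE || existing == LKM_EXMODE)
			return 0;

		/* the only remaining pairing is PR/PR, which is shared */
		return (request == LKM_PRMODE && existing == LKM_PRMODE);
	}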
96 | |||
97 | /* performs lock creation at the lockres master site | ||
98 | * locking: | ||
99 | * caller needs: none | ||
100 | * taken: takes and drops res->spinlock | ||
101 | * held on exit: none | ||
102 | * returns: DLM_NORMAL, DLM_NOTQUEUED | ||
103 | */ | ||
104 | static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, | ||
105 | struct dlm_lock_resource *res, | ||
106 | struct dlm_lock *lock, int flags) | ||
107 | { | ||
108 | int call_ast = 0, kick_thread = 0; | ||
109 | enum dlm_status status = DLM_NORMAL; | ||
110 | |||
111 | mlog_entry("type=%d\n", lock->ml.type); | ||
112 | |||
113 | spin_lock(&res->spinlock); | ||
114 | /* if called from dlm_create_lock_handler, need to | ||
115 | * ensure it will not sleep in dlm_wait_on_lockres */ | ||
116 | status = __dlm_lockres_state_to_status(res); | ||
117 | if (status != DLM_NORMAL && | ||
118 | lock->ml.node != dlm->node_num) { | ||
119 | /* erf. state changed after lock was dropped. */ | ||
120 | spin_unlock(&res->spinlock); | ||
121 | dlm_error(status); | ||
122 | return status; | ||
123 | } | ||
124 | __dlm_wait_on_lockres(res); | ||
125 | __dlm_lockres_reserve_ast(res); | ||
126 | |||
127 | if (dlm_can_grant_new_lock(res, lock)) { | ||
128 | mlog(0, "I can grant this lock right away\n"); | ||
129 | /* got it right away */ | ||
130 | lock->lksb->status = DLM_NORMAL; | ||
131 | status = DLM_NORMAL; | ||
132 | dlm_lock_get(lock); | ||
133 | list_add_tail(&lock->list, &res->granted); | ||
134 | |||
135 | /* for the recovery lock, we can't allow the ast | ||
136 | * to be queued since the dlmthread is already | ||
137 | * frozen. but the recovery lock is always locked | ||
138 | * with LKM_NOQUEUE so we do not need the ast in | ||
139 | * this special case */ | ||
140 | if (!dlm_is_recovery_lock(res->lockname.name, | ||
141 | res->lockname.len)) { | ||
142 | kick_thread = 1; | ||
143 | call_ast = 1; | ||
144 | } | ||
145 | } else { | ||
146 | /* for NOQUEUE request, unless we get the | ||
147 | * lock right away, return DLM_NOTQUEUED */ | ||
148 | if (flags & LKM_NOQUEUE) | ||
149 | status = DLM_NOTQUEUED; | ||
150 | else { | ||
151 | dlm_lock_get(lock); | ||
152 | list_add_tail(&lock->list, &res->blocked); | ||
153 | kick_thread = 1; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | spin_unlock(&res->spinlock); | ||
158 | wake_up(&res->wq); | ||
159 | |||
160 | /* either queue the ast or release it */ | ||
161 | if (call_ast) | ||
162 | dlm_queue_ast(dlm, lock); | ||
163 | else | ||
164 | dlm_lockres_release_ast(dlm, res); | ||
165 | |||
166 | dlm_lockres_calc_usage(dlm, res); | ||
167 | if (kick_thread) | ||
168 | dlm_kick_thread(dlm, res); | ||
169 | |||
170 | return status; | ||
171 | } | ||
172 | |||
173 | void dlm_revert_pending_lock(struct dlm_lock_resource *res, | ||
174 | struct dlm_lock *lock) | ||
175 | { | ||
176 | /* remove from local queue if it failed */ | ||
177 | list_del_init(&lock->list); | ||
178 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; | ||
179 | } | ||
180 | |||
181 | |||
182 | /* | ||
183 | * locking: | ||
184 | * caller needs: none | ||
185 | * taken: takes and drops res->spinlock | ||
186 | * held on exit: none | ||
187 | * returns: DLM_DENIED, DLM_RECOVERING, or net status | ||
188 | */ | ||
189 | static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, | ||
190 | struct dlm_lock_resource *res, | ||
191 | struct dlm_lock *lock, int flags) | ||
192 | { | ||
193 | enum dlm_status status = DLM_DENIED; | ||
194 | |||
195 | mlog_entry("type=%d\n", lock->ml.type); | ||
196 | mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len, | ||
197 | res->lockname.name, flags); | ||
198 | |||
199 | spin_lock(&res->spinlock); | ||
200 | |||
201 | /* will exit this call with spinlock held */ | ||
202 | __dlm_wait_on_lockres(res); | ||
203 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
204 | |||
205 | /* add lock to local (secondary) queue */ | ||
206 | dlm_lock_get(lock); | ||
207 | list_add_tail(&lock->list, &res->blocked); | ||
208 | lock->lock_pending = 1; | ||
209 | spin_unlock(&res->spinlock); | ||
210 | |||
211 | /* spec seems to say that you will get DLM_NORMAL when the lock | ||
212 | * has been queued, meaning we need to wait for a reply here. */ | ||
213 | status = dlm_send_remote_lock_request(dlm, res, lock, flags); | ||
214 | |||
215 | spin_lock(&res->spinlock); | ||
216 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
217 | lock->lock_pending = 0; | ||
218 | if (status != DLM_NORMAL) { | ||
219 | if (status != DLM_NOTQUEUED) | ||
220 | dlm_error(status); | ||
221 | dlm_revert_pending_lock(res, lock); | ||
222 | dlm_lock_put(lock); | ||
223 | } | ||
224 | spin_unlock(&res->spinlock); | ||
225 | |||
226 | dlm_lockres_calc_usage(dlm, res); | ||
227 | |||
228 | wake_up(&res->wq); | ||
229 | return status; | ||
230 | } | ||
231 | |||
232 | |||
233 | /* for remote lock creation. | ||
234 | * locking: | ||
235 | * caller needs: none, but res->state must have DLM_LOCK_RES_IN_PROGRESS set | ||
236 | * taken: none | ||
237 | * held on exit: none | ||
238 | * returns: DLM_NOLOCKMGR, or net status | ||
239 | */ | ||
240 | static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, | ||
241 | struct dlm_lock_resource *res, | ||
242 | struct dlm_lock *lock, int flags) | ||
243 | { | ||
244 | struct dlm_create_lock create; | ||
245 | int tmpret, status = 0; | ||
246 | enum dlm_status ret; | ||
247 | |||
248 | mlog_entry_void(); | ||
249 | |||
250 | memset(&create, 0, sizeof(create)); | ||
251 | create.node_idx = dlm->node_num; | ||
252 | create.requested_type = lock->ml.type; | ||
253 | create.cookie = lock->ml.cookie; | ||
254 | create.namelen = res->lockname.len; | ||
255 | create.flags = cpu_to_be32(flags); | ||
256 | memcpy(create.name, res->lockname.name, create.namelen); | ||
257 | |||
258 | tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, | ||
259 | sizeof(create), res->owner, &status); | ||
260 | if (tmpret >= 0) { | ||
261 | /* successfully sent and received */ | ||
262 | ret = status; /* this is already a dlm_status */ | ||
263 | } else { | ||
264 | mlog_errno(tmpret); | ||
265 | if (dlm_is_host_down(tmpret)) { | ||
266 | ret = DLM_RECOVERING; | ||
267 | mlog(0, "node %u died so returning DLM_RECOVERING " | ||
268 | "from lock message!\n", res->owner); | ||
269 | } else { | ||
270 | ret = dlm_err_to_dlm_status(tmpret); | ||
271 | } | ||
272 | } | ||
273 | |||
274 | return ret; | ||
275 | } | ||
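
The dlm_create_lock wire structure filled in above is declared in dlmcommon.h; the layout below is a sketch inferred purely from the assignments in this function, so the field order and padding are assumptions. Note that cookie is copied in already big-endian (set by cpu_to_be64() in dlm_init_lock()), while flags is byteswapped here:

	/* sketch: inferred message layout, not the authoritative definition */
	struct dlm_create_lock
	{
		__be64 cookie;
		__be32 flags;
		u8 pad1;
		u8 node_idx;
		s8 requested_type;
		u8 namelen;
		u8 name[O2NM_MAX_NAME_LEN];	/* only namelen bytes are copied */
	};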
276 | |||
277 | void dlm_lock_get(struct dlm_lock *lock) | ||
278 | { | ||
279 | kref_get(&lock->lock_refs); | ||
280 | } | ||
281 | |||
282 | void dlm_lock_put(struct dlm_lock *lock) | ||
283 | { | ||
284 | kref_put(&lock->lock_refs, dlm_lock_release); | ||
285 | } | ||
286 | |||
287 | static void dlm_lock_release(struct kref *kref) | ||
288 | { | ||
289 | struct dlm_lock *lock; | ||
290 | |||
291 | lock = container_of(kref, struct dlm_lock, lock_refs); | ||
292 | |||
293 | BUG_ON(!list_empty(&lock->list)); | ||
294 | BUG_ON(!list_empty(&lock->ast_list)); | ||
295 | BUG_ON(!list_empty(&lock->bast_list)); | ||
296 | BUG_ON(lock->ast_pending); | ||
297 | BUG_ON(lock->bast_pending); | ||
298 | |||
299 | dlm_lock_detach_lockres(lock); | ||
300 | |||
301 | if (lock->lksb_kernel_allocated) { | ||
302 | mlog(0, "freeing kernel-allocated lksb\n"); | ||
303 | kfree(lock->lksb); | ||
304 | } | ||
305 | kfree(lock); | ||
306 | } | ||
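
For orientation, the invariant this release function depends on: every queue insertion in this file takes a reference and every removal drops one, so the kref can only reach zero once the lock is off all lists (hence the BUG_ONs above). A small illustrative pairing, mirroring what dlmlock_master() does:

	/* sketch (illustrative, not part of the patch) */
	static void example_queue_and_unqueue(struct dlm_lock_resource *res,
					      struct dlm_lock *lock)
	{
		dlm_lock_get(lock);			/* ref held by the queue */
		list_add_tail(&lock->list, &res->granted);

		/* ... later, when the lock leaves the queue ... */
		list_del_init(&lock->list);
		dlm_lock_put(lock);	/* a final put lands in dlm_lock_release() */
	}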
307 | |||
308 | /* associate a lock with its lockres, getting a ref on the lockres */ | ||
309 | void dlm_lock_attach_lockres(struct dlm_lock *lock, | ||
310 | struct dlm_lock_resource *res) | ||
311 | { | ||
312 | dlm_lockres_get(res); | ||
313 | lock->lockres = res; | ||
314 | } | ||
315 | |||
316 | /* drop ref on lockres, if there is still one associated with lock */ | ||
317 | static void dlm_lock_detach_lockres(struct dlm_lock *lock) | ||
318 | { | ||
319 | struct dlm_lock_resource *res; | ||
320 | |||
321 | res = lock->lockres; | ||
322 | if (res) { | ||
323 | lock->lockres = NULL; | ||
324 | mlog(0, "removing lock's lockres reference\n"); | ||
325 | dlm_lockres_put(res); | ||
326 | } | ||
327 | } | ||
328 | |||
329 | static void dlm_init_lock(struct dlm_lock *newlock, int type, | ||
330 | u8 node, u64 cookie) | ||
331 | { | ||
332 | INIT_LIST_HEAD(&newlock->list); | ||
333 | INIT_LIST_HEAD(&newlock->ast_list); | ||
334 | INIT_LIST_HEAD(&newlock->bast_list); | ||
335 | spin_lock_init(&newlock->spinlock); | ||
336 | newlock->ml.type = type; | ||
337 | newlock->ml.convert_type = LKM_IVMODE; | ||
338 | newlock->ml.highest_blocked = LKM_IVMODE; | ||
339 | newlock->ml.node = node; | ||
340 | newlock->ml.pad1 = 0; | ||
341 | newlock->ml.list = 0; | ||
342 | newlock->ml.flags = 0; | ||
343 | newlock->ast = NULL; | ||
344 | newlock->bast = NULL; | ||
345 | newlock->astdata = NULL; | ||
346 | newlock->ml.cookie = cpu_to_be64(cookie); | ||
347 | newlock->ast_pending = 0; | ||
348 | newlock->bast_pending = 0; | ||
349 | newlock->convert_pending = 0; | ||
350 | newlock->lock_pending = 0; | ||
351 | newlock->unlock_pending = 0; | ||
352 | newlock->cancel_pending = 0; | ||
353 | newlock->lksb_kernel_allocated = 0; | ||
354 | |||
355 | kref_init(&newlock->lock_refs); | ||
356 | } | ||
357 | |||
358 | struct dlm_lock *dlm_new_lock(int type, u8 node, u64 cookie, | ||
359 | struct dlm_lockstatus *lksb) | ||
360 | { | ||
361 | struct dlm_lock *lock; | ||
362 | int kernel_allocated = 0; | ||
363 | |||
364 | lock = kcalloc(1, sizeof(*lock), GFP_KERNEL); | ||
365 | if (!lock) | ||
366 | return NULL; | ||
367 | |||
368 | if (!lksb) { | ||
369 | /* zero memory only if kernel-allocated */ | ||
370 | lksb = kcalloc(1, sizeof(*lksb), GFP_KERNEL); | ||
371 | if (!lksb) { | ||
372 | kfree(lock); | ||
373 | return NULL; | ||
374 | } | ||
375 | kernel_allocated = 1; | ||
376 | } | ||
377 | |||
378 | dlm_init_lock(lock, type, node, cookie); | ||
379 | if (kernel_allocated) | ||
380 | lock->lksb_kernel_allocated = 1; | ||
381 | lock->lksb = lksb; | ||
382 | lksb->lockid = lock; | ||
383 | return lock; | ||
384 | } | ||
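
A hypothetical caller sketch showing the two lksb ownership modes the NULL check above distinguishes:

	/* sketch (illustrative only) */
	static void example_alloc(u8 node, u64 cookie,
				  struct dlm_lockstatus *my_lksb)
	{
		struct dlm_lock *a, *b;

		/* caller keeps ownership of my_lksb */
		a = dlm_new_lock(LKM_PRMODE, node, cookie, my_lksb);

		/* lksb is kcalloc'd here and kfree'd in dlm_lock_release() */
		b = dlm_new_lock(LKM_EXMODE, node, cookie, NULL);
	}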
385 | |||
386 | /* handler for lock creation net message | ||
387 | * locking: | ||
388 | * caller needs: none | ||
389 | * taken: takes and drops res->spinlock | ||
390 | * held on exit: none | ||
391 | * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED | ||
392 | */ | ||
393 | int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data) | ||
394 | { | ||
395 | struct dlm_ctxt *dlm = data; | ||
396 | struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf; | ||
397 | struct dlm_lock_resource *res = NULL; | ||
398 | struct dlm_lock *newlock = NULL; | ||
399 | struct dlm_lockstatus *lksb = NULL; | ||
400 | enum dlm_status status = DLM_NORMAL; | ||
401 | char *name; | ||
402 | unsigned int namelen; | ||
403 | |||
404 | BUG_ON(!dlm); | ||
405 | |||
406 | mlog_entry_void(); | ||
407 | |||
408 | if (!dlm_grab(dlm)) | ||
409 | return DLM_REJECTED; | ||
410 | |||
411 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
412 | "Domain %s not fully joined!\n", dlm->name); | ||
413 | |||
414 | name = create->name; | ||
415 | namelen = create->namelen; | ||
416 | |||
417 | status = DLM_IVBUFLEN; | ||
418 | if (namelen > DLM_LOCKID_NAME_MAX) { | ||
419 | dlm_error(status); | ||
420 | goto leave; | ||
421 | } | ||
422 | |||
423 | status = DLM_SYSERR; | ||
424 | newlock = dlm_new_lock(create->requested_type, | ||
425 | create->node_idx, | ||
426 | be64_to_cpu(create->cookie), NULL); | ||
427 | if (!newlock) { | ||
428 | dlm_error(status); | ||
429 | goto leave; | ||
430 | } | ||
431 | |||
432 | lksb = newlock->lksb; | ||
433 | |||
434 | if (be32_to_cpu(create->flags) & LKM_GET_LVB) { | ||
435 | lksb->flags |= DLM_LKSB_GET_LVB; | ||
436 | mlog(0, "set DLM_LKSB_GET_LVB flag\n"); | ||
437 | } | ||
438 | |||
439 | status = DLM_IVLOCKID; | ||
440 | res = dlm_lookup_lockres(dlm, name, namelen); | ||
441 | if (!res) { | ||
442 | dlm_error(status); | ||
443 | goto leave; | ||
444 | } | ||
445 | |||
446 | spin_lock(&res->spinlock); | ||
447 | status = __dlm_lockres_state_to_status(res); | ||
448 | spin_unlock(&res->spinlock); | ||
449 | |||
450 | if (status != DLM_NORMAL) { | ||
451 | mlog(0, "lockres recovering/migrating/in-progress\n"); | ||
452 | goto leave; | ||
453 | } | ||
454 | |||
455 | dlm_lock_attach_lockres(newlock, res); | ||
456 | |||
457 | status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags)); | ||
458 | leave: | ||
459 | if (status != DLM_NORMAL) | ||
460 | if (newlock) | ||
461 | dlm_lock_put(newlock); | ||
462 | |||
463 | if (res) | ||
464 | dlm_lockres_put(res); | ||
465 | |||
466 | dlm_put(dlm); | ||
467 | |||
468 | return status; | ||
469 | } | ||
470 | |||
471 | |||
472 | /* fetch next node-local (u8 nodenum + u56 cookie) into u64 */ | ||
473 | static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie) | ||
474 | { | ||
475 | u64 tmpnode = node_num; | ||
476 | |||
477 | /* shift single byte of node num into top 8 bits */ | ||
478 | tmpnode <<= 56; | ||
479 | |||
480 | spin_lock(&dlm_cookie_lock); | ||
481 | *cookie = (dlm_next_cookie | tmpnode); | ||
482 | if (++dlm_next_cookie & 0xff00000000000000ull) { | ||
483 | mlog(0, "This node's cookie will now wrap!\n"); | ||
484 | dlm_next_cookie = 1; | ||
485 | } | ||
486 | spin_unlock(&dlm_cookie_lock); | ||
487 | } | ||
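
The inverse operation is never needed in this file, but a hypothetical decode helper makes the packing explicit:

	/* sketch: unpack a host-endian cookie built above (hypothetical,
	 * not part of the patch) */
	static inline void dlm_split_cookie(u64 cookie, u8 *node, u64 *seq)
	{
		*node = (u8)(cookie >> 56);		/* top byte: node number */
		*seq = cookie & ~(0xffull << 56);	/* low 56 bits: sequence */
	}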
488 | |||
489 | enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode, | ||
490 | struct dlm_lockstatus *lksb, int flags, | ||
491 | const char *name, dlm_astlockfunc_t *ast, void *data, | ||
492 | dlm_bastlockfunc_t *bast) | ||
493 | { | ||
494 | enum dlm_status status; | ||
495 | struct dlm_lock_resource *res = NULL; | ||
496 | struct dlm_lock *lock = NULL; | ||
497 | int convert = 0, recovery = 0; | ||
498 | |||
499 | /* yes this function is a mess. | ||
500 | * TODO: clean this up. lots of common code in the | ||
501 | * lock and convert paths, especially in the retry blocks */ | ||
502 | if (!lksb) { | ||
503 | dlm_error(DLM_BADARGS); | ||
504 | return DLM_BADARGS; | ||
505 | } | ||
506 | |||
507 | status = DLM_BADPARAM; | ||
508 | if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) { | ||
509 | dlm_error(status); | ||
510 | goto error; | ||
511 | } | ||
512 | |||
513 | if (flags & ~LKM_VALID_FLAGS) { | ||
514 | dlm_error(status); | ||
515 | goto error; | ||
516 | } | ||
517 | |||
518 | convert = (flags & LKM_CONVERT); | ||
519 | recovery = (flags & LKM_RECOVERY); | ||
520 | |||
521 | if (recovery && | ||
522 | (!dlm_is_recovery_lock(name, strlen(name)) || convert)) { | ||
523 | dlm_error(status); | ||
524 | goto error; | ||
525 | } | ||
526 | if (convert && (flags & LKM_LOCAL)) { | ||
527 | mlog(ML_ERROR, "strange LOCAL convert request!\n"); | ||
528 | goto error; | ||
529 | } | ||
530 | |||
531 | if (convert) { | ||
532 | /* CONVERT request */ | ||
533 | |||
534 | /* if converting, must pass in a valid dlm_lock */ | ||
535 | lock = lksb->lockid; | ||
536 | if (!lock) { | ||
537 | mlog(ML_ERROR, "NULL lock pointer in convert " | ||
538 | "request\n"); | ||
539 | goto error; | ||
540 | } | ||
541 | |||
542 | res = lock->lockres; | ||
543 | if (!res) { | ||
544 | mlog(ML_ERROR, "NULL lockres pointer in convert " | ||
545 | "request\n"); | ||
546 | goto error; | ||
547 | } | ||
548 | dlm_lockres_get(res); | ||
549 | |||
550 | /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are | ||
551 | * static after the original lock call. convert requests will | ||
552 | * ensure that everything is the same, or return DLM_BADARGS. | ||
553 | * this means that DLM_DENIED_NOASTS will never be returned. | ||
554 | */ | ||
555 | if (lock->lksb != lksb || lock->ast != ast || | ||
556 | lock->bast != bast || lock->astdata != data) { | ||
557 | status = DLM_BADARGS; | ||
558 | mlog(ML_ERROR, "new args: lksb=%p, ast=%p, bast=%p, " | ||
559 | "astdata=%p\n", lksb, ast, bast, data); | ||
560 | mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, " | ||
561 | "astdata=%p\n", lock->lksb, lock->ast, | ||
562 | lock->bast, lock->astdata); | ||
563 | goto error; | ||
564 | } | ||
565 | retry_convert: | ||
566 | dlm_wait_for_recovery(dlm); | ||
567 | |||
568 | if (res->owner == dlm->node_num) | ||
569 | status = dlmconvert_master(dlm, res, lock, flags, mode); | ||
570 | else | ||
571 | status = dlmconvert_remote(dlm, res, lock, flags, mode); | ||
572 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || | ||
573 | status == DLM_FORWARD) { | ||
574 | /* for now, see how this works without sleeping | ||
575 | * and just retry right away. I suspect the reco | ||
576 | * or migration will complete fast enough that | ||
577 | * no waiting will be necessary */ | ||
578 | mlog(0, "retrying convert with migration/recovery/" | ||
579 | "in-progress\n"); | ||
580 | msleep(100); | ||
581 | goto retry_convert; | ||
582 | } | ||
583 | } else { | ||
584 | u64 tmpcookie; | ||
585 | |||
586 | /* LOCK request */ | ||
587 | status = DLM_BADARGS; | ||
588 | if (!name) { | ||
589 | dlm_error(status); | ||
590 | goto error; | ||
591 | } | ||
592 | |||
593 | status = DLM_IVBUFLEN; | ||
594 | if (strlen(name) > DLM_LOCKID_NAME_MAX || strlen(name) < 1) { | ||
595 | dlm_error(status); | ||
596 | goto error; | ||
597 | } | ||
598 | |||
599 | dlm_get_next_cookie(dlm->node_num, &tmpcookie); | ||
600 | lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb); | ||
601 | if (!lock) { | ||
602 | dlm_error(status); | ||
603 | goto error; | ||
604 | } | ||
605 | |||
606 | if (!recovery) | ||
607 | dlm_wait_for_recovery(dlm); | ||
608 | |||
609 | /* find or create the lock resource */ | ||
610 | res = dlm_get_lock_resource(dlm, name, flags); | ||
611 | if (!res) { | ||
612 | status = DLM_IVLOCKID; | ||
613 | dlm_error(status); | ||
614 | goto error; | ||
615 | } | ||
616 | |||
617 | mlog(0, "type=%d, flags = 0x%x\n", mode, flags); | ||
618 | mlog(0, "creating lock: lock=%p res=%p\n", lock, res); | ||
619 | |||
620 | dlm_lock_attach_lockres(lock, res); | ||
621 | lock->ast = ast; | ||
622 | lock->bast = bast; | ||
623 | lock->astdata = data; | ||
624 | |||
625 | retry_lock: | ||
626 | if (flags & LKM_VALBLK) { | ||
627 | mlog(0, "LKM_VALBLK passed by caller\n"); | ||
628 | |||
629 | /* LVB requests for non PR, PW or EX locks are | ||
630 | * ignored. */ | ||
631 | if (mode < LKM_PRMODE) | ||
632 | flags &= ~LKM_VALBLK; | ||
633 | else { | ||
634 | flags |= LKM_GET_LVB; | ||
635 | lock->lksb->flags |= DLM_LKSB_GET_LVB; | ||
636 | } | ||
637 | } | ||
638 | |||
639 | if (res->owner == dlm->node_num) | ||
640 | status = dlmlock_master(dlm, res, lock, flags); | ||
641 | else | ||
642 | status = dlmlock_remote(dlm, res, lock, flags); | ||
643 | |||
644 | if (status == DLM_RECOVERING || status == DLM_MIGRATING || | ||
645 | status == DLM_FORWARD) { | ||
646 | mlog(0, "retrying lock with migration/" | ||
647 | "recovery/in progress\n"); | ||
648 | msleep(100); | ||
649 | dlm_wait_for_recovery(dlm); | ||
650 | goto retry_lock; | ||
651 | } | ||
652 | |||
653 | if (status != DLM_NORMAL) { | ||
654 | lock->lksb->flags &= ~DLM_LKSB_GET_LVB; | ||
655 | if (status != DLM_NOTQUEUED) | ||
656 | dlm_error(status); | ||
657 | goto error; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | error: | ||
662 | if (status != DLM_NORMAL) { | ||
663 | if (lock && !convert) | ||
664 | dlm_lock_put(lock); | ||
665 | /* this is kind of unnecessary */ | ||
666 | lksb->status = status; | ||
667 | } | ||
668 | |||
669 | /* put lockres ref from the convert path | ||
670 | * or from dlm_get_lock_resource */ | ||
671 | if (res) | ||
672 | dlm_lockres_put(res); | ||
673 | |||
674 | return status; | ||
675 | } | ||
676 | EXPORT_SYMBOL_GPL(dlmlock); | ||
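
Since dlmlock() is the one exported entry point of this file, a minimal hypothetical caller may help; the callback signatures follow the dlm_astlockfunc_t/dlm_bastlockfunc_t typedefs from dlmapi.h, and error handling is elided:

	/* sketch (illustrative only) */
	static void example_ast(void *astdata) { /* lock was granted */ }
	static void example_bast(void *astdata, int blocked_type)
	{ /* someone is blocked on us; caller should downconvert/release */ }

	static struct dlm_lockstatus example_lksb;

	static void example_take_and_convert(struct dlm_ctxt *dlm)
	{
		enum dlm_status st;

		st = dlmlock(dlm, LKM_PRMODE, &example_lksb, 0, "example_lock",
			     example_ast, NULL, example_bast);
		/* ... once example_ast has fired ... */
		st = dlmlock(dlm, LKM_EXMODE, &example_lksb, LKM_CONVERT,
			     "example_lock", example_ast, NULL, example_bast);
	}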
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c new file mode 100644 index 000000000000..27e984f7e4cd --- /dev/null +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -0,0 +1,2664 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmmaster.c | ||
5 | * | ||
6 | * lock mastery functions | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | #include "dlmdebug.h" | ||
51 | #include "dlmdomain.h" | ||
52 | |||
53 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER) | ||
54 | #include "cluster/masklog.h" | ||
55 | |||
56 | enum dlm_mle_type { | ||
57 | DLM_MLE_BLOCK, | ||
58 | DLM_MLE_MASTER, | ||
59 | DLM_MLE_MIGRATION | ||
60 | }; | ||
61 | |||
62 | struct dlm_lock_name | ||
63 | { | ||
64 | u8 len; | ||
65 | u8 name[DLM_LOCKID_NAME_MAX]; | ||
66 | }; | ||
67 | |||
68 | struct dlm_master_list_entry | ||
69 | { | ||
70 | struct list_head list; | ||
71 | struct list_head hb_events; | ||
72 | struct dlm_ctxt *dlm; | ||
73 | spinlock_t spinlock; | ||
74 | wait_queue_head_t wq; | ||
75 | atomic_t woken; | ||
76 | struct kref mle_refs; | ||
77 | unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
78 | unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
79 | unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
80 | unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
81 | u8 master; | ||
82 | u8 new_master; | ||
83 | enum dlm_mle_type type; | ||
84 | struct o2hb_callback_func mle_hb_up; | ||
85 | struct o2hb_callback_func mle_hb_down; | ||
86 | union { | ||
87 | struct dlm_lock_resource *res; | ||
88 | struct dlm_lock_name name; | ||
89 | } u; | ||
90 | }; | ||
91 | |||
92 | static void dlm_mle_node_down(struct dlm_ctxt *dlm, | ||
93 | struct dlm_master_list_entry *mle, | ||
94 | struct o2nm_node *node, | ||
95 | int idx); | ||
96 | static void dlm_mle_node_up(struct dlm_ctxt *dlm, | ||
97 | struct dlm_master_list_entry *mle, | ||
98 | struct o2nm_node *node, | ||
99 | int idx); | ||
100 | |||
101 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data); | ||
102 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname, | ||
103 | unsigned int namelen, void *nodemap, | ||
104 | u32 flags); | ||
105 | |||
106 | static inline int dlm_mle_equal(struct dlm_ctxt *dlm, | ||
107 | struct dlm_master_list_entry *mle, | ||
108 | const char *name, | ||
109 | unsigned int namelen) | ||
110 | { | ||
111 | struct dlm_lock_resource *res; | ||
112 | |||
113 | if (dlm != mle->dlm) | ||
114 | return 0; | ||
115 | |||
116 | if (mle->type == DLM_MLE_BLOCK || | ||
117 | mle->type == DLM_MLE_MIGRATION) { | ||
118 | if (namelen != mle->u.name.len || | ||
119 | memcmp(name, mle->u.name.name, namelen) != 0) | ||
120 | return 0; | ||
121 | } else { | ||
122 | res = mle->u.res; | ||
123 | if (namelen != res->lockname.len || | ||
124 | memcmp(res->lockname.name, name, namelen) != 0) | ||
125 | return 0; | ||
126 | } | ||
127 | return 1; | ||
128 | } | ||
129 | |||
130 | #if 0 | ||
131 | /* Code here is included but defined out as it aids debugging */ | ||
132 | |||
133 | void dlm_print_one_mle(struct dlm_master_list_entry *mle) | ||
134 | { | ||
135 | int i = 0, refs; | ||
136 | char *type; | ||
137 | char attached; | ||
138 | u8 master; | ||
139 | unsigned int namelen; | ||
140 | const char *name; | ||
141 | struct kref *k; | ||
142 | |||
143 | k = &mle->mle_refs; | ||
144 | if (mle->type == DLM_MLE_BLOCK) | ||
145 | type = "BLK"; | ||
146 | else if (mle->type == DLM_MLE_MASTER) | ||
147 | type = "MAS"; | ||
148 | else | ||
149 | type = "MIG"; | ||
150 | refs = atomic_read(&k->refcount); | ||
151 | master = mle->master; | ||
152 | attached = (list_empty(&mle->hb_events) ? 'N' : 'Y'); | ||
153 | |||
154 | if (mle->type != DLM_MLE_MASTER) { | ||
155 | namelen = mle->u.name.len; | ||
156 | name = mle->u.name.name; | ||
157 | } else { | ||
158 | namelen = mle->u.res->lockname.len; | ||
159 | name = mle->u.res->lockname.name; | ||
160 | } | ||
161 | |||
162 | mlog(ML_NOTICE, " #%3d: %3s %3d %3u %3u %c (%d)%.*s\n", | ||
163 | i, type, refs, master, mle->new_master, attached, | ||
164 | namelen, namelen, name); | ||
165 | } | ||
166 | |||
167 | static void dlm_dump_mles(struct dlm_ctxt *dlm) | ||
168 | { | ||
169 | struct dlm_master_list_entry *mle; | ||
170 | struct list_head *iter; | ||
171 | |||
172 | mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name); | ||
173 | mlog(ML_NOTICE, " ####: type refs owner new events? lockname nodemap votemap respmap maybemap\n"); | ||
174 | spin_lock(&dlm->master_lock); | ||
175 | list_for_each(iter, &dlm->master_list) { | ||
176 | mle = list_entry(iter, struct dlm_master_list_entry, list); | ||
177 | dlm_print_one_mle(mle); | ||
178 | } | ||
179 | spin_unlock(&dlm->master_lock); | ||
180 | } | ||
181 | |||
182 | int dlm_dump_all_mles(const char __user *data, unsigned int len) | ||
183 | { | ||
184 | struct list_head *iter; | ||
185 | struct dlm_ctxt *dlm; | ||
186 | |||
187 | spin_lock(&dlm_domain_lock); | ||
188 | list_for_each(iter, &dlm_domains) { | ||
189 | dlm = list_entry (iter, struct dlm_ctxt, list); | ||
190 | mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name); | ||
191 | dlm_dump_mles(dlm); | ||
192 | } | ||
193 | spin_unlock(&dlm_domain_lock); | ||
194 | return len; | ||
195 | } | ||
196 | EXPORT_SYMBOL_GPL(dlm_dump_all_mles); | ||
197 | |||
198 | #endif /* 0 */ | ||
199 | |||
200 | |||
201 | static kmem_cache_t *dlm_mle_cache = NULL; | ||
202 | |||
203 | |||
204 | static void dlm_mle_release(struct kref *kref); | ||
205 | static void dlm_init_mle(struct dlm_master_list_entry *mle, | ||
206 | enum dlm_mle_type type, | ||
207 | struct dlm_ctxt *dlm, | ||
208 | struct dlm_lock_resource *res, | ||
209 | const char *name, | ||
210 | unsigned int namelen); | ||
211 | static void dlm_put_mle(struct dlm_master_list_entry *mle); | ||
212 | static void __dlm_put_mle(struct dlm_master_list_entry *mle); | ||
213 | static int dlm_find_mle(struct dlm_ctxt *dlm, | ||
214 | struct dlm_master_list_entry **mle, | ||
215 | char *name, unsigned int namelen); | ||
216 | |||
217 | static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to); | ||
218 | |||
219 | |||
220 | static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, | ||
221 | struct dlm_lock_resource *res, | ||
222 | struct dlm_master_list_entry *mle, | ||
223 | int *blocked); | ||
224 | static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | ||
225 | struct dlm_lock_resource *res, | ||
226 | struct dlm_master_list_entry *mle, | ||
227 | int blocked); | ||
228 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | ||
229 | struct dlm_lock_resource *res, | ||
230 | struct dlm_master_list_entry *mle, | ||
231 | struct dlm_master_list_entry **oldmle, | ||
232 | const char *name, unsigned int namelen, | ||
233 | u8 new_master, u8 master); | ||
234 | |||
235 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, | ||
236 | struct dlm_lock_resource *res); | ||
237 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | ||
238 | struct dlm_lock_resource *res); | ||
239 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | ||
240 | struct dlm_lock_resource *res, | ||
241 | u8 target); | ||
242 | |||
243 | |||
244 | int dlm_is_host_down(int errno) | ||
245 | { | ||
246 | switch (errno) { | ||
247 | case -EBADF: | ||
248 | case -ECONNREFUSED: | ||
249 | case -ENOTCONN: | ||
250 | case -ECONNRESET: | ||
251 | case -EPIPE: | ||
252 | case -EHOSTDOWN: | ||
253 | case -EHOSTUNREACH: | ||
254 | case -ETIMEDOUT: | ||
255 | case -ECONNABORTED: | ||
256 | case -ENETDOWN: | ||
257 | case -ENETUNREACH: | ||
258 | case -ENETRESET: | ||
259 | case -ESHUTDOWN: | ||
260 | case -ENOPROTOOPT: | ||
261 | case -EINVAL: /* if returned from our tcp code, | ||
262 | this means there is no socket */ | ||
263 | return 1; | ||
264 | } | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | |||
269 | /* | ||
270 | * MASTER LIST FUNCTIONS | ||
271 | */ | ||
272 | |||
273 | |||
274 | /* | ||
275 | * regarding master list entries and heartbeat callbacks: | ||
276 | * | ||
277 | * in order to avoid sleeping and allocation that occurs in | ||
278 | * heartbeat, master list entries are simply attached to the | ||
279 | * dlm's established heartbeat callbacks. the mle is attached | ||
280 | * when it is created, and since the dlm->spinlock is held at | ||
281 | * that time, any heartbeat event will be properly discovered | ||
282 | * by the mle. the mle needs to be detached from the | ||
283 | * dlm->mle_hb_events list as soon as heartbeat events are no | ||
284 | * longer useful to the mle, and before the mle is freed. | ||
285 | * | ||
286 | * as a general rule, heartbeat events are no longer needed by | ||
287 | * the mle once an "answer" regarding the lock master has been | ||
288 | * received. | ||
289 | */ | ||
290 | static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm, | ||
291 | struct dlm_master_list_entry *mle) | ||
292 | { | ||
293 | assert_spin_locked(&dlm->spinlock); | ||
294 | |||
295 | list_add_tail(&mle->hb_events, &dlm->mle_hb_events); | ||
296 | } | ||
297 | |||
298 | |||
299 | static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, | ||
300 | struct dlm_master_list_entry *mle) | ||
301 | { | ||
302 | if (!list_empty(&mle->hb_events)) | ||
303 | list_del_init(&mle->hb_events); | ||
304 | } | ||
305 | |||
306 | |||
307 | static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm, | ||
308 | struct dlm_master_list_entry *mle) | ||
309 | { | ||
310 | spin_lock(&dlm->spinlock); | ||
311 | __dlm_mle_detach_hb_events(dlm, mle); | ||
312 | spin_unlock(&dlm->spinlock); | ||
313 | } | ||
314 | |||
315 | /* remove from list and free */ | ||
316 | static void __dlm_put_mle(struct dlm_master_list_entry *mle) | ||
317 | { | ||
318 | struct dlm_ctxt *dlm; | ||
319 | dlm = mle->dlm; | ||
320 | |||
321 | assert_spin_locked(&dlm->spinlock); | ||
322 | assert_spin_locked(&dlm->master_lock); | ||
323 | BUG_ON(!atomic_read(&mle->mle_refs.refcount)); | ||
324 | |||
325 | kref_put(&mle->mle_refs, dlm_mle_release); | ||
326 | } | ||
327 | |||
328 | |||
329 | /* must not have any spinlocks coming in */ | ||
330 | static void dlm_put_mle(struct dlm_master_list_entry *mle) | ||
331 | { | ||
332 | struct dlm_ctxt *dlm; | ||
333 | dlm = mle->dlm; | ||
334 | |||
335 | spin_lock(&dlm->spinlock); | ||
336 | spin_lock(&dlm->master_lock); | ||
337 | __dlm_put_mle(mle); | ||
338 | spin_unlock(&dlm->master_lock); | ||
339 | spin_unlock(&dlm->spinlock); | ||
340 | } | ||
341 | |||
342 | static inline void dlm_get_mle(struct dlm_master_list_entry *mle) | ||
343 | { | ||
344 | kref_get(&mle->mle_refs); | ||
345 | } | ||
346 | |||
347 | static void dlm_init_mle(struct dlm_master_list_entry *mle, | ||
348 | enum dlm_mle_type type, | ||
349 | struct dlm_ctxt *dlm, | ||
350 | struct dlm_lock_resource *res, | ||
351 | const char *name, | ||
352 | unsigned int namelen) | ||
353 | { | ||
354 | assert_spin_locked(&dlm->spinlock); | ||
355 | |||
356 | mle->dlm = dlm; | ||
357 | mle->type = type; | ||
358 | INIT_LIST_HEAD(&mle->list); | ||
359 | INIT_LIST_HEAD(&mle->hb_events); | ||
360 | memset(mle->maybe_map, 0, sizeof(mle->maybe_map)); | ||
361 | spin_lock_init(&mle->spinlock); | ||
362 | init_waitqueue_head(&mle->wq); | ||
363 | atomic_set(&mle->woken, 0); | ||
364 | kref_init(&mle->mle_refs); | ||
365 | memset(mle->response_map, 0, sizeof(mle->response_map)); | ||
366 | mle->master = O2NM_MAX_NODES; | ||
367 | mle->new_master = O2NM_MAX_NODES; | ||
368 | |||
369 | if (mle->type == DLM_MLE_MASTER) { | ||
370 | BUG_ON(!res); | ||
371 | mle->u.res = res; | ||
372 | } else if (mle->type == DLM_MLE_BLOCK) { | ||
373 | BUG_ON(!name); | ||
374 | memcpy(mle->u.name.name, name, namelen); | ||
375 | mle->u.name.len = namelen; | ||
376 | } else /* DLM_MLE_MIGRATION */ { | ||
377 | BUG_ON(!name); | ||
378 | memcpy(mle->u.name.name, name, namelen); | ||
379 | mle->u.name.len = namelen; | ||
380 | } | ||
381 | |||
382 | /* copy off the node_map and register hb callbacks on our copy */ | ||
383 | memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map)); | ||
384 | memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map)); | ||
385 | clear_bit(dlm->node_num, mle->vote_map); | ||
386 | clear_bit(dlm->node_num, mle->node_map); | ||
387 | |||
388 | /* attach the mle to the domain node up/down events */ | ||
389 | __dlm_mle_attach_hb_events(dlm, mle); | ||
390 | } | ||
391 | |||
392 | |||
393 | /* returns 1 if found, 0 if not */ | ||
394 | static int dlm_find_mle(struct dlm_ctxt *dlm, | ||
395 | struct dlm_master_list_entry **mle, | ||
396 | char *name, unsigned int namelen) | ||
397 | { | ||
398 | struct dlm_master_list_entry *tmpmle; | ||
399 | struct list_head *iter; | ||
400 | |||
401 | assert_spin_locked(&dlm->master_lock); | ||
402 | |||
403 | list_for_each(iter, &dlm->master_list) { | ||
404 | tmpmle = list_entry(iter, struct dlm_master_list_entry, list); | ||
405 | if (!dlm_mle_equal(dlm, tmpmle, name, namelen)) | ||
406 | continue; | ||
407 | dlm_get_mle(tmpmle); | ||
408 | *mle = tmpmle; | ||
409 | return 1; | ||
410 | } | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up) | ||
415 | { | ||
416 | struct dlm_master_list_entry *mle; | ||
417 | struct list_head *iter; | ||
418 | |||
419 | assert_spin_locked(&dlm->spinlock); | ||
420 | |||
421 | list_for_each(iter, &dlm->mle_hb_events) { | ||
422 | mle = list_entry(iter, struct dlm_master_list_entry, | ||
423 | hb_events); | ||
424 | if (node_up) | ||
425 | dlm_mle_node_up(dlm, mle, NULL, idx); | ||
426 | else | ||
427 | dlm_mle_node_down(dlm, mle, NULL, idx); | ||
428 | } | ||
429 | } | ||
430 | |||
431 | static void dlm_mle_node_down(struct dlm_ctxt *dlm, | ||
432 | struct dlm_master_list_entry *mle, | ||
433 | struct o2nm_node *node, int idx) | ||
434 | { | ||
435 | spin_lock(&mle->spinlock); | ||
436 | |||
437 | if (!test_bit(idx, mle->node_map)) | ||
438 | mlog(0, "node %u already removed from nodemap!\n", idx); | ||
439 | else | ||
440 | clear_bit(idx, mle->node_map); | ||
441 | |||
442 | spin_unlock(&mle->spinlock); | ||
443 | } | ||
444 | |||
445 | static void dlm_mle_node_up(struct dlm_ctxt *dlm, | ||
446 | struct dlm_master_list_entry *mle, | ||
447 | struct o2nm_node *node, int idx) | ||
448 | { | ||
449 | spin_lock(&mle->spinlock); | ||
450 | |||
451 | if (test_bit(idx, mle->node_map)) | ||
452 | mlog(0, "node %u already in node map!\n", idx); | ||
453 | else | ||
454 | set_bit(idx, mle->node_map); | ||
455 | |||
456 | spin_unlock(&mle->spinlock); | ||
457 | } | ||
458 | |||
459 | |||
460 | int dlm_init_mle_cache(void) | ||
461 | { | ||
462 | dlm_mle_cache = kmem_cache_create("dlm_mle_cache", | ||
463 | sizeof(struct dlm_master_list_entry), | ||
464 | 0, SLAB_HWCACHE_ALIGN, | ||
465 | NULL, NULL); | ||
466 | if (dlm_mle_cache == NULL) | ||
467 | return -ENOMEM; | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | void dlm_destroy_mle_cache(void) | ||
472 | { | ||
473 | if (dlm_mle_cache) | ||
474 | kmem_cache_destroy(dlm_mle_cache); | ||
475 | } | ||
476 | |||
477 | static void dlm_mle_release(struct kref *kref) | ||
478 | { | ||
479 | struct dlm_master_list_entry *mle; | ||
480 | struct dlm_ctxt *dlm; | ||
481 | |||
482 | mlog_entry_void(); | ||
483 | |||
484 | mle = container_of(kref, struct dlm_master_list_entry, mle_refs); | ||
485 | dlm = mle->dlm; | ||
486 | |||
487 | if (mle->type != DLM_MLE_MASTER) { | ||
488 | mlog(0, "calling mle_release for %.*s, type %d\n", | ||
489 | mle->u.name.len, mle->u.name.name, mle->type); | ||
490 | } else { | ||
491 | mlog(0, "calling mle_release for %.*s, type %d\n", | ||
492 | mle->u.res->lockname.len, | ||
493 | mle->u.res->lockname.name, mle->type); | ||
494 | } | ||
495 | assert_spin_locked(&dlm->spinlock); | ||
496 | assert_spin_locked(&dlm->master_lock); | ||
497 | |||
498 | /* remove from list if not already */ | ||
499 | if (!list_empty(&mle->list)) | ||
500 | list_del_init(&mle->list); | ||
501 | |||
502 | /* detach the mle from the domain node up/down events */ | ||
503 | __dlm_mle_detach_hb_events(dlm, mle); | ||
504 | |||
505 | /* NOTE: kmem_cache_free under spinlock here. | ||
506 | * if this is bad, we can move this to a freelist. */ | ||
507 | kmem_cache_free(dlm_mle_cache, mle); | ||
508 | } | ||
509 | |||
510 | |||
511 | /* | ||
512 | * LOCK RESOURCE FUNCTIONS | ||
513 | */ | ||
514 | |||
515 | static void dlm_set_lockres_owner(struct dlm_ctxt *dlm, | ||
516 | struct dlm_lock_resource *res, | ||
517 | u8 owner) | ||
518 | { | ||
519 | assert_spin_locked(&res->spinlock); | ||
520 | |||
521 | mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner); | ||
522 | |||
523 | if (owner == dlm->node_num) | ||
524 | atomic_inc(&dlm->local_resources); | ||
525 | else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN) | ||
526 | atomic_inc(&dlm->unknown_resources); | ||
527 | else | ||
528 | atomic_inc(&dlm->remote_resources); | ||
529 | |||
530 | res->owner = owner; | ||
531 | } | ||
532 | |||
533 | void dlm_change_lockres_owner(struct dlm_ctxt *dlm, | ||
534 | struct dlm_lock_resource *res, u8 owner) | ||
535 | { | ||
536 | assert_spin_locked(&res->spinlock); | ||
537 | |||
538 | if (owner == res->owner) | ||
539 | return; | ||
540 | |||
541 | if (res->owner == dlm->node_num) | ||
542 | atomic_dec(&dlm->local_resources); | ||
543 | else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) | ||
544 | atomic_dec(&dlm->unknown_resources); | ||
545 | else | ||
546 | atomic_dec(&dlm->remote_resources); | ||
547 | |||
548 | dlm_set_lockres_owner(dlm, res, owner); | ||
549 | } | ||
550 | |||
551 | |||
552 | static void dlm_lockres_release(struct kref *kref) | ||
553 | { | ||
554 | struct dlm_lock_resource *res; | ||
555 | |||
556 | res = container_of(kref, struct dlm_lock_resource, refs); | ||
557 | |||
558 | /* This should not happen -- all lockres' have a name | ||
559 | * associated with them at init time. */ | ||
560 | BUG_ON(!res->lockname.name); | ||
561 | |||
562 | mlog(0, "destroying lockres %.*s\n", res->lockname.len, | ||
563 | res->lockname.name); | ||
564 | |||
565 | /* By the time we're ready to blow this guy away, we shouldn't | ||
566 | * be on any lists. */ | ||
567 | BUG_ON(!list_empty(&res->list)); | ||
568 | BUG_ON(!list_empty(&res->granted)); | ||
569 | BUG_ON(!list_empty(&res->converting)); | ||
570 | BUG_ON(!list_empty(&res->blocked)); | ||
571 | BUG_ON(!list_empty(&res->dirty)); | ||
572 | BUG_ON(!list_empty(&res->recovering)); | ||
573 | BUG_ON(!list_empty(&res->purge)); | ||
574 | |||
575 | kfree(res->lockname.name); | ||
576 | |||
577 | kfree(res); | ||
578 | } | ||
579 | |||
580 | void dlm_lockres_get(struct dlm_lock_resource *res) | ||
581 | { | ||
582 | kref_get(&res->refs); | ||
583 | } | ||
584 | |||
585 | void dlm_lockres_put(struct dlm_lock_resource *res) | ||
586 | { | ||
587 | kref_put(&res->refs, dlm_lockres_release); | ||
588 | } | ||
589 | |||
590 | static void dlm_init_lockres(struct dlm_ctxt *dlm, | ||
591 | struct dlm_lock_resource *res, | ||
592 | const char *name, unsigned int namelen) | ||
593 | { | ||
594 | char *qname; | ||
595 | |||
596 | /* If we memset here, we lose our reference to the kmalloc'd | ||
597 | * res->lockname.name, so be sure to init every field | ||
598 | * correctly! */ | ||
599 | |||
600 | qname = (char *) res->lockname.name; | ||
601 | memcpy(qname, name, namelen); | ||
602 | |||
603 | res->lockname.len = namelen; | ||
604 | res->lockname.hash = full_name_hash(name, namelen); | ||
605 | |||
606 | init_waitqueue_head(&res->wq); | ||
607 | spin_lock_init(&res->spinlock); | ||
608 | INIT_LIST_HEAD(&res->list); | ||
609 | INIT_LIST_HEAD(&res->granted); | ||
610 | INIT_LIST_HEAD(&res->converting); | ||
611 | INIT_LIST_HEAD(&res->blocked); | ||
612 | INIT_LIST_HEAD(&res->dirty); | ||
613 | INIT_LIST_HEAD(&res->recovering); | ||
614 | INIT_LIST_HEAD(&res->purge); | ||
615 | atomic_set(&res->asts_reserved, 0); | ||
616 | res->migration_pending = 0; | ||
617 | |||
618 | kref_init(&res->refs); | ||
619 | |||
620 | /* just for consistency */ | ||
621 | spin_lock(&res->spinlock); | ||
622 | dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
623 | spin_unlock(&res->spinlock); | ||
624 | |||
625 | res->state = DLM_LOCK_RES_IN_PROGRESS; | ||
626 | |||
627 | res->last_used = 0; | ||
628 | |||
629 | memset(res->lvb, 0, DLM_LVB_LEN); | ||
630 | } | ||
631 | |||
632 | struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, | ||
633 | const char *name, | ||
634 | unsigned int namelen) | ||
635 | { | ||
636 | struct dlm_lock_resource *res; | ||
637 | |||
638 | res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL); | ||
639 | if (!res) | ||
640 | return NULL; | ||
641 | |||
642 | res->lockname.name = kmalloc(namelen, GFP_KERNEL); | ||
643 | if (!res->lockname.name) { | ||
644 | kfree(res); | ||
645 | return NULL; | ||
646 | } | ||
647 | |||
648 | dlm_init_lockres(dlm, res, name, namelen); | ||
649 | return res; | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * lookup a lock resource by name. | ||
654 | * may already exist in the hashtable. | ||
655 | * lockid is null terminated | ||
656 | * | ||
657 | * if not, allocate enough for the lockres and for | ||
658 | * the temporary structure used in doing the mastering. | ||
659 | * | ||
660 | * also, do a lookup in the dlm->master_list to see | ||
661 | * if another node has begun mastering the same lock. | ||
662 | * if so, there should be a block entry in there | ||
663 | * for this name, and we should *not* attempt to master | ||
664 | * the lock here. need to wait around for that node | ||
665 | * to assert_master (or die). | ||
666 | * | ||
667 | */ | ||
668 | struct dlm_lock_resource *dlm_get_lock_resource(struct dlm_ctxt *dlm, | ||
669 | const char *lockid, | ||
670 | int flags) | ||
671 | { | ||
672 | struct dlm_lock_resource *tmpres = NULL, *res = NULL; | ||
673 | struct dlm_master_list_entry *mle = NULL; | ||
674 | struct dlm_master_list_entry *alloc_mle = NULL; | ||
675 | int blocked = 0; | ||
676 | int ret, nodenum; | ||
677 | struct dlm_node_iter iter; | ||
678 | unsigned int namelen; | ||
679 | int tries = 0; | ||
680 | |||
681 | BUG_ON(!lockid); | ||
682 | |||
683 | namelen = strlen(lockid); | ||
684 | |||
685 | mlog(0, "get lockres %s (len %d)\n", lockid, namelen); | ||
686 | |||
687 | lookup: | ||
688 | spin_lock(&dlm->spinlock); | ||
689 | tmpres = __dlm_lookup_lockres(dlm, lockid, namelen); | ||
690 | if (tmpres) { | ||
691 | spin_unlock(&dlm->spinlock); | ||
692 | mlog(0, "found in hash!\n"); | ||
693 | if (res) | ||
694 | dlm_lockres_put(res); | ||
695 | res = tmpres; | ||
696 | goto leave; | ||
697 | } | ||
698 | |||
699 | if (!res) { | ||
700 | spin_unlock(&dlm->spinlock); | ||
701 | mlog(0, "allocating a new resource\n"); | ||
702 | /* nothing found and we need to allocate one. */ | ||
703 | alloc_mle = (struct dlm_master_list_entry *) | ||
704 | kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); | ||
705 | if (!alloc_mle) | ||
706 | goto leave; | ||
707 | res = dlm_new_lockres(dlm, lockid, namelen); | ||
708 | if (!res) | ||
709 | goto leave; | ||
710 | goto lookup; | ||
711 | } | ||
712 | |||
713 | mlog(0, "no lockres found, allocated our own: %p\n", res); | ||
714 | |||
715 | if (flags & LKM_LOCAL) { | ||
716 | /* caller knows it's safe to assume it's not mastered elsewhere | ||
717 | * DONE! return right away */ | ||
718 | spin_lock(&res->spinlock); | ||
719 | dlm_change_lockres_owner(dlm, res, dlm->node_num); | ||
720 | __dlm_insert_lockres(dlm, res); | ||
721 | spin_unlock(&res->spinlock); | ||
722 | spin_unlock(&dlm->spinlock); | ||
723 | /* lockres still marked IN_PROGRESS */ | ||
724 | goto wake_waiters; | ||
725 | } | ||
726 | |||
727 | /* check master list to see if another node has started mastering it */ | ||
728 | spin_lock(&dlm->master_lock); | ||
729 | |||
730 | /* if we found a block, wait for lock to be mastered by another node */ | ||
731 | blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen); | ||
732 | if (blocked) { | ||
733 | if (mle->type == DLM_MLE_MASTER) { | ||
734 | mlog(ML_ERROR, "master entry for nonexistent lock!\n"); | ||
735 | BUG(); | ||
736 | } else if (mle->type == DLM_MLE_MIGRATION) { | ||
737 | /* migration is in progress! */ | ||
738 | /* the good news is that we now know the | ||
739 | * "current" master (mle->master). */ | ||
740 | |||
741 | spin_unlock(&dlm->master_lock); | ||
742 | assert_spin_locked(&dlm->spinlock); | ||
743 | |||
744 | /* set the lockres owner and hash it */ | ||
745 | spin_lock(&res->spinlock); | ||
746 | dlm_set_lockres_owner(dlm, res, mle->master); | ||
747 | __dlm_insert_lockres(dlm, res); | ||
748 | spin_unlock(&res->spinlock); | ||
749 | spin_unlock(&dlm->spinlock); | ||
750 | |||
751 | /* master is known, detach */ | ||
752 | dlm_mle_detach_hb_events(dlm, mle); | ||
753 | dlm_put_mle(mle); | ||
754 | mle = NULL; | ||
755 | goto wake_waiters; | ||
756 | } | ||
757 | } else { | ||
758 | /* go ahead and try to master lock on this node */ | ||
759 | mle = alloc_mle; | ||
760 | /* make sure this does not get freed below */ | ||
761 | alloc_mle = NULL; | ||
762 | dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0); | ||
763 | set_bit(dlm->node_num, mle->maybe_map); | ||
764 | list_add(&mle->list, &dlm->master_list); | ||
765 | } | ||
766 | |||
767 | /* at this point there is either a DLM_MLE_BLOCK or a | ||
768 | * DLM_MLE_MASTER on the master list, so it's safe to add the | ||
769 | * lockres to the hashtable. anyone who finds the lock will | ||
770 | * still have to wait on the IN_PROGRESS. */ | ||
771 | |||
772 | /* finally add the lockres to its hash bucket */ | ||
773 | __dlm_insert_lockres(dlm, res); | ||
774 | /* get an extra ref on the mle in case this is a BLOCK | ||
775 | * if so, the creator of the BLOCK may try to put the last | ||
776 | * ref at this time in the assert master handler, so we | ||
777 | * need an extra one to keep from a bad ptr deref. */ | ||
778 | dlm_get_mle(mle); | ||
779 | spin_unlock(&dlm->master_lock); | ||
780 | spin_unlock(&dlm->spinlock); | ||
781 | |||
782 | /* must wait for lock to be mastered elsewhere */ | ||
783 | if (blocked) | ||
784 | goto wait; | ||
785 | |||
786 | redo_request: | ||
787 | ret = -EINVAL; | ||
788 | dlm_node_iter_init(mle->vote_map, &iter); | ||
789 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
790 | ret = dlm_do_master_request(mle, nodenum); | ||
791 | if (ret < 0) | ||
792 | mlog_errno(ret); | ||
793 | if (mle->master != O2NM_MAX_NODES) { | ||
794 | /* found a master ! */ | ||
795 | break; | ||
796 | } | ||
797 | } | ||
798 | |||
799 | wait: | ||
800 | /* keep going until the response map includes all nodes */ | ||
801 | ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); | ||
802 | if (ret < 0) { | ||
803 | mlog(0, "%s:%.*s: node map changed, redo the " | ||
804 | "master request now, blocked=%d\n", | ||
805 | dlm->name, res->lockname.len, | ||
806 | res->lockname.name, blocked); | ||
807 | if (++tries > 20) { | ||
808 | mlog(ML_ERROR, "%s:%.*s: spinning on " | ||
809 | "dlm_wait_for_lock_mastery, blocked=%d\n", | ||
810 | dlm->name, res->lockname.len, | ||
811 | res->lockname.name, blocked); | ||
812 | dlm_print_one_lock_resource(res); | ||
813 | /* dlm_print_one_mle(mle); */ | ||
814 | tries = 0; | ||
815 | } | ||
816 | goto redo_request; | ||
817 | } | ||
818 | |||
819 | mlog(0, "lockres mastered by %u\n", res->owner); | ||
820 | /* make sure we never continue without this */ | ||
821 | BUG_ON(res->owner == O2NM_MAX_NODES); | ||
822 | |||
823 | /* master is known, detach if not already detached */ | ||
824 | dlm_mle_detach_hb_events(dlm, mle); | ||
825 | dlm_put_mle(mle); | ||
826 | /* put the extra ref */ | ||
827 | dlm_put_mle(mle); | ||
828 | |||
829 | wake_waiters: | ||
830 | spin_lock(&res->spinlock); | ||
831 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
832 | spin_unlock(&res->spinlock); | ||
833 | wake_up(&res->wq); | ||
834 | |||
835 | leave: | ||
836 | /* need to free the unused mle */ | ||
837 | if (alloc_mle) | ||
838 | kmem_cache_free(dlm_mle_cache, alloc_mle); | ||
839 | |||
840 | return res; | ||
841 | } | ||
842 | |||
843 | |||
844 | #define DLM_MASTERY_TIMEOUT_MS 5000 | ||
845 | |||
846 | static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm, | ||
847 | struct dlm_lock_resource *res, | ||
848 | struct dlm_master_list_entry *mle, | ||
849 | int *blocked) | ||
850 | { | ||
851 | u8 m; | ||
852 | int ret, bit; | ||
853 | int map_changed, voting_done; | ||
854 | int assert, sleep; | ||
855 | |||
856 | recheck: | ||
857 | ret = 0; | ||
858 | assert = 0; | ||
859 | |||
860 | /* check if another node has already become the owner */ | ||
861 | spin_lock(&res->spinlock); | ||
862 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
863 | spin_unlock(&res->spinlock); | ||
864 | goto leave; | ||
865 | } | ||
866 | spin_unlock(&res->spinlock); | ||
867 | |||
868 | spin_lock(&mle->spinlock); | ||
869 | m = mle->master; | ||
870 | map_changed = (memcmp(mle->vote_map, mle->node_map, | ||
871 | sizeof(mle->vote_map)) != 0); | ||
872 | voting_done = (memcmp(mle->vote_map, mle->response_map, | ||
873 | sizeof(mle->vote_map)) == 0); | ||
874 | |||
875 | /* restart if we hit any errors */ | ||
876 | if (map_changed) { | ||
877 | int b; | ||
878 | mlog(0, "%s: %.*s: node map changed, restarting\n", | ||
879 | dlm->name, res->lockname.len, res->lockname.name); | ||
880 | ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked); | ||
881 | b = (mle->type == DLM_MLE_BLOCK); | ||
882 | if ((*blocked && !b) || (!*blocked && b)) { | ||
883 | mlog(0, "%s:%.*s: status change: old=%d new=%d\n", | ||
884 | dlm->name, res->lockname.len, res->lockname.name, | ||
885 | *blocked, b); | ||
886 | *blocked = b; | ||
887 | } | ||
888 | spin_unlock(&mle->spinlock); | ||
889 | if (ret < 0) { | ||
890 | mlog_errno(ret); | ||
891 | goto leave; | ||
892 | } | ||
893 | mlog(0, "%s:%.*s: restart lock mastery succeeded, " | ||
894 | "rechecking now\n", dlm->name, res->lockname.len, | ||
895 | res->lockname.name); | ||
896 | goto recheck; | ||
897 | } | ||
898 | |||
899 | if (m != O2NM_MAX_NODES) { | ||
900 | /* another node has done an assert! | ||
901 | * all done! */ | ||
902 | sleep = 0; | ||
903 | } else { | ||
904 | sleep = 1; | ||
905 | /* have all nodes responded? */ | ||
906 | if (voting_done && !*blocked) { | ||
907 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | ||
908 | if (dlm->node_num <= bit) { | ||
909 | /* my node number is lowest. | ||
910 | * now tell other nodes that I am | ||
911 | * mastering this. */ | ||
912 | mle->master = dlm->node_num; | ||
913 | assert = 1; | ||
914 | sleep = 0; | ||
915 | } | ||
916 | /* if voting is done, but we have not received | ||
917 | * an assert master yet, we must sleep */ | ||
918 | } | ||
919 | } | ||
920 | |||
921 | spin_unlock(&mle->spinlock); | ||
922 | |||
923 | /* sleep if we haven't finished voting yet */ | ||
924 | if (sleep) { | ||
925 | unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS); | ||
926 | |||
927 | /* | ||
928 | if (atomic_read(&mle->mle_refs.refcount) < 2) | ||
929 | mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle, | ||
930 | atomic_read(&mle->mle_refs.refcount), | ||
931 | res->lockname.len, res->lockname.name); | ||
932 | */ | ||
933 | atomic_set(&mle->woken, 0); | ||
934 | (void)wait_event_timeout(mle->wq, | ||
935 | (atomic_read(&mle->woken) == 1), | ||
936 | timeo); | ||
937 | if (res->owner == O2NM_MAX_NODES) { | ||
938 | mlog(0, "waiting again\n"); | ||
939 | goto recheck; | ||
940 | } | ||
941 | mlog(0, "done waiting, master is %u\n", res->owner); | ||
942 | ret = 0; | ||
943 | goto leave; | ||
944 | } | ||
945 | |||
946 | ret = 0; /* done */ | ||
947 | if (assert) { | ||
948 | m = dlm->node_num; | ||
949 | mlog(0, "about to master %.*s here, this=%u\n", | ||
950 | res->lockname.len, res->lockname.name, m); | ||
951 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
952 | res->lockname.len, mle->vote_map, 0); | ||
953 | if (ret) { | ||
954 | /* This is a failure in the network path, | ||
955 | * not in the response to the assert_master | ||
956 | * (any nonzero response is a BUG on this node). | ||
957 | * Most likely a socket just got disconnected | ||
958 | * due to node death. */ | ||
959 | mlog_errno(ret); | ||
960 | } | ||
961 | /* no longer need to restart lock mastery. | ||
962 | * all living nodes have been contacted. */ | ||
963 | ret = 0; | ||
964 | } | ||
965 | |||
966 | /* set the lockres owner */ | ||
967 | spin_lock(&res->spinlock); | ||
968 | dlm_change_lockres_owner(dlm, res, m); | ||
969 | spin_unlock(&res->spinlock); | ||
970 | |||
971 | leave: | ||
972 | return ret; | ||
973 | } | ||
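
A concrete walk-through of the voting above: suppose nodes 2, 5 and 7 race to master the same lockres, so bits 2, 5 and 7 end up set in maybe_map on each of them. Once every node in vote_map has responded, voting_done is true; only node 2 satisfies dlm->node_num <= find_next_bit(mle->maybe_map, ...), so it sets mle->master to itself and sends the assert, while nodes 5 and 7 sleep in wait_event_timeout() until node 2's assert_master arrives and res->owner is finally set.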
974 | |||
975 | struct dlm_bitmap_diff_iter | ||
976 | { | ||
977 | int curnode; | ||
978 | unsigned long *orig_bm; | ||
979 | unsigned long *cur_bm; | ||
980 | unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
981 | }; | ||
982 | |||
983 | enum dlm_node_state_change | ||
984 | { | ||
985 | NODE_DOWN = -1, | ||
986 | NODE_NO_CHANGE = 0, | ||
987 | NODE_UP | ||
988 | }; | ||
989 | |||
990 | static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter, | ||
991 | unsigned long *orig_bm, | ||
992 | unsigned long *cur_bm) | ||
993 | { | ||
994 | unsigned long p1, p2; | ||
995 | int i; | ||
996 | |||
997 | iter->curnode = -1; | ||
998 | iter->orig_bm = orig_bm; | ||
999 | iter->cur_bm = cur_bm; | ||
1000 | |||
1001 | for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) { | ||
1002 | p1 = *(iter->orig_bm + i); | ||
1003 | p2 = *(iter->cur_bm + i); | ||
1004 | iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1); | ||
1005 | } | ||
1006 | } | ||
1007 | |||
1008 | static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter, | ||
1009 | enum dlm_node_state_change *state) | ||
1010 | { | ||
1011 | int bit; | ||
1012 | |||
1013 | if (iter->curnode >= O2NM_MAX_NODES) | ||
1014 | return -ENOENT; | ||
1015 | |||
1016 | bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES, | ||
1017 | iter->curnode+1); | ||
1018 | if (bit >= O2NM_MAX_NODES) { | ||
1019 | iter->curnode = O2NM_MAX_NODES; | ||
1020 | return -ENOENT; | ||
1021 | } | ||
1022 | |||
1023 | /* if it was there in the original then this node died */ | ||
1024 | if (test_bit(bit, iter->orig_bm)) | ||
1025 | *state = NODE_DOWN; | ||
1026 | else | ||
1027 | *state = NODE_UP; | ||
1028 | |||
1029 | iter->curnode = bit; | ||
1030 | return bit; | ||
1031 | } | ||
1032 | |||
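The iterator above walks the symmetric difference of two node bitmaps: the per-word expression (p1 & ~p2) | (p2 & ~p1) is simply XOR, and a differing bit that was set in the original map marks a node that went down, while one set only in the current map marks a node that came up. A self-contained sketch with a single 32-bit word standing in for the O2NM bitmap (illustrative types, not the kernel API):

#include <stdio.h>

#define MAX_NODES 32

enum change { NODE_DOWN = -1, NODE_NO_CHANGE = 0, NODE_UP };

struct diff_iter {
        int curnode;
        unsigned int orig, cur, diff;
};

static void diff_iter_init(struct diff_iter *it, unsigned int orig,
                           unsigned int cur)
{
        it->curnode = -1;
        it->orig = orig;
        it->cur = cur;
        it->diff = orig ^ cur;  /* (p1 & ~p2) | (p2 & ~p1) == XOR */
}

static int diff_iter_next(struct diff_iter *it, enum change *state)
{
        for (int bit = it->curnode + 1; bit < MAX_NODES; bit++) {
                if (!((it->diff >> bit) & 1))
                        continue;
                /* set in the original map means the node went away */
                *state = ((it->orig >> bit) & 1) ? NODE_DOWN : NODE_UP;
                it->curnode = bit;
                return bit;
        }
        it->curnode = MAX_NODES;
        return -1;      /* stands in for -ENOENT */
}

int main(void)
{
        struct diff_iter it;
        enum change st;
        int node;

        /* node 1 died and node 4 came up between the two snapshots */
        diff_iter_init(&it, 0x0Eu /* 1,2,3 */, 0x1Cu /* 2,3,4 */);
        while ((node = diff_iter_next(&it, &st)) >= 0)
                printf("node %d went %s\n", node,
                       st == NODE_UP ? "up" : "down");
        return 0;
}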
1033 | |||
1034 | static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm, | ||
1035 | struct dlm_lock_resource *res, | ||
1036 | struct dlm_master_list_entry *mle, | ||
1037 | int blocked) | ||
1038 | { | ||
1039 | struct dlm_bitmap_diff_iter bdi; | ||
1040 | enum dlm_node_state_change sc; | ||
1041 | int node; | ||
1042 | int ret = 0; | ||
1043 | |||
1044 | mlog(0, "something happened such that the " | ||
1045 | "master process may need to be restarted!\n"); | ||
1046 | |||
1047 | assert_spin_locked(&mle->spinlock); | ||
1048 | |||
1049 | dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map); | ||
1050 | node = dlm_bitmap_diff_iter_next(&bdi, &sc); | ||
1051 | while (node >= 0) { | ||
1052 | if (sc == NODE_UP) { | ||
1053 | /* a node came up. easy. might not even need | ||
1054 | * to talk to it if its node number is higher | ||
1055 | * or if we are already blocked. */ | ||
1056 | mlog(0, "node up! %d\n", node); | ||
1057 | if (blocked) | ||
1058 | goto next; | ||
1059 | |||
1060 | if (node > dlm->node_num) { | ||
1061 | mlog(0, "node > this node. skipping.\n"); | ||
1062 | goto next; | ||
1063 | } | ||
1064 | |||
1065 | /* redo the master request, but only for the new node */ | ||
1066 | mlog(0, "sending request to new node\n"); | ||
1067 | clear_bit(node, mle->response_map); | ||
1068 | set_bit(node, mle->vote_map); | ||
1069 | } else { | ||
1070 | mlog(ML_ERROR, "node down! %d\n", node); | ||
1071 | |||
1072 | /* if the node wasn't involved in mastery skip it, | ||
1073 | * but clear it out from the maps so that it will | ||
1074 | * not affect mastery of this lockres */ | ||
1075 | clear_bit(node, mle->response_map); | ||
1076 | clear_bit(node, mle->vote_map); | ||
1077 | if (!test_bit(node, mle->maybe_map)) | ||
1078 | goto next; | ||
1079 | |||
1080 | /* if we're already blocked on lock mastery, and the | ||
1081 | * dead node wasn't the expected master, or there is | ||
1082 | * another node in the maybe_map, keep waiting */ | ||
1083 | if (blocked) { | ||
1084 | int lowest = find_next_bit(mle->maybe_map, | ||
1085 | O2NM_MAX_NODES, 0); | ||
1086 | |||
1087 | /* act like it was never there */ | ||
1088 | clear_bit(node, mle->maybe_map); | ||
1089 | |||
1090 | if (node != lowest) | ||
1091 | goto next; | ||
1092 | |||
1093 | mlog(ML_ERROR, "expected master %u died while " | ||
1094 | "this node was blocked waiting on it!\n", | ||
1095 | node); | ||
1096 | lowest = find_next_bit(mle->maybe_map, | ||
1097 | O2NM_MAX_NODES, | ||
1098 | lowest+1); | ||
1099 | if (lowest < O2NM_MAX_NODES) { | ||
1100 | mlog(0, "still blocked. waiting " | ||
1101 | "on %u now\n", lowest); | ||
1102 | goto next; | ||
1103 | } | ||
1104 | |||
1105 | /* mle is an MLE_BLOCK, but there is now | ||
1106 | * nothing left to block on. we need to return | ||
1107 | * all the way back out and try again with | ||
1108 | * an MLE_MASTER. dlm_do_local_recovery_cleanup | ||
1109 | * has already run, so the mle refcount is ok */ | ||
1110 | mlog(0, "no longer blocking. we can " | ||
1111 | "try to master this here\n"); | ||
1112 | mle->type = DLM_MLE_MASTER; | ||
1113 | memset(mle->maybe_map, 0, | ||
1114 | sizeof(mle->maybe_map)); | ||
1115 | memset(mle->response_map, 0, | ||
1116 | sizeof(mle->response_map)); | ||
1117 | memcpy(mle->vote_map, mle->node_map, | ||
1118 | sizeof(mle->node_map)); | ||
1119 | mle->u.res = res; | ||
1120 | set_bit(dlm->node_num, mle->maybe_map); | ||
1121 | |||
1122 | ret = -EAGAIN; | ||
1123 | goto next; | ||
1124 | } | ||
1125 | |||
1126 | clear_bit(node, mle->maybe_map); | ||
1127 | if (node > dlm->node_num) | ||
1128 | goto next; | ||
1129 | |||
1130 | mlog(0, "dead node in map!\n"); | ||
1131 | /* yuck. go back and re-contact all nodes | ||
1132 | * in the vote_map, removing this node. */ | ||
1133 | memset(mle->response_map, 0, | ||
1134 | sizeof(mle->response_map)); | ||
1135 | } | ||
1136 | ret = -EAGAIN; | ||
1137 | next: | ||
1138 | node = dlm_bitmap_diff_iter_next(&bdi, &sc); | ||
1139 | } | ||
1140 | return ret; | ||
1141 | } | ||
1142 | |||
1143 | |||
1144 | /* | ||
1145 | * DLM_MASTER_REQUEST_MSG | ||
1146 | * | ||
1147 | * returns: 0 on success, | ||
1148 | * -errno on a network error | ||
1149 | * | ||
1150 | * on error, the caller should assume the target node is "dead" | ||
1151 | * | ||
1152 | */ | ||
1153 | |||
1154 | static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to) | ||
1155 | { | ||
1156 | struct dlm_ctxt *dlm = mle->dlm; | ||
1157 | struct dlm_master_request request; | ||
1158 | int ret, response=0, resend; | ||
1159 | |||
1160 | memset(&request, 0, sizeof(request)); | ||
1161 | request.node_idx = dlm->node_num; | ||
1162 | |||
1163 | BUG_ON(mle->type == DLM_MLE_MIGRATION); | ||
1164 | |||
1165 | if (mle->type != DLM_MLE_MASTER) { | ||
1166 | request.namelen = mle->u.name.len; | ||
1167 | memcpy(request.name, mle->u.name.name, request.namelen); | ||
1168 | } else { | ||
1169 | request.namelen = mle->u.res->lockname.len; | ||
1170 | memcpy(request.name, mle->u.res->lockname.name, | ||
1171 | request.namelen); | ||
1172 | } | ||
1173 | |||
1174 | again: | ||
1175 | ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request, | ||
1176 | sizeof(request), to, &response); | ||
1177 | if (ret < 0) { | ||
1178 | if (ret == -ESRCH) { | ||
1179 | /* should never happen */ | ||
1180 | mlog(ML_ERROR, "TCP stack not ready!\n"); | ||
1181 | BUG(); | ||
1182 | } else if (ret == -EINVAL) { | ||
1183 | mlog(ML_ERROR, "bad args passed to o2net!\n"); | ||
1184 | BUG(); | ||
1185 | } else if (ret == -ENOMEM) { | ||
1186 | mlog(ML_ERROR, "out of memory while trying to send " | ||
1187 | "network message! retrying\n"); | ||
1188 | /* this is totally crude */ | ||
1189 | msleep(50); | ||
1190 | goto again; | ||
1191 | } else if (!dlm_is_host_down(ret)) { | ||
1192 | /* not a network error. bad. */ | ||
1193 | mlog_errno(ret); | ||
1194 | mlog(ML_ERROR, "unhandled error!"); | ||
1195 | BUG(); | ||
1196 | } | ||
1197 | /* all other errors should be network errors, | ||
1198 | * and likely indicate node death */ | ||
1199 | mlog(ML_ERROR, "link to %d went down!\n", to); | ||
1200 | goto out; | ||
1201 | } | ||
1202 | |||
1203 | ret = 0; | ||
1204 | resend = 0; | ||
1205 | spin_lock(&mle->spinlock); | ||
1206 | switch (response) { | ||
1207 | case DLM_MASTER_RESP_YES: | ||
1208 | set_bit(to, mle->response_map); | ||
1209 | mlog(0, "node %u is the master, response=YES\n", to); | ||
1210 | mle->master = to; | ||
1211 | break; | ||
1212 | case DLM_MASTER_RESP_NO: | ||
1213 | mlog(0, "node %u not master, response=NO\n", to); | ||
1214 | set_bit(to, mle->response_map); | ||
1215 | break; | ||
1216 | case DLM_MASTER_RESP_MAYBE: | ||
1217 | mlog(0, "node %u not master, response=MAYBE\n", to); | ||
1218 | set_bit(to, mle->response_map); | ||
1219 | set_bit(to, mle->maybe_map); | ||
1220 | break; | ||
1221 | case DLM_MASTER_RESP_ERROR: | ||
1222 | mlog(0, "node %u hit an error, resending\n", to); | ||
1223 | resend = 1; | ||
1224 | response = 0; | ||
1225 | break; | ||
1226 | default: | ||
1227 | mlog(ML_ERROR, "bad response! %u\n", response); | ||
1228 | BUG(); | ||
1229 | } | ||
1230 | spin_unlock(&mle->spinlock); | ||
1231 | if (resend) { | ||
1232 | /* this is also totally crude */ | ||
1233 | msleep(50); | ||
1234 | goto again; | ||
1235 | } | ||
1236 | |||
1237 | out: | ||
1238 | return ret; | ||
1239 | } | ||
1240 | |||
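dlm_do_master_request treats -ENOMEM and a peer-reported DLM_MASTER_RESP_ERROR as transient conditions, backing off ~50ms and resending, while any genuine network error is read as node death and returned to the caller. A rough userspace sketch of that retry shape (send_msg here is a hypothetical stand-in for o2net_send_message, not a real API):

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

/* hypothetical transport: returns 0 and fills *resp, or -errno */
static int send_msg(int to, int *resp)
{
        static int calls;
        if (calls++ == 0)
                return -ENOMEM; /* first attempt: transient failure */
        *resp = 1;              /* peer answered */
        return 0;
}

static int do_request(int to)
{
        int resp, ret;
again:
        ret = send_msg(to, &resp);
        if (ret == -ENOMEM) {
                /* transient: back off briefly and resend, as the DLM does */
                usleep(50 * 1000);
                goto again;
        }
        if (ret < 0)
                return ret;     /* anything else means "node is dead" */
        if (resp < 0)           /* peer hit a transient error: resend too */
                goto again;
        return resp;
}

int main(void)
{
        printf("response = %d\n", do_request(3));
        return 0;
}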
1241 | /* | ||
1242 | * locks that can be taken here: | ||
1243 | * dlm->spinlock | ||
1244 | * res->spinlock | ||
1245 | * mle->spinlock | ||
1246 | * dlm->master_list | ||
1247 | * | ||
1248 | * if possible, TRIM THIS DOWN!!! | ||
1249 | */ | ||
1250 | int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1251 | { | ||
1252 | u8 response = DLM_MASTER_RESP_MAYBE; | ||
1253 | struct dlm_ctxt *dlm = data; | ||
1254 | struct dlm_lock_resource *res; | ||
1255 | struct dlm_master_request *request = (struct dlm_master_request *) msg->buf; | ||
1256 | struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL; | ||
1257 | char *name; | ||
1258 | unsigned int namelen; | ||
1259 | int found, ret; | ||
1260 | int set_maybe; | ||
1261 | |||
1262 | if (!dlm_grab(dlm)) | ||
1263 | return DLM_MASTER_RESP_NO; | ||
1264 | |||
1265 | if (!dlm_domain_fully_joined(dlm)) { | ||
1266 | response = DLM_MASTER_RESP_NO; | ||
1267 | goto send_response; | ||
1268 | } | ||
1269 | |||
1270 | name = request->name; | ||
1271 | namelen = request->namelen; | ||
1272 | |||
1273 | if (namelen > DLM_LOCKID_NAME_MAX) { | ||
1274 | response = DLM_IVBUFLEN; | ||
1275 | goto send_response; | ||
1276 | } | ||
1277 | |||
1278 | way_up_top: | ||
1279 | spin_lock(&dlm->spinlock); | ||
1280 | res = __dlm_lookup_lockres(dlm, name, namelen); | ||
1281 | if (res) { | ||
1282 | spin_unlock(&dlm->spinlock); | ||
1283 | |||
1284 | /* take care of the easy cases up front */ | ||
1285 | spin_lock(&res->spinlock); | ||
1286 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1287 | spin_unlock(&res->spinlock); | ||
1288 | mlog(0, "returning DLM_MASTER_RESP_ERROR since res is " | ||
1289 | "being recovered\n"); | ||
1290 | response = DLM_MASTER_RESP_ERROR; | ||
1291 | if (mle) | ||
1292 | kmem_cache_free(dlm_mle_cache, mle); | ||
1293 | goto send_response; | ||
1294 | } | ||
1295 | |||
1296 | if (res->owner == dlm->node_num) { | ||
1297 | u32 flags = DLM_ASSERT_MASTER_MLE_CLEANUP; | ||
1298 | spin_unlock(&res->spinlock); | ||
1299 | // mlog(0, "this node is the master\n"); | ||
1300 | response = DLM_MASTER_RESP_YES; | ||
1301 | if (mle) | ||
1302 | kmem_cache_free(dlm_mle_cache, mle); | ||
1303 | |||
1304 | /* this node is the owner. | ||
1305 | * there is some extra work that needs to | ||
1306 | * happen now. the requesting node has | ||
1307 | * caused all nodes up to this one to | ||
1308 | * create mles. this node now needs to | ||
1309 | * go back and clean those up. */ | ||
1310 | mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", | ||
1311 | dlm->node_num, res->lockname.len, res->lockname.name); | ||
1312 | ret = dlm_dispatch_assert_master(dlm, res, 1, | ||
1313 | request->node_idx, | ||
1314 | flags); | ||
1315 | if (ret < 0) { | ||
1316 | mlog(ML_ERROR, "failed to dispatch assert " | ||
1317 | "master work\n"); | ||
1318 | response = DLM_MASTER_RESP_ERROR; | ||
1319 | } | ||
1320 | goto send_response; | ||
1321 | } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1322 | spin_unlock(&res->spinlock); | ||
1323 | // mlog(0, "node %u is the master\n", res->owner); | ||
1324 | response = DLM_MASTER_RESP_NO; | ||
1325 | if (mle) | ||
1326 | kmem_cache_free(dlm_mle_cache, mle); | ||
1327 | goto send_response; | ||
1328 | } | ||
1329 | |||
1330 | /* ok, there is no owner. either this node is | ||
1331 | * being blocked, or it is actively trying to | ||
1332 | * master this lock. */ | ||
1333 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | ||
1334 | mlog(ML_ERROR, "lock with no owner should be " | ||
1335 | "in-progress!\n"); | ||
1336 | BUG(); | ||
1337 | } | ||
1338 | |||
1339 | // mlog(0, "lockres is in progress...\n"); | ||
1340 | spin_lock(&dlm->master_lock); | ||
1341 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | ||
1342 | if (!found) { | ||
1343 | mlog(ML_ERROR, "no mle found for this lock!\n"); | ||
1344 | BUG(); | ||
1345 | } | ||
1346 | set_maybe = 1; | ||
1347 | spin_lock(&tmpmle->spinlock); | ||
1348 | if (tmpmle->type == DLM_MLE_BLOCK) { | ||
1349 | // mlog(0, "this node is waiting for " | ||
1350 | // "lockres to be mastered\n"); | ||
1351 | response = DLM_MASTER_RESP_NO; | ||
1352 | } else if (tmpmle->type == DLM_MLE_MIGRATION) { | ||
1353 | mlog(0, "node %u is master, but trying to migrate to " | ||
1354 | "node %u.\n", tmpmle->master, tmpmle->new_master); | ||
1355 | if (tmpmle->master == dlm->node_num) { | ||
1356 | response = DLM_MASTER_RESP_YES; | ||
1357 | mlog(ML_ERROR, "no owner on lockres, but this " | ||
1358 | "node is trying to migrate it to %u?!\n", | ||
1359 | tmpmle->new_master); | ||
1360 | BUG(); | ||
1361 | } else { | ||
1362 | /* the real master can respond on its own */ | ||
1363 | response = DLM_MASTER_RESP_NO; | ||
1364 | } | ||
1365 | } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1366 | set_maybe = 0; | ||
1367 | if (tmpmle->master == dlm->node_num) | ||
1368 | response = DLM_MASTER_RESP_YES; | ||
1369 | else | ||
1370 | response = DLM_MASTER_RESP_NO; | ||
1371 | } else { | ||
1372 | // mlog(0, "this node is attempting to " | ||
1373 | // "master lockres\n"); | ||
1374 | response = DLM_MASTER_RESP_MAYBE; | ||
1375 | } | ||
1376 | if (set_maybe) | ||
1377 | set_bit(request->node_idx, tmpmle->maybe_map); | ||
1378 | spin_unlock(&tmpmle->spinlock); | ||
1379 | |||
1380 | spin_unlock(&dlm->master_lock); | ||
1381 | spin_unlock(&res->spinlock); | ||
1382 | |||
1383 | /* keep the mle attached to heartbeat events */ | ||
1384 | dlm_put_mle(tmpmle); | ||
1385 | if (mle) | ||
1386 | kmem_cache_free(dlm_mle_cache, mle); | ||
1387 | goto send_response; | ||
1388 | } | ||
1389 | |||
1390 | /* | ||
1391 | * lockres doesn't exist on this node | ||
1392 | * if there is an MLE_BLOCK, return NO | ||
1393 | * if there is an MLE_MASTER, return MAYBE | ||
1394 | * otherwise, add an MLE_BLOCK, return NO | ||
1395 | */ | ||
1396 | spin_lock(&dlm->master_lock); | ||
1397 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | ||
1398 | if (!found) { | ||
1399 | /* this lockid has never been seen on this node yet */ | ||
1400 | // mlog(0, "no mle found\n"); | ||
1401 | if (!mle) { | ||
1402 | spin_unlock(&dlm->master_lock); | ||
1403 | spin_unlock(&dlm->spinlock); | ||
1404 | |||
1405 | mle = (struct dlm_master_list_entry *) | ||
1406 | kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL); | ||
1407 | if (!mle) { | ||
1408 | // bad bad bad... this sucks. | ||
1409 | response = DLM_MASTER_RESP_ERROR; | ||
1410 | goto send_response; | ||
1411 | } | ||
1412 | spin_lock(&dlm->spinlock); | ||
1413 | dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, | ||
1414 | name, namelen); | ||
1415 | spin_unlock(&dlm->spinlock); | ||
1416 | goto way_up_top; | ||
1417 | } | ||
1418 | |||
1419 | // mlog(0, "this is second time thru, already allocated, " | ||
1420 | // "add the block.\n"); | ||
1421 | set_bit(request->node_idx, mle->maybe_map); | ||
1422 | list_add(&mle->list, &dlm->master_list); | ||
1423 | response = DLM_MASTER_RESP_NO; | ||
1424 | } else { | ||
1425 | // mlog(0, "mle was found\n"); | ||
1426 | set_maybe = 1; | ||
1427 | spin_lock(&tmpmle->spinlock); | ||
1428 | if (tmpmle->type == DLM_MLE_BLOCK) | ||
1429 | response = DLM_MASTER_RESP_NO; | ||
1430 | else if (tmpmle->type == DLM_MLE_MIGRATION) { | ||
1431 | mlog(0, "migration mle was found (%u->%u)\n", | ||
1432 | tmpmle->master, tmpmle->new_master); | ||
1433 | if (tmpmle->master == dlm->node_num) { | ||
1434 | mlog(ML_ERROR, "no lockres, but migration mle " | ||
1435 | "says that this node is master!\n"); | ||
1436 | BUG(); | ||
1437 | } | ||
1438 | /* real master can respond on its own */ | ||
1439 | response = DLM_MASTER_RESP_NO; | ||
1440 | } else { | ||
1441 | if (tmpmle->master == dlm->node_num) { | ||
1442 | response = DLM_MASTER_RESP_YES; | ||
1443 | set_maybe = 0; | ||
1444 | } else | ||
1445 | response = DLM_MASTER_RESP_MAYBE; | ||
1446 | } | ||
1447 | if (set_maybe) | ||
1448 | set_bit(request->node_idx, tmpmle->maybe_map); | ||
1449 | spin_unlock(&tmpmle->spinlock); | ||
1450 | } | ||
1451 | spin_unlock(&dlm->master_lock); | ||
1452 | spin_unlock(&dlm->spinlock); | ||
1453 | |||
1454 | if (found) { | ||
1455 | /* keep the mle attached to heartbeat events */ | ||
1456 | dlm_put_mle(tmpmle); | ||
1457 | } | ||
1458 | send_response: | ||
1459 | dlm_put(dlm); | ||
1460 | return response; | ||
1461 | } | ||
1462 | |||
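Stripped of locking and mle bookkeeping, the handler above is a decision table: a recovering resource gets ERROR, a known owner answers YES or NO, and an ownerless in-progress resource answers from the mle state (BLOCK and MIGRATION defer to the real master, a known mle master answers directly, and a racing MASTER attempt says MAYBE). A condensed sketch of that table; it deliberately drops the maybe_map updates and the allocate-and-retry path:

#include <stdio.h>
#include <assert.h>

enum resp { RESP_NO, RESP_YES, RESP_MAYBE, RESP_ERROR };
enum mle_type { MLE_BLOCK, MLE_MASTER, MLE_MIGRATION };

#define OWNER_UNKNOWN 255

/* condensed decision table for a master request about one lockres */
static enum resp master_response(int recovering, int owner, int self,
                                 enum mle_type type, int mle_master)
{
        if (recovering)
                return RESP_ERROR;      /* ask again later */
        if (owner == self)
                return RESP_YES;
        if (owner != OWNER_UNKNOWN)
                return RESP_NO;         /* someone else owns it */
        /* no owner: answer from the in-progress mastery state */
        if (type == MLE_BLOCK || type == MLE_MIGRATION)
                return RESP_NO;         /* real master will answer */
        if (mle_master != OWNER_UNKNOWN)
                return mle_master == self ? RESP_YES : RESP_NO;
        return RESP_MAYBE;              /* we are racing for it too */
}

int main(void)
{
        assert(master_response(1, 0, 0, MLE_MASTER, 0) == RESP_ERROR);
        assert(master_response(0, 2, 2, MLE_MASTER, 2) == RESP_YES);
        assert(master_response(0, OWNER_UNKNOWN, 2, MLE_MASTER,
                               OWNER_UNKNOWN) == RESP_MAYBE);
        puts("decision table ok");
        return 0;
}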
1463 | /* | ||
1464 | * DLM_ASSERT_MASTER_MSG | ||
1465 | */ | ||
1466 | |||
1467 | |||
1468 | /* | ||
1469 | * NOTE: this can be used for debugging | ||
1470 | * can periodically run all locks owned by this node | ||
1471 | * and re-assert across the cluster... | ||
1472 | */ | ||
1473 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname, | ||
1474 | unsigned int namelen, void *nodemap, | ||
1475 | u32 flags) | ||
1476 | { | ||
1477 | struct dlm_assert_master assert; | ||
1478 | int to, tmpret; | ||
1479 | struct dlm_node_iter iter; | ||
1480 | int ret = 0; | ||
1481 | |||
1482 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | ||
1483 | |||
1484 | /* note that if this nodemap is empty, it returns 0 */ | ||
1485 | dlm_node_iter_init(nodemap, &iter); | ||
1486 | while ((to = dlm_node_iter_next(&iter)) >= 0) { | ||
1487 | int r = 0; | ||
1488 | mlog(0, "sending assert master to %d (%.*s)\n", to, | ||
1489 | namelen, lockname); | ||
1490 | memset(&assert, 0, sizeof(assert)); | ||
1491 | assert.node_idx = dlm->node_num; | ||
1492 | assert.namelen = namelen; | ||
1493 | memcpy(assert.name, lockname, namelen); | ||
1494 | assert.flags = cpu_to_be32(flags); | ||
1495 | |||
1496 | tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, | ||
1497 | &assert, sizeof(assert), to, &r); | ||
1498 | if (tmpret < 0) { | ||
1499 | mlog(ML_ERROR, "assert_master returned %d!\n", tmpret); | ||
1500 | if (!dlm_is_host_down(tmpret)) { | ||
1501 | mlog(ML_ERROR, "unhandled error!\n"); | ||
1502 | BUG(); | ||
1503 | } | ||
1504 | /* a node died. finish out the rest of the nodes. */ | ||
1505 | mlog(ML_ERROR, "link to %d went down!\n", to); | ||
1506 | /* any nonzero status return will do */ | ||
1507 | ret = tmpret; | ||
1508 | } else if (r < 0) { | ||
1509 | /* ok, something is horribly messed up. kill thyself. */ | ||
1510 | mlog(ML_ERROR,"during assert master of %.*s to %u, " | ||
1511 | "got %d.\n", namelen, lockname, to, r); | ||
1512 | dlm_dump_lock_resources(dlm); | ||
1513 | BUG(); | ||
1514 | } | ||
1515 | } | ||
1516 | |||
1517 | return ret; | ||
1518 | } | ||
1519 | |||
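Note the error policy above: a dead link does not abort the fan-out; the loop keeps contacting the remaining nodes and only remembers a nonzero status for the caller. Roughly, with send_assert as a hypothetical stand-in for the o2net send and a toy 32-bit nodemap:

#include <stdio.h>

#define MAX_NODES 32

/* hypothetical per-node send: <0 means the link to 'to' is down */
static int send_assert(int to)
{
        return to == 3 ? -107 /* ENOTCONN-ish */ : 0;
}

/* a dead node does not stop the fan-out, but its error is remembered */
static int assert_to_all(unsigned int nodemap)
{
        int ret = 0;

        for (int to = 0; to < MAX_NODES; to++) {
                if (!((nodemap >> to) & 1))
                        continue;
                int tmpret = send_assert(to);
                if (tmpret < 0) {
                        printf("link to %d went down!\n", to);
                        ret = tmpret;   /* any nonzero status will do */
                }
        }
        return ret;
}

int main(void)
{
        printf("final status: %d\n", assert_to_all(0x0Au /* nodes 1,3 */));
        return 0;
}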
1520 | /* | ||
1521 | * locks that can be taken here: | ||
1522 | * dlm->spinlock | ||
1523 | * res->spinlock | ||
1524 | * mle->spinlock | ||
1525 | * dlm->master_list | ||
1526 | * | ||
1527 | * if possible, TRIM THIS DOWN!!! | ||
1528 | */ | ||
1529 | int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1530 | { | ||
1531 | struct dlm_ctxt *dlm = data; | ||
1532 | struct dlm_master_list_entry *mle = NULL; | ||
1533 | struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; | ||
1534 | struct dlm_lock_resource *res = NULL; | ||
1535 | char *name; | ||
1536 | unsigned int namelen; | ||
1537 | u32 flags; | ||
1538 | |||
1539 | if (!dlm_grab(dlm)) | ||
1540 | return 0; | ||
1541 | |||
1542 | name = assert->name; | ||
1543 | namelen = assert->namelen; | ||
1544 | flags = be32_to_cpu(assert->flags); | ||
1545 | |||
1546 | if (namelen > DLM_LOCKID_NAME_MAX) { | ||
1547 | mlog(ML_ERROR, "Invalid name length!"); | ||
1548 | goto done; | ||
1549 | } | ||
1550 | |||
1551 | spin_lock(&dlm->spinlock); | ||
1552 | |||
1553 | if (flags) | ||
1554 | mlog(0, "assert_master with flags: %u\n", flags); | ||
1555 | |||
1556 | /* find the MLE */ | ||
1557 | spin_lock(&dlm->master_lock); | ||
1558 | if (!dlm_find_mle(dlm, &mle, name, namelen)) { | ||
1559 | /* not an error, could be master just re-asserting */ | ||
1560 | mlog(0, "just got an assert_master from %u, but no " | ||
1561 | "MLE for it! (%.*s)\n", assert->node_idx, | ||
1562 | namelen, name); | ||
1563 | } else { | ||
1564 | int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | ||
1565 | if (bit >= O2NM_MAX_NODES) { | ||
1566 | /* not necessarily an error, though less likely. | ||
1567 | * could be master just re-asserting. */ | ||
1568 | mlog(ML_ERROR, "no bits set in the maybe_map, but %u " | ||
1569 | "is asserting! (%.*s)\n", assert->node_idx, | ||
1570 | namelen, name); | ||
1571 | } else if (bit != assert->node_idx) { | ||
1572 | if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { | ||
1573 | mlog(0, "master %u was found, %u should " | ||
1574 | "back off\n", assert->node_idx, bit); | ||
1575 | } else { | ||
1576 | /* with the fix for bug 569, a higher node | ||
1577 | * number winning the mastery will respond | ||
1578 | * YES to mastery requests, but this node | ||
1579 | * had no way of knowing. let it pass. */ | ||
1580 | mlog(ML_ERROR, "%u is the lowest node, " | ||
1581 | "%u is asserting. (%.*s) %u must " | ||
1582 | "have begun after %u won.\n", bit, | ||
1583 | assert->node_idx, namelen, name, bit, | ||
1584 | assert->node_idx); | ||
1585 | } | ||
1586 | } | ||
1587 | } | ||
1588 | spin_unlock(&dlm->master_lock); | ||
1589 | |||
1590 | /* ok everything checks out with the MLE | ||
1591 | * now check to see if there is a lockres */ | ||
1592 | res = __dlm_lookup_lockres(dlm, name, namelen); | ||
1593 | if (res) { | ||
1594 | spin_lock(&res->spinlock); | ||
1595 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1596 | mlog(ML_ERROR, "%u asserting but %.*s is " | ||
1597 | "RECOVERING!\n", assert->node_idx, namelen, name); | ||
1598 | goto kill; | ||
1599 | } | ||
1600 | if (!mle) { | ||
1601 | if (res->owner != assert->node_idx) { | ||
1602 | mlog(ML_ERROR, "assert_master from " | ||
1603 | "%u, but current owner is " | ||
1604 | "%u! (%.*s)\n", | ||
1605 | assert->node_idx, res->owner, | ||
1606 | namelen, name); | ||
1607 | goto kill; | ||
1608 | } | ||
1609 | } else if (mle->type != DLM_MLE_MIGRATION) { | ||
1610 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1611 | /* owner is just re-asserting */ | ||
1612 | if (res->owner == assert->node_idx) { | ||
1613 | mlog(0, "owner %u re-asserting on " | ||
1614 | "lock %.*s\n", assert->node_idx, | ||
1615 | namelen, name); | ||
1616 | goto ok; | ||
1617 | } | ||
1618 | mlog(ML_ERROR, "got assert_master from " | ||
1619 | "node %u, but %u is the owner! " | ||
1620 | "(%.*s)\n", assert->node_idx, | ||
1621 | res->owner, namelen, name); | ||
1622 | goto kill; | ||
1623 | } | ||
1624 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | ||
1625 | mlog(ML_ERROR, "got assert from %u, but lock " | ||
1626 | "with no owner should be " | ||
1627 | "in-progress! (%.*s)\n", | ||
1628 | assert->node_idx, | ||
1629 | namelen, name); | ||
1630 | goto kill; | ||
1631 | } | ||
1632 | } else /* mle->type == DLM_MLE_MIGRATION */ { | ||
1633 | /* should only be getting an assert from new master */ | ||
1634 | if (assert->node_idx != mle->new_master) { | ||
1635 | mlog(ML_ERROR, "got assert from %u, but " | ||
1636 | "new master is %u, and old master " | ||
1637 | "was %u (%.*s)\n", | ||
1638 | assert->node_idx, mle->new_master, | ||
1639 | mle->master, namelen, name); | ||
1640 | goto kill; | ||
1641 | } | ||
1642 | |||
1643 | } | ||
1644 | ok: | ||
1645 | spin_unlock(&res->spinlock); | ||
1646 | } | ||
1647 | spin_unlock(&dlm->spinlock); | ||
1648 | |||
1649 | // mlog(0, "woo! got an assert_master from node %u!\n", | ||
1650 | // assert->node_idx); | ||
1651 | if (mle) { | ||
1652 | int extra_ref; | ||
1653 | |||
1654 | spin_lock(&mle->spinlock); | ||
1655 | extra_ref = !!(mle->type == DLM_MLE_BLOCK | ||
1656 | || mle->type == DLM_MLE_MIGRATION); | ||
1657 | mle->master = assert->node_idx; | ||
1658 | atomic_set(&mle->woken, 1); | ||
1659 | wake_up(&mle->wq); | ||
1660 | spin_unlock(&mle->spinlock); | ||
1661 | |||
1662 | if (mle->type == DLM_MLE_MIGRATION && res) { | ||
1663 | mlog(0, "finishing off migration of lockres %.*s, " | ||
1664 | "from %u to %u\n", | ||
1665 | res->lockname.len, res->lockname.name, | ||
1666 | dlm->node_num, mle->new_master); | ||
1667 | spin_lock(&res->spinlock); | ||
1668 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
1669 | dlm_change_lockres_owner(dlm, res, mle->new_master); | ||
1670 | BUG_ON(res->state & DLM_LOCK_RES_DIRTY); | ||
1671 | spin_unlock(&res->spinlock); | ||
1672 | } | ||
1673 | /* master is known, detach if not already detached */ | ||
1674 | dlm_mle_detach_hb_events(dlm, mle); | ||
1675 | dlm_put_mle(mle); | ||
1676 | |||
1677 | if (extra_ref) { | ||
1678 | /* the assert master message now balances the extra | ||
1679 | * ref given by the master / migration request message. | ||
1680 | * if this is the last put, it will be removed | ||
1681 | * from the list. */ | ||
1682 | dlm_put_mle(mle); | ||
1683 | } | ||
1684 | } | ||
1685 | |||
1686 | done: | ||
1687 | if (res) | ||
1688 | dlm_lockres_put(res); | ||
1689 | dlm_put(dlm); | ||
1690 | return 0; | ||
1691 | |||
1692 | kill: | ||
1693 | /* kill the caller! */ | ||
1694 | spin_unlock(&res->spinlock); | ||
1695 | spin_unlock(&dlm->spinlock); | ||
1696 | dlm_lockres_put(res); | ||
1697 | mlog(ML_ERROR, "Bad message received from another node. Dumping state " | ||
1698 | "and killing the other node now! This node is OK and can continue.\n"); | ||
1699 | dlm_dump_lock_resources(dlm); | ||
1700 | dlm_put(dlm); | ||
1701 | return -EINVAL; | ||
1702 | } | ||
1703 | |||
1704 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, | ||
1705 | struct dlm_lock_resource *res, | ||
1706 | int ignore_higher, u8 request_from, u32 flags) | ||
1707 | { | ||
1708 | struct dlm_work_item *item; | ||
1709 | item = kcalloc(1, sizeof(*item), GFP_KERNEL); | ||
1710 | if (!item) | ||
1711 | return -ENOMEM; | ||
1712 | |||
1713 | |||
1714 | /* queue up work for dlm_assert_master_worker */ | ||
1715 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
1716 | dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); | ||
1717 | item->u.am.lockres = res; /* already have a ref */ | ||
1718 | /* can optionally ignore node numbers higher than this node */ | ||
1719 | item->u.am.ignore_higher = ignore_higher; | ||
1720 | item->u.am.request_from = request_from; | ||
1721 | item->u.am.flags = flags; | ||
1722 | |||
1723 | spin_lock(&dlm->work_lock); | ||
1724 | list_add_tail(&item->list, &dlm->work_list); | ||
1725 | spin_unlock(&dlm->work_lock); | ||
1726 | |||
1727 | schedule_work(&dlm->dispatched_work); | ||
1728 | return 0; | ||
1729 | } | ||
1730 | |||
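The dispatch above is the usual deferred-work shape: allocate an item, stash the payload, append it to dlm->work_list under work_lock, and let the scheduled work function drain the list later. A single-threaded userspace sketch of that queue (illustrative types; the kernel version also pins the dlm with an extra reference while the item is queued):

#include <stdio.h>
#include <stdlib.h>

/* minimal deferred-work queue in the spirit of dlm->work_list */
struct work_item {
        void (*func)(struct work_item *);
        int node;                       /* stand-in for the am.* payload */
        struct work_item *next;
};

static struct work_item *head, **tail = &head;

static int dispatch(void (*func)(struct work_item *), int node)
{
        struct work_item *item = calloc(1, sizeof(*item));
        if (!item)
                return -1;              /* -ENOMEM in the kernel */
        item->func = func;
        item->node = node;
        *tail = item;                   /* list_add_tail */
        tail = &item->next;
        return 0;
}

static void assert_worker(struct work_item *item)
{
        printf("asserting mastery on behalf of node %d\n", item->node);
}

int main(void)
{
        dispatch(assert_worker, 4);
        dispatch(assert_worker, 7);
        /* the "dispatched_work" pass: drain and run everything queued */
        for (struct work_item *it = head, *next; it; it = next) {
                next = it->next;
                it->func(it);
                free(it);
        }
        return 0;
}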
1731 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) | ||
1732 | { | ||
1733 | struct dlm_ctxt *dlm = data; | ||
1734 | int ret = 0; | ||
1735 | struct dlm_lock_resource *res; | ||
1736 | unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
1737 | int ignore_higher; | ||
1738 | int bit; | ||
1739 | u8 request_from; | ||
1740 | u32 flags; | ||
1741 | |||
1742 | dlm = item->dlm; | ||
1743 | res = item->u.am.lockres; | ||
1744 | ignore_higher = item->u.am.ignore_higher; | ||
1745 | request_from = item->u.am.request_from; | ||
1746 | flags = item->u.am.flags; | ||
1747 | |||
1748 | spin_lock(&dlm->spinlock); | ||
1749 | memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); | ||
1750 | spin_unlock(&dlm->spinlock); | ||
1751 | |||
1752 | clear_bit(dlm->node_num, nodemap); | ||
1753 | if (ignore_higher) { | ||
1754 | /* if this is just to clear up mles for nodes below | ||
1755 | * this node, do not send the message to the original | ||
1756 | * caller or any node number higher than this */ | ||
1757 | clear_bit(request_from, nodemap); | ||
1758 | bit = dlm->node_num; | ||
1759 | while (1) { | ||
1760 | bit = find_next_bit(nodemap, O2NM_MAX_NODES, | ||
1761 | bit+1); | ||
1762 | if (bit >= O2NM_MAX_NODES) | ||
1763 | break; | ||
1764 | clear_bit(bit, nodemap); | ||
1765 | } | ||
1766 | } | ||
1767 | |||
1768 | /* this call now finishes out the nodemap | ||
1769 | * even if one or more nodes die */ | ||
1770 | mlog(0, "worker about to master %.*s here, this=%u\n", | ||
1771 | res->lockname.len, res->lockname.name, dlm->node_num); | ||
1772 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
1773 | res->lockname.len, | ||
1774 | nodemap, flags); | ||
1775 | if (ret < 0) { | ||
1776 | /* no need to restart, we are done */ | ||
1777 | mlog_errno(ret); | ||
1778 | } | ||
1779 | |||
1780 | dlm_lockres_put(res); | ||
1781 | |||
1782 | mlog(0, "finished with dlm_assert_master_worker\n"); | ||
1783 | } | ||
1784 | |||
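The nodemap trimming in the worker, when ignore_higher is set, keeps only nodes numbered below this one and also drops the original requester. On a toy 32-bit map the whole thing reduces to two mask operations (sketch, illustrative names):

#include <stdio.h>

/* keep only nodes below 'self', minus the original requester */
static unsigned int trim_nodemap(unsigned int map, int self, int req_from)
{
        map &= ~(1u << req_from);       /* do not answer the requester */
        map &= (1u << self) - 1;        /* drop self and everyone above */
        return map;
}

int main(void)
{
        /* nodes 1, 2, 6 alive; we are node 4; request came from node 1 */
        unsigned int map = (1u << 1) | (1u << 2) | (1u << 6);
        printf("send assert to map %#x\n", trim_nodemap(map, 4, 1));
        return 0;
}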
1785 | |||
1786 | /* | ||
1787 | * DLM_MIGRATE_LOCKRES | ||
1788 | */ | ||
1789 | |||
1790 | |||
1791 | int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
1792 | u8 target) | ||
1793 | { | ||
1794 | struct dlm_master_list_entry *mle = NULL; | ||
1795 | struct dlm_master_list_entry *oldmle = NULL; | ||
1796 | struct dlm_migratable_lockres *mres = NULL; | ||
1797 | int ret = -EINVAL; | ||
1798 | const char *name; | ||
1799 | unsigned int namelen; | ||
1800 | int mle_added = 0; | ||
1801 | struct list_head *queue, *iter; | ||
1802 | int i; | ||
1803 | struct dlm_lock *lock; | ||
1804 | int empty = 1; | ||
1805 | |||
1806 | if (!dlm_grab(dlm)) | ||
1807 | return -EINVAL; | ||
1808 | |||
1809 | name = res->lockname.name; | ||
1810 | namelen = res->lockname.len; | ||
1811 | |||
1812 | mlog(0, "migrating %.*s to %u\n", namelen, name, target); | ||
1813 | |||
1814 | /* | ||
1815 | * ensure this lockres is a proper candidate for migration | ||
1816 | */ | ||
1817 | spin_lock(&res->spinlock); | ||
1818 | if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1819 | mlog(0, "cannot migrate lockres with unknown owner!\n"); | ||
1820 | spin_unlock(&res->spinlock); | ||
1821 | goto leave; | ||
1822 | } | ||
1823 | if (res->owner != dlm->node_num) { | ||
1824 | mlog(0, "cannot migrate lockres this node doesn't own!\n"); | ||
1825 | spin_unlock(&res->spinlock); | ||
1826 | goto leave; | ||
1827 | } | ||
1828 | mlog(0, "checking queues...\n"); | ||
1829 | queue = &res->granted; | ||
1830 | for (i=0; i<3; i++) { | ||
1831 | list_for_each(iter, queue) { | ||
1832 | lock = list_entry(iter, struct dlm_lock, list); | ||
1833 | empty = 0; | ||
1834 | if (lock->ml.node == dlm->node_num) { | ||
1835 | mlog(0, "found a lock owned by this node " | ||
1836 | "still on the %s queue! will not " | ||
1837 | "migrate this lockres\n", | ||
1838 | i==0 ? "granted" : | ||
1839 | (i==1 ? "converting" : "blocked")); | ||
1840 | spin_unlock(&res->spinlock); | ||
1841 | ret = -ENOTEMPTY; | ||
1842 | goto leave; | ||
1843 | } | ||
1844 | } | ||
1845 | queue++; | ||
1846 | } | ||
1847 | mlog(0, "all locks on this lockres are nonlocal. continuing\n"); | ||
1848 | spin_unlock(&res->spinlock); | ||
1849 | |||
1850 | /* no work to do */ | ||
1851 | if (empty) { | ||
1852 | mlog(0, "no locks were found on this lockres! done!\n"); | ||
1853 | ret = 0; | ||
1854 | goto leave; | ||
1855 | } | ||
1856 | |||
1857 | /* | ||
1858 | * preallocate up front | ||
1859 | * if this fails, abort | ||
1860 | */ | ||
1861 | |||
1862 | ret = -ENOMEM; | ||
1863 | mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL); | ||
1864 | if (!mres) { | ||
1865 | mlog_errno(ret); | ||
1866 | goto leave; | ||
1867 | } | ||
1868 | |||
1869 | mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, | ||
1870 | GFP_KERNEL); | ||
1871 | if (!mle) { | ||
1872 | mlog_errno(ret); | ||
1873 | goto leave; | ||
1874 | } | ||
1875 | ret = 0; | ||
1876 | |||
1877 | /* | ||
1878 | * find a node to migrate the lockres to | ||
1879 | */ | ||
1880 | |||
1881 | mlog(0, "picking a migration node\n"); | ||
1882 | spin_lock(&dlm->spinlock); | ||
1883 | /* pick a new node */ | ||
1884 | if (target >= O2NM_MAX_NODES || | ||
1885 | !test_bit(target, dlm->domain_map)) { | ||
1886 | target = dlm_pick_migration_target(dlm, res); | ||
1887 | } | ||
1888 | mlog(0, "node %u chosen for migration\n", target); | ||
1889 | |||
1890 | if (target >= O2NM_MAX_NODES || | ||
1891 | !test_bit(target, dlm->domain_map)) { | ||
1892 | /* target chosen is not alive */ | ||
1893 | ret = -EINVAL; | ||
1894 | } | ||
1895 | |||
1896 | if (ret) { | ||
1897 | spin_unlock(&dlm->spinlock); | ||
1898 | goto fail; | ||
1899 | } | ||
1900 | |||
1901 | mlog(0, "continuing with target = %u\n", target); | ||
1902 | |||
1903 | /* | ||
1904 | * clear any existing master requests and | ||
1905 | * add the migration mle to the list | ||
1906 | */ | ||
1907 | spin_lock(&dlm->master_lock); | ||
1908 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, | ||
1909 | namelen, target, dlm->node_num); | ||
1910 | spin_unlock(&dlm->master_lock); | ||
1911 | spin_unlock(&dlm->spinlock); | ||
1912 | |||
1913 | if (ret == -EEXIST) { | ||
1914 | mlog(0, "another process is already migrating it\n"); | ||
1915 | goto fail; | ||
1916 | } | ||
1917 | mle_added = 1; | ||
1918 | |||
1919 | /* | ||
1920 | * set the MIGRATING flag and flush asts. | ||
1921 | * if we fail after this we need to re-dirty the lockres. | ||
1922 | */ | ||
1923 | if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { | ||
1924 | mlog(ML_ERROR, "tried to migrate %.*s to %u, but " | ||
1925 | "the target went down.\n", res->lockname.len, | ||
1926 | res->lockname.name, target); | ||
1927 | spin_lock(&res->spinlock); | ||
1928 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
1929 | spin_unlock(&res->spinlock); | ||
1930 | ret = -EINVAL; | ||
1931 | } | ||
1932 | |||
1933 | fail: | ||
1934 | if (oldmle) { | ||
1935 | /* master is known, detach if not already detached */ | ||
1936 | dlm_mle_detach_hb_events(dlm, oldmle); | ||
1937 | dlm_put_mle(oldmle); | ||
1938 | } | ||
1939 | |||
1940 | if (ret < 0) { | ||
1941 | if (mle_added) { | ||
1942 | dlm_mle_detach_hb_events(dlm, mle); | ||
1943 | dlm_put_mle(mle); | ||
1944 | } else if (mle) { | ||
1945 | kmem_cache_free(dlm_mle_cache, mle); | ||
1946 | } | ||
1947 | goto leave; | ||
1948 | } | ||
1949 | |||
1950 | /* | ||
1951 | * at this point, we have a migration target, an mle | ||
1952 | * in the master list, and the MIGRATING flag set on | ||
1953 | * the lockres | ||
1954 | */ | ||
1955 | |||
1956 | |||
1957 | /* get an extra reference on the mle. | ||
1958 | * otherwise the assert_master from the new | ||
1959 | * master will destroy this. | ||
1960 | * also, make sure that all callers of dlm_get_mle | ||
1961 | * take both dlm->spinlock and dlm->master_lock */ | ||
1962 | spin_lock(&dlm->spinlock); | ||
1963 | spin_lock(&dlm->master_lock); | ||
1964 | dlm_get_mle(mle); | ||
1965 | spin_unlock(&dlm->master_lock); | ||
1966 | spin_unlock(&dlm->spinlock); | ||
1967 | |||
1968 | /* notify new node and send all lock state */ | ||
1969 | /* call send_one_lockres with migration flag. | ||
1970 | * this serves as notice to the target node that a | ||
1971 | * migration is starting. */ | ||
1972 | ret = dlm_send_one_lockres(dlm, res, mres, target, | ||
1973 | DLM_MRES_MIGRATION); | ||
1974 | |||
1975 | if (ret < 0) { | ||
1976 | mlog(0, "migration to node %u failed with %d\n", | ||
1977 | target, ret); | ||
1978 | /* migration failed, detach and clean up mle */ | ||
1979 | dlm_mle_detach_hb_events(dlm, mle); | ||
1980 | dlm_put_mle(mle); | ||
1981 | dlm_put_mle(mle); | ||
1982 | goto leave; | ||
1983 | } | ||
1984 | |||
1985 | /* at this point, the target sends a message to all nodes, | ||
1986 | * (using dlm_do_migrate_request). this node is skipped since | ||
1987 | * we had to put an mle in the list to begin the process. this | ||
1988 | * node now waits for target to do an assert master. this node | ||
1989 | * will be the last one notified, ensuring that the migration | ||
1990 | * is complete everywhere. if the target dies while this is | ||
1991 | * going on, some nodes could potentially see the target as the | ||
1992 | * master, so it is important that my recovery finds the migration | ||
1993 | * mle and sets the master to UNKNOWN. */ | ||
1994 | |||
1995 | |||
1996 | /* wait for new node to assert master */ | ||
1997 | while (1) { | ||
1998 | ret = wait_event_interruptible_timeout(mle->wq, | ||
1999 | (atomic_read(&mle->woken) == 1), | ||
2000 | msecs_to_jiffies(5000)); | ||
2001 | |||
2002 | if (ret >= 0) { | ||
2003 | if (atomic_read(&mle->woken) == 1 || | ||
2004 | res->owner == target) | ||
2005 | break; | ||
2006 | |||
2007 | mlog(0, "timed out during migration\n"); | ||
2008 | } | ||
2009 | if (ret == -ERESTARTSYS) { | ||
2010 | /* migration failed, detach and clean up mle */ | ||
2011 | dlm_mle_detach_hb_events(dlm, mle); | ||
2012 | dlm_put_mle(mle); | ||
2013 | dlm_put_mle(mle); | ||
2014 | goto leave; | ||
2015 | } | ||
2016 | /* TODO: if node died: stop, clean up, return error */ | ||
2017 | } | ||
2018 | |||
2019 | /* all done, set the owner, clear the flag */ | ||
2020 | spin_lock(&res->spinlock); | ||
2021 | dlm_set_lockres_owner(dlm, res, target); | ||
2022 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
2023 | dlm_remove_nonlocal_locks(dlm, res); | ||
2024 | spin_unlock(&res->spinlock); | ||
2025 | wake_up(&res->wq); | ||
2026 | |||
2027 | /* master is known, detach if not already detached */ | ||
2028 | dlm_mle_detach_hb_events(dlm, mle); | ||
2029 | dlm_put_mle(mle); | ||
2030 | ret = 0; | ||
2031 | |||
2032 | dlm_lockres_calc_usage(dlm, res); | ||
2033 | |||
2034 | leave: | ||
2035 | /* re-dirty the lockres if we failed */ | ||
2036 | if (ret < 0) | ||
2037 | dlm_kick_thread(dlm, res); | ||
2038 | |||
2039 | /* TODO: cleanup */ | ||
2040 | if (mres) | ||
2041 | free_page((unsigned long)mres); | ||
2042 | |||
2043 | dlm_put(dlm); | ||
2044 | |||
2045 | mlog(0, "returning %d\n", ret); | ||
2046 | return ret; | ||
2047 | } | ||
2048 | EXPORT_SYMBOL_GPL(dlm_migrate_lockres); | ||
2049 | |||
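dlm_migrate_lockres is long, but the happy path is a strict sequence: verify there are no local locks, preallocate, pick a live target, insert the migration mle, set MIGRATING (flushing asts), send the lock state, wait for the target's assert_master, then hand over ownership. A stub sketch of that ordering; every helper here is hypothetical, and the real function interleaves locking, reference counting, and error unwinding at each step:

#include <stdio.h>

/* hypothetical stubs; each returns 0 on success */
static int has_local_locks(void)      { return 0; }
static int mark_migrating(void)       { return 0; }
static int send_lock_state(int to)    { (void)to; return 0; }
static int wait_for_assert(int to)    { (void)to; return 0; }

/* the ordering that dlm_migrate_lockres enforces, minus the unwinding */
static int migrate(int target)
{
        if (has_local_locks())
                return -1;      /* only a lockres with no local locks moves */
        if (mark_migrating())
                return -1;      /* flush asts, set DLM_LOCK_RES_MIGRATING */
        if (send_lock_state(target))
                return -1;      /* target learns migration is starting */
        if (wait_for_assert(target))
                return -1;      /* target broadcasts assert_master */
        printf("owner is now node %d\n", target);
        return 0;
}

int main(void)
{
        return migrate(5);
}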
2050 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) | ||
2051 | { | ||
2052 | int ret; | ||
2053 | spin_lock(&dlm->ast_lock); | ||
2054 | spin_lock(&lock->spinlock); | ||
2055 | ret = (list_empty(&lock->bast_list) && !lock->bast_pending); | ||
2056 | spin_unlock(&lock->spinlock); | ||
2057 | spin_unlock(&dlm->ast_lock); | ||
2058 | return ret; | ||
2059 | } | ||
2060 | |||
2061 | static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, | ||
2062 | struct dlm_lock_resource *res, | ||
2063 | u8 mig_target) | ||
2064 | { | ||
2065 | int can_proceed; | ||
2066 | spin_lock(&res->spinlock); | ||
2067 | can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); | ||
2068 | spin_unlock(&res->spinlock); | ||
2069 | |||
2070 | /* target has died, so make the caller break out of the | ||
2071 | * wait_event, but caller must recheck the domain_map */ | ||
2072 | spin_lock(&dlm->spinlock); | ||
2073 | if (!test_bit(mig_target, dlm->domain_map)) | ||
2074 | can_proceed = 1; | ||
2075 | spin_unlock(&dlm->spinlock); | ||
2076 | return can_proceed; | ||
2077 | } | ||
2078 | |||
2079 | int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | ||
2080 | { | ||
2081 | int ret; | ||
2082 | spin_lock(&res->spinlock); | ||
2083 | ret = !!(res->state & DLM_LOCK_RES_DIRTY); | ||
2084 | spin_unlock(&res->spinlock); | ||
2085 | return ret; | ||
2086 | } | ||
2087 | |||
2088 | |||
2089 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | ||
2090 | struct dlm_lock_resource *res, | ||
2091 | u8 target) | ||
2092 | { | ||
2093 | int ret = 0; | ||
2094 | |||
2095 | mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", | ||
2096 | res->lockname.len, res->lockname.name, dlm->node_num, | ||
2097 | target); | ||
2098 | /* need to set MIGRATING flag on lockres. this is done by | ||
2099 | * ensuring that all asts have been flushed for this lockres. */ | ||
2100 | spin_lock(&res->spinlock); | ||
2101 | BUG_ON(res->migration_pending); | ||
2102 | res->migration_pending = 1; | ||
2103 | /* strategy is to reserve an extra ast then release | ||
2104 | * it below, letting the release do all of the work */ | ||
2105 | __dlm_lockres_reserve_ast(res); | ||
2106 | spin_unlock(&res->spinlock); | ||
2107 | |||
2108 | /* now flush all the pending asts.. hang out for a bit */ | ||
2109 | dlm_kick_thread(dlm, res); | ||
2110 | wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); | ||
2111 | dlm_lockres_release_ast(dlm, res); | ||
2112 | |||
2113 | mlog(0, "about to wait on migration_wq, dirty=%s\n", | ||
2114 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | ||
2115 | /* if the extra ref we just put was the final one, this | ||
2116 | * will pass thru immediately. otherwise, we need to wait | ||
2117 | * for the last ast to finish. */ | ||
2118 | again: | ||
2119 | ret = wait_event_interruptible_timeout(dlm->migration_wq, | ||
2120 | dlm_migration_can_proceed(dlm, res, target), | ||
2121 | msecs_to_jiffies(1000)); | ||
2122 | if (ret < 0) { | ||
2123 | mlog(0, "woken again: migrating? %s, dead? %s\n", | ||
2124 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | ||
2125 | test_bit(target, dlm->domain_map) ? "no":"yes"); | ||
2126 | } else { | ||
2127 | mlog(0, "all is well: migrating? %s, dead? %s\n", | ||
2128 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | ||
2129 | test_bit(target, dlm->domain_map) ? "no":"yes"); | ||
2130 | } | ||
2131 | if (!dlm_migration_can_proceed(dlm, res, target)) { | ||
2132 | mlog(0, "trying again...\n"); | ||
2133 | goto again; | ||
2134 | } | ||
2135 | |||
2136 | /* did the target go down or die? */ | ||
2137 | spin_lock(&dlm->spinlock); | ||
2138 | if (!test_bit(target, dlm->domain_map)) { | ||
2139 | mlog(ML_ERROR, "aha. migration target %u just went down\n", | ||
2140 | target); | ||
2141 | ret = -EHOSTDOWN; | ||
2142 | } | ||
2143 | spin_unlock(&dlm->spinlock); | ||
2144 | |||
2145 | /* | ||
2146 | * at this point: | ||
2147 | * | ||
2148 | * o the DLM_LOCK_RES_MIGRATING flag is set | ||
2149 | * o there are no pending asts on this lockres | ||
2150 | * o all processes trying to reserve an ast on this | ||
2151 | * lockres must wait for the MIGRATING flag to clear | ||
2152 | */ | ||
2153 | return ret; | ||
2154 | } | ||
2155 | |||
2156 | /* last step in the migration process. | ||
2157 | * original master calls this to free all of the dlm_lock | ||
2158 | * structures that used to be for other nodes. */ | ||
2159 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | ||
2160 | struct dlm_lock_resource *res) | ||
2161 | { | ||
2162 | struct list_head *iter, *iter2; | ||
2163 | struct list_head *queue = &res->granted; | ||
2164 | int i; | ||
2165 | struct dlm_lock *lock; | ||
2166 | |||
2167 | assert_spin_locked(&res->spinlock); | ||
2168 | |||
2169 | BUG_ON(res->owner == dlm->node_num); | ||
2170 | |||
2171 | for (i=0; i<3; i++) { | ||
2172 | list_for_each_safe(iter, iter2, queue) { | ||
2173 | lock = list_entry(iter, struct dlm_lock, list); | ||
2174 | if (lock->ml.node != dlm->node_num) { | ||
2175 | mlog(0, "putting lock for node %u\n", | ||
2176 | lock->ml.node); | ||
2177 | /* be extra careful */ | ||
2178 | BUG_ON(!list_empty(&lock->ast_list)); | ||
2179 | BUG_ON(!list_empty(&lock->bast_list)); | ||
2180 | BUG_ON(lock->ast_pending); | ||
2181 | BUG_ON(lock->bast_pending); | ||
2182 | list_del_init(&lock->list); | ||
2183 | dlm_lock_put(lock); | ||
2184 | } | ||
2185 | } | ||
2186 | queue++; | ||
2187 | } | ||
2188 | } | ||
2189 | |||
2190 | /* for now this is not too intelligent. we will | ||
2191 | * need stats to make this do the right thing. | ||
2192 | * this just finds the first lock on one of the | ||
2193 | * queues and uses that node as the target. */ | ||
2194 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, | ||
2195 | struct dlm_lock_resource *res) | ||
2196 | { | ||
2197 | int i; | ||
2198 | struct list_head *queue = &res->granted; | ||
2199 | struct list_head *iter; | ||
2200 | struct dlm_lock *lock; | ||
2201 | int nodenum; | ||
2202 | |||
2203 | assert_spin_locked(&dlm->spinlock); | ||
2204 | |||
2205 | spin_lock(&res->spinlock); | ||
2206 | for (i=0; i<3; i++) { | ||
2207 | list_for_each(iter, queue) { | ||
2208 | /* up to the caller to make sure this node | ||
2209 | * is alive */ | ||
2210 | lock = list_entry(iter, struct dlm_lock, list); | ||
2211 | if (lock->ml.node != dlm->node_num) { | ||
2212 | spin_unlock(&res->spinlock); | ||
2213 | return lock->ml.node; | ||
2214 | } | ||
2215 | } | ||
2216 | queue++; | ||
2217 | } | ||
2218 | spin_unlock(&res->spinlock); | ||
2219 | mlog(0, "have not found a suitable target yet! checking domain map\n"); | ||
2220 | |||
2221 | /* ok now we're getting desperate. pick anyone alive. */ | ||
2222 | nodenum = -1; | ||
2223 | while (1) { | ||
2224 | nodenum = find_next_bit(dlm->domain_map, | ||
2225 | O2NM_MAX_NODES, nodenum+1); | ||
2226 | mlog(0, "found %d in domain map\n", nodenum); | ||
2227 | if (nodenum >= O2NM_MAX_NODES) | ||
2228 | break; | ||
2229 | if (nodenum != dlm->node_num) { | ||
2230 | mlog(0, "picking %d\n", nodenum); | ||
2231 | return nodenum; | ||
2232 | } | ||
2233 | } | ||
2234 | |||
2235 | mlog(0, "giving up. no master to migrate to\n"); | ||
2236 | return DLM_LOCK_RES_OWNER_UNKNOWN; | ||
2237 | } | ||
2238 | |||
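As the comment above says, target selection is deliberately simple: prefer any node already holding a lock on the resource, and fall back to any live node that is not ourselves. A compact sketch with a toy domain bitmap and lock-owner array (illustrative only):

#include <stdio.h>

#define MAX_NODES 32
#define OWNER_UNKNOWN 255

/* locks[i] holds the owning node of the i-th lock on the queues */
static int pick_target(const int *locks, int nlocks,
                       unsigned int domain_map, int self)
{
        /* first choice: any node that already holds a lock here */
        for (int i = 0; i < nlocks; i++)
                if (locks[i] != self)
                        return locks[i];
        /* desperate: any live node other than ourselves */
        for (int n = 0; n < MAX_NODES; n++)
                if (((domain_map >> n) & 1) && n != self)
                        return n;
        return OWNER_UNKNOWN;   /* nobody to migrate to */
}

int main(void)
{
        int locks[] = { 2, 2, 7 };      /* self == 2 below */
        printf("target = %d\n",
               pick_target(locks, 3, (1u << 2) | (1u << 7), 2));
        return 0;
}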
2239 | |||
2240 | |||
2241 | /* this is called by the new master once all lockres | ||
2242 | * data has been received */ | ||
2243 | static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | ||
2244 | struct dlm_lock_resource *res, | ||
2245 | u8 master, u8 new_master, | ||
2246 | struct dlm_node_iter *iter) | ||
2247 | { | ||
2248 | struct dlm_migrate_request migrate; | ||
2249 | int ret, status = 0; | ||
2250 | int nodenum; | ||
2251 | |||
2252 | memset(&migrate, 0, sizeof(migrate)); | ||
2253 | migrate.namelen = res->lockname.len; | ||
2254 | memcpy(migrate.name, res->lockname.name, migrate.namelen); | ||
2255 | migrate.new_master = new_master; | ||
2256 | migrate.master = master; | ||
2257 | |||
2258 | ret = 0; | ||
2259 | |||
2260 | /* send message to all nodes, except the master and myself */ | ||
2261 | while ((nodenum = dlm_node_iter_next(iter)) >= 0) { | ||
2262 | if (nodenum == master || | ||
2263 | nodenum == new_master) | ||
2264 | continue; | ||
2265 | |||
2266 | ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, | ||
2267 | &migrate, sizeof(migrate), nodenum, | ||
2268 | &status); | ||
2269 | if (ret < 0) | ||
2270 | mlog_errno(ret); | ||
2271 | else if (status < 0) { | ||
2272 | mlog(0, "migrate request (node %u) returned %d!\n", | ||
2273 | nodenum, status); | ||
2274 | ret = status; | ||
2275 | } | ||
2276 | } | ||
2277 | |||
2278 | if (ret < 0) | ||
2279 | mlog_errno(ret); | ||
2280 | |||
2281 | mlog(0, "returning ret=%d\n", ret); | ||
2282 | return ret; | ||
2283 | } | ||
2284 | |||
2285 | |||
2286 | /* if there is an existing mle for this lockres, we now know who the master is. | ||
2287 | * (the one who sent us *this* message) we can clear it up right away. | ||
2288 | * since the process that put the mle on the list still has a reference to it, | ||
2289 | * we can unhash it now, set the master and wake the process. as a result, | ||
2290 | * we will have no mle in the list to start with. now we can add an mle for | ||
2291 | * the migration and this should be the only one found for those scanning the | ||
2292 | * list. */ | ||
2293 | int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data) | ||
2294 | { | ||
2295 | struct dlm_ctxt *dlm = data; | ||
2296 | struct dlm_lock_resource *res = NULL; | ||
2297 | struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; | ||
2298 | struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; | ||
2299 | const char *name; | ||
2300 | unsigned int namelen; | ||
2301 | int ret = 0; | ||
2302 | |||
2303 | if (!dlm_grab(dlm)) | ||
2304 | return -EINVAL; | ||
2305 | |||
2306 | name = migrate->name; | ||
2307 | namelen = migrate->namelen; | ||
2308 | |||
2309 | /* preallocate.. if this fails, abort */ | ||
2310 | mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache, | ||
2311 | GFP_KERNEL); | ||
2312 | |||
2313 | if (!mle) { | ||
2314 | ret = -ENOMEM; | ||
2315 | goto leave; | ||
2316 | } | ||
2317 | |||
2318 | /* check for pre-existing lock */ | ||
2319 | spin_lock(&dlm->spinlock); | ||
2320 | res = __dlm_lookup_lockres(dlm, name, namelen); | ||
2321 | spin_lock(&dlm->master_lock); | ||
2322 | |||
2323 | if (res) { | ||
2324 | spin_lock(&res->spinlock); | ||
2325 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
2326 | /* if all is working ok, this can only mean that we got | ||
2327 | * a migrate request from a node that we now see as | ||
2328 | * dead. what can we do here? drop it to the floor? */ | ||
2329 | spin_unlock(&res->spinlock); | ||
2330 | mlog(ML_ERROR, "Got a migrate request, but the " | ||
2331 | "lockres is marked as recovering!"); | ||
2332 | kmem_cache_free(dlm_mle_cache, mle); | ||
2333 | ret = -EINVAL; /* need a better solution */ | ||
2334 | goto unlock; | ||
2335 | } | ||
2336 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
2337 | spin_unlock(&res->spinlock); | ||
2338 | } | ||
2339 | |||
2340 | /* ignore status. only nonzero status would BUG. */ | ||
2341 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, | ||
2342 | name, namelen, | ||
2343 | migrate->new_master, | ||
2344 | migrate->master); | ||
2345 | |||
2346 | unlock: | ||
2347 | spin_unlock(&dlm->master_lock); | ||
2348 | spin_unlock(&dlm->spinlock); | ||
2349 | |||
2350 | if (oldmle) { | ||
2351 | /* master is known, detach if not already detached */ | ||
2352 | dlm_mle_detach_hb_events(dlm, oldmle); | ||
2353 | dlm_put_mle(oldmle); | ||
2354 | } | ||
2355 | |||
2356 | if (res) | ||
2357 | dlm_lockres_put(res); | ||
2358 | leave: | ||
2359 | dlm_put(dlm); | ||
2360 | return ret; | ||
2361 | } | ||
2362 | |||
2363 | /* must be holding dlm->spinlock and dlm->master_lock | ||
2364 | * when adding a migration mle, we can clear any other mles | ||
2365 | * in the master list because we know with certainty that | ||
2366 | * the master is "master". so we remove any old mle from | ||
2367 | * the list after setting its master field, and then add | ||
2368 | * the new migration mle. this way we can keep to the rule | ||
2369 | * of having only one mle for a given lock name at all times. */ | ||
2370 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | ||
2371 | struct dlm_lock_resource *res, | ||
2372 | struct dlm_master_list_entry *mle, | ||
2373 | struct dlm_master_list_entry **oldmle, | ||
2374 | const char *name, unsigned int namelen, | ||
2375 | u8 new_master, u8 master) | ||
2376 | { | ||
2377 | int found; | ||
2378 | int ret = 0; | ||
2379 | |||
2380 | *oldmle = NULL; | ||
2381 | |||
2382 | mlog_entry_void(); | ||
2383 | |||
2384 | assert_spin_locked(&dlm->spinlock); | ||
2385 | assert_spin_locked(&dlm->master_lock); | ||
2386 | |||
2387 | /* caller is responsible for any ref taken here on oldmle */ | ||
2388 | found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); | ||
2389 | if (found) { | ||
2390 | struct dlm_master_list_entry *tmp = *oldmle; | ||
2391 | spin_lock(&tmp->spinlock); | ||
2392 | if (tmp->type == DLM_MLE_MIGRATION) { | ||
2393 | if (master == dlm->node_num) { | ||
2394 | /* ah another process raced me to it */ | ||
2395 | mlog(0, "tried to migrate %.*s, but some " | ||
2396 | "process beat me to it\n", | ||
2397 | namelen, name); | ||
2398 | ret = -EEXIST; | ||
2399 | } else { | ||
2400 | /* bad. 2 NODES are trying to migrate! */ | ||
2401 | mlog(ML_ERROR, "migration error mle: " | ||
2402 | "master=%u new_master=%u // request: " | ||
2403 | "master=%u new_master=%u // " | ||
2404 | "lockres=%.*s\n", | ||
2405 | tmp->master, tmp->new_master, | ||
2406 | master, new_master, | ||
2407 | namelen, name); | ||
2408 | BUG(); | ||
2409 | } | ||
2410 | } else { | ||
2411 | /* this is essentially what assert_master does */ | ||
2412 | tmp->master = master; | ||
2413 | atomic_set(&tmp->woken, 1); | ||
2414 | wake_up(&tmp->wq); | ||
2415 | /* remove it from the list so that only one | ||
2416 | * mle will be found */ | ||
2417 | list_del_init(&tmp->list); | ||
2418 | } | ||
2419 | spin_unlock(&tmp->spinlock); | ||
2420 | } | ||
2421 | |||
2422 | /* now add a migration mle to the tail of the list */ | ||
2423 | dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); | ||
2424 | mle->new_master = new_master; | ||
2425 | mle->master = master; | ||
2426 | /* do this for consistency with other mle types */ | ||
2427 | set_bit(new_master, mle->maybe_map); | ||
2428 | list_add(&mle->list, &dlm->master_list); | ||
2429 | |||
2430 | return ret; | ||
2431 | } | ||
2432 | |||
2433 | |||
2434 | void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) | ||
2435 | { | ||
2436 | struct list_head *iter, *iter2; | ||
2437 | struct dlm_master_list_entry *mle; | ||
2438 | struct dlm_lock_resource *res; | ||
2439 | |||
2440 | mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node); | ||
2441 | top: | ||
2442 | assert_spin_locked(&dlm->spinlock); | ||
2443 | |||
2444 | /* clean the master list */ | ||
2445 | spin_lock(&dlm->master_lock); | ||
2446 | list_for_each_safe(iter, iter2, &dlm->master_list) { | ||
2447 | mle = list_entry(iter, struct dlm_master_list_entry, list); | ||
2448 | |||
2449 | BUG_ON(mle->type != DLM_MLE_BLOCK && | ||
2450 | mle->type != DLM_MLE_MASTER && | ||
2451 | mle->type != DLM_MLE_MIGRATION); | ||
2452 | |||
2453 | /* MASTER mles are initiated locally. the waiting | ||
2454 | * process will notice the node map change | ||
2455 | * shortly. let that happen as normal. */ | ||
2456 | if (mle->type == DLM_MLE_MASTER) | ||
2457 | continue; | ||
2458 | |||
2459 | |||
2460 | /* BLOCK mles are initiated by other nodes. | ||
2461 | * need to clean up if the dead node would have | ||
2462 | * been the master. */ | ||
2463 | if (mle->type == DLM_MLE_BLOCK) { | ||
2464 | int bit; | ||
2465 | |||
2466 | spin_lock(&mle->spinlock); | ||
2467 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | ||
2468 | if (bit != dead_node) { | ||
2469 | mlog(0, "mle found, but dead node %u would " | ||
2470 | "not have been master\n", dead_node); | ||
2471 | spin_unlock(&mle->spinlock); | ||
2472 | } else { | ||
2473 | /* must drop the refcount by one since the | ||
2474 | * assert_master will never arrive. this | ||
2475 | * may result in the mle being unlinked and | ||
2476 | * freed, but there may still be a process | ||
2477 | * waiting in the dlmlock path which is fine. */ | ||
2478 | mlog(ML_ERROR, "node %u was expected master\n", | ||
2479 | dead_node); | ||
2480 | atomic_set(&mle->woken, 1); | ||
2481 | spin_unlock(&mle->spinlock); | ||
2482 | wake_up(&mle->wq); | ||
2483 | /* final put will take care of list removal */ | ||
2484 | __dlm_put_mle(mle); | ||
2485 | } | ||
2486 | continue; | ||
2487 | } | ||
2488 | |||
2489 | /* everything else is a MIGRATION mle */ | ||
2490 | |||
2491 | /* the rule for MIGRATION mles is that the master | ||
2492 | * becomes UNKNOWN if *either* the original or | ||
2493 | * the new master dies. all UNKNOWN lockreses | ||
2494 | * are sent to whichever node becomes the recovery | ||
2495 | * master. the new master is responsible for | ||
2496 | * determining if there is still a master for | ||
2497 | * this lockres, or if he needs to take over | ||
2498 | * mastery. either way, this node should expect | ||
2499 | * another message to resolve this. */ | ||
2500 | if (mle->master != dead_node && | ||
2501 | mle->new_master != dead_node) | ||
2502 | continue; | ||
2503 | |||
2504 | /* if we have reached this point, this mle needs to | ||
2505 | * be removed from the list and freed. */ | ||
2506 | |||
2507 | /* remove from the list early. NOTE: unlinking | ||
2508 | * list_head while in list_for_each_safe */ | ||
2509 | spin_lock(&mle->spinlock); | ||
2510 | list_del_init(&mle->list); | ||
2511 | atomic_set(&mle->woken, 1); | ||
2512 | spin_unlock(&mle->spinlock); | ||
2513 | wake_up(&mle->wq); | ||
2514 | |||
2515 | mlog(0, "node %u died during migration from " | ||
2516 | "%u to %u!\n", dead_node, | ||
2517 | mle->master, mle->new_master); | ||
2518 | /* if there is a lockres associated with this | ||
2519 | * mle, find it and set its owner to UNKNOWN */ | ||
2520 | res = __dlm_lookup_lockres(dlm, mle->u.name.name, | ||
2521 | mle->u.name.len); | ||
2522 | if (res) { | ||
2523 | /* unfortunately if we hit this rare case, our | ||
2524 | * lock ordering is messed up. we need to drop | ||
2525 | * the master lock so that we can take the | ||
2526 | * lockres lock, which means we will have to | ||
2527 | * restart from the head of the list. */ | ||
2528 | spin_unlock(&dlm->master_lock); | ||
2529 | |||
2530 | /* move lockres onto recovery list */ | ||
2531 | spin_lock(&res->spinlock); | ||
2532 | dlm_set_lockres_owner(dlm, res, | ||
2533 | DLM_LOCK_RES_OWNER_UNKNOWN); | ||
2534 | dlm_move_lockres_to_recovery_list(dlm, res); | ||
2535 | spin_unlock(&res->spinlock); | ||
2536 | dlm_lockres_put(res); | ||
2537 | |||
2538 | /* dump the mle */ | ||
2539 | spin_lock(&dlm->master_lock); | ||
2540 | __dlm_put_mle(mle); | ||
2541 | spin_unlock(&dlm->master_lock); | ||
2542 | |||
2543 | /* restart */ | ||
2544 | goto top; | ||
2545 | } | ||
2546 | |||
2547 | /* this may be the last reference */ | ||
2548 | __dlm_put_mle(mle); | ||
2549 | } | ||
2550 | spin_unlock(&dlm->master_lock); | ||
2551 | } | ||
2552 | |||
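The tail of dlm_clean_master_list() above ends on a classic lock-ordering workaround: dlm->master_lock may not be held while taking res->spinlock, so when a matching lockres is found the function drops the master lock, fixes up the lockres, and restarts the whole scan from top:. The same drop-and-restart idiom in a minimal, runnable userspace form (pthread mutexes standing in for the spinlocks; all names below are illustrative, not kernel APIs):

#include <pthread.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;  /* "master" */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;  /* "lockres" */

/* scan n slots under lock_a; a slot that needs lock_b forces a restart,
 * because lock_b must never be acquired while lock_a is held */
void scan_with_restart(int (*needs_b)(int), void (*fixup)(int), int n)
{
top:
	pthread_mutex_lock(&lock_a);
	for (int i = 0; i < n; i++) {
		if (!needs_b(i))
			continue;
		pthread_mutex_unlock(&lock_a);	/* avoid an A->B inversion */
		pthread_mutex_lock(&lock_b);
		fixup(i);
		pthread_mutex_unlock(&lock_b);
		goto top;	/* the set may have changed meanwhile */
	}
	pthread_mutex_unlock(&lock_a);
}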
2553 | |||
2554 | int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
2555 | u8 old_master) | ||
2556 | { | ||
2557 | struct dlm_node_iter iter; | ||
2558 | int ret = 0; | ||
2559 | |||
2560 | spin_lock(&dlm->spinlock); | ||
2561 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
2562 | clear_bit(old_master, iter.node_map); | ||
2563 | clear_bit(dlm->node_num, iter.node_map); | ||
2564 | spin_unlock(&dlm->spinlock); | ||
2565 | |||
2566 | mlog(0, "now time to do a migrate request to other nodes\n"); | ||
2567 | ret = dlm_do_migrate_request(dlm, res, old_master, | ||
2568 | dlm->node_num, &iter); | ||
2569 | if (ret < 0) { | ||
2570 | mlog_errno(ret); | ||
2571 | goto leave; | ||
2572 | } | ||
2573 | |||
2574 | mlog(0, "doing assert master of %.*s to all except the original node\n", | ||
2575 | res->lockname.len, res->lockname.name); | ||
2576 | /* this call now finishes out the nodemap | ||
2577 | * even if one or more nodes die */ | ||
2578 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
2579 | res->lockname.len, iter.node_map, | ||
2580 | DLM_ASSERT_MASTER_FINISH_MIGRATION); | ||
2581 | if (ret < 0) { | ||
2582 | /* no longer need to retry. all living nodes contacted. */ | ||
2583 | mlog_errno(ret); | ||
2584 | ret = 0; | ||
2585 | } | ||
2586 | |||
2587 | memset(iter.node_map, 0, sizeof(iter.node_map)); | ||
2588 | set_bit(old_master, iter.node_map); | ||
2589 | mlog(0, "doing assert master of %.*s back to %u\n", | ||
2590 | res->lockname.len, res->lockname.name, old_master); | ||
2591 | ret = dlm_do_assert_master(dlm, res->lockname.name, | ||
2592 | res->lockname.len, iter.node_map, | ||
2593 | DLM_ASSERT_MASTER_FINISH_MIGRATION); | ||
2594 | if (ret < 0) { | ||
2595 | mlog(0, "assert master to original master failed " | ||
2596 | "with %d.\n", ret); | ||
2597 | /* the only nonzero status here would be because of | ||
2598 | * a dead original node. we're done. */ | ||
2599 | ret = 0; | ||
2600 | } | ||
2601 | |||
2602 | /* all done, set the owner, clear the flag */ | ||
2603 | spin_lock(&res->spinlock); | ||
2604 | dlm_set_lockres_owner(dlm, res, dlm->node_num); | ||
2605 | res->state &= ~DLM_LOCK_RES_MIGRATING; | ||
2606 | spin_unlock(&res->spinlock); | ||
2607 | /* re-dirty it on the new master */ | ||
2608 | dlm_kick_thread(dlm, res); | ||
2609 | wake_up(&res->wq); | ||
2610 | leave: | ||
2611 | return ret; | ||
2612 | } | ||
2613 | |||
2614 | /* | ||
2615 | * LOCKRES AST REFCOUNT | ||
2616 | * this is integral to migration | ||
2617 | */ | ||
2618 | |||
2619 | /* for future intent to call an ast, reserve one ahead of time. | ||
2620 | * this should be called only after waiting on the lockres | ||
2621 | * with dlm_wait_on_lockres, and while still holding the | ||
2622 | * spinlock after the call. */ | ||
2623 | void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) | ||
2624 | { | ||
2625 | assert_spin_locked(&res->spinlock); | ||
2626 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
2627 | __dlm_print_one_lock_resource(res); | ||
2628 | } | ||
2629 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | ||
2630 | |||
2631 | atomic_inc(&res->asts_reserved); | ||
2632 | } | ||
2633 | |||
2634 | /* | ||
2635 | * used to drop the reserved ast, either because it went unused, | ||
2636 | * or because the ast/bast was actually called. | ||
2637 | * | ||
2638 | * also, if there is a pending migration on this lockres, | ||
2639 | * and this was the last pending ast on the lockres, | ||
2640 | * atomically set the MIGRATING flag before we drop the lock. | ||
2641 | * this is how we ensure that migration can proceed with no | ||
2642 | * asts in progress. note that it is ok if the state of the | ||
2643 | * queues is such that a lock should be granted in the future | ||
2644 | * or that a bast should be fired, because the new master will | ||
2645 | * shuffle the lists on this lockres as soon as it is migrated. | ||
2646 | */ | ||
2647 | void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | ||
2648 | struct dlm_lock_resource *res) | ||
2649 | { | ||
2650 | if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) | ||
2651 | return; | ||
2652 | |||
2653 | if (!res->migration_pending) { | ||
2654 | spin_unlock(&res->spinlock); | ||
2655 | return; | ||
2656 | } | ||
2657 | |||
2658 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | ||
2659 | res->migration_pending = 0; | ||
2660 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
2661 | spin_unlock(&res->spinlock); | ||
2662 | wake_up(&res->wq); | ||
2663 | wake_up(&dlm->migration_wq); | ||
2664 | } | ||
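__dlm_lockres_reserve_ast() and dlm_lockres_release_ast() together implement a blocker count: migration cannot begin while any reserved ast is outstanding, and whichever caller drops the count to zero is the one that atomically raises MIGRATING under the spinlock. The atomic_dec_and_lock() shape in isolation, as a kernel-style sketch (the struct and field names below are invented, not the dlm types, and modern include paths are assumed):

#include <linux/atomic.h>
#include <linux/spinlock.h>

#define STATE_READY	0x01

struct gate {
	atomic_t blockers;	/* plays the role of res->asts_reserved */
	spinlock_t lock;	/* plays the role of res->spinlock */
	int pending;		/* plays the role of res->migration_pending */
	int state;
};

static void gate_put(struct gate *g)
{
	/* nonzero only for the caller that took the count to zero,
	 * and in that case it returns with g->lock already held */
	if (!atomic_dec_and_lock(&g->blockers, &g->lock))
		return;

	if (g->pending) {
		g->pending = 0;
		g->state |= STATE_READY;	/* flipped under g->lock */
	}
	spin_unlock(&g->lock);
}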
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c new file mode 100644 index 000000000000..0c8eb1093f00 --- /dev/null +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -0,0 +1,2132 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmrecovery.c | ||
5 | * | ||
6 | * recovery stuff | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/timer.h> | ||
41 | #include <linux/kthread.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | #include "dlmdomain.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); | ||
56 | |||
57 | static int dlm_recovery_thread(void *data); | ||
58 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); | ||
59 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm); | ||
60 | static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); | ||
61 | static int dlm_do_recovery(struct dlm_ctxt *dlm); | ||
62 | |||
63 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); | ||
64 | static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); | ||
65 | static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); | ||
66 | static int dlm_request_all_locks(struct dlm_ctxt *dlm, | ||
67 | u8 request_from, u8 dead_node); | ||
68 | static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); | ||
69 | |||
70 | static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); | ||
71 | static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, | ||
72 | const char *lockname, int namelen, | ||
73 | int total_locks, u64 cookie, | ||
74 | u8 flags, u8 master); | ||
75 | static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | ||
76 | struct dlm_migratable_lockres *mres, | ||
77 | u8 send_to, | ||
78 | struct dlm_lock_resource *res, | ||
79 | int total_locks); | ||
80 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, | ||
81 | struct dlm_lock_resource *res, | ||
82 | u8 *real_master); | ||
83 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | ||
84 | struct dlm_lock_resource *res, | ||
85 | struct dlm_migratable_lockres *mres); | ||
86 | static int dlm_do_master_requery(struct dlm_ctxt *dlm, | ||
87 | struct dlm_lock_resource *res, | ||
88 | u8 nodenum, u8 *real_master); | ||
89 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); | ||
90 | static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, | ||
91 | u8 dead_node, u8 send_to); | ||
92 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); | ||
93 | static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, | ||
94 | struct list_head *list, u8 dead_node); | ||
95 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | ||
96 | u8 dead_node, u8 new_master); | ||
97 | static void dlm_reco_ast(void *astdata); | ||
98 | static void dlm_reco_bast(void *astdata, int blocked_type); | ||
99 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); | ||
100 | static void dlm_request_all_locks_worker(struct dlm_work_item *item, | ||
101 | void *data); | ||
102 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); | ||
103 | |||
104 | static u64 dlm_get_next_mig_cookie(void); | ||
105 | |||
106 | static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED; | ||
107 | static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED; | ||
108 | static u64 dlm_mig_cookie = 1; | ||
109 | |||
110 | static u64 dlm_get_next_mig_cookie(void) | ||
111 | { | ||
112 | u64 c; | ||
113 | spin_lock(&dlm_mig_cookie_lock); | ||
114 | c = dlm_mig_cookie; | ||
115 | if (dlm_mig_cookie == (~0ULL)) | ||
116 | dlm_mig_cookie = 1; | ||
117 | else | ||
118 | dlm_mig_cookie++; | ||
119 | spin_unlock(&dlm_mig_cookie_lock); | ||
120 | return c; | ||
121 | } | ||
122 | |||
123 | static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) | ||
124 | { | ||
125 | spin_lock(&dlm->spinlock); | ||
126 | clear_bit(dlm->reco.dead_node, dlm->recovery_map); | ||
127 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
128 | dlm->reco.new_master = O2NM_INVALID_NODE_NUM; | ||
129 | spin_unlock(&dlm->spinlock); | ||
130 | } | ||
131 | |||
132 | /* Worker function used during recovery. */ | ||
133 | void dlm_dispatch_work(void *data) | ||
134 | { | ||
135 | struct dlm_ctxt *dlm = (struct dlm_ctxt *)data; | ||
136 | LIST_HEAD(tmp_list); | ||
137 | struct list_head *iter, *iter2; | ||
138 | struct dlm_work_item *item; | ||
139 | dlm_workfunc_t *workfunc; | ||
140 | |||
141 | spin_lock(&dlm->work_lock); | ||
142 | list_splice_init(&dlm->work_list, &tmp_list); | ||
143 | spin_unlock(&dlm->work_lock); | ||
144 | |||
145 | list_for_each_safe(iter, iter2, &tmp_list) { | ||
146 | item = list_entry(iter, struct dlm_work_item, list); | ||
147 | workfunc = item->func; | ||
148 | list_del_init(&item->list); | ||
149 | |||
150 | /* already have ref on dlm to avoid having | ||
151 | * it disappear. just double-check. */ | ||
152 | BUG_ON(item->dlm != dlm); | ||
153 | |||
154 | /* this is allowed to sleep and | ||
155 | * call network stuff */ | ||
156 | workfunc(item, item->data); | ||
157 | |||
158 | dlm_put(dlm); | ||
159 | kfree(item); | ||
160 | } | ||
161 | } | ||
162 | |||
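dlm_dispatch_work() above is the standard splice-and-drain queue shape: hold the lock only long enough to steal the entire pending list, then run the possibly-sleeping work functions with no lock held. Reduced to its essentials (a sketch: the job type and names are invented, and the _entry_ iterator variant is used where the code above spells out list_entry() by hand):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct job {
	struct list_head list;
	void (*fn)(struct job *);
};

static LIST_HEAD(job_queue);
static DEFINE_SPINLOCK(job_lock);

static void drain_jobs(void)
{
	LIST_HEAD(tmp);
	struct job *j, *next;

	spin_lock(&job_lock);
	list_splice_init(&job_queue, &tmp);	/* steal the whole queue */
	spin_unlock(&job_lock);

	/* lock dropped: fn() is free to sleep or to queue more work */
	list_for_each_entry_safe(j, next, &tmp, list) {
		list_del_init(&j->list);
		j->fn(j);
		kfree(j);
	}
}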
163 | /* | ||
164 | * RECOVERY THREAD | ||
165 | */ | ||
166 | |||
167 | static void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) | ||
168 | { | ||
169 | /* wake the recovery thread | ||
170 | * this will wake the reco thread in one of three places | ||
171 | * 1) sleeping with no recovery happening | ||
172 | * 2) sleeping with recovery mastered elsewhere | ||
173 | * 3) recovery mastered here, waiting on reco data */ | ||
174 | |||
175 | wake_up(&dlm->dlm_reco_thread_wq); | ||
176 | } | ||
177 | |||
178 | /* Launch the recovery thread */ | ||
179 | int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) | ||
180 | { | ||
181 | mlog(0, "starting dlm recovery thread...\n"); | ||
182 | |||
183 | dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, | ||
184 | "dlm_reco_thread"); | ||
185 | if (IS_ERR(dlm->dlm_reco_thread_task)) { | ||
186 | mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); | ||
187 | dlm->dlm_reco_thread_task = NULL; | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) | ||
195 | { | ||
196 | if (dlm->dlm_reco_thread_task) { | ||
197 | mlog(0, "waiting for dlm recovery thread to exit\n"); | ||
198 | kthread_stop(dlm->dlm_reco_thread_task); | ||
199 | dlm->dlm_reco_thread_task = NULL; | ||
200 | } | ||
201 | } | ||
202 | |||
203 | |||
204 | |||
205 | /* | ||
206 | * this is lame, but here's how recovery works... | ||
207 | * 1) all recovery threads cluster wide will work on recovering | ||
208 | * ONE node at a time | ||
209 | * 2) negotiate who will take over all the locks for the dead node. | ||
210 | * that's right... ALL the locks. | ||
211 | * 3) once a new master is chosen, everyone scans all locks | ||
212 | * and moves aside those mastered by the dead guy | ||
213 | * 4) each of these locks should be locked until recovery is done | ||
214 | * 5) the new master collects all of the secondary lock queue info | ||
215 | * one lock at a time, forcing each node to communicate back | ||
216 | * before continuing | ||
217 | * 6) each secondary lock queue responds with the full known lock info | ||
218 | * 7) once the new master has run all its locks, it sends an ALLDONE! | ||
219 | * message to everyone | ||
220 | * 8) upon receiving this message, the secondary queue node unlocks | ||
221 | * and responds to the ALLDONE | ||
222 | * 9) once the new master gets responses from everyone, it unlocks | ||
223 | * everything and recovery for this dead node is done | ||
224 | *10) go back to 2) while there are still dead nodes | ||
225 | * | ||
226 | */ | ||
227 | |||
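Condensed into code shape, one pass of the loop above looks roughly like the skeleton below. Every name in it is a stand-in for the real functions later in this file (dlm_do_recovery(), dlm_pick_recovery_master(), dlm_remaster_locks() and friends), so treat it as a reading aid rather than an interface:

typedef unsigned char u8;

#define INVALID_NODE	255	/* stand-in for O2NM_INVALID_NODE_NUM */
#define MAX_NODES	255	/* stand-in for O2NM_MAX_NODES */
#define EAGAIN		11

struct ctxt;			/* stand-in for struct dlm_ctxt */

u8   pick_dead_node(struct ctxt *dlm);			/* step 1 */
int  won_recovery_mastery(struct ctxt *dlm);		/* step 2 */
int  node_is_live(struct ctxt *dlm, u8 node);
void collect_locks_from(struct ctxt *dlm, u8 node, u8 dead); /* 3-6 */
void wait_all_nodes_done(struct ctxt *dlm);		/* steps 6-8 */
void broadcast_all_done(struct ctxt *dlm);		/* steps 7-9 */

int recover_one_pass(struct ctxt *dlm)
{
	u8 dead = pick_dead_node(dlm);

	if (dead == INVALID_NODE)
		return 0;		/* nothing to recover: sleep */
	if (!won_recovery_mastery(dlm))
		return 0;		/* another node drives this one */

	for (u8 node = 0; node < MAX_NODES; node++)
		if (node_is_live(dlm, node) && node != dead)
			collect_locks_from(dlm, node, dead);

	wait_all_nodes_done(dlm);
	broadcast_all_done(dlm);
	return -EAGAIN;			/* step 10: rescan immediately */
}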
228 | |||
229 | #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) | ||
230 | |||
231 | static int dlm_recovery_thread(void *data) | ||
232 | { | ||
233 | int status; | ||
234 | struct dlm_ctxt *dlm = data; | ||
235 | unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); | ||
236 | |||
237 | mlog(0, "dlm recovery thread running for %s...\n", dlm->name); | ||
238 | |||
239 | while (!kthread_should_stop()) { | ||
240 | if (dlm_joined(dlm)) { | ||
241 | status = dlm_do_recovery(dlm); | ||
242 | if (status == -EAGAIN) { | ||
243 | /* do not sleep, recheck immediately. */ | ||
244 | continue; | ||
245 | } | ||
246 | if (status < 0) | ||
247 | mlog_errno(status); | ||
248 | } | ||
249 | |||
250 | wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, | ||
251 | kthread_should_stop(), | ||
252 | timeout); | ||
253 | } | ||
254 | |||
255 | mlog(0, "quitting DLM recovery thread\n"); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | /* callers of the top-level api calls (dlmlock/dlmunlock) should | ||
260 | * block on the dlm->reco.event when recovery is in progress. | ||
261 | * the dlm recovery thread will set this state when it begins | ||
262 | * recovering a dead node (as the new master or not) and clear | ||
263 | * the state and wake as soon as all affected lock resources have | ||
264 | * been marked with the RECOVERY flag */ | ||
265 | static int dlm_in_recovery(struct dlm_ctxt *dlm) | ||
266 | { | ||
267 | int in_recovery; | ||
268 | spin_lock(&dlm->spinlock); | ||
269 | in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | ||
270 | spin_unlock(&dlm->spinlock); | ||
271 | return in_recovery; | ||
272 | } | ||
273 | |||
274 | |||
275 | void dlm_wait_for_recovery(struct dlm_ctxt *dlm) | ||
276 | { | ||
277 | wait_event(dlm->reco.event, !dlm_in_recovery(dlm)); | ||
278 | } | ||
279 | |||
280 | static void dlm_begin_recovery(struct dlm_ctxt *dlm) | ||
281 | { | ||
282 | spin_lock(&dlm->spinlock); | ||
283 | BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); | ||
284 | dlm->reco.state |= DLM_RECO_STATE_ACTIVE; | ||
285 | spin_unlock(&dlm->spinlock); | ||
286 | } | ||
287 | |||
288 | static void dlm_end_recovery(struct dlm_ctxt *dlm) | ||
289 | { | ||
290 | spin_lock(&dlm->spinlock); | ||
291 | BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); | ||
292 | dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; | ||
293 | spin_unlock(&dlm->spinlock); | ||
294 | wake_up(&dlm->reco.event); | ||
295 | } | ||
296 | |||
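The three functions above are the whole gate: dlm_begin_recovery() raises the ACTIVE bit under the domain spinlock, dlm_end_recovery() clears it and wakes, and dlm_wait_for_recovery() parks callers on the predicate. A top-level API caller therefore reduces to the following (sketch; do_locking_work() is a placeholder for the real dlmlock/dlmunlock paths, which live elsewhere):

int do_locking_work(struct dlm_ctxt *dlm);	/* placeholder */

static int guarded_lock_op(struct dlm_ctxt *dlm)
{
	/* block only while the reco thread is still flagging the
	 * affected lock resources; once each lockres carries the
	 * RECOVERY flag, this coarse gate drops and the per-lockres
	 * waits take over */
	dlm_wait_for_recovery(dlm);

	return do_locking_work(dlm);
}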
297 | static int dlm_do_recovery(struct dlm_ctxt *dlm) | ||
298 | { | ||
299 | int status = 0; | ||
300 | |||
301 | spin_lock(&dlm->spinlock); | ||
302 | |||
303 | /* check to see if the new master has died */ | ||
304 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM && | ||
305 | test_bit(dlm->reco.new_master, dlm->recovery_map)) { | ||
306 | mlog(0, "new master %u died while recovering %u!\n", | ||
307 | dlm->reco.new_master, dlm->reco.dead_node); | ||
308 | /* unset the new_master, leave dead_node */ | ||
309 | dlm->reco.new_master = O2NM_INVALID_NODE_NUM; | ||
310 | } | ||
311 | |||
312 | /* select a target to recover */ | ||
313 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | ||
314 | int bit; | ||
315 | |||
316 | bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); | ||
317 | if (bit >= O2NM_MAX_NODES) | ||
318 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
319 | else | ||
320 | dlm->reco.dead_node = bit; | ||
321 | } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) { | ||
322 | /* BUG? */ | ||
323 | mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n", | ||
324 | dlm->reco.dead_node); | ||
325 | dlm->reco.dead_node = O2NM_INVALID_NODE_NUM; | ||
326 | } | ||
327 | |||
328 | if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) { | ||
329 | // mlog(0, "nothing to recover! sleeping now!\n"); | ||
330 | spin_unlock(&dlm->spinlock); | ||
331 | /* return to main thread loop and sleep. */ | ||
332 | return 0; | ||
333 | } | ||
334 | mlog(0, "recovery thread found node %u in the recovery map!\n", | ||
335 | dlm->reco.dead_node); | ||
336 | spin_unlock(&dlm->spinlock); | ||
337 | |||
338 | /* take write barrier */ | ||
339 | /* (stops the list reshuffling thread, proxy ast handling) */ | ||
340 | dlm_begin_recovery(dlm); | ||
341 | |||
342 | if (dlm->reco.new_master == dlm->node_num) | ||
343 | goto master_here; | ||
344 | |||
345 | if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) { | ||
346 | /* choose a new master */ | ||
347 | if (!dlm_pick_recovery_master(dlm)) { | ||
348 | /* already notified everyone. go. */ | ||
349 | dlm->reco.new_master = dlm->node_num; | ||
350 | goto master_here; | ||
351 | } | ||
352 | mlog(0, "another node will master this recovery session.\n"); | ||
353 | } | ||
354 | mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n", | ||
355 | dlm->name, dlm->reco.new_master, | ||
356 | dlm->node_num, dlm->reco.dead_node); | ||
357 | |||
358 | /* it is safe to start everything back up here | ||
359 | * because all of the dead node's lock resources | ||
360 | * have been marked as in-recovery */ | ||
361 | dlm_end_recovery(dlm); | ||
362 | |||
363 | /* sleep out in main dlm_recovery_thread loop. */ | ||
364 | return 0; | ||
365 | |||
366 | master_here: | ||
367 | mlog(0, "mastering recovery of %s:%u here(this=%u)!\n", | ||
368 | dlm->name, dlm->reco.dead_node, dlm->node_num); | ||
369 | |||
370 | status = dlm_remaster_locks(dlm, dlm->reco.dead_node); | ||
371 | if (status < 0) { | ||
372 | mlog(ML_ERROR, "error %d remastering locks for node %u, " | ||
373 | "retrying.\n", status, dlm->reco.dead_node); | ||
374 | } else { | ||
375 | /* success! see if any other nodes need recovery */ | ||
376 | dlm_reset_recovery(dlm); | ||
377 | } | ||
378 | dlm_end_recovery(dlm); | ||
379 | |||
380 | /* continue and look for another dead node */ | ||
381 | return -EAGAIN; | ||
382 | } | ||
383 | |||
384 | static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) | ||
385 | { | ||
386 | int status = 0; | ||
387 | struct dlm_reco_node_data *ndata; | ||
388 | struct list_head *iter; | ||
389 | int all_nodes_done; | ||
390 | int destroy = 0; | ||
391 | int pass = 0; | ||
392 | |||
393 | status = dlm_init_recovery_area(dlm, dead_node); | ||
394 | if (status < 0) | ||
395 | goto leave; | ||
396 | |||
397 | /* safe to access the node data list without a lock, since this | ||
398 | * process is the only one to change the list */ | ||
399 | list_for_each(iter, &dlm->reco.node_data) { | ||
400 | ndata = list_entry (iter, struct dlm_reco_node_data, list); | ||
401 | BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); | ||
402 | ndata->state = DLM_RECO_NODE_DATA_REQUESTING; | ||
403 | |||
404 | mlog(0, "requesting lock info from node %u\n", | ||
405 | ndata->node_num); | ||
406 | |||
407 | if (ndata->node_num == dlm->node_num) { | ||
408 | ndata->state = DLM_RECO_NODE_DATA_DONE; | ||
409 | continue; | ||
410 | } | ||
411 | |||
412 | status = dlm_request_all_locks(dlm, ndata->node_num, dead_node); | ||
413 | if (status < 0) { | ||
414 | mlog_errno(status); | ||
415 | if (dlm_is_host_down(status)) | ||
416 | ndata->state = DLM_RECO_NODE_DATA_DEAD; | ||
417 | else { | ||
418 | destroy = 1; | ||
419 | goto leave; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | switch (ndata->state) { | ||
424 | case DLM_RECO_NODE_DATA_INIT: | ||
425 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | ||
426 | case DLM_RECO_NODE_DATA_REQUESTED: | ||
427 | BUG(); | ||
428 | break; | ||
429 | case DLM_RECO_NODE_DATA_DEAD: | ||
430 | mlog(0, "node %u died after requesting " | ||
431 | "recovery info for node %u\n", | ||
432 | ndata->node_num, dead_node); | ||
433 | // start all over | ||
434 | destroy = 1; | ||
435 | status = -EAGAIN; | ||
436 | goto leave; | ||
437 | case DLM_RECO_NODE_DATA_REQUESTING: | ||
438 | ndata->state = DLM_RECO_NODE_DATA_REQUESTED; | ||
439 | mlog(0, "now receiving recovery data from " | ||
440 | "node %u for dead node %u\n", | ||
441 | ndata->node_num, dead_node); | ||
442 | break; | ||
443 | case DLM_RECO_NODE_DATA_RECEIVING: | ||
444 | mlog(0, "already receiving recovery data from " | ||
445 | "node %u for dead node %u\n", | ||
446 | ndata->node_num, dead_node); | ||
447 | break; | ||
448 | case DLM_RECO_NODE_DATA_DONE: | ||
449 | mlog(0, "already DONE receiving recovery data " | ||
450 | "from node %u for dead node %u\n", | ||
451 | ndata->node_num, dead_node); | ||
452 | break; | ||
453 | } | ||
454 | } | ||
455 | |||
456 | mlog(0, "done requesting all lock info\n"); | ||
457 | |||
458 | /* nodes should be sending reco data now | ||
459 | * just need to wait */ | ||
460 | |||
461 | while (1) { | ||
462 | /* check all the nodes now to see if we are | ||
463 | * done, or if anyone died */ | ||
464 | all_nodes_done = 1; | ||
465 | spin_lock(&dlm_reco_state_lock); | ||
466 | list_for_each(iter, &dlm->reco.node_data) { | ||
467 | ndata = list_entry (iter, struct dlm_reco_node_data, list); | ||
468 | |||
469 | mlog(0, "checking recovery state of node %u\n", | ||
470 | ndata->node_num); | ||
471 | switch (ndata->state) { | ||
472 | case DLM_RECO_NODE_DATA_INIT: | ||
473 | case DLM_RECO_NODE_DATA_REQUESTING: | ||
474 | mlog(ML_ERROR, "bad ndata state for " | ||
475 | "node %u: state=%d\n", | ||
476 | ndata->node_num, ndata->state); | ||
477 | BUG(); | ||
478 | break; | ||
479 | case DLM_RECO_NODE_DATA_DEAD: | ||
480 | mlog(0, "node %u died after " | ||
481 | "requesting recovery info for " | ||
482 | "node %u\n", ndata->node_num, | ||
483 | dead_node); | ||
484 | spin_unlock(&dlm_reco_state_lock); | ||
485 | // start all over | ||
486 | destroy = 1; | ||
487 | status = -EAGAIN; | ||
488 | goto leave; | ||
489 | case DLM_RECO_NODE_DATA_RECEIVING: | ||
490 | case DLM_RECO_NODE_DATA_REQUESTED: | ||
491 | all_nodes_done = 0; | ||
492 | break; | ||
493 | case DLM_RECO_NODE_DATA_DONE: | ||
494 | break; | ||
495 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | ||
496 | break; | ||
497 | } | ||
498 | } | ||
499 | spin_unlock(&dlm_reco_state_lock); | ||
500 | |||
501 | mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, | ||
502 | all_nodes_done?"yes":"no"); | ||
503 | if (all_nodes_done) { | ||
504 | int ret; | ||
505 | |||
506 | /* all nodes are now in DLM_RECO_NODE_DATA_DONE state | ||
507 | * just send a finalize message to everyone and | ||
508 | * clean up */ | ||
509 | mlog(0, "all nodes are done! send finalize\n"); | ||
510 | ret = dlm_send_finalize_reco_message(dlm); | ||
511 | if (ret < 0) | ||
512 | mlog_errno(ret); | ||
513 | |||
514 | spin_lock(&dlm->spinlock); | ||
515 | dlm_finish_local_lockres_recovery(dlm, dead_node, | ||
516 | dlm->node_num); | ||
517 | spin_unlock(&dlm->spinlock); | ||
518 | mlog(0, "should be done with recovery!\n"); | ||
519 | |||
520 | mlog(0, "finishing recovery of %s at %lu, " | ||
521 | "dead=%u, this=%u, new=%u\n", dlm->name, | ||
522 | jiffies, dlm->reco.dead_node, | ||
523 | dlm->node_num, dlm->reco.new_master); | ||
524 | destroy = 1; | ||
525 | status = ret; | ||
526 | /* rescan everything marked dirty along the way */ | ||
527 | dlm_kick_thread(dlm, NULL); | ||
528 | break; | ||
529 | } | ||
530 | /* wait to be signalled, with periodic timeout | ||
531 | * to check for node death */ | ||
532 | wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, | ||
533 | kthread_should_stop(), | ||
534 | msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS)); | ||
535 | |||
536 | } | ||
537 | |||
538 | leave: | ||
539 | if (destroy) | ||
540 | dlm_destroy_recovery_area(dlm, dead_node); | ||
541 | |||
542 | mlog_exit(status); | ||
543 | return status; | ||
544 | } | ||
545 | |||
546 | static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | ||
547 | { | ||
548 | int num=0; | ||
549 | struct dlm_reco_node_data *ndata; | ||
550 | |||
551 | spin_lock(&dlm->spinlock); | ||
552 | memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map)); | ||
553 | /* nodes can only be removed (by dying) after dropping | ||
554 | * this lock, and death will be trapped later, so this should do */ | ||
555 | spin_unlock(&dlm->spinlock); | ||
556 | |||
557 | while (1) { | ||
558 | num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num); | ||
559 | if (num >= O2NM_MAX_NODES) { | ||
560 | break; | ||
561 | } | ||
562 | BUG_ON(num == dead_node); | ||
563 | |||
564 | ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL); | ||
565 | if (!ndata) { | ||
566 | dlm_destroy_recovery_area(dlm, dead_node); | ||
567 | return -ENOMEM; | ||
568 | } | ||
569 | ndata->node_num = num; | ||
570 | ndata->state = DLM_RECO_NODE_DATA_INIT; | ||
571 | spin_lock(&dlm_reco_state_lock); | ||
572 | list_add_tail(&ndata->list, &dlm->reco.node_data); | ||
573 | spin_unlock(&dlm_reco_state_lock); | ||
574 | num++; | ||
575 | } | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node) | ||
581 | { | ||
582 | struct list_head *iter, *iter2; | ||
583 | struct dlm_reco_node_data *ndata; | ||
584 | LIST_HEAD(tmplist); | ||
585 | |||
586 | spin_lock(&dlm_reco_state_lock); | ||
587 | list_splice_init(&dlm->reco.node_data, &tmplist); | ||
588 | spin_unlock(&dlm_reco_state_lock); | ||
589 | |||
590 | list_for_each_safe(iter, iter2, &tmplist) { | ||
591 | ndata = list_entry (iter, struct dlm_reco_node_data, list); | ||
592 | list_del_init(&ndata->list); | ||
593 | kfree(ndata); | ||
594 | } | ||
595 | } | ||
596 | |||
597 | static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, | ||
598 | u8 dead_node) | ||
599 | { | ||
600 | struct dlm_lock_request lr; | ||
601 | int ret; | ||
602 | |||
603 | mlog(0, "\n"); | ||
604 | |||
605 | |||
606 | mlog(0, "dlm_request_all_locks: dead node is %u, sending request " | ||
607 | "to %u\n", dead_node, request_from); | ||
608 | |||
609 | memset(&lr, 0, sizeof(lr)); | ||
610 | lr.node_idx = dlm->node_num; | ||
611 | lr.dead_node = dead_node; | ||
612 | |||
613 | // send message; o2net returns 0 or a negative errno | ||
614 | |||
615 | ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key, | ||
616 | &lr, sizeof(lr), request_from, NULL); | ||
617 | |||
618 | /* negative status is handled by caller */ | ||
619 | if (ret < 0) | ||
620 | mlog_errno(ret); | ||
621 | |||
622 | // return from here, then | ||
623 | // sleep until all received or error | ||
624 | return ret; | ||
625 | |||
626 | } | ||
627 | |||
628 | int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data) | ||
629 | { | ||
630 | struct dlm_ctxt *dlm = data; | ||
631 | struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf; | ||
632 | char *buf = NULL; | ||
633 | struct dlm_work_item *item = NULL; | ||
634 | |||
635 | if (!dlm_grab(dlm)) | ||
636 | return -EINVAL; | ||
637 | |||
638 | BUG_ON(lr->dead_node != dlm->reco.dead_node); | ||
639 | |||
640 | item = kcalloc(1, sizeof(*item), GFP_KERNEL); | ||
641 | if (!item) { | ||
642 | dlm_put(dlm); | ||
643 | return -ENOMEM; | ||
644 | } | ||
645 | |||
646 | /* this will get freed by dlm_request_all_locks_worker */ | ||
647 | buf = (char *) __get_free_page(GFP_KERNEL); | ||
648 | if (!buf) { | ||
649 | kfree(item); | ||
650 | dlm_put(dlm); | ||
651 | return -ENOMEM; | ||
652 | } | ||
653 | |||
654 | /* queue up work for dlm_request_all_locks_worker */ | ||
655 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
656 | dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf); | ||
657 | item->u.ral.reco_master = lr->node_idx; | ||
658 | item->u.ral.dead_node = lr->dead_node; | ||
659 | spin_lock(&dlm->work_lock); | ||
660 | list_add_tail(&item->list, &dlm->work_list); | ||
661 | spin_unlock(&dlm->work_lock); | ||
662 | schedule_work(&dlm->dispatched_work); | ||
663 | |||
664 | dlm_put(dlm); | ||
665 | return 0; | ||
666 | } | ||
667 | |||
668 | static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data) | ||
669 | { | ||
670 | struct dlm_migratable_lockres *mres; | ||
671 | struct dlm_lock_resource *res; | ||
672 | struct dlm_ctxt *dlm; | ||
673 | LIST_HEAD(resources); | ||
674 | struct list_head *iter; | ||
675 | int ret; | ||
676 | u8 dead_node, reco_master; | ||
677 | |||
678 | dlm = item->dlm; | ||
679 | dead_node = item->u.ral.dead_node; | ||
680 | reco_master = item->u.ral.reco_master; | ||
681 | BUG_ON(dead_node != dlm->reco.dead_node); | ||
682 | BUG_ON(reco_master != dlm->reco.new_master); | ||
683 | |||
684 | mres = (struct dlm_migratable_lockres *)data; | ||
685 | |||
686 | /* lock resources should have already been moved to the | ||
687 | * dlm->reco.resources list. now move items from that list | ||
688 | * to a temp list if the dead owner matches. note that the | ||
689 | * whole cluster recovers only one node at a time, so we | ||
690 | * can safely move UNKNOWN lock resources for each recovery | ||
691 | * session. */ | ||
692 | dlm_move_reco_locks_to_list(dlm, &resources, dead_node); | ||
693 | |||
694 | /* now we can begin blasting lockreses without the dlm lock */ | ||
695 | list_for_each(iter, &resources) { | ||
696 | res = list_entry (iter, struct dlm_lock_resource, recovering); | ||
697 | ret = dlm_send_one_lockres(dlm, res, mres, reco_master, | ||
698 | DLM_MRES_RECOVERY); | ||
699 | if (ret < 0) | ||
700 | mlog_errno(ret); | ||
701 | } | ||
702 | |||
703 | /* move the resources back to the list */ | ||
704 | spin_lock(&dlm->spinlock); | ||
705 | list_splice_init(&resources, &dlm->reco.resources); | ||
706 | spin_unlock(&dlm->spinlock); | ||
707 | |||
708 | ret = dlm_send_all_done_msg(dlm, dead_node, reco_master); | ||
709 | if (ret < 0) | ||
710 | mlog_errno(ret); | ||
711 | |||
712 | free_page((unsigned long)data); | ||
713 | } | ||
714 | |||
715 | |||
716 | static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) | ||
717 | { | ||
718 | int ret, tmpret; | ||
719 | struct dlm_reco_data_done done_msg; | ||
720 | |||
721 | memset(&done_msg, 0, sizeof(done_msg)); | ||
722 | done_msg.node_idx = dlm->node_num; | ||
723 | done_msg.dead_node = dead_node; | ||
724 | mlog(0, "sending DATA DONE message to %u, " | ||
725 | "my node=%u, dead node=%u\n", send_to, done_msg.node_idx, | ||
726 | done_msg.dead_node); | ||
727 | |||
728 | ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, | ||
729 | sizeof(done_msg), send_to, &tmpret); | ||
730 | /* negative status is ignored by the caller */ | ||
731 | if (ret >= 0) | ||
732 | ret = tmpret; | ||
733 | return ret; | ||
734 | } | ||
735 | |||
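dlm_send_all_done_msg() shows the o2net calling convention used throughout this file: the return value of o2net_send_message() reports transport trouble as a negative errno, while the final argument carries back the remote handler's own status. Folding the two into a single result, as above, is the common shape (sketch; MY_MSG_TYPE and the helper name are placeholders):

static int send_and_get_status(struct dlm_ctxt *dlm, void *wire_msg,
			       u32 len, u8 target)
{
	int status = 0;
	int ret;

	ret = o2net_send_message(MY_MSG_TYPE, dlm->key, wire_msg,
				 len, target, &status);
	if (ret < 0)
		return ret;	/* transport: the message never arrived */
	return status;		/* delivered: report the handler's verdict */
}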
736 | |||
737 | int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data) | ||
738 | { | ||
739 | struct dlm_ctxt *dlm = data; | ||
740 | struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf; | ||
741 | struct list_head *iter; | ||
742 | struct dlm_reco_node_data *ndata = NULL; | ||
743 | int ret = -EINVAL; | ||
744 | |||
745 | if (!dlm_grab(dlm)) | ||
746 | return -EINVAL; | ||
747 | |||
748 | mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " | ||
749 | "node_idx=%u, this node=%u\n", done->dead_node, | ||
750 | dlm->reco.dead_node, done->node_idx, dlm->node_num); | ||
751 | BUG_ON(done->dead_node != dlm->reco.dead_node); | ||
752 | |||
753 | spin_lock(&dlm_reco_state_lock); | ||
754 | list_for_each(iter, &dlm->reco.node_data) { | ||
755 | ndata = list_entry (iter, struct dlm_reco_node_data, list); | ||
756 | if (ndata->node_num != done->node_idx) | ||
757 | continue; | ||
758 | |||
759 | switch (ndata->state) { | ||
760 | case DLM_RECO_NODE_DATA_INIT: | ||
761 | case DLM_RECO_NODE_DATA_DEAD: | ||
762 | case DLM_RECO_NODE_DATA_DONE: | ||
763 | case DLM_RECO_NODE_DATA_FINALIZE_SENT: | ||
764 | mlog(ML_ERROR, "bad ndata state for node %u:" | ||
765 | " state=%d\n", ndata->node_num, | ||
766 | ndata->state); | ||
767 | BUG(); | ||
768 | break; | ||
769 | case DLM_RECO_NODE_DATA_RECEIVING: | ||
770 | case DLM_RECO_NODE_DATA_REQUESTED: | ||
771 | case DLM_RECO_NODE_DATA_REQUESTING: | ||
772 | mlog(0, "node %u is DONE sending " | ||
773 | "recovery data!\n", | ||
774 | ndata->node_num); | ||
775 | |||
776 | ndata->state = DLM_RECO_NODE_DATA_DONE; | ||
777 | ret = 0; | ||
778 | break; | ||
779 | } | ||
780 | } | ||
781 | spin_unlock(&dlm_reco_state_lock); | ||
782 | |||
783 | /* wake the recovery thread, some node is done */ | ||
784 | if (!ret) | ||
785 | dlm_kick_recovery_thread(dlm); | ||
786 | |||
787 | if (ret < 0) | ||
788 | mlog(ML_ERROR, "failed to find recovery node data for node " | ||
789 | "%u\n", done->node_idx); | ||
790 | dlm_put(dlm); | ||
791 | |||
792 | mlog(0, "leaving reco data done handler, ret=%d\n", ret); | ||
793 | return ret; | ||
794 | } | ||
795 | |||
796 | static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, | ||
797 | struct list_head *list, | ||
798 | u8 dead_node) | ||
799 | { | ||
800 | struct dlm_lock_resource *res; | ||
801 | struct list_head *iter, *iter2; | ||
802 | |||
803 | spin_lock(&dlm->spinlock); | ||
804 | list_for_each_safe(iter, iter2, &dlm->reco.resources) { | ||
805 | res = list_entry (iter, struct dlm_lock_resource, recovering); | ||
806 | if (dlm_is_recovery_lock(res->lockname.name, | ||
807 | res->lockname.len)) | ||
808 | continue; | ||
809 | if (res->owner == dead_node) { | ||
810 | mlog(0, "found lockres owned by dead node while " | ||
811 | "doing recovery for node %u. sending it.\n", | ||
812 | dead_node); | ||
813 | list_del_init(&res->recovering); | ||
814 | list_add_tail(&res->recovering, list); | ||
815 | } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
816 | mlog(0, "found UNKNOWN owner while doing recovery " | ||
817 | "for node %u. sending it.\n", dead_node); | ||
818 | list_del_init(&res->recovering); | ||
819 | list_add_tail(&res->recovering, list); | ||
820 | } | ||
821 | } | ||
822 | spin_unlock(&dlm->spinlock); | ||
823 | } | ||
824 | |||
825 | static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res) | ||
826 | { | ||
827 | int total_locks = 0; | ||
828 | struct list_head *iter, *queue = &res->granted; | ||
829 | int i; | ||
830 | |||
831 | for (i=0; i<3; i++) { | ||
832 | list_for_each(iter, queue) | ||
833 | total_locks++; | ||
834 | queue++; | ||
835 | } | ||
836 | return total_locks; | ||
837 | } | ||
838 | |||
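dlm_num_locks_in_lockres() (like dlm_list_num_to_pointer() further down) leans on a layout trick: the granted, converting and blocked list heads are declared back to back in struct dlm_lock_resource, so queue++ steps from one queue to the next. A standalone, runnable illustration of the idiom with toy types (note that the C standard only loosely guarantees this: members cannot be reordered, but padding is in principle possible; identical adjacent members are contiguous in practice):

#include <stdio.h>

struct node { struct node *next; };

struct res {
	struct node *granted;		/* three adjacent members:  */
	struct node *converting;	/* q++ walks granted ->     */
	struct node *blocked;		/* converting -> blocked    */
};

static int count_all(struct res *r)
{
	struct node **q = &r->granted;	/* points at the first head */
	int total = 0;

	for (int i = 0; i < 3; i++, q++)	/* rely on adjacency */
		for (struct node *n = *q; n; n = n->next)
			total++;
	return total;
}

int main(void)
{
	struct node a = { 0 }, b = { &a }, c = { 0 };
	struct res r = { .granted = &b, .converting = 0, .blocked = &c };

	printf("%d\n", count_all(&r));	/* prints 3 */
	return 0;
}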
839 | |||
840 | static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, | ||
841 | struct dlm_migratable_lockres *mres, | ||
842 | u8 send_to, | ||
843 | struct dlm_lock_resource *res, | ||
844 | int total_locks) | ||
845 | { | ||
846 | u64 mig_cookie = be64_to_cpu(mres->mig_cookie); | ||
847 | int mres_total_locks = be32_to_cpu(mres->total_locks); | ||
848 | int sz, ret = 0, status = 0; | ||
849 | u8 orig_flags = mres->flags, | ||
850 | orig_master = mres->master; | ||
851 | |||
852 | BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS); | ||
853 | if (!mres->num_locks) | ||
854 | return 0; | ||
855 | |||
856 | sz = sizeof(struct dlm_migratable_lockres) + | ||
857 | (mres->num_locks * sizeof(struct dlm_migratable_lock)); | ||
858 | |||
859 | /* add an all-done flag if we reached the last lock | ||
860 | * (orig_flags was already saved at its declaration above) */ | ||
861 | BUG_ON(total_locks > mres_total_locks); | ||
862 | if (total_locks == mres_total_locks) | ||
863 | mres->flags |= DLM_MRES_ALL_DONE; | ||
864 | |||
865 | /* send it */ | ||
866 | ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres, | ||
867 | sz, send_to, &status); | ||
868 | if (ret < 0) { | ||
869 | /* XXX: negative status is not handled. | ||
870 | * this will end up killing this node. */ | ||
871 | mlog_errno(ret); | ||
872 | } else { | ||
873 | /* might get an -ENOMEM back here */ | ||
874 | ret = status; | ||
875 | if (ret < 0) { | ||
876 | mlog_errno(ret); | ||
877 | |||
878 | if (ret == -EFAULT) { | ||
879 | mlog(ML_ERROR, "node %u told me to kill " | ||
880 | "myself!\n", send_to); | ||
881 | BUG(); | ||
882 | } | ||
883 | } | ||
884 | } | ||
885 | |||
886 | /* zero and reinit the message buffer */ | ||
887 | dlm_init_migratable_lockres(mres, res->lockname.name, | ||
888 | res->lockname.len, mres_total_locks, | ||
889 | mig_cookie, orig_flags, orig_master); | ||
890 | return ret; | ||
891 | } | ||
892 | |||
893 | static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, | ||
894 | const char *lockname, int namelen, | ||
895 | int total_locks, u64 cookie, | ||
896 | u8 flags, u8 master) | ||
897 | { | ||
898 | /* mres here is one full page */ | ||
899 | memset(mres, 0, PAGE_SIZE); | ||
900 | mres->lockname_len = namelen; | ||
901 | memcpy(mres->lockname, lockname, namelen); | ||
902 | mres->num_locks = 0; | ||
903 | mres->total_locks = cpu_to_be32(total_locks); | ||
904 | mres->mig_cookie = cpu_to_be64(cookie); | ||
905 | mres->flags = flags; | ||
906 | mres->master = master; | ||
907 | } | ||
908 | |||
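dlm_init_migratable_lockres() zeroes a full page because the migration buffer is exactly one page: a fixed header followed by up to DLM_MAX_MIGRATABLE_LOCKS per-lock records, and dlm_send_mig_lockres_msg() computes the flush size as header plus num_locks records. The sizing logic written out (sketch only: the 64- and 16-byte stand-ins below are made up, not the real dlm_migratable_lockres layout):

#include <stddef.h>

#define PAGE_SIZE 4096			/* assumed for illustration */

struct mig_lock { char bytes[16]; };	/* stand-in per-lock record */

struct mig_lockres {
	char header[64];		/* stand-in fixed header */
	struct mig_lock ml[];		/* flexible array of locks */
};

/* bytes actually sent for a partially filled buffer */
static size_t wire_size(int num_locks)
{
	return sizeof(struct mig_lockres) +
	       num_locks * sizeof(struct mig_lock);
}

/* how many records fit in one page: the analogue of
 * DLM_MAX_MIGRATABLE_LOCKS in the real header */
enum {
	MAX_LOCKS = (PAGE_SIZE - sizeof(struct mig_lockres)) /
		    sizeof(struct mig_lock)
};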
909 | |||
910 | /* returns 1 if this lock fills the network structure, | ||
911 | * 0 otherwise */ | ||
912 | static int dlm_add_lock_to_array(struct dlm_lock *lock, | ||
913 | struct dlm_migratable_lockres *mres, int queue) | ||
914 | { | ||
915 | struct dlm_migratable_lock *ml; | ||
916 | int lock_num = mres->num_locks; | ||
917 | |||
918 | ml = &(mres->ml[lock_num]); | ||
919 | ml->cookie = lock->ml.cookie; | ||
920 | ml->type = lock->ml.type; | ||
921 | ml->convert_type = lock->ml.convert_type; | ||
922 | ml->highest_blocked = lock->ml.highest_blocked; | ||
923 | ml->list = queue; | ||
924 | if (lock->lksb) { | ||
925 | ml->flags = lock->lksb->flags; | ||
926 | /* send our current lvb */ | ||
927 | if (ml->type == LKM_EXMODE || | ||
928 | ml->type == LKM_PRMODE) { | ||
929 | /* if it is already set, this had better be a PR | ||
930 | * and it has to match */ | ||
931 | if (mres->lvb[0] && (ml->type == LKM_EXMODE || | ||
932 | memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) { | ||
933 | mlog(ML_ERROR, "mismatched lvbs!\n"); | ||
934 | __dlm_print_one_lock_resource(lock->lockres); | ||
935 | BUG(); | ||
936 | } | ||
937 | memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN); | ||
938 | } | ||
939 | } | ||
940 | ml->node = lock->ml.node; | ||
941 | mres->num_locks++; | ||
942 | /* we reached the max, send this network message */ | ||
943 | if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS) | ||
944 | return 1; | ||
945 | return 0; | ||
946 | } | ||
947 | |||
948 | |||
949 | int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, | ||
950 | struct dlm_migratable_lockres *mres, | ||
951 | u8 send_to, u8 flags) | ||
952 | { | ||
953 | struct list_head *queue, *iter; | ||
954 | int total_locks, i; | ||
955 | u64 mig_cookie = 0; | ||
956 | struct dlm_lock *lock; | ||
957 | int ret = 0; | ||
958 | |||
959 | BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); | ||
960 | |||
961 | mlog(0, "sending to %u\n", send_to); | ||
962 | |||
963 | total_locks = dlm_num_locks_in_lockres(res); | ||
964 | if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) { | ||
965 | /* rare, but possible */ | ||
966 | mlog(0, "argh. lockres has %d locks. this will " | ||
967 | "require more than one network packet to " | ||
968 | "migrate\n", total_locks); | ||
969 | mig_cookie = dlm_get_next_mig_cookie(); | ||
970 | } | ||
971 | |||
972 | dlm_init_migratable_lockres(mres, res->lockname.name, | ||
973 | res->lockname.len, total_locks, | ||
974 | mig_cookie, flags, res->owner); | ||
975 | |||
976 | total_locks = 0; | ||
977 | for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) { | ||
978 | queue = dlm_list_idx_to_ptr(res, i); | ||
979 | list_for_each(iter, queue) { | ||
980 | lock = list_entry (iter, struct dlm_lock, list); | ||
981 | |||
982 | /* add another lock. */ | ||
983 | total_locks++; | ||
984 | if (!dlm_add_lock_to_array(lock, mres, i)) | ||
985 | continue; | ||
986 | |||
987 | /* this filled the lock message, | ||
988 | * we must send it immediately. */ | ||
989 | ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, | ||
990 | res, total_locks); | ||
991 | if (ret < 0) { | ||
992 | // TODO | ||
993 | mlog(ML_ERROR, "dlm_send_mig_lockres_msg " | ||
994 | "returned %d, TODO\n", ret); | ||
995 | BUG(); | ||
996 | } | ||
997 | } | ||
998 | } | ||
999 | /* flush any remaining locks */ | ||
1000 | ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks); | ||
1001 | if (ret < 0) { | ||
1002 | // TODO | ||
1003 | mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, " | ||
1004 | "TODO\n", ret); | ||
1005 | BUG(); | ||
1006 | } | ||
1007 | return ret; | ||
1008 | } | ||
1009 | |||
1010 | |||
1011 | |||
1012 | /* | ||
1013 | * this message will contain no more than one page worth of | ||
1014 | * recovery data, and it will work on only one lockres. | ||
1015 | * there may be many locks in this page, and we may need to wait | ||
1016 | * for additional packets to complete all the locks (rare, but | ||
1017 | * possible). | ||
1018 | */ | ||
1019 | /* | ||
1020 | * NOTE: the allocation error cases here are scary. | ||
1021 | * we really cannot afford to fail an alloc in recovery. | ||
1022 | * do we spin? returning an error only delays the problem, really. | ||
1023 | */ | ||
1024 | |||
1025 | int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1026 | { | ||
1027 | struct dlm_ctxt *dlm = data; | ||
1028 | struct dlm_migratable_lockres *mres = | ||
1029 | (struct dlm_migratable_lockres *)msg->buf; | ||
1030 | int ret = 0; | ||
1031 | u8 real_master; | ||
1032 | char *buf = NULL; | ||
1033 | struct dlm_work_item *item = NULL; | ||
1034 | struct dlm_lock_resource *res = NULL; | ||
1035 | |||
1036 | if (!dlm_grab(dlm)) | ||
1037 | return -EINVAL; | ||
1038 | |||
1039 | BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); | ||
1040 | |||
1041 | real_master = mres->master; | ||
1042 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1043 | /* cannot migrate a lockres with no master */ | ||
1044 | BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); | ||
1045 | } | ||
1046 | |||
1047 | mlog(0, "%s message received from node %u\n", | ||
1048 | (mres->flags & DLM_MRES_RECOVERY) ? | ||
1049 | "recovery" : "migration", mres->master); | ||
1050 | if (mres->flags & DLM_MRES_ALL_DONE) | ||
1051 | mlog(0, "all done flag. all lockres data received!\n"); | ||
1052 | |||
1053 | ret = -ENOMEM; | ||
1054 | buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL); | ||
1055 | item = kcalloc(1, sizeof(*item), GFP_KERNEL); | ||
1056 | if (!buf || !item) | ||
1057 | goto leave; | ||
1058 | |||
1059 | /* lookup the lock to see if we have a secondary queue for this | ||
1060 | * already... just add the locks in and this will have its owner | ||
1061 | * and RECOVERY flag changed when it completes. */ | ||
1062 | res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); | ||
1063 | if (res) { | ||
1064 | /* this will get a ref on res */ | ||
1065 | /* mark it as recovering/migrating and hash it */ | ||
1066 | spin_lock(&res->spinlock); | ||
1067 | if (mres->flags & DLM_MRES_RECOVERY) { | ||
1068 | res->state |= DLM_LOCK_RES_RECOVERING; | ||
1069 | } else { | ||
1070 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
1071 | /* this is at least the second | ||
1072 | * lockres message */ | ||
1073 | mlog(0, "lock %.*s is already migrating\n", | ||
1074 | mres->lockname_len, | ||
1075 | mres->lockname); | ||
1076 | } else if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1077 | /* caller should BUG */ | ||
1078 | mlog(ML_ERROR, "node is attempting to migrate " | ||
1079 | "lock %.*s, but marked as recovering!\n", | ||
1080 | mres->lockname_len, mres->lockname); | ||
1081 | ret = -EFAULT; | ||
1082 | spin_unlock(&res->spinlock); | ||
1083 | goto leave; | ||
1084 | } | ||
1085 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
1086 | } | ||
1087 | spin_unlock(&res->spinlock); | ||
1088 | } else { | ||
1089 | /* need to allocate, just like if it was | ||
1090 | * mastered here normally */ | ||
1091 | res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); | ||
1092 | if (!res) | ||
1093 | goto leave; | ||
1094 | |||
1095 | /* to match the ref that we would have gotten if | ||
1096 | * dlm_lookup_lockres had succeeded */ | ||
1097 | dlm_lockres_get(res); | ||
1098 | |||
1099 | /* mark it as recovering/migrating and hash it */ | ||
1100 | if (mres->flags & DLM_MRES_RECOVERY) | ||
1101 | res->state |= DLM_LOCK_RES_RECOVERING; | ||
1102 | else | ||
1103 | res->state |= DLM_LOCK_RES_MIGRATING; | ||
1104 | |||
1105 | spin_lock(&dlm->spinlock); | ||
1106 | __dlm_insert_lockres(dlm, res); | ||
1107 | spin_unlock(&dlm->spinlock); | ||
1108 | |||
1109 | /* now that the new lockres is inserted, | ||
1110 | * make it usable by other processes */ | ||
1111 | spin_lock(&res->spinlock); | ||
1112 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
1113 | spin_unlock(&res->spinlock); | ||
1114 | |||
1115 | /* add an extra ref for just-allocated lockres | ||
1116 | * otherwise the lockres will be purged immediately */ | ||
1117 | dlm_lockres_get(res); | ||
1118 | |||
1119 | } | ||
1120 | |||
1121 | /* at this point we have allocated everything we need, | ||
1122 | * and we have a hashed lockres with an extra ref and | ||
1123 | * the proper res->state flags. */ | ||
1124 | ret = 0; | ||
1125 | if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1126 | /* migration cannot have an unknown master */ | ||
1127 | BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); | ||
1128 | mlog(0, "recovery has passed me a lockres with an " | ||
1129 | "unknown owner.. will need to requery: " | ||
1130 | "%.*s\n", mres->lockname_len, mres->lockname); | ||
1131 | } else { | ||
1132 | spin_lock(&res->spinlock); | ||
1133 | dlm_change_lockres_owner(dlm, res, dlm->node_num); | ||
1134 | spin_unlock(&res->spinlock); | ||
1135 | } | ||
1136 | |||
1137 | /* queue up work for dlm_mig_lockres_worker */ | ||
1138 | dlm_grab(dlm); /* get an extra ref for the work item */ | ||
1139 | memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ | ||
1140 | dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); | ||
1141 | item->u.ml.lockres = res; /* already have a ref */ | ||
1142 | item->u.ml.real_master = real_master; | ||
1143 | spin_lock(&dlm->work_lock); | ||
1144 | list_add_tail(&item->list, &dlm->work_list); | ||
1145 | spin_unlock(&dlm->work_lock); | ||
1146 | schedule_work(&dlm->dispatched_work); | ||
1147 | |||
1148 | leave: | ||
1149 | dlm_put(dlm); | ||
1150 | if (ret < 0) { | ||
1151 | /* error path: kfree(NULL) is a safe no-op, | ||
1152 | * so free both unconditionally */ | ||
1153 | kfree(buf); | ||
1154 | kfree(item); | ||
1155 | } | ||
1156 | |||
1157 | mlog_exit(ret); | ||
1158 | return ret; | ||
1159 | } | ||
1160 | |||
1161 | |||
1162 | static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) | ||
1163 | { | ||
1164 | struct dlm_ctxt *dlm; | ||
1165 | struct dlm_migratable_lockres *mres; | ||
1166 | int ret = 0; | ||
1167 | struct dlm_lock_resource *res; | ||
1168 | u8 real_master; | ||
1169 | |||
1170 | dlm = item->dlm; | ||
1171 | mres = (struct dlm_migratable_lockres *)data; | ||
1172 | |||
1173 | res = item->u.ml.lockres; | ||
1174 | real_master = item->u.ml.real_master; | ||
1175 | |||
1176 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1177 | /* this case is super-rare. only occurs if | ||
1178 | * node death happens during migration. */ | ||
1179 | again: | ||
1180 | ret = dlm_lockres_master_requery(dlm, res, &real_master); | ||
1181 | if (ret < 0) { | ||
1182 | mlog(0, "dlm_lockres_master_requery failure: %d\n", | ||
1183 | ret); | ||
1184 | goto again; | ||
1185 | } | ||
1186 | if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1187 | mlog(0, "lockres %.*s not claimed. " | ||
1188 | "this node will take it.\n", | ||
1189 | res->lockname.len, res->lockname.name); | ||
1190 | } else { | ||
1191 | mlog(0, "master needs to respond to sender " | ||
1192 | "that node %u still owns %.*s\n", | ||
1193 | real_master, res->lockname.len, | ||
1194 | res->lockname.name); | ||
1195 | /* cannot touch this lockres */ | ||
1196 | goto leave; | ||
1197 | } | ||
1198 | } | ||
1199 | |||
1200 | ret = dlm_process_recovery_data(dlm, res, mres); | ||
1201 | if (ret < 0) | ||
1202 | mlog(0, "dlm_process_recovery_data returned %d\n", ret); | ||
1203 | else | ||
1204 | mlog(0, "dlm_process_recovery_data succeeded\n"); | ||
1205 | |||
1206 | if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == | ||
1207 | (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { | ||
1208 | ret = dlm_finish_migration(dlm, res, mres->master); | ||
1209 | if (ret < 0) | ||
1210 | mlog_errno(ret); | ||
1211 | } | ||
1212 | |||
1213 | leave: | ||
1214 | kfree(data); | ||
1215 | mlog_exit(ret); | ||
1216 | } | ||
1217 | |||
1218 | |||
1219 | |||
1220 | static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, | ||
1221 | struct dlm_lock_resource *res, | ||
1222 | u8 *real_master) | ||
1223 | { | ||
1224 | struct dlm_node_iter iter; | ||
1225 | int nodenum; | ||
1226 | int ret = 0; | ||
1227 | |||
1228 | *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1229 | |||
1230 | /* we only reach here if one of the two nodes in a | ||
1231 | * migration died while the migration was in progress. | ||
1232 | * at this point we need to requery the master. we | ||
1233 | * know that the new_master got as far as creating | ||
1234 | * an mle on at least one node, but we do not know | ||
1235 | * if any nodes had actually cleared the mle and set | ||
1236 | * the master to the new_master. the old master | ||
1237 | * is supposed to set the owner to UNKNOWN in the | ||
1238 | * event of a new_master death, so the only possible | ||
1239 | * responses that we can get from nodes here are | ||
1240 | * that the master is new_master, or that the master | ||
1241 | * is UNKNOWN. | ||
1242 | * if all nodes come back with UNKNOWN then we know | ||
1243 | * the lock needs remastering here. | ||
1244 | * if any node comes back with a valid master, check | ||
1245 | * to see if that master is the one that we are | ||
1246 | * recovering. if so, then the new_master died and | ||
1247 | * we need to remaster this lock. if not, then the | ||
1248 | * new_master survived and that node will respond to | ||
1249 | * other nodes about the owner. | ||
1250 | * if there is an owner, this node needs to dump this | ||
1251 | * lockres and alert the sender that this lockres | ||
1252 | * was rejected. */ | ||
1253 | spin_lock(&dlm->spinlock); | ||
1254 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
1255 | spin_unlock(&dlm->spinlock); | ||
1256 | |||
1257 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
1258 | /* do not send to self */ | ||
1259 | if (nodenum == dlm->node_num) | ||
1260 | continue; | ||
1261 | ret = dlm_do_master_requery(dlm, res, nodenum, real_master); | ||
1262 | if (ret < 0) { | ||
1263 | mlog_errno(ret); | ||
1264 | BUG(); | ||
1265 | /* TODO: need to figure a way to restart this */ | ||
1266 | } | ||
1267 | if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
1268 | mlog(0, "lock master is %u\n", *real_master); | ||
1269 | break; | ||
1270 | } | ||
1271 | } | ||
1272 | return ret; | ||
1273 | } | ||
1274 | |||
1275 | |||
1276 | static int dlm_do_master_requery(struct dlm_ctxt *dlm, | ||
1277 | struct dlm_lock_resource *res, | ||
1278 | u8 nodenum, u8 *real_master) | ||
1279 | { | ||
1280 | int ret = -EINVAL; | ||
1281 | struct dlm_master_requery req; | ||
1282 | int status = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1283 | |||
1284 | memset(&req, 0, sizeof(req)); | ||
1285 | req.node_idx = dlm->node_num; | ||
1286 | req.namelen = res->lockname.len; | ||
1287 | memcpy(req.name, res->lockname.name, res->lockname.len); | ||
1288 | |||
1289 | ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key, | ||
1290 | &req, sizeof(req), nodenum, &status); | ||
1291 | /* XXX: negative status not handled properly here. */ | ||
1292 | if (ret < 0) | ||
1293 | mlog_errno(ret); | ||
1294 | else { | ||
1295 | BUG_ON(status < 0); | ||
1296 | BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); | ||
1297 | *real_master = (u8) (status & 0xff); | ||
1298 | mlog(0, "node %u responded to master requery with %u\n", | ||
1299 | nodenum, *real_master); | ||
1300 | ret = 0; | ||
1301 | } | ||
1302 | return ret; | ||
1303 | } | ||
1304 | |||
1305 | |||
1306 | /* this function cannot error, so unless the sending | ||
1307 | * or receiving of the message failed, the owner can | ||
1308 | * be trusted */ | ||
1309 | int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data) | ||
1310 | { | ||
1311 | struct dlm_ctxt *dlm = data; | ||
1312 | struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; | ||
1313 | struct dlm_lock_resource *res = NULL; | ||
1314 | int master = DLM_LOCK_RES_OWNER_UNKNOWN; | ||
1315 | u32 flags = DLM_ASSERT_MASTER_REQUERY; | ||
1316 | |||
1317 | if (!dlm_grab(dlm)) { | ||
1318 | /* since the domain has gone away on this | ||
1319 | * node, the proper response is UNKNOWN */ | ||
1320 | return master; | ||
1321 | } | ||
1322 | |||
1323 | spin_lock(&dlm->spinlock); | ||
1324 | res = __dlm_lookup_lockres(dlm, req->name, req->namelen); | ||
1325 | if (res) { | ||
1326 | spin_lock(&res->spinlock); | ||
1327 | master = res->owner; | ||
1328 | if (master == dlm->node_num) { | ||
1329 | int ret = dlm_dispatch_assert_master(dlm, res, | ||
1330 | 0, 0, flags); | ||
1331 | if (ret < 0) { | ||
1332 | mlog_errno(-ENOMEM); | ||
1333 | /* retry!? */ | ||
1334 | BUG(); | ||
1335 | } | ||
1336 | } | ||
1337 | spin_unlock(&res->spinlock); | ||
1338 | } | ||
1339 | spin_unlock(&dlm->spinlock); | ||
1340 | |||
1341 | dlm_put(dlm); | ||
1342 | return master; | ||
1343 | } | ||
1344 | |||
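dlm_master_requery_handler() exploits the fact that an o2net handler's return value rides back to the sender as its status: the owner is a small non-negative number, so no reply structure is needed, and dlm_do_master_requery() narrows it back down with BUG_ONs guarding the cast. The convention in isolation (sketch; every name below is illustrative, and 255 simply stands for the largest encodable answer):

#include <errno.h>

unsigned char current_owner(void);	/* stand-in */
int send_query(int *status);		/* stand-in for the o2net call */

/* receiver: the handler's return value IS the answer (must stay >= 0) */
static int answer_handler(void)
{
	return current_owner();		/* 0..255 travels back as status */
}

/* sender: negative means transport failure, anything else is the answer */
static int ask_owner(unsigned char *owner_out)
{
	int status = 0;
	int ret = send_query(&status);

	if (ret < 0)
		return ret;
	if (status < 0 || status > 255)
		return -ERANGE;		/* the code above BUG()s instead */
	*owner_out = (unsigned char)(status & 0xff);
	return 0;
}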
1345 | static inline struct list_head * | ||
1346 | dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) | ||
1347 | { | ||
1348 | struct list_head *ret; | ||
1349 | BUG_ON(list_num < 0); | ||
1350 | BUG_ON(list_num > 2); | ||
1351 | ret = &(res->granted); | ||
1352 | ret += list_num; | ||
1353 | return ret; | ||
1354 | } | ||
1355 | /* TODO: do ast flush business | ||
1356 | * TODO: do MIGRATING and RECOVERING spinning | ||
1357 | */ | ||
1358 | |||
1359 | /* | ||
1360 | * NOTE about in-flight requests during migration: | ||
1361 | * | ||
1362 | * Before attempting the migrate, the master has marked the lockres as | ||
1363 | * MIGRATING and then flushed all of its pending ASTS. So any in-flight | ||
1364 | * requests either got queued before the MIGRATING flag got set, in which | ||
1365 | * case the lock data will reflect the change and a return message is on | ||
1366 | * the way, or the request failed to get in before MIGRATING got set. In | ||
1367 | * this case, the caller will be told to spin and wait for the MIGRATING | ||
1368 | * flag to be dropped, then recheck the master. | ||
1369 | * This holds true for the convert, cancel and unlock cases, and since lvb | ||
1370 | * updates are tied to these same messages, it applies to lvb updates as | ||
1371 | * well. For the lock case, there is no way a lock can be on the master | ||
1372 | * queue and not be on the secondary queue since the lock is always added | ||
1373 | * locally first. This means that the new target node will never be sent | ||
1374 | * a lock that he doesn't already have on the list. | ||
1375 | * In total, this means that the local lock is correct and should not be | ||
1376 | * updated to match the one sent by the master. Any messages sent back | ||
1377 | * from the master before the MIGRATING flag will bring the lock properly | ||
1378 | * up-to-date, and the change will be ordered properly for the waiter. | ||
1379 | * We will *not* attempt to modify the lock underneath the waiter. | ||
1380 | */ | ||
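A sketch of the caller-side spin the note describes (illustrative only, not committed code): a requester that loses the race with MIGRATING would typically block on the flag via __dlm_wait_on_lockres_flags() -- defined in dlmthread.c later in this series -- and then re-query the master before resending:

static void example_wait_out_migration(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	/* drops and retakes res->spinlock while sleeping; returns
	 * with the spinlock held once MIGRATING has cleared */
	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_MIGRATING);
	spin_unlock(&res->spinlock);
	/* the caller would now recheck res->owner and resend its
	 * convert/unlock/cancel to the new master */
}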
1381 | |||
1382 | static int dlm_process_recovery_data(struct dlm_ctxt *dlm, | ||
1383 | struct dlm_lock_resource *res, | ||
1384 | struct dlm_migratable_lockres *mres) | ||
1385 | { | ||
1386 | struct dlm_migratable_lock *ml; | ||
1387 | struct list_head *queue; | ||
1388 | struct dlm_lock *newlock = NULL; | ||
1389 | struct dlm_lockstatus *lksb = NULL; | ||
1390 | int ret = 0; | ||
1391 | int i; | ||
1392 | struct list_head *iter; | ||
1393 | struct dlm_lock *lock = NULL; | ||
1394 | |||
1395 | mlog(0, "running %d locks for this lockres\n", mres->num_locks); | ||
1396 | for (i=0; i<mres->num_locks; i++) { | ||
1397 | ml = &(mres->ml[i]); | ||
1398 | BUG_ON(ml->highest_blocked != LKM_IVMODE); | ||
1399 | newlock = NULL; | ||
1400 | lksb = NULL; | ||
1401 | |||
1402 | queue = dlm_list_num_to_pointer(res, ml->list); | ||
1403 | |||
1404 | /* if the lock is for the local node it needs to | ||
1405 | * be moved to the proper location within the queue. | ||
1406 | * do not allocate a new lock structure. */ | ||
1407 | if (ml->node == dlm->node_num) { | ||
1408 | /* MIGRATION ONLY! */ | ||
1409 | BUG_ON(!(mres->flags & DLM_MRES_MIGRATION)); | ||
1410 | |||
1411 | spin_lock(&res->spinlock); | ||
1412 | list_for_each(iter, queue) { | ||
1413 | lock = list_entry (iter, struct dlm_lock, list); | ||
1414 | if (lock->ml.cookie != ml->cookie) | ||
1415 | lock = NULL; | ||
1416 | else | ||
1417 | break; | ||
1418 | } | ||
1419 | |||
1420 | /* lock is always created locally first, and | ||
1421 | * destroyed locally last. it must be on the list */ | ||
1422 | if (!lock) { | ||
1423 | mlog(ML_ERROR, "could not find local lock " | ||
1424 | "with cookie %"MLFu64"!\n", | ||
1425 | ml->cookie); | ||
1426 | BUG(); | ||
1427 | } | ||
1428 | BUG_ON(lock->ml.node != ml->node); | ||
1429 | |||
1430 | /* see NOTE above about why we do not update | ||
1431 | * to match the master here */ | ||
1432 | |||
1433 | /* move the lock to its proper place */ | ||
1434 | /* do not alter lock refcount. switching lists. */ | ||
1435 | list_del_init(&lock->list); | ||
1436 | list_add_tail(&lock->list, queue); | ||
1437 | spin_unlock(&res->spinlock); | ||
1438 | |||
1439 | mlog(0, "just reordered a local lock!\n"); | ||
1440 | continue; | ||
1441 | } | ||
1442 | |||
1443 | /* lock is for another node. */ | ||
1444 | newlock = dlm_new_lock(ml->type, ml->node, | ||
1445 | be64_to_cpu(ml->cookie), NULL); | ||
1446 | if (!newlock) { | ||
1447 | ret = -ENOMEM; | ||
1448 | goto leave; | ||
1449 | } | ||
1450 | lksb = newlock->lksb; | ||
1451 | dlm_lock_attach_lockres(newlock, res); | ||
1452 | |||
1453 | if (ml->convert_type != LKM_IVMODE) { | ||
1454 | BUG_ON(queue != &res->converting); | ||
1455 | newlock->ml.convert_type = ml->convert_type; | ||
1456 | } | ||
1457 | lksb->flags |= (ml->flags & | ||
1458 | (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB)); | ||
1459 | |||
1460 | if (mres->lvb[0]) { | ||
1461 | if (lksb->flags & DLM_LKSB_PUT_LVB) { | ||
1462 | /* other node was trying to update | ||
1463 | * lvb when node died. recreate the | ||
1464 | * lksb with the updated lvb. */ | ||
1465 | memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN); | ||
1466 | } else { | ||
1467 | /* otherwise, the node is sending its | ||
1468 | * most recent valid lvb info */ | ||
1469 | BUG_ON(ml->type != LKM_EXMODE && | ||
1470 | ml->type != LKM_PRMODE); | ||
1471 | if (res->lvb[0] && (ml->type == LKM_EXMODE || | ||
1472 | memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) { | ||
1473 | mlog(ML_ERROR, "received bad lvb!\n"); | ||
1474 | __dlm_print_one_lock_resource(res); | ||
1475 | BUG(); | ||
1476 | } | ||
1477 | memcpy(res->lvb, mres->lvb, DLM_LVB_LEN); | ||
1478 | } | ||
1479 | } | ||
1480 | |||
1481 | |||
1482 | /* NOTE: | ||
1483 | * wrt lock queue ordering and recovery: | ||
1484 | * 1. order of locks on granted queue is | ||
1485 | * meaningless. | ||
1486 | * 2. order of locks on converting queue is | ||
1487 | * LOST with the node death. sorry charlie. | ||
1488 | * 3. order of locks on the blocked queue is | ||
1489 | * also LOST. | ||
1490 | * order of locks does not affect integrity, it | ||
1491 | * just means that a lock request may get pushed | ||
1492 | * back in line as a result of the node death. | ||
1493 | * also note that for a given node the lock order | ||
1494 | * for its secondary queue locks is preserved | ||
1495 | * relative to each other, but clearly *not* | ||
1496 | * preserved relative to locks from other nodes. | ||
1497 | */ | ||
1498 | spin_lock(&res->spinlock); | ||
1499 | dlm_lock_get(newlock); | ||
1500 | list_add_tail(&newlock->list, queue); | ||
1501 | spin_unlock(&res->spinlock); | ||
1502 | } | ||
1503 | mlog(0, "done running all the locks\n"); | ||
1504 | |||
1505 | leave: | ||
1506 | if (ret < 0) { | ||
1507 | mlog_errno(ret); | ||
1508 | if (newlock) | ||
1509 | dlm_lock_put(newlock); | ||
1510 | } | ||
1511 | |||
1512 | mlog_exit(ret); | ||
1513 | return ret; | ||
1514 | } | ||
1515 | |||
1516 | void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, | ||
1517 | struct dlm_lock_resource *res) | ||
1518 | { | ||
1519 | int i; | ||
1520 | struct list_head *queue, *iter, *iter2; | ||
1521 | struct dlm_lock *lock; | ||
1522 | |||
1523 | res->state |= DLM_LOCK_RES_RECOVERING; | ||
1524 | if (!list_empty(&res->recovering)) | ||
1525 | list_del_init(&res->recovering); | ||
1526 | list_add_tail(&res->recovering, &dlm->reco.resources); | ||
1527 | |||
1528 | /* find any pending locks and put them back on proper list */ | ||
1529 | for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) { | ||
1530 | queue = dlm_list_idx_to_ptr(res, i); | ||
1531 | list_for_each_safe(iter, iter2, queue) { | ||
1532 | lock = list_entry (iter, struct dlm_lock, list); | ||
1533 | dlm_lock_get(lock); | ||
1534 | if (lock->convert_pending) { | ||
1535 | /* move converting lock back to granted */ | ||
1536 | BUG_ON(i != DLM_CONVERTING_LIST); | ||
1537 | mlog(0, "node died with convert pending " | ||
1538 | "on %.*s. move back to granted list.\n", | ||
1539 | res->lockname.len, res->lockname.name); | ||
1540 | dlm_revert_pending_convert(res, lock); | ||
1541 | lock->convert_pending = 0; | ||
1542 | } else if (lock->lock_pending) { | ||
1543 | /* remove pending lock requests completely */ | ||
1544 | BUG_ON(i != DLM_BLOCKED_LIST); | ||
1545 | mlog(0, "node died with lock pending " | ||
1546 | "on %.*s. remove from blocked list and skip.\n", | ||
1547 | res->lockname.len, res->lockname.name); | ||
1548 | /* lock will be floating until ref in | ||
1549 | * dlmlock_remote is freed after the network | ||
1550 | * call returns. ok for it to not be on any | ||
1551 | * list since no ast can be called | ||
1552 | * (the master is dead). */ | ||
1553 | dlm_revert_pending_lock(res, lock); | ||
1554 | lock->lock_pending = 0; | ||
1555 | } else if (lock->unlock_pending) { | ||
1556 | /* if an unlock was in progress, treat as | ||
1557 | * if this had completed successfully | ||
1558 | * before sending this lock state to the | ||
1559 | * new master. note that the dlm_unlock | ||
1560 | * call is still responsible for calling | ||
1561 | * the unlockast. that will happen after | ||
1562 | * the network call times out. for now, | ||
1563 | * just move lists to prepare the new | ||
1564 | * recovery master. */ | ||
1565 | BUG_ON(i != DLM_GRANTED_LIST); | ||
1566 | mlog(0, "node died with unlock pending " | ||
1567 | "on %.*s. remove from blocked list and skip.\n", | ||
1568 | res->lockname.len, res->lockname.name); | ||
1569 | dlm_commit_pending_unlock(res, lock); | ||
1570 | lock->unlock_pending = 0; | ||
1571 | } else if (lock->cancel_pending) { | ||
1572 | /* if a cancel was in progress, treat as | ||
1573 | * if this had completed successfully | ||
1574 | * before sending this lock state to the | ||
1575 | * new master */ | ||
1576 | BUG_ON(i != DLM_CONVERTING_LIST); | ||
1577 | mlog(0, "node died with cancel pending " | ||
1578 | "on %.*s. move back to granted list.\n", | ||
1579 | res->lockname.len, res->lockname.name); | ||
1580 | dlm_commit_pending_cancel(res, lock); | ||
1581 | lock->cancel_pending = 0; | ||
1582 | } | ||
1583 | dlm_lock_put(lock); | ||
1584 | } | ||
1585 | } | ||
1586 | } | ||
1587 | |||
1588 | |||
1589 | |||
1590 | /* removes all recovered locks from the recovery list. | ||
1591 | * sets the res->owner to the new master. | ||
1592 | * unsets the RECOVERY flag and wakes waiters. */ | ||
1593 | static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, | ||
1594 | u8 dead_node, u8 new_master) | ||
1595 | { | ||
1596 | int i; | ||
1597 | struct list_head *iter, *iter2, *bucket; | ||
1598 | struct dlm_lock_resource *res; | ||
1599 | |||
1600 | mlog_entry_void(); | ||
1601 | |||
1602 | assert_spin_locked(&dlm->spinlock); | ||
1603 | |||
1604 | list_for_each_safe(iter, iter2, &dlm->reco.resources) { | ||
1605 | res = list_entry (iter, struct dlm_lock_resource, recovering); | ||
1606 | if (res->owner == dead_node) { | ||
1607 | list_del_init(&res->recovering); | ||
1608 | spin_lock(&res->spinlock); | ||
1609 | dlm_change_lockres_owner(dlm, res, new_master); | ||
1610 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
1611 | __dlm_dirty_lockres(dlm, res); | ||
1612 | spin_unlock(&res->spinlock); | ||
1613 | wake_up(&res->wq); | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | /* this will become unnecessary eventually, but | ||
1618 | * for now we need to run the whole hash, clear | ||
1619 | * the RECOVERING state and set the owner | ||
1620 | * if necessary */ | ||
1621 | for (i=0; i<DLM_HASH_SIZE; i++) { | ||
1622 | bucket = &(dlm->resources[i]); | ||
1623 | list_for_each(iter, bucket) { | ||
1624 | res = list_entry (iter, struct dlm_lock_resource, list); | ||
1625 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
1626 | if (res->owner == dead_node) { | ||
1627 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
1628 | "was not on recovering list, but " | ||
1629 | "clearing state anyway\n", | ||
1630 | dlm->node_num, res->lockname.len, | ||
1631 | res->lockname.name, new_master); | ||
1632 | } else if (res->owner == dlm->node_num) { | ||
1633 | mlog(0, "(this=%u) res %.*s owner=%u " | ||
1634 | "was not on recovering list, " | ||
1635 | "owner is THIS node, clearing\n", | ||
1636 | dlm->node_num, res->lockname.len, | ||
1637 | res->lockname.name, new_master); | ||
1638 | } else | ||
1639 | continue; | ||
1640 | |||
1641 | spin_lock(&res->spinlock); | ||
1642 | dlm_change_lockres_owner(dlm, res, new_master); | ||
1643 | res->state &= ~DLM_LOCK_RES_RECOVERING; | ||
1644 | __dlm_dirty_lockres(dlm, res); | ||
1645 | spin_unlock(&res->spinlock); | ||
1646 | wake_up(&res->wq); | ||
1647 | } | ||
1648 | } | ||
1649 | } | ||
1650 | } | ||
1651 | |||
1652 | static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local) | ||
1653 | { | ||
1654 | if (local) { | ||
1655 | if (lock->ml.type != LKM_EXMODE && | ||
1656 | lock->ml.type != LKM_PRMODE) | ||
1657 | return 1; | ||
1658 | } else if (lock->ml.type == LKM_EXMODE) | ||
1659 | return 1; | ||
1660 | return 0; | ||
1661 | } | ||
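The decision table this helper encodes, summarized (editorial note derived from the code above and the comments in dlm_revalidate_lvb() below):

/*
 * local == 1: scanning our own locks on a resource another node
 *             masters.  Holding EX or PR guarantees our lvb copy is
 *             still valid, so keep it; anything weaker -> blank it.
 * local == 0: we master the resource and are scanning the dead
 *             node's locks.  A dead EX holder may have updated the
 *             lvb without us seeing the write -> blank it; PR or
 *             lower could not have changed it -> keep it.
 */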
1662 | |||
1663 | static void dlm_revalidate_lvb(struct dlm_ctxt *dlm, | ||
1664 | struct dlm_lock_resource *res, u8 dead_node) | ||
1665 | { | ||
1666 | struct list_head *iter, *queue; | ||
1667 | struct dlm_lock *lock; | ||
1668 | int blank_lvb = 0, local = 0; | ||
1669 | int i; | ||
1670 | u8 search_node; | ||
1671 | |||
1672 | assert_spin_locked(&dlm->spinlock); | ||
1673 | assert_spin_locked(&res->spinlock); | ||
1674 | |||
1675 | if (res->owner == dlm->node_num) | ||
1676 | /* if this node owned the lockres, and if the dead node | ||
1677 | * had an EX when he died, blank out the lvb */ | ||
1678 | search_node = dead_node; | ||
1679 | else { | ||
1680 | /* if this is a secondary lockres, and we had no EX or PR | ||
1681 | * locks granted, we can no longer trust the lvb */ | ||
1682 | search_node = dlm->node_num; | ||
1683 | local = 1; /* check local state for valid lvb */ | ||
1684 | } | ||
1685 | |||
1686 | for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) { | ||
1687 | queue = dlm_list_idx_to_ptr(res, i); | ||
1688 | list_for_each(iter, queue) { | ||
1689 | lock = list_entry (iter, struct dlm_lock, list); | ||
1690 | if (lock->ml.node == search_node) { | ||
1691 | if (dlm_lvb_needs_invalidation(lock, local)) { | ||
1692 | /* zero the lksb lvb and lockres lvb */ | ||
1693 | blank_lvb = 1; | ||
1694 | memset(lock->lksb->lvb, 0, DLM_LVB_LEN); | ||
1695 | } | ||
1696 | } | ||
1697 | } | ||
1698 | } | ||
1699 | |||
1700 | if (blank_lvb) { | ||
1701 | mlog(0, "clearing %.*s lvb, dead node %u had EX\n", | ||
1702 | res->lockname.len, res->lockname.name, dead_node); | ||
1703 | memset(res->lvb, 0, DLM_LVB_LEN); | ||
1704 | } | ||
1705 | } | ||
1706 | |||
1707 | static void dlm_free_dead_locks(struct dlm_ctxt *dlm, | ||
1708 | struct dlm_lock_resource *res, u8 dead_node) | ||
1709 | { | ||
1710 | struct list_head *iter, *tmpiter; | ||
1711 | struct dlm_lock *lock; | ||
1712 | |||
1713 | /* this node is the lockres master: | ||
1714 | * 1) remove any stale locks for the dead node | ||
1715 | * 2) if the dead node had an EX when he died, blank out the lvb | ||
1716 | */ | ||
1717 | assert_spin_locked(&dlm->spinlock); | ||
1718 | assert_spin_locked(&res->spinlock); | ||
1719 | |||
1720 | /* TODO: check pending_asts, pending_basts here */ | ||
1721 | list_for_each_safe(iter, tmpiter, &res->granted) { | ||
1722 | lock = list_entry (iter, struct dlm_lock, list); | ||
1723 | if (lock->ml.node == dead_node) { | ||
1724 | list_del_init(&lock->list); | ||
1725 | dlm_lock_put(lock); | ||
1726 | } | ||
1727 | } | ||
1728 | list_for_each_safe(iter, tmpiter, &res->converting) { | ||
1729 | lock = list_entry (iter, struct dlm_lock, list); | ||
1730 | if (lock->ml.node == dead_node) { | ||
1731 | list_del_init(&lock->list); | ||
1732 | dlm_lock_put(lock); | ||
1733 | } | ||
1734 | } | ||
1735 | list_for_each_safe(iter, tmpiter, &res->blocked) { | ||
1736 | lock = list_entry (iter, struct dlm_lock, list); | ||
1737 | if (lock->ml.node == dead_node) { | ||
1738 | list_del_init(&lock->list); | ||
1739 | dlm_lock_put(lock); | ||
1740 | } | ||
1741 | } | ||
1742 | |||
1743 | /* do not kick thread yet */ | ||
1744 | __dlm_dirty_lockres(dlm, res); | ||
1745 | } | ||
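The three sweeps above differ only in the queue they walk. An equivalent consolidation using dlm_list_idx_to_ptr(), which this file already uses in dlm_move_lockres_to_recovery_list() (refactoring sketch only, not the committed code):

static void example_free_dead_locks(struct dlm_lock_resource *res,
				    u8 dead_node)
{
	struct list_head *queue, *iter, *tmpiter;
	struct dlm_lock *lock;
	int i;

	for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, tmpiter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node != dead_node)
				continue;
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
}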
1746 | |||
1747 | /* if this node is the recovery master, and there are no | ||
1748 | * locks for a given lockres owned by this node that are in | ||
1749 | * either PR or EX mode, zero out the lvb before requesting. | ||
1750 | * | ||
1751 | */ | ||
1752 | |||
1753 | |||
1754 | static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) | ||
1755 | { | ||
1756 | struct list_head *iter; | ||
1757 | struct dlm_lock_resource *res; | ||
1758 | int i; | ||
1759 | struct list_head *bucket; | ||
1760 | |||
1761 | |||
1762 | /* purge any stale mles */ | ||
1763 | dlm_clean_master_list(dlm, dead_node); | ||
1764 | |||
1765 | /* | ||
1766 | * now clean up all lock resources. there are two rules: | ||
1767 | * | ||
1768 | * 1) if the dead node was the master, move the lockres | ||
1769 | * to the recovering list. set the RECOVERING flag. | ||
1770 | * this lockres needs to be cleaned up before it can | ||
1771 | * be used further. | ||
1772 | * | ||
1773 | * 2) if this node was the master, remove all locks from | ||
1774 | * each of the lockres queues that were owned by the | ||
1775 | * dead node. once recovery finishes, the dlm thread | ||
1776 | * can be kicked again to see if any ASTs or BASTs | ||
1777 | * need to be fired as a result. | ||
1778 | */ | ||
1779 | for (i=0; i<DLM_HASH_SIZE; i++) { | ||
1780 | bucket = &(dlm->resources[i]); | ||
1781 | list_for_each(iter, bucket) { | ||
1782 | res = list_entry (iter, struct dlm_lock_resource, list); | ||
1783 | if (dlm_is_recovery_lock(res->lockname.name, | ||
1784 | res->lockname.len)) | ||
1785 | continue; | ||
1786 | |||
1787 | spin_lock(&res->spinlock); | ||
1788 | /* zero the lvb if necessary */ | ||
1789 | dlm_revalidate_lvb(dlm, res, dead_node); | ||
1790 | if (res->owner == dead_node) | ||
1791 | dlm_move_lockres_to_recovery_list(dlm, res); | ||
1792 | else if (res->owner == dlm->node_num) { | ||
1793 | dlm_free_dead_locks(dlm, res, dead_node); | ||
1794 | __dlm_lockres_calc_usage(dlm, res); | ||
1795 | } | ||
1796 | spin_unlock(&res->spinlock); | ||
1797 | } | ||
1798 | } | ||
1799 | |||
1800 | } | ||
1801 | |||
1802 | static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) | ||
1803 | { | ||
1804 | assert_spin_locked(&dlm->spinlock); | ||
1805 | |||
1806 | /* check to see if the node is already considered dead */ | ||
1807 | if (!test_bit(idx, dlm->live_nodes_map)) { | ||
1808 | mlog(0, "for domain %s, node %d is already dead. " | ||
1809 | "another node likely did recovery already.\n", | ||
1810 | dlm->name, idx); | ||
1811 | return; | ||
1812 | } | ||
1813 | |||
1814 | /* check to see if we do not care about this node */ | ||
1815 | if (!test_bit(idx, dlm->domain_map)) { | ||
1816 | /* This also catches the case that we get a node down | ||
1817 | * but haven't joined the domain yet. */ | ||
1818 | mlog(0, "node %u already removed from domain!\n", idx); | ||
1819 | return; | ||
1820 | } | ||
1821 | |||
1822 | clear_bit(idx, dlm->live_nodes_map); | ||
1823 | |||
1824 | /* Clean up join state on node death. */ | ||
1825 | if (dlm->joining_node == idx) { | ||
1826 | mlog(0, "Clearing join state for node %u\n", idx); | ||
1827 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | ||
1828 | } | ||
1829 | |||
1830 | /* make sure local cleanup occurs before the heartbeat events */ | ||
1831 | if (!test_bit(idx, dlm->recovery_map)) | ||
1832 | dlm_do_local_recovery_cleanup(dlm, idx); | ||
1833 | |||
1834 | /* notify anything attached to the heartbeat events */ | ||
1835 | dlm_hb_event_notify_attached(dlm, idx, 0); | ||
1836 | |||
1837 | mlog(0, "node %u being removed from domain map!\n", idx); | ||
1838 | clear_bit(idx, dlm->domain_map); | ||
1839 | /* wake up migration waiters if a node goes down. | ||
1840 | * perhaps later we can genericize this for other waiters. */ | ||
1841 | wake_up(&dlm->migration_wq); | ||
1842 | |||
1843 | if (test_bit(idx, dlm->recovery_map)) | ||
1844 | mlog(0, "domain %s, node %u already added " | ||
1845 | "to recovery map!\n", dlm->name, idx); | ||
1846 | else | ||
1847 | set_bit(idx, dlm->recovery_map); | ||
1848 | } | ||
1849 | |||
1850 | void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data) | ||
1851 | { | ||
1852 | struct dlm_ctxt *dlm = data; | ||
1853 | |||
1854 | if (!dlm_grab(dlm)) | ||
1855 | return; | ||
1856 | |||
1857 | spin_lock(&dlm->spinlock); | ||
1858 | __dlm_hb_node_down(dlm, idx); | ||
1859 | spin_unlock(&dlm->spinlock); | ||
1860 | |||
1861 | dlm_put(dlm); | ||
1862 | } | ||
1863 | |||
1864 | void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data) | ||
1865 | { | ||
1866 | struct dlm_ctxt *dlm = data; | ||
1867 | |||
1868 | if (!dlm_grab(dlm)) | ||
1869 | return; | ||
1870 | |||
1871 | spin_lock(&dlm->spinlock); | ||
1872 | |||
1873 | set_bit(idx, dlm->live_nodes_map); | ||
1874 | |||
1875 | /* notify any mles attached to the heartbeat events */ | ||
1876 | dlm_hb_event_notify_attached(dlm, idx, 1); | ||
1877 | |||
1878 | spin_unlock(&dlm->spinlock); | ||
1879 | |||
1880 | dlm_put(dlm); | ||
1881 | } | ||
1882 | |||
1883 | static void dlm_reco_ast(void *astdata) | ||
1884 | { | ||
1885 | struct dlm_ctxt *dlm = astdata; | ||
1886 | mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n", | ||
1887 | dlm->node_num, dlm->name); | ||
1888 | } | ||
1889 | static void dlm_reco_bast(void *astdata, int blocked_type) | ||
1890 | { | ||
1891 | struct dlm_ctxt *dlm = astdata; | ||
1892 | mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n", | ||
1893 | dlm->node_num, dlm->name); | ||
1894 | } | ||
1895 | static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st) | ||
1896 | { | ||
1897 | mlog(0, "unlockast for recovery lock fired!\n"); | ||
1898 | } | ||
1899 | |||
1900 | |||
1901 | static int dlm_pick_recovery_master(struct dlm_ctxt *dlm) | ||
1902 | { | ||
1903 | enum dlm_status ret; | ||
1904 | struct dlm_lockstatus lksb; | ||
1905 | int status = -EINVAL; | ||
1906 | |||
1907 | mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n", | ||
1908 | dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num); | ||
1909 | retry: | ||
1910 | memset(&lksb, 0, sizeof(lksb)); | ||
1911 | |||
1912 | ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY, | ||
1913 | DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast); | ||
1914 | |||
1915 | if (ret == DLM_NORMAL) { | ||
1916 | mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n", | ||
1917 | dlm->name, dlm->node_num); | ||
1918 | /* I am master, send message to all nodes saying | ||
1919 | * that I am beginning a recovery session */ | ||
1920 | status = dlm_send_begin_reco_message(dlm, | ||
1921 | dlm->reco.dead_node); | ||
1922 | |||
1923 | /* recovery lock is a special case. ast will not get fired, | ||
1924 | * so just go ahead and unlock it. */ | ||
1925 | ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm); | ||
1926 | if (ret != DLM_NORMAL) { | ||
1927 | /* this would really suck. this could only happen | ||
1928 | * if there was a network error during the unlock | ||
1929 | * because of node death. this means the unlock | ||
1930 | * is actually "done" and the lock structure is | ||
1931 | * even freed. we can continue, but only | ||
1932 | * because this specific lock name is special. */ | ||
1933 | mlog(0, "dlmunlock returned %d\n", ret); | ||
1934 | } | ||
1935 | |||
1936 | if (status < 0) { | ||
1937 | mlog(0, "failed to send recovery message. " | ||
1938 | "must retry with new node map.\n"); | ||
1939 | goto retry; | ||
1940 | } | ||
1941 | } else if (ret == DLM_NOTQUEUED) { | ||
1942 | mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n", | ||
1943 | dlm->name, dlm->node_num); | ||
1944 | /* another node is master. wait on | ||
1945 | * reco.new_master != O2NM_INVALID_NODE_NUM */ | ||
1946 | status = -EEXIST; | ||
1947 | } | ||
1948 | |||
1949 | return status; | ||
1950 | } | ||
1951 | |||
1952 | static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node) | ||
1953 | { | ||
1954 | struct dlm_begin_reco br; | ||
1955 | int ret = 0; | ||
1956 | struct dlm_node_iter iter; | ||
1957 | int nodenum; | ||
1958 | int status; | ||
1959 | |||
1960 | mlog_entry("%u\n", dead_node); | ||
1961 | |||
1962 | mlog(0, "dead node is %u\n", dead_node); | ||
1963 | |||
1964 | spin_lock(&dlm->spinlock); | ||
1965 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
1966 | spin_unlock(&dlm->spinlock); | ||
1967 | |||
1968 | clear_bit(dead_node, iter.node_map); | ||
1969 | |||
1970 | memset(&br, 0, sizeof(br)); | ||
1971 | br.node_idx = dlm->node_num; | ||
1972 | br.dead_node = dead_node; | ||
1973 | |||
1974 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
1975 | ret = 0; | ||
1976 | if (nodenum == dead_node) { | ||
1977 | mlog(0, "not sending begin reco to dead node " | ||
1978 | "%u\n", dead_node); | ||
1979 | continue; | ||
1980 | } | ||
1981 | if (nodenum == dlm->node_num) { | ||
1982 | mlog(0, "not sending begin reco to self\n"); | ||
1983 | continue; | ||
1984 | } | ||
1985 | |||
1986 | ret = -EINVAL; | ||
1987 | mlog(0, "attempting to send begin reco msg to %d\n", | ||
1988 | nodenum); | ||
1989 | ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key, | ||
1990 | &br, sizeof(br), nodenum, &status); | ||
1991 | /* negative status is handled ok by caller here */ | ||
1992 | if (ret >= 0) | ||
1993 | ret = status; | ||
1994 | if (ret < 0) { | ||
1995 | struct dlm_lock_resource *res; | ||
1996 | mlog_errno(ret); | ||
1997 | mlog(ML_ERROR, "begin reco of dlm %s to node %u " | ||
1998 | " returned %d\n", dlm->name, nodenum, ret); | ||
1999 | res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME, | ||
2000 | DLM_RECOVERY_LOCK_NAME_LEN); | ||
2001 | if (res) { | ||
2002 | dlm_print_one_lock_resource(res); | ||
2003 | dlm_lockres_put(res); | ||
2004 | } else { | ||
2005 | mlog(ML_ERROR, "recovery lock not found\n"); | ||
2006 | } | ||
2007 | break; | ||
2008 | } | ||
2009 | } | ||
2010 | |||
2011 | return ret; | ||
2012 | } | ||
2013 | |||
2014 | int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data) | ||
2015 | { | ||
2016 | struct dlm_ctxt *dlm = data; | ||
2017 | struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf; | ||
2018 | |||
2019 | /* ok to return 0, domain has gone away */ | ||
2020 | if (!dlm_grab(dlm)) | ||
2021 | return 0; | ||
2022 | |||
2023 | mlog(0, "node %u wants to recover node %u\n", | ||
2024 | br->node_idx, br->dead_node); | ||
2025 | |||
2026 | dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); | ||
2027 | |||
2028 | spin_lock(&dlm->spinlock); | ||
2029 | if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { | ||
2030 | mlog(0, "new_master already set to %u!\n", | ||
2031 | dlm->reco.new_master); | ||
2032 | } | ||
2033 | if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { | ||
2034 | mlog(0, "dead_node already set to %u!\n", | ||
2035 | dlm->reco.dead_node); | ||
2036 | } | ||
2037 | dlm->reco.new_master = br->node_idx; | ||
2038 | dlm->reco.dead_node = br->dead_node; | ||
2039 | if (!test_bit(br->dead_node, dlm->recovery_map)) { | ||
2040 | mlog(ML_ERROR, "recovery master %u sees %u as dead, but this " | ||
2041 | "node has not yet. marking %u as dead\n", | ||
2042 | br->node_idx, br->dead_node, br->dead_node); | ||
2043 | __dlm_hb_node_down(dlm, br->dead_node); | ||
2044 | } | ||
2045 | spin_unlock(&dlm->spinlock); | ||
2046 | |||
2047 | dlm_kick_recovery_thread(dlm); | ||
2048 | dlm_put(dlm); | ||
2049 | return 0; | ||
2050 | } | ||
2051 | |||
2052 | static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) | ||
2053 | { | ||
2054 | int ret = 0; | ||
2055 | struct dlm_finalize_reco fr; | ||
2056 | struct dlm_node_iter iter; | ||
2057 | int nodenum; | ||
2058 | int status; | ||
2059 | |||
2060 | mlog(0, "finishing recovery for node %s:%u\n", | ||
2061 | dlm->name, dlm->reco.dead_node); | ||
2062 | |||
2063 | spin_lock(&dlm->spinlock); | ||
2064 | dlm_node_iter_init(dlm->domain_map, &iter); | ||
2065 | spin_unlock(&dlm->spinlock); | ||
2066 | |||
2067 | memset(&fr, 0, sizeof(fr)); | ||
2068 | fr.node_idx = dlm->node_num; | ||
2069 | fr.dead_node = dlm->reco.dead_node; | ||
2070 | |||
2071 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | ||
2072 | if (nodenum == dlm->node_num) | ||
2073 | continue; | ||
2074 | ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, | ||
2075 | &fr, sizeof(fr), nodenum, &status); | ||
2076 | if (ret >= 0) { | ||
2077 | ret = status; | ||
2078 | if (dlm_is_host_down(ret)) { | ||
2079 | /* this has no effect on this recovery | ||
2080 | * session, so set the status to zero to | ||
2081 | * finish out the last recovery */ | ||
2082 | mlog(ML_ERROR, "node %u went down after this " | ||
2083 | "node finished recovery.\n", nodenum); | ||
2084 | ret = 0; | ||
2085 | } | ||
2086 | } | ||
2087 | if (ret < 0) { | ||
2088 | mlog_errno(ret); | ||
2089 | break; | ||
2090 | } | ||
2091 | } | ||
2092 | |||
2093 | return ret; | ||
2094 | } | ||
2095 | |||
2096 | int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data) | ||
2097 | { | ||
2098 | struct dlm_ctxt *dlm = data; | ||
2099 | struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; | ||
2100 | |||
2101 | /* ok to return 0, domain has gone away */ | ||
2102 | if (!dlm_grab(dlm)) | ||
2103 | return 0; | ||
2104 | |||
2105 | mlog(0, "node %u finalizing recovery of node %u\n", | ||
2106 | fr->node_idx, fr->dead_node); | ||
2107 | |||
2108 | spin_lock(&dlm->spinlock); | ||
2109 | |||
2110 | if (dlm->reco.new_master != fr->node_idx) { | ||
2111 | mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " | ||
2112 | "%u is supposed to be the new master, dead=%u\n", | ||
2113 | fr->node_idx, dlm->reco.new_master, fr->dead_node); | ||
2114 | BUG(); | ||
2115 | } | ||
2116 | if (dlm->reco.dead_node != fr->dead_node) { | ||
2117 | mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " | ||
2118 | "node %u, but node %u is supposed to be dead\n", | ||
2119 | fr->node_idx, fr->dead_node, dlm->reco.dead_node); | ||
2120 | BUG(); | ||
2121 | } | ||
2122 | |||
2123 | dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); | ||
2124 | |||
2125 | spin_unlock(&dlm->spinlock); | ||
2126 | |||
2127 | dlm_reset_recovery(dlm); | ||
2128 | |||
2129 | dlm_kick_recovery_thread(dlm); | ||
2130 | dlm_put(dlm); | ||
2131 | return 0; | ||
2132 | } | ||
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c new file mode 100644 index 000000000000..5be9d14f12cb --- /dev/null +++ b/fs/ocfs2/dlm/dlmthread.c | |||
@@ -0,0 +1,692 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmthread.c | ||
5 | * | ||
6 | * standalone DLM module | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/timer.h> | ||
41 | #include <linux/kthread.h> | ||
42 | |||
43 | |||
44 | #include "cluster/heartbeat.h" | ||
45 | #include "cluster/nodemanager.h" | ||
46 | #include "cluster/tcp.h" | ||
47 | |||
48 | #include "dlmapi.h" | ||
49 | #include "dlmcommon.h" | ||
50 | #include "dlmdomain.h" | ||
51 | |||
52 | #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD) | ||
53 | #include "cluster/masklog.h" | ||
54 | |||
55 | static int dlm_thread(void *data); | ||
56 | |||
57 | static void dlm_flush_asts(struct dlm_ctxt *dlm); | ||
58 | |||
59 | #define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num) | ||
60 | |||
61 | /* will exit holding res->spinlock, but may drop in function */ | ||
62 | /* waits until flags are cleared on res->state */ | ||
63 | void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags) | ||
64 | { | ||
65 | DECLARE_WAITQUEUE(wait, current); | ||
66 | |||
67 | assert_spin_locked(&res->spinlock); | ||
68 | |||
69 | add_wait_queue(&res->wq, &wait); | ||
70 | repeat: | ||
71 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
72 | if (res->state & flags) { | ||
73 | spin_unlock(&res->spinlock); | ||
74 | schedule(); | ||
75 | spin_lock(&res->spinlock); | ||
76 | goto repeat; | ||
77 | } | ||
78 | remove_wait_queue(&res->wq, &wait); | ||
79 | __set_current_state(TASK_RUNNING); | ||
80 | } | ||
81 | |||
82 | |||
83 | static int __dlm_lockres_unused(struct dlm_lock_resource *res) | ||
84 | { | ||
85 | if (list_empty(&res->granted) && | ||
86 | list_empty(&res->converting) && | ||
87 | list_empty(&res->blocked) && | ||
88 | list_empty(&res->dirty)) | ||
89 | return 1; | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | |||
94 | /* Call whenever you may have added or deleted something from one of | ||
95 | * the lockres queues. This will figure out whether it belongs on the | ||
96 | * unused list or not and does the appropriate thing. */ | ||
97 | void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
98 | struct dlm_lock_resource *res) | ||
99 | { | ||
100 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
101 | |||
102 | assert_spin_locked(&dlm->spinlock); | ||
103 | assert_spin_locked(&res->spinlock); | ||
104 | |||
105 | if (__dlm_lockres_unused(res)){ | ||
106 | if (list_empty(&res->purge)) { | ||
107 | mlog(0, "putting lockres %.*s from purge list\n", | ||
108 | res->lockname.len, res->lockname.name); | ||
109 | |||
110 | res->last_used = jiffies; | ||
111 | list_add_tail(&res->purge, &dlm->purge_list); | ||
112 | dlm->purge_count++; | ||
113 | } | ||
114 | } else if (!list_empty(&res->purge)) { | ||
115 | mlog(0, "removing lockres %.*s from purge list\n", | ||
116 | res->lockname.len, res->lockname.name); | ||
117 | |||
118 | list_del_init(&res->purge); | ||
119 | dlm->purge_count--; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, | ||
124 | struct dlm_lock_resource *res) | ||
125 | { | ||
126 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
127 | spin_lock(&dlm->spinlock); | ||
128 | spin_lock(&res->spinlock); | ||
129 | |||
130 | __dlm_lockres_calc_usage(dlm, res); | ||
131 | |||
132 | spin_unlock(&res->spinlock); | ||
133 | spin_unlock(&dlm->spinlock); | ||
134 | } | ||
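An illustrative caller pattern (hypothetical, names invented): after dropping the last lock from a queue, re-evaluating the resource is what lets it reach the purge list:

	spin_lock(&res->spinlock);
	list_del_init(&lock->list);	/* last granted lock goes away */
	spin_unlock(&res->spinlock);
	dlm_lock_put(lock);

	/* neither spinlock may be held here; the helper takes both
	 * and, seeing the resource unused, queues it for purging */
	dlm_lockres_calc_usage(dlm, res);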
135 | |||
136 | /* TODO: Eventual API: Called with the dlm spinlock held, may drop it | ||
137 | * to do migration, but will re-acquire before exit. */ | ||
138 | void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *lockres) | ||
139 | { | ||
140 | int master; | ||
141 | int ret; | ||
142 | |||
143 | spin_lock(&lockres->spinlock); | ||
144 | master = lockres->owner == dlm->node_num; | ||
145 | spin_unlock(&lockres->spinlock); | ||
146 | |||
147 | mlog(0, "purging lockres %.*s, master = %d\n", lockres->lockname.len, | ||
148 | lockres->lockname.name, master); | ||
149 | |||
150 | /* Non master is the easy case -- no migration required, just | ||
151 | * quit. */ | ||
152 | if (!master) | ||
153 | goto finish; | ||
154 | |||
155 | /* Wheee! Migrate lockres here! */ | ||
156 | spin_unlock(&dlm->spinlock); | ||
157 | again: | ||
158 | |||
159 | ret = dlm_migrate_lockres(dlm, lockres, O2NM_MAX_NODES); | ||
160 | if (ret == -ENOTEMPTY) { | ||
161 | mlog(ML_ERROR, "lockres %.*s still has local locks!\n", | ||
162 | lockres->lockname.len, lockres->lockname.name); | ||
163 | |||
164 | BUG(); | ||
165 | } else if (ret < 0) { | ||
166 | mlog(ML_NOTICE, "lockres %.*s: migrate failed, retrying\n", | ||
167 | lockres->lockname.len, lockres->lockname.name); | ||
168 | goto again; | ||
169 | } | ||
170 | |||
171 | spin_lock(&dlm->spinlock); | ||
172 | |||
173 | finish: | ||
174 | if (!list_empty(&lockres->purge)) { | ||
175 | list_del_init(&lockres->purge); | ||
176 | dlm->purge_count--; | ||
177 | } | ||
178 | __dlm_unhash_lockres(lockres); | ||
179 | } | ||
180 | |||
181 | static void dlm_run_purge_list(struct dlm_ctxt *dlm, | ||
182 | int purge_now) | ||
183 | { | ||
184 | unsigned int run_max, unused; | ||
185 | unsigned long purge_jiffies; | ||
186 | struct dlm_lock_resource *lockres; | ||
187 | |||
188 | spin_lock(&dlm->spinlock); | ||
189 | run_max = dlm->purge_count; | ||
190 | |||
191 | while(run_max && !list_empty(&dlm->purge_list)) { | ||
192 | run_max--; | ||
193 | |||
194 | lockres = list_entry(dlm->purge_list.next, | ||
195 | struct dlm_lock_resource, purge); | ||
196 | |||
197 | /* Status of the lockres *might* change so double | ||
198 | * check. If the lockres is unused, holding the dlm | ||
199 | * spinlock will prevent people from getting any more | ||
200 | * refs on it -- there's no need to keep the lockres | ||
201 | * spinlock. */ | ||
202 | spin_lock(&lockres->spinlock); | ||
203 | unused = __dlm_lockres_unused(lockres); | ||
204 | spin_unlock(&lockres->spinlock); | ||
205 | |||
206 | if (!unused) | ||
207 | continue; | ||
208 | |||
209 | purge_jiffies = lockres->last_used + | ||
210 | msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); | ||
211 | |||
212 | /* Make sure that we want to be processing this guy at | ||
213 | * this time. */ | ||
214 | if (!purge_now && time_after(purge_jiffies, jiffies)) { | ||
215 | /* Since resources are added to the purge list | ||
216 | * in tail order, we can stop at the first | ||
217 | * unpurgeable resource -- anyone added after | ||
218 | * him will have a greater last_used value */ | ||
219 | break; | ||
220 | } | ||
221 | |||
222 | list_del_init(&lockres->purge); | ||
223 | dlm->purge_count--; | ||
224 | |||
225 | /* This may drop and reacquire the dlm spinlock if it | ||
226 | * has to do migration. */ | ||
227 | mlog(0, "calling dlm_purge_lockres!\n"); | ||
228 | dlm_purge_lockres(dlm, lockres); | ||
229 | mlog(0, "DONE calling dlm_purge_lockres!\n"); | ||
230 | |||
231 | /* Avoid adding any scheduling latencies */ | ||
232 | cond_resched_lock(&dlm->spinlock); | ||
233 | } | ||
234 | |||
235 | spin_unlock(&dlm->spinlock); | ||
236 | } | ||
237 | |||
238 | static void dlm_shuffle_lists(struct dlm_ctxt *dlm, | ||
239 | struct dlm_lock_resource *res) | ||
240 | { | ||
241 | struct dlm_lock *lock, *target; | ||
242 | struct list_head *iter; | ||
243 | struct list_head *head; | ||
244 | int can_grant = 1; | ||
245 | |||
246 | //mlog(0, "res->lockname.len=%d\n", res->lockname.len); | ||
247 | //mlog(0, "res->lockname.name=%p\n", res->lockname.name); | ||
248 | //mlog(0, "shuffle res %.*s\n", res->lockname.len, | ||
249 | // res->lockname.name); | ||
250 | |||
251 | /* because this function is called with the lockres | ||
252 | * spinlock, and because we know that it is not migrating/ | ||
253 | * recovering/in-progress, it is fine to reserve asts and | ||
254 | * basts right before queueing them all throughout */ | ||
255 | assert_spin_locked(&res->spinlock); | ||
256 | BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| | ||
257 | DLM_LOCK_RES_RECOVERING| | ||
258 | DLM_LOCK_RES_IN_PROGRESS))); | ||
259 | |||
260 | converting: | ||
261 | if (list_empty(&res->converting)) | ||
262 | goto blocked; | ||
263 | mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, | ||
264 | res->lockname.name); | ||
265 | |||
266 | target = list_entry(res->converting.next, struct dlm_lock, list); | ||
267 | if (target->ml.convert_type == LKM_IVMODE) { | ||
268 | mlog(ML_ERROR, "%.*s: converting a lock with no " | ||
269 | "convert_type!\n", res->lockname.len, res->lockname.name); | ||
270 | BUG(); | ||
271 | } | ||
272 | head = &res->granted; | ||
273 | list_for_each(iter, head) { | ||
274 | lock = list_entry(iter, struct dlm_lock, list); | ||
275 | if (lock==target) | ||
276 | continue; | ||
277 | if (!dlm_lock_compatible(lock->ml.type, | ||
278 | target->ml.convert_type)) { | ||
279 | can_grant = 0; | ||
280 | /* queue the BAST if not already */ | ||
281 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
282 | __dlm_lockres_reserve_ast(res); | ||
283 | dlm_queue_bast(dlm, lock); | ||
284 | } | ||
285 | /* update the highest_blocked if needed */ | ||
286 | if (lock->ml.highest_blocked < target->ml.convert_type) | ||
287 | lock->ml.highest_blocked = | ||
288 | target->ml.convert_type; | ||
289 | } | ||
290 | } | ||
291 | head = &res->converting; | ||
292 | list_for_each(iter, head) { | ||
293 | lock = list_entry(iter, struct dlm_lock, list); | ||
294 | if (lock==target) | ||
295 | continue; | ||
296 | if (!dlm_lock_compatible(lock->ml.type, | ||
297 | target->ml.convert_type)) { | ||
298 | can_grant = 0; | ||
299 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
300 | __dlm_lockres_reserve_ast(res); | ||
301 | dlm_queue_bast(dlm, lock); | ||
302 | } | ||
303 | if (lock->ml.highest_blocked < target->ml.convert_type) | ||
304 | lock->ml.highest_blocked = | ||
305 | target->ml.convert_type; | ||
306 | } | ||
307 | } | ||
308 | |||
309 | /* we can convert the lock */ | ||
310 | if (can_grant) { | ||
311 | spin_lock(&target->spinlock); | ||
312 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); | ||
313 | |||
314 | mlog(0, "calling ast for converting lock: %.*s, have: %d, " | ||
315 | "granting: %d, node: %u\n", res->lockname.len, | ||
316 | res->lockname.name, target->ml.type, | ||
317 | target->ml.convert_type, target->ml.node); | ||
318 | |||
319 | target->ml.type = target->ml.convert_type; | ||
320 | target->ml.convert_type = LKM_IVMODE; | ||
321 | list_del_init(&target->list); | ||
322 | list_add_tail(&target->list, &res->granted); | ||
323 | |||
324 | BUG_ON(!target->lksb); | ||
325 | target->lksb->status = DLM_NORMAL; | ||
326 | |||
327 | spin_unlock(&target->spinlock); | ||
328 | |||
329 | __dlm_lockres_reserve_ast(res); | ||
330 | dlm_queue_ast(dlm, target); | ||
331 | /* go back and check for more */ | ||
332 | goto converting; | ||
333 | } | ||
334 | |||
335 | blocked: | ||
336 | if (list_empty(&res->blocked)) | ||
337 | goto leave; | ||
338 | target = list_entry(res->blocked.next, struct dlm_lock, list); | ||
339 | |||
340 | head = &res->granted; | ||
341 | list_for_each(iter, head) { | ||
342 | lock = list_entry(iter, struct dlm_lock, list); | ||
343 | if (lock==target) | ||
344 | continue; | ||
345 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { | ||
346 | can_grant = 0; | ||
347 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
348 | __dlm_lockres_reserve_ast(res); | ||
349 | dlm_queue_bast(dlm, lock); | ||
350 | } | ||
351 | if (lock->ml.highest_blocked < target->ml.type) | ||
352 | lock->ml.highest_blocked = target->ml.type; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | head = &res->converting; | ||
357 | list_for_each(iter, head) { | ||
358 | lock = list_entry(iter, struct dlm_lock, list); | ||
359 | if (lock==target) | ||
360 | continue; | ||
361 | if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) { | ||
362 | can_grant = 0; | ||
363 | if (lock->ml.highest_blocked == LKM_IVMODE) { | ||
364 | __dlm_lockres_reserve_ast(res); | ||
365 | dlm_queue_bast(dlm, lock); | ||
366 | } | ||
367 | if (lock->ml.highest_blocked < target->ml.type) | ||
368 | lock->ml.highest_blocked = target->ml.type; | ||
369 | } | ||
370 | } | ||
371 | |||
372 | /* we can grant the blocked lock (only | ||
373 | * possible if converting list empty) */ | ||
374 | if (can_grant) { | ||
375 | spin_lock(&target->spinlock); | ||
376 | BUG_ON(target->ml.highest_blocked != LKM_IVMODE); | ||
377 | |||
378 | mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " | ||
379 | "node: %u\n", res->lockname.len, res->lockname.name, | ||
380 | target->ml.type, target->ml.node); | ||
381 | |||
382 | // target->ml.type is already correct | ||
383 | list_del_init(&target->list); | ||
384 | list_add_tail(&target->list, &res->granted); | ||
385 | |||
386 | BUG_ON(!target->lksb); | ||
387 | target->lksb->status = DLM_NORMAL; | ||
388 | |||
389 | spin_unlock(&target->spinlock); | ||
390 | |||
391 | __dlm_lockres_reserve_ast(res); | ||
392 | dlm_queue_ast(dlm, target); | ||
393 | /* go back and check for more */ | ||
394 | goto converting; | ||
395 | } | ||
396 | |||
397 | leave: | ||
398 | return; | ||
399 | } | ||
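The four compatibility scans in dlm_shuffle_lists() differ only in the queue walked and the mode tested against. A refactoring sketch that factors out the shared walk (illustrative only, not the committed code; the caller still holds res->spinlock, as the function above requires):

static int example_scan_for_conflicts(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct list_head *head,
				      struct dlm_lock *target, int mode)
{
	struct list_head *iter;
	struct dlm_lock *lock;
	int can_grant = 1;

	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (dlm_lock_compatible(lock->ml.type, mode))
			continue;
		can_grant = 0;
		/* queue the BAST if not already pending */
		if (lock->ml.highest_blocked == LKM_IVMODE) {
			__dlm_lockres_reserve_ast(res);
			dlm_queue_bast(dlm, lock);
		}
		if (lock->ml.highest_blocked < mode)
			lock->ml.highest_blocked = mode;
	}
	return can_grant;
}

The converting pass would call it with target->ml.convert_type against the granted and converting queues; the blocked pass with target->ml.type against the same two queues.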
400 | |||
401 | /* must have NO locks held when calling this with res != NULL */ | ||
402 | void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | ||
403 | { | ||
404 | mlog_entry("dlm=%p, res=%p\n", dlm, res); | ||
405 | if (res) { | ||
406 | spin_lock(&dlm->spinlock); | ||
407 | spin_lock(&res->spinlock); | ||
408 | __dlm_dirty_lockres(dlm, res); | ||
409 | spin_unlock(&res->spinlock); | ||
410 | spin_unlock(&dlm->spinlock); | ||
411 | } | ||
412 | wake_up(&dlm->dlm_thread_wq); | ||
413 | } | ||
414 | |||
415 | void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | ||
416 | { | ||
417 | mlog_entry("dlm=%p, res=%p\n", dlm, res); | ||
418 | |||
419 | assert_spin_locked(&dlm->spinlock); | ||
420 | assert_spin_locked(&res->spinlock); | ||
421 | |||
422 | /* don't shuffle secondary queues */ | ||
423 | if ((res->owner == dlm->node_num) && | ||
424 | !(res->state & DLM_LOCK_RES_DIRTY)) { | ||
425 | list_add_tail(&res->dirty, &dlm->dirty_list); | ||
426 | res->state |= DLM_LOCK_RES_DIRTY; | ||
427 | } | ||
428 | } | ||
429 | |||
430 | |||
431 | /* Launch the DLM thread for this domain */ | ||
432 | int dlm_launch_thread(struct dlm_ctxt *dlm) | ||
433 | { | ||
434 | mlog(0, "starting dlm thread...\n"); | ||
435 | |||
436 | dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); | ||
437 | if (IS_ERR(dlm->dlm_thread_task)) { | ||
438 | mlog_errno(PTR_ERR(dlm->dlm_thread_task)); | ||
439 | dlm->dlm_thread_task = NULL; | ||
440 | return -EINVAL; | ||
441 | } | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | void dlm_complete_thread(struct dlm_ctxt *dlm) | ||
447 | { | ||
448 | if (dlm->dlm_thread_task) { | ||
449 | mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); | ||
450 | kthread_stop(dlm->dlm_thread_task); | ||
451 | dlm->dlm_thread_task = NULL; | ||
452 | } | ||
453 | } | ||
454 | |||
455 | static int dlm_dirty_list_empty(struct dlm_ctxt *dlm) | ||
456 | { | ||
457 | int empty; | ||
458 | |||
459 | spin_lock(&dlm->spinlock); | ||
460 | empty = list_empty(&dlm->dirty_list); | ||
461 | spin_unlock(&dlm->spinlock); | ||
462 | |||
463 | return empty; | ||
464 | } | ||
465 | |||
466 | static void dlm_flush_asts(struct dlm_ctxt *dlm) | ||
467 | { | ||
468 | int ret; | ||
469 | struct dlm_lock *lock; | ||
470 | struct dlm_lock_resource *res; | ||
471 | u8 hi; | ||
472 | |||
473 | spin_lock(&dlm->ast_lock); | ||
474 | while (!list_empty(&dlm->pending_asts)) { | ||
475 | lock = list_entry(dlm->pending_asts.next, | ||
476 | struct dlm_lock, ast_list); | ||
477 | /* get an extra ref on lock */ | ||
478 | dlm_lock_get(lock); | ||
479 | res = lock->lockres; | ||
480 | mlog(0, "delivering an ast for this lockres\n"); | ||
481 | |||
482 | BUG_ON(!lock->ast_pending); | ||
483 | |||
484 | /* remove from list (including ref) */ | ||
485 | list_del_init(&lock->ast_list); | ||
486 | dlm_lock_put(lock); | ||
487 | spin_unlock(&dlm->ast_lock); | ||
488 | |||
489 | if (lock->ml.node != dlm->node_num) { | ||
490 | ret = dlm_do_remote_ast(dlm, res, lock); | ||
491 | if (ret < 0) | ||
492 | mlog_errno(ret); | ||
493 | } else | ||
494 | dlm_do_local_ast(dlm, res, lock); | ||
495 | |||
496 | spin_lock(&dlm->ast_lock); | ||
497 | |||
498 | /* possible that another ast was queued while | ||
499 | * we were delivering the last one */ | ||
500 | if (!list_empty(&lock->ast_list)) { | ||
501 | mlog(0, "aha another ast got queued while " | ||
502 | "we were finishing the last one. will " | ||
503 | "keep the ast_pending flag set.\n"); | ||
504 | } else | ||
505 | lock->ast_pending = 0; | ||
506 | |||
507 | /* drop the extra ref. | ||
508 | * this may drop it completely. */ | ||
509 | dlm_lock_put(lock); | ||
510 | dlm_lockres_release_ast(dlm, res); | ||
511 | } | ||
512 | |||
513 | while (!list_empty(&dlm->pending_basts)) { | ||
514 | lock = list_entry(dlm->pending_basts.next, | ||
515 | struct dlm_lock, bast_list); | ||
516 | /* get an extra ref on lock */ | ||
517 | dlm_lock_get(lock); | ||
518 | res = lock->lockres; | ||
519 | |||
520 | BUG_ON(!lock->bast_pending); | ||
521 | |||
522 | /* get the highest blocked lock, and reset */ | ||
523 | spin_lock(&lock->spinlock); | ||
524 | BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE); | ||
525 | hi = lock->ml.highest_blocked; | ||
526 | lock->ml.highest_blocked = LKM_IVMODE; | ||
527 | spin_unlock(&lock->spinlock); | ||
528 | |||
529 | /* remove from list (including ref) */ | ||
530 | list_del_init(&lock->bast_list); | ||
531 | dlm_lock_put(lock); | ||
532 | spin_unlock(&dlm->ast_lock); | ||
533 | |||
534 | mlog(0, "delivering a bast for this lockres " | ||
535 | "(blocked = %d\n", hi); | ||
536 | |||
537 | if (lock->ml.node != dlm->node_num) { | ||
538 | ret = dlm_send_proxy_bast(dlm, res, lock, hi); | ||
539 | if (ret < 0) | ||
540 | mlog_errno(ret); | ||
541 | } else | ||
542 | dlm_do_local_bast(dlm, res, lock, hi); | ||
543 | |||
544 | spin_lock(&dlm->ast_lock); | ||
545 | |||
546 | /* possible that another bast was queued while | ||
547 | * we were delivering the last one */ | ||
548 | if (!list_empty(&lock->bast_list)) { | ||
549 | mlog(0, "aha another bast got queued while " | ||
550 | "we were finishing the last one. will " | ||
551 | "keep the bast_pending flag set.\n"); | ||
552 | } else | ||
553 | lock->bast_pending = 0; | ||
554 | |||
555 | /* drop the extra ref. | ||
556 | * this may drop it completely. */ | ||
557 | dlm_lock_put(lock); | ||
558 | dlm_lockres_release_ast(dlm, res); | ||
559 | } | ||
560 | wake_up(&dlm->ast_wq); | ||
561 | spin_unlock(&dlm->ast_lock); | ||
562 | } | ||
563 | |||
564 | |||
565 | #define DLM_THREAD_TIMEOUT_MS (4 * 1000) | ||
566 | #define DLM_THREAD_MAX_DIRTY 100 | ||
567 | #define DLM_THREAD_MAX_ASTS 10 | ||
568 | |||
569 | static int dlm_thread(void *data) | ||
570 | { | ||
571 | struct dlm_lock_resource *res; | ||
572 | struct dlm_ctxt *dlm = data; | ||
573 | unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS); | ||
574 | |||
575 | mlog(0, "dlm thread running for %s...\n", dlm->name); | ||
576 | |||
577 | while (!kthread_should_stop()) { | ||
578 | int n = DLM_THREAD_MAX_DIRTY; | ||
579 | |||
580 | /* dlm_shutting_down is very point-in-time, but that | ||
581 | * doesn't matter as we'll just loop back around if we | ||
582 | * get false on the leading edge of a state | ||
583 | * transition. */ | ||
584 | dlm_run_purge_list(dlm, dlm_shutting_down(dlm)); | ||
585 | |||
586 | /* We really don't want to hold dlm->spinlock while | ||
587 | * calling dlm_shuffle_lists on each lockres that | ||
588 | * needs to have its queues adjusted and AST/BASTs | ||
589 | * run. So let's pull each entry off the dirty_list | ||
590 | * and drop dlm->spinlock ASAP. Once off the list, | ||
591 | * res->spinlock needs to be taken again to protect | ||
592 | * the queues while calling dlm_shuffle_lists. */ | ||
593 | spin_lock(&dlm->spinlock); | ||
594 | while (!list_empty(&dlm->dirty_list)) { | ||
595 | int delay = 0; | ||
596 | res = list_entry(dlm->dirty_list.next, | ||
597 | struct dlm_lock_resource, dirty); | ||
598 | |||
599 | /* peel a lockres off, remove it from the list, | ||
600 | * unset the dirty flag and drop the dlm lock */ | ||
601 | BUG_ON(!res); | ||
602 | dlm_lockres_get(res); | ||
603 | |||
604 | spin_lock(&res->spinlock); | ||
605 | res->state &= ~DLM_LOCK_RES_DIRTY; | ||
606 | list_del_init(&res->dirty); | ||
607 | spin_unlock(&res->spinlock); | ||
608 | spin_unlock(&dlm->spinlock); | ||
609 | |||
610 | /* lockres can be re-dirtied/re-added to the | ||
611 | * dirty_list in this gap, but that is ok */ | ||
612 | |||
613 | spin_lock(&res->spinlock); | ||
614 | if (res->owner != dlm->node_num) { | ||
615 | __dlm_print_one_lock_resource(res); | ||
616 | mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n", | ||
617 | res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no", | ||
618 | res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no", | ||
619 | res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no", | ||
620 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | ||
621 | } | ||
622 | BUG_ON(res->owner != dlm->node_num); | ||
623 | |||
624 | /* it is now ok to move lockreses in these states | ||
625 | * to the dirty list, assuming that they will only be | ||
626 | * dirty for a short while. */ | ||
627 | if (res->state & (DLM_LOCK_RES_IN_PROGRESS | | ||
628 | DLM_LOCK_RES_MIGRATING | | ||
629 | DLM_LOCK_RES_RECOVERING)) { | ||
630 | /* move it to the tail and keep going */ | ||
631 | spin_unlock(&res->spinlock); | ||
632 | mlog(0, "delaying list shuffling for in-" | ||
633 | "progress lockres %.*s, state=%d\n", | ||
634 | res->lockname.len, res->lockname.name, | ||
635 | res->state); | ||
636 | delay = 1; | ||
637 | goto in_progress; | ||
638 | } | ||
639 | |||
640 | /* at this point the lockres is not migrating/ | ||
641 | * recovering/in-progress. we have the lockres | ||
642 | * spinlock and do NOT have the dlm lock. | ||
643 | * safe to reserve/queue asts and run the lists. */ | ||
644 | |||
645 | mlog(0, "calling dlm_shuffle_lists with dlm=%p, " | ||
646 | "res=%p\n", dlm, res); | ||
647 | |||
648 | /* called while holding lockres lock */ | ||
649 | dlm_shuffle_lists(dlm, res); | ||
650 | spin_unlock(&res->spinlock); | ||
651 | |||
652 | dlm_lockres_calc_usage(dlm, res); | ||
653 | |||
654 | in_progress: | ||
655 | |||
656 | spin_lock(&dlm->spinlock); | ||
657 | /* if the lock was in-progress, stick | ||
658 | * it on the back of the list */ | ||
659 | if (delay) { | ||
660 | spin_lock(&res->spinlock); | ||
661 | list_add_tail(&res->dirty, &dlm->dirty_list); | ||
662 | res->state |= DLM_LOCK_RES_DIRTY; | ||
663 | spin_unlock(&res->spinlock); | ||
664 | } | ||
665 | dlm_lockres_put(res); | ||
666 | |||
667 | /* unlikely, but we may need to give time to | ||
668 | * other tasks */ | ||
669 | if (!--n) { | ||
670 | mlog(0, "throttling dlm_thread\n"); | ||
671 | break; | ||
672 | } | ||
673 | } | ||
674 | |||
675 | spin_unlock(&dlm->spinlock); | ||
676 | dlm_flush_asts(dlm); | ||
677 | |||
678 | /* yield and continue right away if there is more work to do */ | ||
679 | if (!n) { | ||
680 | yield(); | ||
681 | continue; | ||
682 | } | ||
683 | |||
684 | wait_event_interruptible_timeout(dlm->dlm_thread_wq, | ||
685 | !dlm_dirty_list_empty(dlm) || | ||
686 | kthread_should_stop(), | ||
687 | timeout); | ||
688 | } | ||
689 | |||
690 | mlog(0, "quitting DLM thread\n"); | ||
691 | return 0; | ||
692 | } | ||
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c new file mode 100644 index 000000000000..cec2ce1cd318 --- /dev/null +++ b/fs/ocfs2/dlm/dlmunlock.c | |||
@@ -0,0 +1,672 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmunlock.c | ||
5 | * | ||
6 | * underlying calls for unlocking locks | ||
7 | * | ||
8 | * Copyright (C) 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #include <linux/module.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/utsname.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/sysctl.h> | ||
36 | #include <linux/random.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <linux/socket.h> | ||
39 | #include <linux/inet.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/delay.h> | ||
42 | |||
43 | #include "cluster/heartbeat.h" | ||
44 | #include "cluster/nodemanager.h" | ||
45 | #include "cluster/tcp.h" | ||
46 | |||
47 | #include "dlmapi.h" | ||
48 | #include "dlmcommon.h" | ||
49 | |||
50 | #define MLOG_MASK_PREFIX ML_DLM | ||
51 | #include "cluster/masklog.h" | ||
52 | |||
53 | #define DLM_UNLOCK_FREE_LOCK 0x00000001 | ||
54 | #define DLM_UNLOCK_CALL_AST 0x00000002 | ||
55 | #define DLM_UNLOCK_REMOVE_LOCK 0x00000004 | ||
56 | #define DLM_UNLOCK_REGRANT_LOCK 0x00000008 | ||
57 | #define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010 | ||
58 | |||
59 | |||
60 | static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, | ||
61 | struct dlm_lock_resource *res, | ||
62 | struct dlm_lock *lock, | ||
63 | struct dlm_lockstatus *lksb, | ||
64 | int *actions); | ||
65 | static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, | ||
66 | struct dlm_lock_resource *res, | ||
67 | struct dlm_lock *lock, | ||
68 | struct dlm_lockstatus *lksb, | ||
69 | int *actions); | ||
70 | |||
71 | static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | ||
72 | struct dlm_lock_resource *res, | ||
73 | struct dlm_lock *lock, | ||
74 | struct dlm_lockstatus *lksb, | ||
75 | int flags, | ||
76 | u8 owner); | ||
77 | |||
78 | |||
79 | /* | ||
80 | * according to the spec: | ||
81 | * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf | ||
82 | * | ||
83 | * flags & LKM_CANCEL != 0: must be converting or blocked | ||
84 | * flags & LKM_CANCEL == 0: must be granted | ||
85 | * | ||
86 | * So to unlock a converting lock, you must first cancel the | ||
87 | * convert (passing LKM_CANCEL in flags), then call the unlock | ||
88 | * again (with no LKM_CANCEL in flags). | ||
89 | */ | ||
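So fully releasing a lock whose convert is still queued takes two passes through dlmunlock(). A hedged sketch of that calling sequence; dlm, lksb, my_unlock_ast and priv are the caller's own (hypothetical) context:

	/* pass 1: cancel the in-flight convert */
	status = dlmunlock(dlm, lksb, LKM_CANCEL, my_unlock_ast, priv);
	/* DLM_CANCELGRANT in lksb->status means the convert was
	 * granted before the cancel landed -- not a failure */

	/* pass 2: the lock is back on the granted queue; release it
	 * for real, with no LKM_CANCEL in the flags */
	status = dlmunlock(dlm, lksb, LKM_VALBLK, my_unlock_ast, priv);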
90 | |||
91 | |||
92 | /* | ||
93 | * locking: | ||
94 | * caller needs: none | ||
95 | * taken: res->spinlock and lock->spinlock taken and dropped | ||
96 | * held on exit: none | ||
97 | * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network | ||
98 | * all callers should have taken an extra ref on lock coming in | ||
99 | */ | ||
100 | static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, | ||
101 | struct dlm_lock_resource *res, | ||
102 | struct dlm_lock *lock, | ||
103 | struct dlm_lockstatus *lksb, | ||
104 | int flags, int *call_ast, | ||
105 | int master_node) | ||
106 | { | ||
107 | enum dlm_status status; | ||
108 | int actions = 0; | ||
109 | int in_use; | ||
110 | u8 owner; | ||
111 | |||
112 | mlog(0, "master_node = %d, valblk = %d\n", master_node, | ||
113 | flags & LKM_VALBLK); | ||
114 | |||
115 | if (master_node) | ||
116 | BUG_ON(res->owner != dlm->node_num); | ||
117 | else | ||
118 | BUG_ON(res->owner == dlm->node_num); | ||
119 | |||
120 | spin_lock(&dlm->spinlock); | ||
121 | /* We want to be sure that we're not freeing a lock | ||
122 | * that still has AST's pending... */ | ||
123 | in_use = !list_empty(&lock->ast_list); | ||
124 | spin_unlock(&dlm->spinlock); | ||
125 | if (in_use) { | ||
126 | mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " | ||
127 | "while waiting for an ast!", res->lockname.len, | ||
128 | res->lockname.name); | ||
129 | return DLM_BADPARAM; | ||
130 | } | ||
131 | |||
132 | spin_lock(&res->spinlock); | ||
133 | if (res->state & DLM_LOCK_RES_IN_PROGRESS) { | ||
134 | if (master_node) { | ||
135 | mlog(ML_ERROR, "lockres in progress!\n"); | ||
136 | spin_unlock(&res->spinlock); | ||
137 | return DLM_FORWARD; | ||
138 | } | ||
139 | /* ok for this to sleep if not in a network handler */ | ||
140 | __dlm_wait_on_lockres(res); | ||
141 | res->state |= DLM_LOCK_RES_IN_PROGRESS; | ||
142 | } | ||
143 | spin_lock(&lock->spinlock); | ||
144 | |||
145 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
146 | status = DLM_RECOVERING; | ||
147 | goto leave; | ||
148 | } | ||
149 | |||
150 | |||
151 | /* see above for what the spec says about | ||
152 | * LKM_CANCEL and the lock queue state */ | ||
153 | if (flags & LKM_CANCEL) | ||
154 | status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions); | ||
155 | else | ||
156 | status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); | ||
157 | |||
158 | if (status != DLM_NORMAL) | ||
159 | goto leave; | ||
160 | |||
161 | /* By now this has been masked out of cancel requests. */ | ||
162 | if (flags & LKM_VALBLK) { | ||
163 | /* make the final update to the lvb */ | ||
164 | if (master_node) | ||
165 | memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); | ||
166 | else | ||
167 | flags |= LKM_PUT_LVB; /* let the send function | ||
168 | * handle it. */ | ||
169 | } | ||
170 | |||
171 | if (!master_node) { | ||
172 | owner = res->owner; | ||
173 | /* drop locks and send message */ | ||
174 | if (flags & LKM_CANCEL) | ||
175 | lock->cancel_pending = 1; | ||
176 | else | ||
177 | lock->unlock_pending = 1; | ||
178 | spin_unlock(&lock->spinlock); | ||
179 | spin_unlock(&res->spinlock); | ||
180 | status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, | ||
181 | flags, owner); | ||
182 | spin_lock(&res->spinlock); | ||
183 | spin_lock(&lock->spinlock); | ||
184 | /* if the master told us the lock was already granted, | ||
185 | * let the ast handle all of these actions */ | ||
186 | if (status == DLM_NORMAL && | ||
187 | lksb->status == DLM_CANCELGRANT) { | ||
188 | actions &= ~(DLM_UNLOCK_REMOVE_LOCK| | ||
189 | DLM_UNLOCK_REGRANT_LOCK| | ||
190 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); | ||
191 | } | ||
192 | if (flags & LKM_CANCEL) | ||
193 | lock->cancel_pending = 0; | ||
194 | else | ||
195 | lock->unlock_pending = 0; | ||
196 | |||
197 | } | ||
198 | |||
199 | /* get an extra ref on lock. if we are just switching | ||
200 | * lists here, we don't want the lock to go away. */ | ||
201 | dlm_lock_get(lock); | ||
202 | |||
203 | if (actions & DLM_UNLOCK_REMOVE_LOCK) { | ||
204 | list_del_init(&lock->list); | ||
205 | dlm_lock_put(lock); | ||
206 | } | ||
207 | if (actions & DLM_UNLOCK_REGRANT_LOCK) { | ||
208 | dlm_lock_get(lock); | ||
209 | list_add_tail(&lock->list, &res->granted); | ||
210 | } | ||
211 | if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) { | ||
212 | mlog(0, "clearing convert_type at %smaster node\n", | ||
213 | master_node ? "" : "non-"); | ||
214 | lock->ml.convert_type = LKM_IVMODE; | ||
215 | } | ||
216 | |||
217 | /* remove the extra ref on lock */ | ||
218 | dlm_lock_put(lock); | ||
219 | |||
220 | leave: | ||
221 | res->state &= ~DLM_LOCK_RES_IN_PROGRESS; | ||
222 | if (!dlm_lock_on_list(&res->converting, lock)) | ||
223 | BUG_ON(lock->ml.convert_type != LKM_IVMODE); | ||
224 | else | ||
225 | BUG_ON(lock->ml.convert_type == LKM_IVMODE); | ||
226 | spin_unlock(&lock->spinlock); | ||
227 | spin_unlock(&res->spinlock); | ||
228 | wake_up(&res->wq); | ||
229 | |||
230 | /* let the caller's final dlm_lock_put handle the actual kfree */ | ||
231 | if (actions & DLM_UNLOCK_FREE_LOCK) { | ||
232 | /* this should always be coupled with list removal */ | ||
233 | BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); | ||
234 | mlog(0, "lock %"MLFu64" should be gone now! refs=%d\n", | ||
235 | lock->ml.cookie, atomic_read(&lock->lock_refs.refcount)-1); | ||
236 | dlm_lock_put(lock); | ||
237 | } | ||
238 | if (actions & DLM_UNLOCK_CALL_AST) | ||
239 | *call_ast = 1; | ||
240 | |||
241 | /* if cancel or unlock succeeded, lvb work is done */ | ||
242 | if (status == DLM_NORMAL) | ||
243 | lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); | ||
244 | |||
245 | return status; | ||
246 | } | ||
247 | |||
248 | void dlm_commit_pending_unlock(struct dlm_lock_resource *res, | ||
249 | struct dlm_lock *lock) | ||
250 | { | ||
251 | /* leave DLM_LKSB_PUT_LVB on the lksb so any final | ||
252 | * update of the lvb will be sent to the new master */ | ||
253 | list_del_init(&lock->list); | ||
254 | } | ||
255 | |||
256 | void dlm_commit_pending_cancel(struct dlm_lock_resource *res, | ||
257 | struct dlm_lock *lock) | ||
258 | { | ||
259 | list_del_init(&lock->list); | ||
260 | list_add_tail(&lock->list, &res->granted); | ||
261 | lock->ml.convert_type = LKM_IVMODE; | ||
262 | } | ||
263 | |||
264 | |||
265 | static inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm, | ||
266 | struct dlm_lock_resource *res, | ||
267 | struct dlm_lock *lock, | ||
268 | struct dlm_lockstatus *lksb, | ||
269 | int flags, | ||
270 | int *call_ast) | ||
271 | { | ||
272 | return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1); | ||
273 | } | ||
274 | |||
275 | static inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm, | ||
276 | struct dlm_lock_resource *res, | ||
277 | struct dlm_lock *lock, | ||
278 | struct dlm_lockstatus *lksb, | ||
279 | int flags, int *call_ast) | ||
280 | { | ||
281 | return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * locking: | ||
286 | * caller needs: none | ||
287 | * taken: none | ||
288 | * held on exit: none | ||
289 | * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network | ||
290 | */ | ||
291 | static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, | ||
292 | struct dlm_lock_resource *res, | ||
293 | struct dlm_lock *lock, | ||
294 | struct dlm_lockstatus *lksb, | ||
295 | int flags, | ||
296 | u8 owner) | ||
297 | { | ||
298 | struct dlm_unlock_lock unlock; | ||
299 | int tmpret; | ||
300 | enum dlm_status ret; | ||
301 | int status = 0; | ||
302 | struct kvec vec[2]; | ||
303 | size_t veclen = 1; | ||
304 | |||
305 | mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); | ||
306 | |||
307 | memset(&unlock, 0, sizeof(unlock)); | ||
308 | unlock.node_idx = dlm->node_num; | ||
309 | unlock.flags = cpu_to_be32(flags); | ||
310 | unlock.cookie = lock->ml.cookie; | ||
311 | unlock.namelen = res->lockname.len; | ||
312 | memcpy(unlock.name, res->lockname.name, unlock.namelen); | ||
313 | |||
314 | vec[0].iov_len = sizeof(struct dlm_unlock_lock); | ||
315 | vec[0].iov_base = &unlock; | ||
316 | |||
317 | if (flags & LKM_PUT_LVB) { | ||
318 | /* extra data to send if we are updating lvb */ | ||
319 | vec[1].iov_len = DLM_LVB_LEN; | ||
320 | vec[1].iov_base = lock->lksb->lvb; | ||
321 | veclen++; | ||
322 | } | ||
323 | |||
324 | tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key, | ||
325 | vec, veclen, owner, &status); | ||
326 | if (tmpret >= 0) { | ||
327 | /* successfully sent and received */ | ||
328 | if (status == DLM_CANCELGRANT) | ||
329 | ret = DLM_NORMAL; | ||
330 | else if (status == DLM_FORWARD) { | ||
331 | mlog(0, "master was in-progress. retry\n"); | ||
332 | ret = DLM_FORWARD; | ||
333 | } else | ||
334 | ret = status; | ||
335 | lksb->status = status; | ||
336 | } else { | ||
337 | mlog_errno(tmpret); | ||
338 | if (dlm_is_host_down(tmpret)) { | ||
339 | /* NOTE: this seems strange, but it is what we want. | ||
340 | * when the master goes down during a cancel or | ||
341 | * unlock, the recovery code completes the operation | ||
342 | * as if the master had not died, then passes the | ||
343 | * updated state to the recovery master. this thread | ||
344 | * just needs to finish out the operation and call | ||
345 | * the unlockast. */ | ||
346 | ret = DLM_NORMAL; | ||
347 | } else { | ||
348 | /* something bad. this will BUG in ocfs2 */ | ||
349 | ret = dlm_err_to_dlm_status(tmpret); | ||
350 | } | ||
351 | lksb->status = ret; | ||
352 | } | ||
353 | |||
354 | return ret; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * locking: | ||
359 | * caller needs: none | ||
360 | * taken: takes and drops res->spinlock | ||
361 | * held on exit: none | ||
362 | * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, | ||
363 | * return value from dlmunlock_master | ||
364 | */ | ||
365 | int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data) | ||
366 | { | ||
367 | struct dlm_ctxt *dlm = data; | ||
368 | struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; | ||
369 | struct dlm_lock_resource *res = NULL; | ||
370 | struct list_head *iter; | ||
371 | struct dlm_lock *lock = NULL; | ||
372 | enum dlm_status status = DLM_NORMAL; | ||
373 | int found = 0, i; | ||
374 | struct dlm_lockstatus *lksb = NULL; | ||
375 | int ignore; | ||
376 | u32 flags; | ||
377 | struct list_head *queue; | ||
378 | |||
379 | flags = be32_to_cpu(unlock->flags); | ||
380 | |||
381 | if (flags & LKM_GET_LVB) { | ||
382 | mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n"); | ||
383 | return DLM_BADARGS; | ||
384 | } | ||
385 | |||
386 | if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) { | ||
387 | mlog(ML_ERROR, "bad args! cannot modify lvb on a CANCEL " | ||
388 | "request!\n"); | ||
389 | return DLM_BADARGS; | ||
390 | } | ||
391 | |||
392 | if (unlock->namelen > DLM_LOCKID_NAME_MAX) { | ||
393 | mlog(ML_ERROR, "Invalid name length in unlock handler!\n"); | ||
394 | return DLM_IVBUFLEN; | ||
395 | } | ||
396 | |||
397 | if (!dlm_grab(dlm)) | ||
398 | return DLM_REJECTED; | ||
399 | |||
400 | mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), | ||
401 | "Domain %s not fully joined!\n", dlm->name); | ||
402 | |||
403 | mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none"); | ||
404 | |||
405 | res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen); | ||
406 | if (!res) { | ||
407 | /* We assume here that a missing lock resource simply means | ||
408 | * it was migrated away and destroyed before the other | ||
409 | * node could detect it. */ | ||
410 | mlog(0, "returning DLM_FORWARD -- res no longer exists\n"); | ||
411 | status = DLM_FORWARD; | ||
412 | goto not_found; | ||
413 | } | ||
414 | |||
415 | queue = &res->granted; | ||
416 | found = 0; | ||
417 | spin_lock(&res->spinlock); | ||
418 | if (res->state & DLM_LOCK_RES_RECOVERING) { | ||
419 | spin_unlock(&res->spinlock); | ||
420 | mlog(0, "returning DLM_RECOVERING\n"); | ||
421 | status = DLM_RECOVERING; | ||
422 | goto leave; | ||
423 | } | ||
424 | |||
425 | if (res->state & DLM_LOCK_RES_MIGRATING) { | ||
426 | spin_unlock(&res->spinlock); | ||
427 | mlog(0, "returning DLM_MIGRATING\n"); | ||
428 | status = DLM_MIGRATING; | ||
429 | goto leave; | ||
430 | } | ||
431 | |||
432 | if (res->owner != dlm->node_num) { | ||
433 | spin_unlock(&res->spinlock); | ||
434 | mlog(0, "returning DLM_FORWARD -- not master\n"); | ||
435 | status = DLM_FORWARD; | ||
436 | goto leave; | ||
437 | } | ||
438 | |||
439 | for (i = 0; i < 3; i++) { | ||
440 | list_for_each(iter, queue) { | ||
441 | lock = list_entry(iter, struct dlm_lock, list); | ||
442 | if (lock->ml.cookie == unlock->cookie && | ||
443 | lock->ml.node == unlock->node_idx) { | ||
444 | dlm_lock_get(lock); | ||
445 | found = 1; | ||
446 | break; | ||
447 | } | ||
448 | } | ||
449 | if (found) | ||
450 | break; | ||
451 | /* scan granted -> converting -> blocked queues */ | ||
452 | queue++; | ||
453 | } | ||
454 | spin_unlock(&res->spinlock); | ||
455 | if (!found) { | ||
456 | status = DLM_IVLOCKID; | ||
457 | goto not_found; | ||
458 | } | ||
459 | |||
460 | /* lock was found on queue */ | ||
461 | lksb = lock->lksb; | ||
462 | /* unlockast only called on originating node */ | ||
463 | if (flags & LKM_PUT_LVB) { | ||
464 | lksb->flags |= DLM_LKSB_PUT_LVB; | ||
465 | memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN); | ||
466 | } | ||
467 | |||
468 | /* if this is in-progress, propagate the DLM_FORWARD | ||
469 | * all the way back out */ | ||
470 | status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore); | ||
471 | if (status == DLM_FORWARD) | ||
472 | mlog(0, "lockres is in progress\n"); | ||
473 | |||
474 | if (flags & LKM_PUT_LVB) | ||
475 | lksb->flags &= ~DLM_LKSB_PUT_LVB; | ||
476 | |||
477 | dlm_lockres_calc_usage(dlm, res); | ||
478 | dlm_kick_thread(dlm, res); | ||
479 | |||
480 | not_found: | ||
481 | if (!found) | ||
482 | mlog(ML_ERROR, "failed to find lock to unlock! " | ||
483 | "cookie=%"MLFu64"\n", | ||
484 | unlock->cookie); | ||
485 | else { | ||
486 | /* send the lksb->status back to the other node */ | ||
487 | status = lksb->status; | ||
488 | dlm_lock_put(lock); | ||
489 | } | ||
490 | |||
491 | leave: | ||
492 | if (res) | ||
493 | dlm_lockres_put(res); | ||
494 | |||
495 | dlm_put(dlm); | ||
496 | |||
497 | return status; | ||
498 | } | ||
499 | |||
500 | |||
501 | static enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, | ||
502 | struct dlm_lock_resource *res, | ||
503 | struct dlm_lock *lock, | ||
504 | struct dlm_lockstatus *lksb, | ||
505 | int *actions) | ||
506 | { | ||
507 | enum dlm_status status; | ||
508 | |||
509 | if (dlm_lock_on_list(&res->blocked, lock)) { | ||
510 | /* cancel this outright */ | ||
511 | lksb->status = DLM_NORMAL; | ||
512 | status = DLM_NORMAL; | ||
513 | *actions = (DLM_UNLOCK_CALL_AST | | ||
514 | DLM_UNLOCK_REMOVE_LOCK); | ||
515 | } else if (dlm_lock_on_list(&res->converting, lock)) { | ||
516 | /* cancel the request, put back on granted */ | ||
517 | lksb->status = DLM_NORMAL; | ||
518 | status = DLM_NORMAL; | ||
519 | *actions = (DLM_UNLOCK_CALL_AST | | ||
520 | DLM_UNLOCK_REMOVE_LOCK | | ||
521 | DLM_UNLOCK_REGRANT_LOCK | | ||
522 | DLM_UNLOCK_CLEAR_CONVERT_TYPE); | ||
523 | } else if (dlm_lock_on_list(&res->granted, lock)) { | ||
524 | /* too late, already granted. DLM_CANCELGRANT */ | ||
525 | lksb->status = DLM_CANCELGRANT; | ||
526 | status = DLM_NORMAL; | ||
527 | *actions = DLM_UNLOCK_CALL_AST; | ||
528 | } else { | ||
529 | mlog(ML_ERROR, "lock to cancel is not on any list!\n"); | ||
530 | lksb->status = DLM_IVLOCKID; | ||
531 | status = DLM_IVLOCKID; | ||
532 | *actions = 0; | ||
533 | } | ||
534 | return status; | ||
535 | } | ||
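Summarizing the branches above: the queue a lock occupies when the cancel arrives fully determines the outcome.

	queue        lksb->status      actions
	-----        ------------      -------
	blocked      DLM_NORMAL        remove lock, call ast
	converting   DLM_NORMAL        remove, regrant, clear convert_type, call ast
	granted      DLM_CANCELGRANT   call ast only (too late to cancel)
	(none)       DLM_IVLOCKID      none; the error is returned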
536 | |||
537 | static enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, | ||
538 | struct dlm_lock_resource *res, | ||
539 | struct dlm_lock *lock, | ||
540 | struct dlm_lockstatus *lksb, | ||
541 | int *actions) | ||
542 | { | ||
543 | enum dlm_status status; | ||
544 | |||
545 | /* unlock request */ | ||
546 | if (!dlm_lock_on_list(&res->granted, lock)) { | ||
547 | lksb->status = DLM_DENIED; | ||
548 | status = DLM_DENIED; | ||
549 | dlm_error(status); | ||
550 | *actions = 0; | ||
551 | } else { | ||
552 | /* unlock granted lock */ | ||
553 | lksb->status = DLM_NORMAL; | ||
554 | status = DLM_NORMAL; | ||
555 | *actions = (DLM_UNLOCK_FREE_LOCK | | ||
556 | DLM_UNLOCK_CALL_AST | | ||
557 | DLM_UNLOCK_REMOVE_LOCK); | ||
558 | } | ||
559 | return status; | ||
560 | } | ||
561 | |||
562 | /* there seems to be no point in doing this async | ||
563 | * since (even for the remote case) there is really | ||
564 | * no work to queue up... so just do it and fire the | ||
565 | * unlockast by hand when done... */ | ||
566 | enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb, | ||
567 | int flags, dlm_astunlockfunc_t *unlockast, void *data) | ||
568 | { | ||
569 | enum dlm_status status; | ||
570 | struct dlm_lock_resource *res; | ||
571 | struct dlm_lock *lock = NULL; | ||
572 | int call_ast, is_master; | ||
573 | |||
574 | mlog_entry_void(); | ||
575 | |||
576 | if (!lksb) { | ||
577 | dlm_error(DLM_BADARGS); | ||
578 | return DLM_BADARGS; | ||
579 | } | ||
580 | |||
581 | if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) { | ||
582 | dlm_error(DLM_BADPARAM); | ||
583 | return DLM_BADPARAM; | ||
584 | } | ||
585 | |||
586 | if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) { | ||
587 | mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n"); | ||
588 | flags &= ~LKM_VALBLK; | ||
589 | } | ||
590 | |||
591 | if (!lksb->lockid || !lksb->lockid->lockres) { | ||
592 | dlm_error(DLM_BADPARAM); | ||
593 | return DLM_BADPARAM; | ||
594 | } | ||
595 | |||
596 | lock = lksb->lockid; | ||
597 | BUG_ON(!lock); | ||
598 | dlm_lock_get(lock); | ||
599 | |||
600 | res = lock->lockres; | ||
601 | BUG_ON(!res); | ||
602 | dlm_lockres_get(res); | ||
603 | retry: | ||
604 | call_ast = 0; | ||
605 | /* need to retry up here because owner may have changed */ | ||
606 | mlog(0, "lock=%p res=%p\n", lock, res); | ||
607 | |||
608 | spin_lock(&res->spinlock); | ||
609 | is_master = (res->owner == dlm->node_num); | ||
610 | spin_unlock(&res->spinlock); | ||
611 | |||
612 | if (is_master) { | ||
613 | status = dlmunlock_master(dlm, res, lock, lksb, flags, | ||
614 | &call_ast); | ||
615 | mlog(0, "done calling dlmunlock_master: returned %d, " | ||
616 | "call_ast is %d\n", status, call_ast); | ||
617 | } else { | ||
618 | status = dlmunlock_remote(dlm, res, lock, lksb, flags, | ||
619 | &call_ast); | ||
620 | mlog(0, "done calling dlmunlock_remote: returned %d, " | ||
621 | "call_ast is %d\n", status, call_ast); | ||
622 | } | ||
623 | |||
624 | if (status == DLM_RECOVERING || | ||
625 | status == DLM_MIGRATING || | ||
626 | status == DLM_FORWARD) { | ||
627 | /* We want to go away for a tiny bit to allow recovery | ||
628 | * / migration to complete on this resource. I don't | ||
629 | * know of any wait queue we could sleep on as this | ||
630 | * may be happening on another node. Perhaps the | ||
631 | * proper solution is to queue up requests on the | ||
632 | * other end? */ | ||
633 | |||
634 | /* do we want to yield(); ?? */ | ||
635 | msleep(50); | ||
636 | |||
637 | mlog(0, "retrying unlock due to pending recovery/" | ||
638 | "migration/in-progress\n"); | ||
639 | goto retry; | ||
640 | } | ||
641 | |||
642 | if (call_ast) { | ||
643 | mlog(0, "calling unlockast(%p, %d)\n", data, lksb->status); | ||
644 | if (is_master) { | ||
645 | /* it is possible that there is one last bast | ||
646 | * pending. make sure it is flushed, then | ||
647 | * call the unlockast. | ||
648 | * not an issue if this lock is mastered remotely, | ||
649 | * since this lock has been removed from the | ||
650 | * lockres queues and cannot be found. */ | ||
651 | dlm_kick_thread(dlm, NULL); | ||
652 | wait_event(dlm->ast_wq, | ||
653 | dlm_lock_basts_flushed(dlm, lock)); | ||
654 | } | ||
655 | (*unlockast)(data, lksb->status); | ||
656 | } | ||
657 | |||
658 | if (status == DLM_NORMAL) { | ||
659 | mlog(0, "kicking the thread\n"); | ||
660 | dlm_kick_thread(dlm, res); | ||
661 | } else | ||
662 | dlm_error(status); | ||
663 | |||
664 | dlm_lockres_calc_usage(dlm, res); | ||
665 | dlm_lockres_put(res); | ||
666 | dlm_lock_put(lock); | ||
667 | |||
668 | mlog(0, "returning status=%d!\n", status); | ||
669 | return status; | ||
670 | } | ||
671 | EXPORT_SYMBOL_GPL(dlmunlock); | ||
672 | |||
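As the comment above dlmunlock() notes, the unlockast is fired by hand before the call returns, so callers need no async plumbing today. A hedged caller-side sketch anyway, using a completion so it stays correct even if that changes (example_unlock_ast and done are hypothetical, not part of this code):

	/* needs <linux/completion.h> */
	static void example_unlock_ast(void *astarg, enum dlm_status st)
	{
		complete((struct completion *)astarg);
	}

	/* ... in the caller ... */
	struct completion done;
	enum dlm_status status;

	init_completion(&done);
	status = dlmunlock(dlm, lksb, LKM_VALBLK, example_unlock_ast, &done);
	if (status == DLM_NORMAL)
		wait_for_completion(&done);	/* returns immediately today */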
diff --git a/fs/ocfs2/dlm/dlmver.c b/fs/ocfs2/dlm/dlmver.c new file mode 100644 index 000000000000..7ef2653f8f41 --- /dev/null +++ b/fs/ocfs2/dlm/dlmver.c | |||
@@ -0,0 +1,42 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmver.c | ||
5 | * | ||
6 | * version string | ||
7 | * | ||
8 | * Copyright (C) 2002, 2005 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/kernel.h> | ||
28 | |||
29 | #include "dlmver.h" | ||
30 | |||
31 | #define DLM_BUILD_VERSION "1.3.3" | ||
32 | |||
33 | #define VERSION_STR "OCFS2 DLM " DLM_BUILD_VERSION | ||
34 | |||
35 | void dlm_print_version(void) | ||
36 | { | ||
37 | printk(KERN_INFO "%s\n", VERSION_STR); | ||
38 | } | ||
39 | |||
40 | MODULE_DESCRIPTION(VERSION_STR); | ||
41 | |||
42 | MODULE_VERSION(DLM_BUILD_VERSION); | ||
diff --git a/fs/ocfs2/dlm/dlmver.h b/fs/ocfs2/dlm/dlmver.h new file mode 100644 index 000000000000..f674aee77a16 --- /dev/null +++ b/fs/ocfs2/dlm/dlmver.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * dlmver.h | ||
5 | * | ||
6 | * Function prototypes | ||
7 | * | ||
8 | * Copyright (C) 2005 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #ifndef DLM_VER_H | ||
27 | #define DLM_VER_H | ||
28 | |||
29 | void dlm_print_version(void); | ||
30 | |||
31 | #endif /* DLM_VER_H */ | ||
diff --git a/fs/ocfs2/dlm/userdlm.c b/fs/ocfs2/dlm/userdlm.c new file mode 100644 index 000000000000..e1fdd288796e --- /dev/null +++ b/fs/ocfs2/dlm/userdlm.c | |||
@@ -0,0 +1,658 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * userdlm.c | ||
5 | * | ||
6 | * Code which implements the kernel side of a minimal userspace | ||
7 | * interface to our DLM. | ||
8 | * | ||
9 | * Many of the functions here are pared down versions of dlmglue.c | ||
10 | * functions. | ||
11 | * | ||
12 | * Copyright (C) 2003, 2004 Oracle. All rights reserved. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or | ||
15 | * modify it under the terms of the GNU General Public | ||
16 | * License as published by the Free Software Foundation; either | ||
17 | * version 2 of the License, or (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
22 | * General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public | ||
25 | * License along with this program; if not, write to the | ||
26 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 021110-1307, USA. | ||
28 | */ | ||
29 | |||
30 | #include <asm/signal.h> | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/fs.h> | ||
34 | #include <linux/types.h> | ||
35 | #include <linux/crc32.h> | ||
36 | |||
37 | |||
38 | #include "cluster/nodemanager.h" | ||
39 | #include "cluster/heartbeat.h" | ||
40 | #include "cluster/tcp.h" | ||
41 | |||
42 | #include "dlmapi.h" | ||
43 | |||
44 | #include "userdlm.h" | ||
45 | |||
46 | #define MLOG_MASK_PREFIX ML_DLMFS | ||
47 | #include "cluster/masklog.h" | ||
48 | |||
49 | static inline int user_check_wait_flag(struct user_lock_res *lockres, | ||
50 | int flag) | ||
51 | { | ||
52 | int ret; | ||
53 | |||
54 | spin_lock(&lockres->l_lock); | ||
55 | ret = lockres->l_flags & flag; | ||
56 | spin_unlock(&lockres->l_lock); | ||
57 | |||
58 | return ret; | ||
59 | } | ||
60 | |||
61 | static inline void user_wait_on_busy_lock(struct user_lock_res *lockres) | ||
62 | |||
63 | { | ||
64 | wait_event(lockres->l_event, | ||
65 | !user_check_wait_flag(lockres, USER_LOCK_BUSY)); | ||
66 | } | ||
67 | |||
68 | static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres) | ||
69 | |||
70 | { | ||
71 | wait_event(lockres->l_event, | ||
72 | !user_check_wait_flag(lockres, USER_LOCK_BLOCKED)); | ||
73 | } | ||
74 | |||
75 | /* I heart container_of... */ | ||
76 | static inline struct dlm_ctxt * | ||
77 | dlm_ctxt_from_user_lockres(struct user_lock_res *lockres) | ||
78 | { | ||
79 | struct dlmfs_inode_private *ip; | ||
80 | |||
81 | ip = container_of(lockres, | ||
82 | struct dlmfs_inode_private, | ||
83 | ip_lockres); | ||
84 | return ip->ip_dlm; | ||
85 | } | ||
86 | |||
87 | static struct inode * | ||
88 | user_dlm_inode_from_user_lockres(struct user_lock_res *lockres) | ||
89 | { | ||
90 | struct dlmfs_inode_private *ip; | ||
91 | |||
92 | ip = container_of(lockres, | ||
93 | struct dlmfs_inode_private, | ||
94 | ip_lockres); | ||
95 | return &ip->ip_vfs_inode; | ||
96 | } | ||
97 | |||
98 | static inline void user_recover_from_dlm_error(struct user_lock_res *lockres) | ||
99 | { | ||
100 | spin_lock(&lockres->l_lock); | ||
101 | lockres->l_flags &= ~USER_LOCK_BUSY; | ||
102 | spin_unlock(&lockres->l_lock); | ||
103 | } | ||
104 | |||
105 | #define user_log_dlm_error(_func, _stat, _lockres) do { \ | ||
106 | mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on " \ | ||
107 | "resource %s: %s\n", dlm_errname(_stat), _func, \ | ||
108 | _lockres->l_name, dlm_errmsg(_stat)); \ | ||
109 | } while (0) | ||
110 | |||
111 | /* WARNING: This function lives in a world where the only three lock | ||
112 | * levels are EX, PR, and NL. It *will* have to be adjusted when more | ||
113 | * lock types are added. */ | ||
114 | static inline int user_highest_compat_lock_level(int level) | ||
115 | { | ||
116 | int new_level = LKM_EXMODE; | ||
117 | |||
118 | if (level == LKM_EXMODE) | ||
119 | new_level = LKM_NLMODE; | ||
120 | else if (level == LKM_PRMODE) | ||
121 | new_level = LKM_PRMODE; | ||
122 | return new_level; | ||
123 | } | ||
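Read against the usual EX/PR/NL compatibility matrix, this is simply "the highest mode that does not conflict with the blocking request":

	blocking request    highest compatible level
	LKM_EXMODE          LKM_NLMODE
	LKM_PRMODE          LKM_PRMODE
	LKM_NLMODE          LKM_EXMODE  (NL conflicts with nothing)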
124 | |||
125 | static void user_ast(void *opaque) | ||
126 | { | ||
127 | struct user_lock_res *lockres = opaque; | ||
128 | struct dlm_lockstatus *lksb; | ||
129 | |||
130 | mlog(0, "AST fired for lockres %s\n", lockres->l_name); | ||
131 | |||
132 | spin_lock(&lockres->l_lock); | ||
133 | |||
134 | lksb = &(lockres->l_lksb); | ||
135 | if (lksb->status != DLM_NORMAL) { | ||
136 | mlog(ML_ERROR, "lksb status value of %u on lockres %s\n", | ||
137 | lksb->status, lockres->l_name); | ||
138 | spin_unlock(&lockres->l_lock); | ||
139 | return; | ||
140 | } | ||
141 | |||
142 | /* we're downconverting. */ | ||
143 | if (lockres->l_requested < lockres->l_level) { | ||
144 | if (lockres->l_requested <= | ||
145 | user_highest_compat_lock_level(lockres->l_blocking)) { | ||
146 | lockres->l_blocking = LKM_NLMODE; | ||
147 | lockres->l_flags &= ~USER_LOCK_BLOCKED; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | lockres->l_level = lockres->l_requested; | ||
152 | lockres->l_requested = LKM_IVMODE; | ||
153 | lockres->l_flags |= USER_LOCK_ATTACHED; | ||
154 | lockres->l_flags &= ~USER_LOCK_BUSY; | ||
155 | |||
156 | spin_unlock(&lockres->l_lock); | ||
157 | |||
158 | wake_up(&lockres->l_event); | ||
159 | } | ||
160 | |||
161 | static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) | ||
162 | { | ||
163 | struct inode *inode; | ||
164 | inode = user_dlm_inode_from_user_lockres(lockres); | ||
165 | if (!igrab(inode)) | ||
166 | BUG(); | ||
167 | } | ||
168 | |||
169 | static void user_dlm_unblock_lock(void *opaque); | ||
170 | |||
171 | static void __user_dlm_queue_lockres(struct user_lock_res *lockres) | ||
172 | { | ||
173 | if (!(lockres->l_flags & USER_LOCK_QUEUED)) { | ||
174 | user_dlm_grab_inode_ref(lockres); | ||
175 | |||
176 | INIT_WORK(&lockres->l_work, user_dlm_unblock_lock, | ||
177 | lockres); | ||
178 | |||
179 | queue_work(user_dlm_worker, &lockres->l_work); | ||
180 | lockres->l_flags |= USER_LOCK_QUEUED; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres) | ||
185 | { | ||
186 | int queue = 0; | ||
187 | |||
188 | if (!(lockres->l_flags & USER_LOCK_BLOCKED)) | ||
189 | return; | ||
190 | |||
191 | switch (lockres->l_blocking) { | ||
192 | case LKM_EXMODE: | ||
193 | if (!lockres->l_ex_holders && !lockres->l_ro_holders) | ||
194 | queue = 1; | ||
195 | break; | ||
196 | case LKM_PRMODE: | ||
197 | if (!lockres->l_ex_holders) | ||
198 | queue = 1; | ||
199 | break; | ||
200 | default: | ||
201 | BUG(); | ||
202 | } | ||
203 | |||
204 | if (queue) | ||
205 | __user_dlm_queue_lockres(lockres); | ||
206 | } | ||
207 | |||
208 | static void user_bast(void *opaque, int level) | ||
209 | { | ||
210 | struct user_lock_res *lockres = opaque; | ||
211 | |||
212 | mlog(0, "Blocking AST fired for lockres %s. Blocking level %d\n", | ||
213 | lockres->l_name, level); | ||
214 | |||
215 | spin_lock(&lockres->l_lock); | ||
216 | lockres->l_flags |= USER_LOCK_BLOCKED; | ||
217 | if (level > lockres->l_blocking) | ||
218 | lockres->l_blocking = level; | ||
219 | |||
220 | __user_dlm_queue_lockres(lockres); | ||
221 | spin_unlock(&lockres->l_lock); | ||
222 | |||
223 | wake_up(&lockres->l_event); | ||
224 | } | ||
225 | |||
226 | static void user_unlock_ast(void *opaque, enum dlm_status status) | ||
227 | { | ||
228 | struct user_lock_res *lockres = opaque; | ||
229 | |||
230 | mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name); | ||
231 | |||
232 | if (status != DLM_NORMAL) | ||
233 | mlog(ML_ERROR, "Dlm returns status %d\n", status); | ||
234 | |||
235 | spin_lock(&lockres->l_lock); | ||
236 | if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) | ||
237 | lockres->l_level = LKM_IVMODE; | ||
238 | else { | ||
239 | lockres->l_requested = LKM_IVMODE; /* cancel an | ||
240 | * upconvert | ||
241 | * request. */ | ||
242 | lockres->l_flags &= ~USER_LOCK_IN_CANCEL; | ||
243 | /* we want the unblock thread to look at it again | ||
244 | * now. */ | ||
245 | __user_dlm_queue_lockres(lockres); | ||
246 | } | ||
247 | |||
248 | lockres->l_flags &= ~USER_LOCK_BUSY; | ||
249 | spin_unlock(&lockres->l_lock); | ||
250 | |||
251 | wake_up(&lockres->l_event); | ||
252 | } | ||
253 | |||
254 | static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres) | ||
255 | { | ||
256 | struct inode *inode; | ||
257 | inode = user_dlm_inode_from_user_lockres(lockres); | ||
258 | iput(inode); | ||
259 | } | ||
260 | |||
261 | static void user_dlm_unblock_lock(void *opaque) | ||
262 | { | ||
263 | int new_level, status; | ||
264 | struct user_lock_res *lockres = (struct user_lock_res *) opaque; | ||
265 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); | ||
266 | |||
267 | mlog(0, "processing lockres %s\n", lockres->l_name); | ||
268 | |||
269 | spin_lock(&lockres->l_lock); | ||
270 | |||
271 | BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED)); | ||
272 | BUG_ON(!(lockres->l_flags & USER_LOCK_QUEUED)); | ||
273 | |||
274 | /* notice that we don't clear USER_LOCK_BLOCKED here. That's | ||
275 | * for user_ast to do. */ | ||
276 | lockres->l_flags &= ~USER_LOCK_QUEUED; | ||
277 | |||
278 | if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) { | ||
279 | mlog(0, "lock is in teardown so we do nothing\n"); | ||
280 | spin_unlock(&lockres->l_lock); | ||
281 | goto drop_ref; | ||
282 | } | ||
283 | |||
284 | if (lockres->l_flags & USER_LOCK_BUSY) { | ||
285 | mlog(0, "BUSY flag detected...\n"); | ||
286 | if (lockres->l_flags & USER_LOCK_IN_CANCEL) { | ||
287 | spin_unlock(&lockres->l_lock); | ||
288 | goto drop_ref; | ||
289 | } | ||
290 | |||
291 | lockres->l_flags |= USER_LOCK_IN_CANCEL; | ||
292 | spin_unlock(&lockres->l_lock); | ||
293 | |||
294 | status = dlmunlock(dlm, | ||
295 | &lockres->l_lksb, | ||
296 | LKM_CANCEL, | ||
297 | user_unlock_ast, | ||
298 | lockres); | ||
299 | if (status == DLM_CANCELGRANT) { | ||
300 | /* If we got this, then the ast was fired | ||
301 | * before we could cancel. We clean up our | ||
302 | * state, and restart the function. */ | ||
303 | spin_lock(&lockres->l_lock); | ||
304 | lockres->l_flags &= ~USER_LOCK_IN_CANCEL; | ||
305 | spin_unlock(&lockres->l_lock); | ||
306 | } else if (status != DLM_NORMAL) | ||
307 | user_log_dlm_error("dlmunlock", status, lockres); | ||
308 | goto drop_ref; | ||
309 | } | ||
310 | |||
311 | /* If there are still incompat holders, we can exit safely | ||
312 | * without worrying about re-queueing this lock as that will | ||
313 | * happen on the last call to user_cluster_unlock. */ | ||
314 | if ((lockres->l_blocking == LKM_EXMODE) | ||
315 | && (lockres->l_ex_holders || lockres->l_ro_holders)) { | ||
316 | spin_unlock(&lockres->l_lock); | ||
317 | mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n", | ||
318 | lockres->l_ro_holders, lockres->l_ex_holders); | ||
319 | goto drop_ref; | ||
320 | } | ||
321 | |||
322 | if ((lockres->l_blocking == LKM_PRMODE) | ||
323 | && lockres->l_ex_holders) { | ||
324 | spin_unlock(&lockres->l_lock); | ||
325 | mlog(0, "can't downconvert for pr: ex = %u\n", | ||
326 | lockres->l_ex_holders); | ||
327 | goto drop_ref; | ||
328 | } | ||
329 | |||
330 | /* yay, we can downconvert now. */ | ||
331 | new_level = user_highest_compat_lock_level(lockres->l_blocking); | ||
332 | lockres->l_requested = new_level; | ||
333 | lockres->l_flags |= USER_LOCK_BUSY; | ||
334 | mlog(0, "Downconvert lock from %d to %d\n", | ||
335 | lockres->l_level, new_level); | ||
336 | spin_unlock(&lockres->l_lock); | ||
337 | |||
338 | /* need lock downconvert request now... */ | ||
339 | status = dlmlock(dlm, | ||
340 | new_level, | ||
341 | &lockres->l_lksb, | ||
342 | LKM_CONVERT|LKM_VALBLK, | ||
343 | lockres->l_name, | ||
344 | user_ast, | ||
345 | lockres, | ||
346 | user_bast); | ||
347 | if (status != DLM_NORMAL) { | ||
348 | user_log_dlm_error("dlmlock", status, lockres); | ||
349 | user_recover_from_dlm_error(lockres); | ||
350 | } | ||
351 | |||
352 | drop_ref: | ||
353 | user_dlm_drop_inode_ref(lockres); | ||
354 | } | ||
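Condensing the worker's decision order above:

	1. teardown in progress        -> do nothing
	2. busy, cancel already sent   -> wait for that cancel to finish
	3. busy, no cancel yet         -> fire LKM_CANCEL (DLM_CANCELGRANT
	                                  only means the ast won the race)
	4. incompatible holders remain -> bail; the final unlock requeues us
	5. otherwise                   -> downconvert to the highest level
	                                  compatible with the blocking request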
355 | |||
356 | static inline void user_dlm_inc_holders(struct user_lock_res *lockres, | ||
357 | int level) | ||
358 | { | ||
359 | switch(level) { | ||
360 | case LKM_EXMODE: | ||
361 | lockres->l_ex_holders++; | ||
362 | break; | ||
363 | case LKM_PRMODE: | ||
364 | lockres->l_ro_holders++; | ||
365 | break; | ||
366 | default: | ||
367 | BUG(); | ||
368 | } | ||
369 | } | ||
370 | |||
371 | /* predict what lock level we'll be dropping down to on behalf | ||
372 | * of another node, and return true if the currently wanted | ||
373 | * level will be compatible with it. */ | ||
374 | static inline int | ||
375 | user_may_continue_on_blocked_lock(struct user_lock_res *lockres, | ||
376 | int wanted) | ||
377 | { | ||
378 | BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED)); | ||
379 | |||
380 | return wanted <= user_highest_compat_lock_level(lockres->l_blocking); | ||
381 | } | ||
382 | |||
383 | int user_dlm_cluster_lock(struct user_lock_res *lockres, | ||
384 | int level, | ||
385 | int lkm_flags) | ||
386 | { | ||
387 | int status, local_flags; | ||
388 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); | ||
389 | |||
390 | if (level != LKM_EXMODE && | ||
391 | level != LKM_PRMODE) { | ||
392 | mlog(ML_ERROR, "lockres %s: invalid request!\n", | ||
393 | lockres->l_name); | ||
394 | status = -EINVAL; | ||
395 | goto bail; | ||
396 | } | ||
397 | |||
398 | mlog(0, "lockres %s: asking for %s lock, passed flags = 0x%x\n", | ||
399 | lockres->l_name, | ||
400 | (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE", | ||
401 | lkm_flags); | ||
402 | |||
403 | again: | ||
404 | if (signal_pending(current)) { | ||
405 | status = -ERESTARTSYS; | ||
406 | goto bail; | ||
407 | } | ||
408 | |||
409 | spin_lock(&lockres->l_lock); | ||
410 | |||
411 | /* We only compare against the currently granted level | ||
412 | * here. If the lock is blocked waiting on a downconvert, | ||
413 | * we'll get caught below. */ | ||
414 | if ((lockres->l_flags & USER_LOCK_BUSY) && | ||
415 | (level > lockres->l_level)) { | ||
416 | /* is someone sitting in dlm_lock? If so, wait on | ||
417 | * them. */ | ||
418 | spin_unlock(&lockres->l_lock); | ||
419 | |||
420 | user_wait_on_busy_lock(lockres); | ||
421 | goto again; | ||
422 | } | ||
423 | |||
424 | if ((lockres->l_flags & USER_LOCK_BLOCKED) && | ||
425 | (!user_may_continue_on_blocked_lock(lockres, level))) { | ||
426 | /* the lock is currently blocked on behalf of | ||
427 | * another node */ | ||
428 | spin_unlock(&lockres->l_lock); | ||
429 | |||
430 | user_wait_on_blocked_lock(lockres); | ||
431 | goto again; | ||
432 | } | ||
433 | |||
434 | if (level > lockres->l_level) { | ||
435 | local_flags = lkm_flags | LKM_VALBLK; | ||
436 | if (lockres->l_level != LKM_IVMODE) | ||
437 | local_flags |= LKM_CONVERT; | ||
438 | |||
439 | lockres->l_requested = level; | ||
440 | lockres->l_flags |= USER_LOCK_BUSY; | ||
441 | spin_unlock(&lockres->l_lock); | ||
442 | |||
443 | BUG_ON(level == LKM_IVMODE); | ||
444 | BUG_ON(level == LKM_NLMODE); | ||
445 | |||
446 | mlog(0, "lock %s, get lock from %d to level = %d\n", | ||
447 | lockres->l_name, lockres->l_level, level); | ||
448 | |||
449 | /* call dlm_lock to upgrade lock now */ | ||
450 | status = dlmlock(dlm, | ||
451 | level, | ||
452 | &lockres->l_lksb, | ||
453 | local_flags, | ||
454 | lockres->l_name, | ||
455 | user_ast, | ||
456 | lockres, | ||
457 | user_bast); | ||
458 | if (status != DLM_NORMAL) { | ||
459 | if ((lkm_flags & LKM_NOQUEUE) && | ||
460 | (status == DLM_NOTQUEUED)) | ||
461 | status = -EAGAIN; | ||
462 | else { | ||
463 | user_log_dlm_error("dlmlock", status, lockres); | ||
464 | status = -EINVAL; | ||
465 | } | ||
466 | user_recover_from_dlm_error(lockres); | ||
467 | goto bail; | ||
468 | } | ||
469 | |||
470 | mlog(0, "lock %s, successfull return from dlmlock\n", | ||
471 | lockres->l_name); | ||
472 | |||
473 | user_wait_on_busy_lock(lockres); | ||
474 | goto again; | ||
475 | } | ||
476 | |||
477 | user_dlm_inc_holders(lockres, level); | ||
478 | spin_unlock(&lockres->l_lock); | ||
479 | |||
480 | mlog(0, "lockres %s: Got %s lock!\n", lockres->l_name, | ||
481 | (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE"); | ||
482 | |||
483 | status = 0; | ||
484 | bail: | ||
485 | return status; | ||
486 | } | ||
487 | |||
488 | static inline void user_dlm_dec_holders(struct user_lock_res *lockres, | ||
489 | int level) | ||
490 | { | ||
491 | switch(level) { | ||
492 | case LKM_EXMODE: | ||
493 | BUG_ON(!lockres->l_ex_holders); | ||
494 | lockres->l_ex_holders--; | ||
495 | break; | ||
496 | case LKM_PRMODE: | ||
497 | BUG_ON(!lockres->l_ro_holders); | ||
498 | lockres->l_ro_holders--; | ||
499 | break; | ||
500 | default: | ||
501 | BUG(); | ||
502 | } | ||
503 | } | ||
504 | |||
505 | void user_dlm_cluster_unlock(struct user_lock_res *lockres, | ||
506 | int level) | ||
507 | { | ||
508 | if (level != LKM_EXMODE && | ||
509 | level != LKM_PRMODE) { | ||
510 | mlog(ML_ERROR, "lockres %s: invalid request!\n", lockres->l_name); | ||
511 | return; | ||
512 | } | ||
513 | |||
514 | mlog(0, "lockres %s: dropping %s lock\n", lockres->l_name, | ||
515 | (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE"); | ||
516 | |||
517 | spin_lock(&lockres->l_lock); | ||
518 | user_dlm_dec_holders(lockres, level); | ||
519 | __user_dlm_cond_queue_lockres(lockres); | ||
520 | spin_unlock(&lockres->l_lock); | ||
521 | } | ||
522 | |||
523 | void user_dlm_write_lvb(struct inode *inode, | ||
524 | const char *val, | ||
525 | unsigned int len) | ||
526 | { | ||
527 | struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; | ||
528 | char *lvb = lockres->l_lksb.lvb; | ||
529 | |||
530 | BUG_ON(len > DLM_LVB_LEN); | ||
531 | |||
532 | spin_lock(&lockres->l_lock); | ||
533 | |||
534 | BUG_ON(lockres->l_level < LKM_EXMODE); | ||
535 | memcpy(lvb, val, len); | ||
536 | |||
537 | spin_unlock(&lockres->l_lock); | ||
538 | } | ||
539 | |||
540 | void user_dlm_read_lvb(struct inode *inode, | ||
541 | char *val, | ||
542 | unsigned int len) | ||
543 | { | ||
544 | struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; | ||
545 | char *lvb = lockres->l_lksb.lvb; | ||
546 | |||
547 | BUG_ON(len > DLM_LVB_LEN); | ||
548 | |||
549 | spin_lock(&lockres->l_lock); | ||
550 | |||
551 | BUG_ON(lockres->l_level < LKM_PRMODE); | ||
552 | memcpy(val, lvb, len); | ||
553 | |||
554 | spin_unlock(&lockres->l_lock); | ||
555 | } | ||
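Together, the cluster lock/unlock calls and the LVB helpers form a small cluster-wide critical-section API. A hedged usage sketch, assuming a dlmfs inode and abbreviated error handling:

	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char value[DLM_LVB_LEN];
	int status;

	status = user_dlm_cluster_lock(lockres, LKM_EXMODE, 0);
	if (status < 0)
		return status;	/* -ERESTARTSYS, -EAGAIN or -EINVAL */

	/* cluster-exclusive here; safe to publish through the lvb */
	/* ... fill value ... */
	user_dlm_write_lvb(inode, value, sizeof(value));

	user_dlm_cluster_unlock(lockres, LKM_EXMODE);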
556 | |||
557 | void user_dlm_lock_res_init(struct user_lock_res *lockres, | ||
558 | struct dentry *dentry) | ||
559 | { | ||
560 | memset(lockres, 0, sizeof(*lockres)); | ||
561 | |||
562 | spin_lock_init(&lockres->l_lock); | ||
563 | init_waitqueue_head(&lockres->l_event); | ||
564 | lockres->l_level = LKM_IVMODE; | ||
565 | lockres->l_requested = LKM_IVMODE; | ||
566 | lockres->l_blocking = LKM_IVMODE; | ||
567 | |||
568 | /* should have been checked before getting here. */ | ||
569 | BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN); | ||
570 | |||
571 | memcpy(lockres->l_name, | ||
572 | dentry->d_name.name, | ||
573 | dentry->d_name.len); | ||
574 | } | ||
575 | |||
576 | int user_dlm_destroy_lock(struct user_lock_res *lockres) | ||
577 | { | ||
578 | int status = -EBUSY; | ||
579 | struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres); | ||
580 | |||
581 | mlog(0, "asked to destroy %s\n", lockres->l_name); | ||
582 | |||
583 | spin_lock(&lockres->l_lock); | ||
584 | while (lockres->l_flags & USER_LOCK_BUSY) { | ||
585 | spin_unlock(&lockres->l_lock); | ||
586 | |||
587 | mlog(0, "lock %s is busy\n", lockres->l_name); | ||
588 | |||
589 | user_wait_on_busy_lock(lockres); | ||
590 | |||
591 | spin_lock(&lockres->l_lock); | ||
592 | } | ||
593 | |||
594 | if (lockres->l_ro_holders || lockres->l_ex_holders) { | ||
595 | spin_unlock(&lockres->l_lock); | ||
596 | mlog(0, "lock %s has holders\n", lockres->l_name); | ||
597 | goto bail; | ||
598 | } | ||
599 | |||
600 | status = 0; | ||
601 | if (!(lockres->l_flags & USER_LOCK_ATTACHED)) { | ||
602 | spin_unlock(&lockres->l_lock); | ||
603 | mlog(0, "lock %s is not attached\n", lockres->l_name); | ||
604 | goto bail; | ||
605 | } | ||
606 | |||
607 | lockres->l_flags &= ~USER_LOCK_ATTACHED; | ||
608 | lockres->l_flags |= USER_LOCK_BUSY; | ||
609 | lockres->l_flags |= USER_LOCK_IN_TEARDOWN; | ||
610 | spin_unlock(&lockres->l_lock); | ||
611 | |||
612 | mlog(0, "unlocking lockres %s\n", lockres->l_name); | ||
613 | status = dlmunlock(dlm, | ||
614 | &lockres->l_lksb, | ||
615 | LKM_VALBLK, | ||
616 | user_unlock_ast, | ||
617 | lockres); | ||
618 | if (status != DLM_NORMAL) { | ||
619 | user_log_dlm_error("dlmunlock", status, lockres); | ||
620 | status = -EINVAL; | ||
621 | goto bail; | ||
622 | } | ||
623 | |||
624 | user_wait_on_busy_lock(lockres); | ||
625 | |||
626 | status = 0; | ||
627 | bail: | ||
628 | return status; | ||
629 | } | ||
630 | |||
631 | struct dlm_ctxt *user_dlm_register_context(struct qstr *name) | ||
632 | { | ||
633 | struct dlm_ctxt *dlm; | ||
634 | u32 dlm_key; | ||
635 | char *domain; | ||
636 | |||
637 | domain = kmalloc(name->len + 1, GFP_KERNEL); | ||
638 | if (!domain) { | ||
639 | mlog_errno(-ENOMEM); | ||
640 | return ERR_PTR(-ENOMEM); | ||
641 | } | ||
642 | |||
643 | dlm_key = crc32_le(0, name->name, name->len); | ||
644 | |||
645 | snprintf(domain, name->len + 1, "%.*s", name->len, name->name); | ||
646 | |||
647 | dlm = dlm_register_domain(domain, dlm_key); | ||
648 | if (IS_ERR(dlm)) | ||
649 | mlog_errno(PTR_ERR(dlm)); | ||
650 | |||
651 | kfree(domain); | ||
652 | return dlm; | ||
653 | } | ||
654 | |||
655 | void user_dlm_unregister_context(struct dlm_ctxt *dlm) | ||
656 | { | ||
657 | dlm_unregister_domain(dlm); | ||
658 | } | ||
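A register/unregister pairing as dlmfs would use it at mount and unmount time; the dentry is assumed to exist, and this is a sketch rather than the actual dlmfs call site:

	struct dlm_ctxt *dlm;

	dlm = user_dlm_register_context(&dentry->d_name);
	if (IS_ERR(dlm))
		return PTR_ERR(dlm);

	/* ... stash dlm in dlmfs_inode_private->ip_dlm, take locks ... */

	user_dlm_unregister_context(dlm);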
diff --git a/fs/ocfs2/dlm/userdlm.h b/fs/ocfs2/dlm/userdlm.h new file mode 100644 index 000000000000..04178bc40b76 --- /dev/null +++ b/fs/ocfs2/dlm/userdlm.h | |||
@@ -0,0 +1,111 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * userdlm.h | ||
5 | * | ||
6 | * Userspace dlm defines | ||
7 | * | ||
8 | * Copyright (C) 2002, 2004 Oracle. All rights reserved. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public | ||
12 | * License as published by the Free Software Foundation; either | ||
13 | * version 2 of the License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public | ||
21 | * License along with this program; if not, write to the | ||
22 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 021110-1307, USA. | ||
24 | */ | ||
25 | |||
26 | |||
27 | #ifndef USERDLM_H | ||
28 | #define USERDLM_H | ||
29 | |||
30 | #include <linux/module.h> | ||
31 | #include <linux/fs.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/workqueue.h> | ||
34 | |||
35 | /* user_lock_res->l_flags flags. */ | ||
36 | #define USER_LOCK_ATTACHED (0x00000001) /* have we initialized | ||
37 | * the lvb */ | ||
38 | #define USER_LOCK_BUSY (0x00000002) /* we are currently in | ||
39 | * dlm_lock */ | ||
40 | #define USER_LOCK_BLOCKED (0x00000004) /* blocked waiting to | ||
41 | * downconvert*/ | ||
42 | #define USER_LOCK_IN_TEARDOWN (0x00000008) /* we're currently | ||
43 | * destroying this | ||
44 | * lock. */ | ||
45 | #define USER_LOCK_QUEUED (0x00000010) /* lock is on the | ||
46 | * workqueue */ | ||
47 | #define USER_LOCK_IN_CANCEL (0x00000020) | ||
48 | |||
49 | struct user_lock_res { | ||
50 | spinlock_t l_lock; | ||
51 | |||
52 | int l_flags; | ||
53 | |||
54 | #define USER_DLM_LOCK_ID_MAX_LEN 32 | ||
55 | char l_name[USER_DLM_LOCK_ID_MAX_LEN]; | ||
56 | int l_level; | ||
57 | unsigned int l_ro_holders; | ||
58 | unsigned int l_ex_holders; | ||
59 | struct dlm_lockstatus l_lksb; | ||
60 | |||
61 | int l_requested; | ||
62 | int l_blocking; | ||
63 | |||
64 | wait_queue_head_t l_event; | ||
65 | |||
66 | struct work_struct l_work; | ||
67 | }; | ||
68 | |||
69 | extern struct workqueue_struct *user_dlm_worker; | ||
70 | |||
71 | void user_dlm_lock_res_init(struct user_lock_res *lockres, | ||
72 | struct dentry *dentry); | ||
73 | int user_dlm_destroy_lock(struct user_lock_res *lockres); | ||
74 | int user_dlm_cluster_lock(struct user_lock_res *lockres, | ||
75 | int level, | ||
76 | int lkm_flags); | ||
77 | void user_dlm_cluster_unlock(struct user_lock_res *lockres, | ||
78 | int level); | ||
79 | void user_dlm_write_lvb(struct inode *inode, | ||
80 | const char *val, | ||
81 | unsigned int len); | ||
82 | void user_dlm_read_lvb(struct inode *inode, | ||
83 | char *val, | ||
84 | unsigned int len); | ||
85 | struct dlm_ctxt *user_dlm_register_context(struct qstr *name); | ||
86 | void user_dlm_unregister_context(struct dlm_ctxt *dlm); | ||
87 | |||
88 | struct dlmfs_inode_private { | ||
89 | struct dlm_ctxt *ip_dlm; | ||
90 | |||
91 | struct user_lock_res ip_lockres; /* unused for directories. */ | ||
92 | struct inode *ip_parent; | ||
93 | |||
94 | struct inode ip_vfs_inode; | ||
95 | }; | ||
96 | |||
97 | static inline struct dlmfs_inode_private * | ||
98 | DLMFS_I(struct inode *inode) | ||
99 | { | ||
100 | return container_of(inode, | ||
101 | struct dlmfs_inode_private, | ||
102 | ip_vfs_inode); | ||
103 | } | ||
104 | |||
105 | struct dlmfs_filp_private { | ||
106 | int fp_lock_level; | ||
107 | }; | ||
108 | |||
109 | #define DLMFS_MAGIC 0x76a9f425 | ||
110 | |||
111 | #endif /* USERDLM_H */ | ||