Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/Kconfig         |   30
-rw-r--r--  fs/dlm/Makefile        |   21
-rw-r--r--  fs/dlm/ast.c           |  167
-rw-r--r--  fs/dlm/ast.h           |   26
-rw-r--r--  fs/dlm/config.c        |  787
-rw-r--r--  fs/dlm/config.h        |   42
-rw-r--r--  fs/dlm/debug_fs.c      |  296
-rw-r--r--  fs/dlm/device.c        | 1093
-rw-r--r--  fs/dlm/dir.c           |  423
-rw-r--r--  fs/dlm/dir.h           |   30
-rw-r--r--  fs/dlm/dlm_internal.h  |  493
-rw-r--r--  fs/dlm/lock.c          | 3525
-rw-r--r--  fs/dlm/lock.h          |   50
-rw-r--r--  fs/dlm/lockspace.c     |  665
-rw-r--r--  fs/dlm/lockspace.h     |   24
-rw-r--r--  fs/dlm/lowcomms.c      | 1218
-rw-r--r--  fs/dlm/lowcomms.h      |   25
-rw-r--r--  fs/dlm/lvb_table.h     |   18
-rw-r--r--  fs/dlm/main.c          |   89
-rw-r--r--  fs/dlm/member.c        |  313
-rw-r--r--  fs/dlm/member.h        |   24
-rw-r--r--  fs/dlm/memory.c        |  106
-rw-r--r--  fs/dlm/memory.h        |   29
-rw-r--r--  fs/dlm/midcomms.c      |  140
-rw-r--r--  fs/dlm/midcomms.h      |   21
-rw-r--r--  fs/dlm/rcom.c          |  457
-rw-r--r--  fs/dlm/rcom.h          |   24
-rw-r--r--  fs/dlm/recover.c       |  762
-rw-r--r--  fs/dlm/recover.h       |   34
-rw-r--r--  fs/dlm/recoverd.c      |  285
-rw-r--r--  fs/dlm/recoverd.h      |   24
-rw-r--r--  fs/dlm/requestqueue.c  |  184
-rw-r--r--  fs/dlm/requestqueue.h  |   22
-rw-r--r--  fs/dlm/util.c          |  161
-rw-r--r--  fs/dlm/util.h          |   22
35 files changed, 11630 insertions, 0 deletions
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
new file mode 100644
index 000000000000..d01f735e6e06
--- /dev/null
+++ b/fs/dlm/Kconfig
@@ -0,0 +1,30 @@
1menu "Distributed Lock Manager"
2 depends on INET && EXPERIMENTAL
3
4config DLM
5 tristate "Distributed Lock Manager (DLM)"
6 depends on SYSFS
7 depends on IPV6 || IPV6=n
8 select IP_SCTP
9 select CONFIGFS_FS
10 help
11 A general purpose distributed lock manager for kernel or userspace
12 applications.
13
14config DLM_DEVICE
15 tristate "DLM device for userspace access"
16 depends on DLM
17 help
18 This module creates a misc device through which the dlm lockspace
19 and locking functions become available to userspace applications
20 (usually through the libdlm library).
21
22config DLM_DEBUG
23 bool "DLM debugging"
24 depends on DLM
25 help
26 Under the debugfs mount point, the name of each lockspace will
27 appear as a file in the "dlm" directory. The output is the
28 list of resource and locks the local node knows about.
29
30endmenu
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
new file mode 100644
index 000000000000..1e6232e7d8e5
--- /dev/null
+++ b/fs/dlm/Makefile
@@ -0,0 +1,21 @@
obj-$(CONFIG_DLM) +=		dlm.o
obj-$(CONFIG_DLM_DEVICE) +=	dlm_device.o

dlm-y :=			ast.o \
				config.o \
				dir.o \
				lock.o \
				lockspace.o \
				lowcomms.o \
				main.o \
				member.o \
				memory.o \
				midcomms.o \
				rcom.o \
				recover.o \
				recoverd.o \
				requestqueue.o \
				util.o
dlm-$(CONFIG_DLM_DEBUG) +=	debug_fs.o

dlm_device-y :=			device.o
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
new file mode 100644
index 000000000000..57bdf09b520a
--- /dev/null
+++ b/fs/dlm/ast.c
@@ -0,0 +1,167 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lock.h"
#include "ast.h"

#define WAKE_ASTS  0

static struct list_head		ast_queue;
static spinlock_t		ast_queue_lock;
static struct task_struct *	astd_task;
static unsigned long		astd_wakeflags;
static struct mutex		astd_running;


void dlm_del_ast(struct dlm_lkb *lkb)
{
	spin_lock(&ast_queue_lock);
	if (lkb->lkb_ast_type & (AST_COMP | AST_BAST))
		list_del(&lkb->lkb_astqueue);
	spin_unlock(&ast_queue_lock);
}

void dlm_add_ast(struct dlm_lkb *lkb, int type)
{
	spin_lock(&ast_queue_lock);
	if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
		kref_get(&lkb->lkb_ref);
		list_add_tail(&lkb->lkb_astqueue, &ast_queue);
	}
	lkb->lkb_ast_type |= type;
	spin_unlock(&ast_queue_lock);

	set_bit(WAKE_ASTS, &astd_wakeflags);
	wake_up_process(astd_task);
}
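
/*
 * A minimal usage sketch (illustrative, not part of the original commit):
 * the locking core queues callbacks with dlm_add_ast() and the astd
 * thread below delivers them from process context, e.g.
 *
 *	dlm_add_ast(lkb, AST_COMP);	(queue a completion ast, wake astd)
 *	dlm_add_ast(lkb, AST_BAST);	(queue a blocking ast)
 *
 * process_asts() then invokes lkb_astaddr/lkb_bastaddr and drops the
 * lkb reference taken here.
 */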

static void process_asts(void)
{
	struct dlm_ls *ls = NULL;
	struct dlm_rsb *r = NULL;
	struct dlm_lkb *lkb;
	void (*cast) (long param);
	void (*bast) (long param, int mode);
	int type = 0, found, bmode;

	for (;;) {
		found = 0;
		spin_lock(&ast_queue_lock);
		list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
			r = lkb->lkb_resource;
			ls = r->res_ls;

			if (dlm_locking_stopped(ls))
				continue;

			list_del(&lkb->lkb_astqueue);
			type = lkb->lkb_ast_type;
			lkb->lkb_ast_type = 0;
			found = 1;
			break;
		}
		spin_unlock(&ast_queue_lock);

		if (!found)
			break;

		cast = lkb->lkb_astaddr;
		bast = lkb->lkb_bastaddr;
		bmode = lkb->lkb_bastmode;

		if ((type & AST_COMP) && cast)
			cast(lkb->lkb_astparam);

		/* FIXME: Is it safe to look at lkb_grmode here
		   without doing a lock_rsb() ?
		   Look at other checks in v1 to avoid basts. */

		if ((type & AST_BAST) && bast)
			if (!dlm_modes_compat(lkb->lkb_grmode, bmode))
				bast(lkb->lkb_astparam, bmode);

		/* this removes the reference added by dlm_add_ast
		   and may result in the lkb being freed */
		dlm_put_lkb(lkb);

		schedule();
	}
}

static inline int no_asts(void)
{
	int ret;

	spin_lock(&ast_queue_lock);
	ret = list_empty(&ast_queue);
	spin_unlock(&ast_queue_lock);
	return ret;
}

static int dlm_astd(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(WAKE_ASTS, &astd_wakeflags))
			schedule();
		set_current_state(TASK_RUNNING);

		mutex_lock(&astd_running);
		if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
			process_asts();
		mutex_unlock(&astd_running);
	}
	return 0;
}

void dlm_astd_wake(void)
{
	if (!no_asts()) {
		set_bit(WAKE_ASTS, &astd_wakeflags);
		wake_up_process(astd_task);
	}
}

int dlm_astd_start(void)
{
	struct task_struct *p;
	int error = 0;

	INIT_LIST_HEAD(&ast_queue);
	spin_lock_init(&ast_queue_lock);
	mutex_init(&astd_running);

	p = kthread_run(dlm_astd, NULL, "dlm_astd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		astd_task = p;
	return error;
}

void dlm_astd_stop(void)
{
	kthread_stop(astd_task);
}

void dlm_astd_suspend(void)
{
	mutex_lock(&astd_running);
}

void dlm_astd_resume(void)
{
	mutex_unlock(&astd_running);
}
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
new file mode 100644
index 000000000000..6ee276c74c52
--- /dev/null
+++ b/fs/dlm/ast.h
@@ -0,0 +1,26 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__

void dlm_add_ast(struct dlm_lkb *lkb, int type);
void dlm_del_ast(struct dlm_lkb *lkb);

void dlm_astd_wake(void);
int dlm_astd_start(void);
void dlm_astd_stop(void);
void dlm_astd_suspend(void);
void dlm_astd_resume(void);

#endif
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
new file mode 100644
index 000000000000..87df9616415e
--- /dev/null
+++ b/fs/dlm/config.c
@@ -0,0 +1,787 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>
#include <net/sock.h>

#include "config.h"

/*
 * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
 * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
 * /config/dlm/<cluster>/comms/<comm>/nodeid
 * /config/dlm/<cluster>/comms/<comm>/local
 * /config/dlm/<cluster>/comms/<comm>/addr
 * The <cluster> level is useless, but I haven't figured out how to avoid it.
 */
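
/*
 * Illustration only (not part of the original commit): a hypothetical
 * userspace setup for a cluster named "alpha" with one node and one
 * lockspace "ls1", using the attributes defined below.  The names and
 * values are assumptions; "spaces", "comms" and "nodes" are the default
 * groups created by make_cluster()/make_space().
 *
 *	mkdir /config/dlm/alpha
 *	mkdir /config/dlm/alpha/spaces/ls1
 *	mkdir /config/dlm/alpha/spaces/ls1/nodes/1
 *	echo 1 > /config/dlm/alpha/spaces/ls1/nodes/1/nodeid
 *	echo 2 > /config/dlm/alpha/spaces/ls1/nodes/1/weight
 *	mkdir /config/dlm/alpha/comms/1
 *	echo 1 > /config/dlm/alpha/comms/1/nodeid
 *	echo 1 > /config/dlm/alpha/comms/1/local
 */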

static struct config_group *space_list;
static struct config_group *comm_list;
static struct comm *local_comm;

struct clusters;
struct cluster;
struct spaces;
struct space;
struct comms;
struct comm;
struct nodes;
struct node;

static struct config_group *make_cluster(struct config_group *, const char *);
static void drop_cluster(struct config_group *, struct config_item *);
static void release_cluster(struct config_item *);
static struct config_group *make_space(struct config_group *, const char *);
static void drop_space(struct config_group *, struct config_item *);
static void release_space(struct config_item *);
static struct config_item *make_comm(struct config_group *, const char *);
static void drop_comm(struct config_group *, struct config_item *);
static void release_comm(struct config_item *);
static struct config_item *make_node(struct config_group *, const char *);
static void drop_node(struct config_group *, struct config_item *);
static void release_node(struct config_item *);

static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
			 char *buf);
static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
			  const char *buf, size_t len);
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
			 char *buf);
static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
			  const char *buf, size_t len);

static ssize_t comm_nodeid_read(struct comm *cm, char *buf);
static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len);
static ssize_t comm_local_read(struct comm *cm, char *buf);
static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len);
static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len);
static ssize_t node_nodeid_read(struct node *nd, char *buf);
static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len);
static ssize_t node_weight_read(struct node *nd, char *buf);
static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len);

enum {
	COMM_ATTR_NODEID = 0,
	COMM_ATTR_LOCAL,
	COMM_ATTR_ADDR,
};

struct comm_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct comm *, char *);
	ssize_t (*store)(struct comm *, const char *, size_t);
};

static struct comm_attribute comm_attr_nodeid = {
	.attr = { .ca_owner = THIS_MODULE,
		  .ca_name = "nodeid",
		  .ca_mode = S_IRUGO | S_IWUSR },
	.show = comm_nodeid_read,
	.store = comm_nodeid_write,
};

static struct comm_attribute comm_attr_local = {
	.attr = { .ca_owner = THIS_MODULE,
		  .ca_name = "local",
		  .ca_mode = S_IRUGO | S_IWUSR },
	.show = comm_local_read,
	.store = comm_local_write,
};

static struct comm_attribute comm_attr_addr = {
	.attr = { .ca_owner = THIS_MODULE,
		  .ca_name = "addr",
		  .ca_mode = S_IRUGO | S_IWUSR },
	.store = comm_addr_write,
};

static struct configfs_attribute *comm_attrs[] = {
	[COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
	[COMM_ATTR_LOCAL] = &comm_attr_local.attr,
	[COMM_ATTR_ADDR] = &comm_attr_addr.attr,
	NULL,
};

enum {
	NODE_ATTR_NODEID = 0,
	NODE_ATTR_WEIGHT,
};

struct node_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct node *, char *);
	ssize_t (*store)(struct node *, const char *, size_t);
};

static struct node_attribute node_attr_nodeid = {
	.attr = { .ca_owner = THIS_MODULE,
		  .ca_name = "nodeid",
		  .ca_mode = S_IRUGO | S_IWUSR },
	.show = node_nodeid_read,
	.store = node_nodeid_write,
};

static struct node_attribute node_attr_weight = {
	.attr = { .ca_owner = THIS_MODULE,
		  .ca_name = "weight",
		  .ca_mode = S_IRUGO | S_IWUSR },
	.show = node_weight_read,
	.store = node_weight_write,
};

static struct configfs_attribute *node_attrs[] = {
	[NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
	[NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
	NULL,
};

struct clusters {
	struct configfs_subsystem subsys;
};

struct cluster {
	struct config_group group;
};

struct spaces {
	struct config_group ss_group;
};

struct space {
	struct config_group group;
	struct list_head members;
	struct mutex members_lock;
	int members_count;
};

struct comms {
	struct config_group cs_group;
};

struct comm {
	struct config_item item;
	int nodeid;
	int local;
	int addr_count;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

struct nodes {
	struct config_group ns_group;
};

struct node {
	struct config_item item;
	struct list_head list; /* space->members */
	int nodeid;
	int weight;
};

static struct configfs_group_operations clusters_ops = {
	.make_group = make_cluster,
	.drop_item = drop_cluster,
};

static struct configfs_item_operations cluster_ops = {
	.release = release_cluster,
};

static struct configfs_group_operations spaces_ops = {
	.make_group = make_space,
	.drop_item = drop_space,
};

static struct configfs_item_operations space_ops = {
	.release = release_space,
};

static struct configfs_group_operations comms_ops = {
	.make_item = make_comm,
	.drop_item = drop_comm,
};

static struct configfs_item_operations comm_ops = {
	.release = release_comm,
	.show_attribute = show_comm,
	.store_attribute = store_comm,
};

static struct configfs_group_operations nodes_ops = {
	.make_item = make_node,
	.drop_item = drop_node,
};

static struct configfs_item_operations node_ops = {
	.release = release_node,
	.show_attribute = show_node,
	.store_attribute = store_node,
};

static struct config_item_type clusters_type = {
	.ct_group_ops = &clusters_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type cluster_type = {
	.ct_item_ops = &cluster_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type spaces_type = {
	.ct_group_ops = &spaces_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type space_type = {
	.ct_item_ops = &space_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type comms_type = {
	.ct_group_ops = &comms_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type comm_type = {
	.ct_item_ops = &comm_ops,
	.ct_attrs = comm_attrs,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type nodes_type = {
	.ct_group_ops = &nodes_ops,
	.ct_owner = THIS_MODULE,
};

static struct config_item_type node_type = {
	.ct_item_ops = &node_ops,
	.ct_attrs = node_attrs,
	.ct_owner = THIS_MODULE,
};

static struct cluster *to_cluster(struct config_item *i)
{
	return i ? container_of(to_config_group(i), struct cluster, group) : NULL;
}

static struct space *to_space(struct config_item *i)
{
	return i ? container_of(to_config_group(i), struct space, group) : NULL;
}

static struct comm *to_comm(struct config_item *i)
{
	return i ? container_of(i, struct comm, item) : NULL;
}

static struct node *to_node(struct config_item *i)
{
	return i ? container_of(i, struct node, item) : NULL;
}

static struct config_group *make_cluster(struct config_group *g,
					 const char *name)
{
	struct cluster *cl = NULL;
	struct spaces *sps = NULL;
	struct comms *cms = NULL;
	void *gps = NULL;

	cl = kzalloc(sizeof(struct cluster), GFP_KERNEL);
	gps = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
	sps = kzalloc(sizeof(struct spaces), GFP_KERNEL);
	cms = kzalloc(sizeof(struct comms), GFP_KERNEL);

	if (!cl || !gps || !sps || !cms)
		goto fail;

	config_group_init_type_name(&cl->group, name, &cluster_type);
	config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
	config_group_init_type_name(&cms->cs_group, "comms", &comms_type);

	cl->group.default_groups = gps;
	cl->group.default_groups[0] = &sps->ss_group;
	cl->group.default_groups[1] = &cms->cs_group;
	cl->group.default_groups[2] = NULL;

	space_list = &sps->ss_group;
	comm_list = &cms->cs_group;
	return &cl->group;

 fail:
	kfree(cl);
	kfree(gps);
	kfree(sps);
	kfree(cms);
	return NULL;
}

static void drop_cluster(struct config_group *g, struct config_item *i)
{
	struct cluster *cl = to_cluster(i);
	struct config_item *tmp;
	int j;

	for (j = 0; cl->group.default_groups[j]; j++) {
		tmp = &cl->group.default_groups[j]->cg_item;
		cl->group.default_groups[j] = NULL;
		config_item_put(tmp);
	}

	space_list = NULL;
	comm_list = NULL;

	config_item_put(i);
}

static void release_cluster(struct config_item *i)
{
	struct cluster *cl = to_cluster(i);
	kfree(cl->group.default_groups);
	kfree(cl);
}

static struct config_group *make_space(struct config_group *g, const char *name)
{
	struct space *sp = NULL;
	struct nodes *nds = NULL;
	void *gps = NULL;

	sp = kzalloc(sizeof(struct space), GFP_KERNEL);
	gps = kcalloc(2, sizeof(struct config_group *), GFP_KERNEL);
	nds = kzalloc(sizeof(struct nodes), GFP_KERNEL);

	if (!sp || !gps || !nds)
		goto fail;

	config_group_init_type_name(&sp->group, name, &space_type);
	config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);

	sp->group.default_groups = gps;
	sp->group.default_groups[0] = &nds->ns_group;
	sp->group.default_groups[1] = NULL;

	INIT_LIST_HEAD(&sp->members);
	mutex_init(&sp->members_lock);
	sp->members_count = 0;
	return &sp->group;

 fail:
	kfree(sp);
	kfree(gps);
	kfree(nds);
	return NULL;
}

static void drop_space(struct config_group *g, struct config_item *i)
{
	struct space *sp = to_space(i);
	struct config_item *tmp;
	int j;

	/* assert list_empty(&sp->members) */

	for (j = 0; sp->group.default_groups[j]; j++) {
		tmp = &sp->group.default_groups[j]->cg_item;
		sp->group.default_groups[j] = NULL;
		config_item_put(tmp);
	}

	config_item_put(i);
}

static void release_space(struct config_item *i)
{
	struct space *sp = to_space(i);
	kfree(sp->group.default_groups);
	kfree(sp);
}

static struct config_item *make_comm(struct config_group *g, const char *name)
{
	struct comm *cm;

	cm = kzalloc(sizeof(struct comm), GFP_KERNEL);
	if (!cm)
		return NULL;

	config_item_init_type_name(&cm->item, name, &comm_type);
	cm->nodeid = -1;
	cm->local = 0;
	cm->addr_count = 0;
	return &cm->item;
}

static void drop_comm(struct config_group *g, struct config_item *i)
{
	struct comm *cm = to_comm(i);
	if (local_comm == cm)
		local_comm = NULL;
	while (cm->addr_count--)
		kfree(cm->addr[cm->addr_count]);
	config_item_put(i);
}

static void release_comm(struct config_item *i)
{
	struct comm *cm = to_comm(i);
	kfree(cm);
}

static struct config_item *make_node(struct config_group *g, const char *name)
{
	struct space *sp = to_space(g->cg_item.ci_parent);
	struct node *nd;

	nd = kzalloc(sizeof(struct node), GFP_KERNEL);
	if (!nd)
		return NULL;

	config_item_init_type_name(&nd->item, name, &node_type);
	nd->nodeid = -1;
	nd->weight = 1;  /* default weight of 1 if none is set */

	mutex_lock(&sp->members_lock);
	list_add(&nd->list, &sp->members);
	sp->members_count++;
	mutex_unlock(&sp->members_lock);

	return &nd->item;
}

static void drop_node(struct config_group *g, struct config_item *i)
{
	struct space *sp = to_space(g->cg_item.ci_parent);
	struct node *nd = to_node(i);

	mutex_lock(&sp->members_lock);
	list_del(&nd->list);
	sp->members_count--;
	mutex_unlock(&sp->members_lock);

	config_item_put(i);
}

static void release_node(struct config_item *i)
{
	struct node *nd = to_node(i);
	kfree(nd);
}

static struct clusters clusters_root = {
	.subsys = {
		.su_group = {
			.cg_item = {
				.ci_namebuf = "dlm",
				.ci_type = &clusters_type,
			},
		},
	},
};

int dlm_config_init(void)
{
	config_group_init(&clusters_root.subsys.su_group);
	init_MUTEX(&clusters_root.subsys.su_sem);
	return configfs_register_subsystem(&clusters_root.subsys);
}

void dlm_config_exit(void)
{
	configfs_unregister_subsystem(&clusters_root.subsys);
}

/*
 * Functions for user space to read/write attributes
 */

static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
			 char *buf)
{
	struct comm *cm = to_comm(i);
	struct comm_attribute *cma =
			container_of(a, struct comm_attribute, attr);
	return cma->show ? cma->show(cm, buf) : 0;
}

static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
			  const char *buf, size_t len)
{
	struct comm *cm = to_comm(i);
	struct comm_attribute *cma =
		container_of(a, struct comm_attribute, attr);
	return cma->store ? cma->store(cm, buf, len) : -EINVAL;
}

static ssize_t comm_nodeid_read(struct comm *cm, char *buf)
{
	return sprintf(buf, "%d\n", cm->nodeid);
}

static ssize_t comm_nodeid_write(struct comm *cm, const char *buf, size_t len)
{
	cm->nodeid = simple_strtol(buf, NULL, 0);
	return len;
}

static ssize_t comm_local_read(struct comm *cm, char *buf)
{
	return sprintf(buf, "%d\n", cm->local);
}

static ssize_t comm_local_write(struct comm *cm, const char *buf, size_t len)
{
	cm->local = simple_strtol(buf, NULL, 0);
	if (cm->local && !local_comm)
		local_comm = cm;
	return len;
}

static ssize_t comm_addr_write(struct comm *cm, const char *buf, size_t len)
{
	struct sockaddr_storage *addr;

	if (len != sizeof(struct sockaddr_storage))
		return -EINVAL;

	if (cm->addr_count >= DLM_MAX_ADDR_COUNT)
		return -ENOSPC;

	addr = kzalloc(sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	memcpy(addr, buf, len);
	cm->addr[cm->addr_count++] = addr;
	return len;
}
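
/*
 * Note on usage (an assumption consistent with the length check above,
 * not documented in the original commit): "addr" takes a raw binary
 * struct sockaddr_storage, not a text address, so a writer would do
 * roughly:
 *
 *	struct sockaddr_storage ss;
 *	(fill in an AF_INET or AF_INET6 address)
 *	write(fd, &ss, sizeof(ss));	(fd open on .../comms/<comm>/addr)
 */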

static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
			 char *buf)
{
	struct node *nd = to_node(i);
	struct node_attribute *nda =
			container_of(a, struct node_attribute, attr);
	return nda->show ? nda->show(nd, buf) : 0;
}

static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
			  const char *buf, size_t len)
{
	struct node *nd = to_node(i);
	struct node_attribute *nda =
		container_of(a, struct node_attribute, attr);
	return nda->store ? nda->store(nd, buf, len) : -EINVAL;
}

static ssize_t node_nodeid_read(struct node *nd, char *buf)
{
	return sprintf(buf, "%d\n", nd->nodeid);
}

static ssize_t node_nodeid_write(struct node *nd, const char *buf, size_t len)
{
	nd->nodeid = simple_strtol(buf, NULL, 0);
	return len;
}

static ssize_t node_weight_read(struct node *nd, char *buf)
{
	return sprintf(buf, "%d\n", nd->weight);
}

static ssize_t node_weight_write(struct node *nd, const char *buf, size_t len)
{
	nd->weight = simple_strtol(buf, NULL, 0);
	return len;
}

/*
 * Functions for the dlm to get the info that's been configured
 */

static struct space *get_space(char *name)
{
	if (!space_list)
		return NULL;
	return to_space(config_group_find_obj(space_list, name));
}

static void put_space(struct space *sp)
{
	config_item_put(&sp->group.cg_item);
}

static struct comm *get_comm(int nodeid, struct sockaddr_storage *addr)
{
	struct config_item *i;
	struct comm *cm = NULL;
	int found = 0;

	if (!comm_list)
		return NULL;

	down(&clusters_root.subsys.su_sem);

	list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
		cm = to_comm(i);

		if (nodeid) {
			if (cm->nodeid != nodeid)
				continue;
			found = 1;
			break;
		} else {
			if (!cm->addr_count ||
			    memcmp(cm->addr[0], addr, sizeof(*addr)))
				continue;
			found = 1;
			break;
		}
	}
	up(&clusters_root.subsys.su_sem);

	if (found)
		config_item_get(i);
	else
		cm = NULL;
	return cm;
}

static void put_comm(struct comm *cm)
{
	config_item_put(&cm->item);
}

/* caller must free mem */
int dlm_nodeid_list(char *lsname, int **ids_out)
{
	struct space *sp;
	struct node *nd;
	int i = 0, rv = 0;
	int *ids;

	sp = get_space(lsname);
	if (!sp)
		return -EEXIST;

	mutex_lock(&sp->members_lock);
	if (!sp->members_count) {
		rv = 0;
		goto out;
	}

	ids = kcalloc(sp->members_count, sizeof(int), GFP_KERNEL);
	if (!ids) {
		rv = -ENOMEM;
		goto out;
	}

	rv = sp->members_count;
	list_for_each_entry(nd, &sp->members, list)
		ids[i++] = nd->nodeid;

	if (rv != i)
		printk("bad nodeid count %d %d\n", rv, i);

	*ids_out = ids;
 out:
	mutex_unlock(&sp->members_lock);
	put_space(sp);
	return rv;
}
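
/*
 * Hypothetical caller sketch (illustrative, not from the original
 * commit): dlm_nodeid_list() returns the member count and hands
 * ownership of the ids array to the caller, which must kfree() it:
 *
 *	int *ids, count;
 *
 *	count = dlm_nodeid_list("ls1", &ids);
 *	if (count > 0)
 *		kfree(ids);
 *
 * A count of 0 means no members (nothing was allocated); a negative
 * value is an error.
 */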

int dlm_node_weight(char *lsname, int nodeid)
{
	struct space *sp;
	struct node *nd;
	int w = -EEXIST;

	sp = get_space(lsname);
	if (!sp)
		goto out;

	mutex_lock(&sp->members_lock);
	list_for_each_entry(nd, &sp->members, list) {
		if (nd->nodeid != nodeid)
			continue;
		w = nd->weight;
		break;
	}
	mutex_unlock(&sp->members_lock);
	put_space(sp);
 out:
	return w;
}

int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr)
{
	struct comm *cm = get_comm(nodeid, NULL);
	if (!cm)
		return -EEXIST;
	if (!cm->addr_count) {
		put_comm(cm);
		return -ENOENT;
	}
	memcpy(addr, cm->addr[0], sizeof(*addr));
	put_comm(cm);
	return 0;
}

int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct comm *cm = get_comm(0, addr);
	if (!cm)
		return -EEXIST;
	*nodeid = cm->nodeid;
	put_comm(cm);
	return 0;
}

int dlm_our_nodeid(void)
{
	return local_comm ? local_comm->nodeid : 0;
}

/* num 0 is first addr, num 1 is second addr */
int dlm_our_addr(struct sockaddr_storage *addr, int num)
{
	if (!local_comm)
		return -1;
	if (num + 1 > local_comm->addr_count)
		return -1;
	memcpy(addr, local_comm->addr[num], sizeof(*addr));
	return 0;
}

/* Config file defaults */
#define DEFAULT_TCP_PORT       21064
#define DEFAULT_BUFFER_SIZE     4096
#define DEFAULT_RSBTBL_SIZE      256
#define DEFAULT_LKBTBL_SIZE     1024
#define DEFAULT_DIRTBL_SIZE      512
#define DEFAULT_RECOVER_TIMER      5
#define DEFAULT_TOSS_SECS         10
#define DEFAULT_SCAN_SECS          5

struct dlm_config_info dlm_config = {
	.tcp_port = DEFAULT_TCP_PORT,
	.buffer_size = DEFAULT_BUFFER_SIZE,
	.rsbtbl_size = DEFAULT_RSBTBL_SIZE,
	.lkbtbl_size = DEFAULT_LKBTBL_SIZE,
	.dirtbl_size = DEFAULT_DIRTBL_SIZE,
	.recover_timer = DEFAULT_RECOVER_TIMER,
	.toss_secs = DEFAULT_TOSS_SECS,
	.scan_secs = DEFAULT_SCAN_SECS
};
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
new file mode 100644
index 000000000000..9da7839958a9
--- /dev/null
+++ b/fs/dlm/config.h
@@ -0,0 +1,42 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#ifndef __CONFIG_DOT_H__
#define __CONFIG_DOT_H__

#define DLM_MAX_ADDR_COUNT 3

struct dlm_config_info {
	int tcp_port;
	int buffer_size;
	int rsbtbl_size;
	int lkbtbl_size;
	int dirtbl_size;
	int recover_timer;
	int toss_secs;
	int scan_secs;
};

extern struct dlm_config_info dlm_config;

int dlm_config_init(void);
void dlm_config_exit(void);
int dlm_node_weight(char *lsname, int nodeid);
int dlm_nodeid_list(char *lsname, int **ids_out);
int dlm_nodeid_to_addr(int nodeid, struct sockaddr_storage *addr);
int dlm_addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid);
int dlm_our_nodeid(void);
int dlm_our_addr(struct sockaddr_storage *addr, int num);

#endif				/* __CONFIG_DOT_H__ */
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
new file mode 100644
index 000000000000..49deca845dba
--- /dev/null
+++ b/fs/dlm/debug_fs.c
@@ -0,0 +1,296 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>

#include "dlm_internal.h"


static struct dentry *dlm_root;

struct rsb_iter {
	int entry;
	struct dlm_ls *ls;
	struct list_head *next;
	struct dlm_rsb *rsb;
};

static char *print_lockmode(int mode)
{
	switch (mode) {
	case DLM_LOCK_IV:
		return "--";
	case DLM_LOCK_NL:
		return "NL";
	case DLM_LOCK_CR:
		return "CR";
	case DLM_LOCK_CW:
		return "CW";
	case DLM_LOCK_PR:
		return "PR";
	case DLM_LOCK_PW:
		return "PW";
	case DLM_LOCK_EX:
		return "EX";
	default:
		return "??";
	}
}

static void print_lock(struct seq_file *s, struct dlm_lkb *lkb,
		       struct dlm_rsb *res)
{
	seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode));

	if (lkb->lkb_status == DLM_LKSTS_CONVERT
	    || lkb->lkb_status == DLM_LKSTS_WAITING)
		seq_printf(s, " (%s)", print_lockmode(lkb->lkb_rqmode));

	if (lkb->lkb_nodeid) {
		if (lkb->lkb_nodeid != res->res_nodeid)
			seq_printf(s, " Remote: %3d %08x", lkb->lkb_nodeid,
				   lkb->lkb_remid);
		else
			seq_printf(s, " Master: %08x", lkb->lkb_remid);
	}

	if (lkb->lkb_wait_type)
		seq_printf(s, " wait_type: %d", lkb->lkb_wait_type);

	seq_printf(s, "\n");
}

static int print_resource(struct dlm_rsb *res, struct seq_file *s)
{
	struct dlm_lkb *lkb;
	int i, lvblen = res->res_ls->ls_lvblen;

	seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length);
	for (i = 0; i < res->res_length; i++) {
		if (isprint(res->res_name[i]))
			seq_printf(s, "%c", res->res_name[i]);
		else
			seq_printf(s, "%c", '.');
	}
	if (res->res_nodeid > 0)
		seq_printf(s, "\" \nLocal Copy, Master is node %d\n",
			   res->res_nodeid);
	else if (res->res_nodeid == 0)
		seq_printf(s, "\" \nMaster Copy\n");
	else if (res->res_nodeid == -1)
		seq_printf(s, "\" \nLooking up master (lkid %x)\n",
			   res->res_first_lkid);
	else
		seq_printf(s, "\" \nInvalid master %d\n", res->res_nodeid);

	/* Print the LVB: */
	if (res->res_lvbptr) {
		seq_printf(s, "LVB: ");
		for (i = 0; i < lvblen; i++) {
			if (i == lvblen / 2)
				seq_printf(s, "\n     ");
			seq_printf(s, "%02x ",
				   (unsigned char) res->res_lvbptr[i]);
		}
		if (rsb_flag(res, RSB_VALNOTVALID))
			seq_printf(s, " (INVALID)");
		seq_printf(s, "\n");
	}

	/* Print the locks attached to this resource */
	seq_printf(s, "Granted Queue\n");
	list_for_each_entry(lkb, &res->res_grantqueue, lkb_statequeue)
		print_lock(s, lkb, res);

	seq_printf(s, "Conversion Queue\n");
	list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue)
		print_lock(s, lkb, res);

	seq_printf(s, "Waiting Queue\n");
	list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue)
		print_lock(s, lkb, res);

	return 0;
}

static int rsb_iter_next(struct rsb_iter *ri)
{
	struct dlm_ls *ls = ri->ls;
	int i;

	if (!ri->next) {
 top:
		/* Find the next non-empty hash bucket */
		for (i = ri->entry; i < ls->ls_rsbtbl_size; i++) {
			read_lock(&ls->ls_rsbtbl[i].lock);
			if (!list_empty(&ls->ls_rsbtbl[i].list)) {
				ri->next = ls->ls_rsbtbl[i].list.next;
				read_unlock(&ls->ls_rsbtbl[i].lock);
				break;
			}
			read_unlock(&ls->ls_rsbtbl[i].lock);
		}
		ri->entry = i;

		if (ri->entry >= ls->ls_rsbtbl_size)
			return 1;
	} else {
		i = ri->entry;
		read_lock(&ls->ls_rsbtbl[i].lock);
		ri->next = ri->next->next;
		if (ri->next->next == ls->ls_rsbtbl[i].list.next) {
			/* End of list - move to next bucket */
			ri->next = NULL;
			ri->entry++;
			read_unlock(&ls->ls_rsbtbl[i].lock);
			goto top;
		}
		read_unlock(&ls->ls_rsbtbl[i].lock);
	}
	ri->rsb = list_entry(ri->next, struct dlm_rsb, res_hashchain);

	return 0;
}

static void rsb_iter_free(struct rsb_iter *ri)
{
	kfree(ri);
}

static struct rsb_iter *rsb_iter_init(struct dlm_ls *ls)
{
	struct rsb_iter *ri;

	ri = kmalloc(sizeof *ri, GFP_KERNEL);
	if (!ri)
		return NULL;

	ri->ls = ls;
	ri->entry = 0;
	ri->next = NULL;

	if (rsb_iter_next(ri)) {
		rsb_iter_free(ri);
		return NULL;
	}

	return ri;
}

static void *seq_start(struct seq_file *file, loff_t *pos)
{
	struct rsb_iter *ri;
	loff_t n = *pos;

	ri = rsb_iter_init(file->private);
	if (!ri)
		return NULL;

	while (n--) {
		if (rsb_iter_next(ri)) {
			rsb_iter_free(ri);
			return NULL;
		}
	}

	return ri;
}

static void *seq_next(struct seq_file *file, void *iter_ptr, loff_t *pos)
{
	struct rsb_iter *ri = iter_ptr;

	(*pos)++;

	if (rsb_iter_next(ri)) {
		rsb_iter_free(ri);
		return NULL;
	}

	return ri;
}

static void seq_stop(struct seq_file *file, void *iter_ptr)
{
	/* nothing for now */
}

static int seq_show(struct seq_file *file, void *iter_ptr)
{
	struct rsb_iter *ri = iter_ptr;

	print_resource(ri->rsb, file);

	return 0;
}

static struct seq_operations dlm_seq_ops = {
	.start = seq_start,
	.next  = seq_next,
	.stop  = seq_stop,
	.show  = seq_show,
};

static int do_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &dlm_seq_ops);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->private = inode->u.generic_ip;

	return 0;
}

static struct file_operations dlm_fops = {
	.owner   = THIS_MODULE,
	.open    = do_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

int dlm_create_debug_file(struct dlm_ls *ls)
{
	ls->ls_debug_dentry = debugfs_create_file(ls->ls_name,
						  S_IFREG | S_IRUGO,
						  dlm_root,
						  ls,
						  &dlm_fops);
	return ls->ls_debug_dentry ? 0 : -ENOMEM;
}
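
/*
 * Illustration (an assumption about deployment, not in the original
 * commit): with debugfs mounted, e.g. "mount -t debugfs none
 * /sys/kernel/debug", each lockspace appears as
 * /sys/kernel/debug/dlm/<ls_name>, and reading that file produces the
 * resource and lock listing generated by seq_show() above.
 */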

void dlm_delete_debug_file(struct dlm_ls *ls)
{
	if (ls->ls_debug_dentry)
		debugfs_remove(ls->ls_debug_dentry);
}

int dlm_register_debugfs(void)
{
	dlm_root = debugfs_create_dir("dlm", NULL);
	return dlm_root ? 0 : -ENOMEM;
}

void dlm_unregister_debugfs(void)
{
	debugfs_remove(dlm_root);
}
diff --git a/fs/dlm/device.c b/fs/dlm/device.c
new file mode 100644
index 000000000000..99d8b6b07fba
--- /dev/null
+++ b/fs/dlm/device.c
@@ -0,0 +1,1093 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * device.c
 *
 * This is the userland interface to the DLM.
 *
 * The locking is done via a misc char device (find the
 * registered minor number in /proc/misc).
 *
 * User code should not use this interface directly but
 * call the library routines in libdlm.a instead.
 *
 */
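
/*
 * Rough shape of the device protocol (a sketch under assumptions, not
 * a definitive spec; see linux/dlm_device.h for the real structures):
 * userspace writes a struct dlm_write_request describing a lock or
 * lockspace operation and reads completed struct dlm_lock_result
 * messages back:
 *
 *	struct dlm_write_request req = { ... };
 *	req.version[0] = DLM_DEVICE_VERSION_MAJOR;
 *	write(fd, &req, sizeof(req));
 *	...
 *	struct dlm_lock_result res;
 *	read(fd, &res, sizeof(res));	(blocks until an AST arrives)
 */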

#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/idr.h>

#include <linux/dlm.h>
#include <linux/dlm_device.h>

#include "lvb_table.h"

static struct file_operations _dlm_fops;
static const char *name_prefix = "dlm";
static struct list_head user_ls_list;
static struct mutex user_ls_lock;

/* Lock infos are stored in here indexed by lock ID */
static DEFINE_IDR(lockinfo_idr);
static rwlock_t lockinfo_lock;

/* Flags in li_flags */
#define LI_FLAG_COMPLETE   1
#define LI_FLAG_FIRSTLOCK  2
#define LI_FLAG_PERSISTENT 3
#define LI_FLAG_ONLIST     4

/* Flags in ls_flags */
#define LS_FLAG_DELETED    1
#define LS_FLAG_AUTOFREE   2


#define LOCKINFO_MAGIC 0x53595324

struct lock_info {
	uint32_t li_magic;
	uint8_t li_cmd;
	int8_t	li_grmode;
	int8_t	li_rqmode;
	struct dlm_lksb li_lksb;
	wait_queue_head_t li_waitq;
	unsigned long li_flags;
	void __user *li_castparam;
	void __user *li_castaddr;
	void __user *li_bastparam;
	void __user *li_bastaddr;
	void __user *li_pend_bastparam;
	void __user *li_pend_bastaddr;
	struct list_head li_ownerqueue;
	struct file_info *li_file;
	struct dlm_lksb __user *li_user_lksb;
	struct semaphore li_firstlock;
};

/* A queued AST no less */
struct ast_info {
	struct dlm_lock_result result;
	struct list_head list;
	uint32_t lvb_updated;
	uint32_t progress; /* How much has been read */
};

/* One of these per userland lockspace */
struct user_ls {
	void *ls_lockspace;
	atomic_t ls_refcnt;
	long ls_flags;

	/* Passed into misc_register() */
	struct miscdevice ls_miscinfo;
	struct list_head ls_list;
};

/* misc_device info for the control device */
static struct miscdevice ctl_device;

/*
 * Stuff we hang off the file struct.
 * The first two are to cope with unlocking all the
 * locks held by a process when it dies.
 */
struct file_info {
	struct list_head fi_li_list;  /* List of active lock_infos */
	spinlock_t fi_li_lock;
	struct list_head fi_ast_list; /* Queue of ASTs to be delivered */
	spinlock_t fi_ast_lock;
	wait_queue_head_t fi_wait;
	struct user_ls *fi_ls;
	atomic_t fi_refcnt;           /* Number of users */
	unsigned long fi_flags;       /* Bit 1 means the device is open */
};


/* get and put ops for file_info.
   Actually I don't really like "get" and "put", but everyone
   else seems to use them and I can't think of anything
   nicer at the moment */
static void get_file_info(struct file_info *f)
{
	atomic_inc(&f->fi_refcnt);
}

static void put_file_info(struct file_info *f)
{
	if (atomic_dec_and_test(&f->fi_refcnt))
		kfree(f);
}

static void release_lockinfo(struct lock_info *li)
{
	put_file_info(li->li_file);

	write_lock(&lockinfo_lock);
	idr_remove(&lockinfo_idr, li->li_lksb.sb_lkid);
	write_unlock(&lockinfo_lock);

	if (li->li_lksb.sb_lvbptr)
		kfree(li->li_lksb.sb_lvbptr);
	kfree(li);

	module_put(THIS_MODULE);
}

static struct lock_info *get_lockinfo(uint32_t lockid)
{
	struct lock_info *li;

	read_lock(&lockinfo_lock);
	li = idr_find(&lockinfo_idr, lockid);
	read_unlock(&lockinfo_lock);

	return li;
}

static int add_lockinfo(struct lock_info *li)
{
	int n;
	int r;
	int ret = -EINVAL;

	write_lock(&lockinfo_lock);

	if (idr_find(&lockinfo_idr, li->li_lksb.sb_lkid))
		goto out_up;

	ret = -ENOMEM;
	r = idr_pre_get(&lockinfo_idr, GFP_KERNEL);
	if (!r)
		goto out_up;

	r = idr_get_new_above(&lockinfo_idr, li, li->li_lksb.sb_lkid, &n);
	if (r)
		goto out_up;

	if (n != li->li_lksb.sb_lkid) {
		idr_remove(&lockinfo_idr, n);
		goto out_up;
	}

	ret = 0;

 out_up:
	write_unlock(&lockinfo_lock);

	return ret;
}


static struct user_ls *__find_lockspace(int minor)
{
	struct user_ls *lsinfo;

	list_for_each_entry(lsinfo, &user_ls_list, ls_list) {
		if (lsinfo->ls_miscinfo.minor == minor)
			return lsinfo;
	}
	return NULL;
}

/* Find a lockspace struct given the device minor number */
static struct user_ls *find_lockspace(int minor)
{
	struct user_ls *lsinfo;

	mutex_lock(&user_ls_lock);
	lsinfo = __find_lockspace(minor);
	mutex_unlock(&user_ls_lock);

	return lsinfo;
}

static void add_lockspace_to_list(struct user_ls *lsinfo)
{
	mutex_lock(&user_ls_lock);
	list_add(&lsinfo->ls_list, &user_ls_list);
	mutex_unlock(&user_ls_lock);
}

/* Register a lockspace with the DLM and create a misc
   device for userland to access it */
static int register_lockspace(char *name, struct user_ls **ls, int flags)
{
	struct user_ls *newls;
	int status;
	int namelen;

	namelen = strlen(name) + strlen(name_prefix) + 2;

	newls = kzalloc(sizeof(struct user_ls), GFP_KERNEL);
	if (!newls)
		return -ENOMEM;

	newls->ls_miscinfo.name = kzalloc(namelen, GFP_KERNEL);
	if (!newls->ls_miscinfo.name) {
		kfree(newls);
		return -ENOMEM;
	}

	status = dlm_new_lockspace(name, strlen(name), &newls->ls_lockspace, 0,
				   DLM_USER_LVB_LEN);
	if (status != 0) {
		kfree(newls->ls_miscinfo.name);
		kfree(newls);
		return status;
	}

	snprintf((char *)newls->ls_miscinfo.name, namelen, "%s_%s",
		 name_prefix, name);

	newls->ls_miscinfo.fops = &_dlm_fops;
	newls->ls_miscinfo.minor = MISC_DYNAMIC_MINOR;

	status = misc_register(&newls->ls_miscinfo);
	if (status) {
		printk(KERN_ERR "dlm: misc register failed for %s\n", name);
		dlm_release_lockspace(newls->ls_lockspace, 0);
		kfree(newls->ls_miscinfo.name);
		kfree(newls);
		return status;
	}

	if (flags & DLM_USER_LSFLG_AUTOFREE)
		set_bit(LS_FLAG_AUTOFREE, &newls->ls_flags);

	add_lockspace_to_list(newls);
	*ls = newls;
	return 0;
}

/* Called with the user_ls_lock mutex held */
static int unregister_lockspace(struct user_ls *lsinfo, int force)
{
	int status;

	status = dlm_release_lockspace(lsinfo->ls_lockspace, force);
	if (status)
		return status;

	status = misc_deregister(&lsinfo->ls_miscinfo);
	if (status)
		return status;

	list_del(&lsinfo->ls_list);
	set_bit(LS_FLAG_DELETED, &lsinfo->ls_flags);
	lsinfo->ls_lockspace = NULL;
	if (atomic_read(&lsinfo->ls_refcnt) == 0) {
		kfree(lsinfo->ls_miscinfo.name);
		kfree(lsinfo);
	}

	return 0;
}

/* Add it to userland's AST queue */
static void add_to_astqueue(struct lock_info *li, void *astaddr, void *astparam,
			    int lvb_updated)
{
	struct ast_info *ast = kzalloc(sizeof(struct ast_info), GFP_KERNEL);
	if (!ast)
		return;

	ast->result.user_astparam = astparam;
	ast->result.user_astaddr  = astaddr;
	ast->result.user_lksb     = li->li_user_lksb;
	memcpy(&ast->result.lksb, &li->li_lksb, sizeof(struct dlm_lksb));
	ast->lvb_updated = lvb_updated;

	spin_lock(&li->li_file->fi_ast_lock);
	list_add_tail(&ast->list, &li->li_file->fi_ast_list);
	spin_unlock(&li->li_file->fi_ast_lock);
	wake_up_interruptible(&li->li_file->fi_wait);
}

static void bast_routine(void *param, int mode)
{
	struct lock_info *li = param;

	if (li && li->li_bastaddr)
		add_to_astqueue(li, li->li_bastaddr, li->li_bastparam, 0);
}

/*
 * This is the kernel's AST routine.
 * All lock, unlock & query operations complete here.
 * The only synchronous ops are those done during device close.
 */
static void ast_routine(void *param)
{
	struct lock_info *li = param;

	/* Param may be NULL if a persistent lock is unlocked by someone else */
	if (!li)
		return;

	/* If this is a successful conversion then activate the blocking ast
	 * args from the conversion request */
	if (!test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
	    li->li_lksb.sb_status == 0) {

		li->li_bastparam = li->li_pend_bastparam;
		li->li_bastaddr = li->li_pend_bastaddr;
		li->li_pend_bastaddr = NULL;
	}

	/* If it's an async request then post data to the user's AST queue. */
	if (li->li_castaddr) {
		int lvb_updated = 0;

		/* See if the lvb has been updated */
		if (dlm_lvb_operations[li->li_grmode+1][li->li_rqmode+1] == 1)
			lvb_updated = 1;

		if (li->li_lksb.sb_status == 0)
			li->li_grmode = li->li_rqmode;

		/* Only queue AST if the device is still open */
		if (test_bit(1, &li->li_file->fi_flags))
			add_to_astqueue(li, li->li_castaddr, li->li_castparam,
					lvb_updated);

		/* If it's a new lock operation that failed, then
		 * remove it from the owner queue and free the
		 * lock_info.
		 */
		if (test_and_clear_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
		    li->li_lksb.sb_status != 0) {

			/* Wait till dlm_lock() has finished */
			down(&li->li_firstlock);
			up(&li->li_firstlock);

			spin_lock(&li->li_file->fi_li_lock);
			list_del(&li->li_ownerqueue);
			clear_bit(LI_FLAG_ONLIST, &li->li_flags);
			spin_unlock(&li->li_file->fi_li_lock);
			release_lockinfo(li);
			return;
		}
		/* Free unlocks & queries */
		if (li->li_lksb.sb_status == -DLM_EUNLOCK ||
		    li->li_cmd == DLM_USER_QUERY) {
			release_lockinfo(li);
		}
	} else {
		/* Synchronous request, just wake up the caller */
		set_bit(LI_FLAG_COMPLETE, &li->li_flags);
		wake_up_interruptible(&li->li_waitq);
	}
}

/*
 * Wait for the lock op to complete and return the status.
 */
static int wait_for_ast(struct lock_info *li)
{
	/* Wait for the AST routine to complete */
	set_task_state(current, TASK_INTERRUPTIBLE);
	while (!test_bit(LI_FLAG_COMPLETE, &li->li_flags))
		schedule();

	set_task_state(current, TASK_RUNNING);

	return li->li_lksb.sb_status;
}


/* Open on control device */
static int dlm_ctl_open(struct inode *inode, struct file *file)
{
	file->private_data = NULL;
	return 0;
}

/* Close on control device */
static int dlm_ctl_close(struct inode *inode, struct file *file)
{
	return 0;
}

/* Open on lockspace device */
static int dlm_open(struct inode *inode, struct file *file)
{
	struct file_info *f;
	struct user_ls *lsinfo;

	lsinfo = find_lockspace(iminor(inode));
	if (!lsinfo)
		return -ENOENT;

	f = kzalloc(sizeof(struct file_info), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	atomic_inc(&lsinfo->ls_refcnt);
	INIT_LIST_HEAD(&f->fi_li_list);
	INIT_LIST_HEAD(&f->fi_ast_list);
	spin_lock_init(&f->fi_li_lock);
	spin_lock_init(&f->fi_ast_lock);
	init_waitqueue_head(&f->fi_wait);
	f->fi_ls = lsinfo;
	f->fi_flags = 0;
	get_file_info(f);
	set_bit(1, &f->fi_flags);

	file->private_data = f;

	return 0;
}

/* Check the user's version matches ours */
static int check_version(struct dlm_write_request *req)
{
	if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
	    (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
	     req->version[1] > DLM_DEVICE_VERSION_MINOR)) {

		printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
		       "user (%d.%d.%d) kernel (%d.%d.%d)\n",
		       current->comm,
		       current->pid,
		       req->version[0],
		       req->version[1],
		       req->version[2],
		       DLM_DEVICE_VERSION_MAJOR,
		       DLM_DEVICE_VERSION_MINOR,
		       DLM_DEVICE_VERSION_PATCH);
		return -EINVAL;
	}
	return 0;
}

/* Close on lockspace device */
static int dlm_close(struct inode *inode, struct file *file)
{
	struct file_info *f = file->private_data;
	struct lock_info li;
	struct lock_info *old_li, *safe;
	sigset_t tmpsig;
	sigset_t allsigs;
	struct user_ls *lsinfo;
	DECLARE_WAITQUEUE(wq, current);

	lsinfo = find_lockspace(iminor(inode));
	if (!lsinfo)
		return -ENOENT;

	/* Mark this closed so that ASTs will not be delivered any more */
	clear_bit(1, &f->fi_flags);

	/* Block signals while we are doing this */
	sigfillset(&allsigs);
	sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);

	/* We use our own lock_info struct here, so that any
	 * outstanding "real" ASTs will be delivered with the
	 * corresponding "real" params, thus freeing the lock_info
	 * that belongs to the lock. This catches the corner case where
	 * a lock is BUSY when we try to unlock it here
	 */
	memset(&li, 0, sizeof(li));
	clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
	init_waitqueue_head(&li.li_waitq);
	add_wait_queue(&li.li_waitq, &wq);

	/*
	 * Free any outstanding locks, they are on the
	 * list in LIFO order so there should be no problems
	 * about unlocking parents before children.
	 */
	list_for_each_entry_safe(old_li, safe, &f->fi_li_list, li_ownerqueue) {
		int status;
		int flags = 0;

		/* Don't unlock persistent locks, just mark them orphaned */
		if (test_bit(LI_FLAG_PERSISTENT, &old_li->li_flags)) {
			list_del(&old_li->li_ownerqueue);

			/* Update master copy */
			/* TODO: Check locking core updates the local and
			   remote ORPHAN flags */
			li.li_lksb.sb_lkid = old_li->li_lksb.sb_lkid;
			status = dlm_lock(f->fi_ls->ls_lockspace,
					  old_li->li_grmode, &li.li_lksb,
					  DLM_LKF_CONVERT|DLM_LKF_ORPHAN,
					  NULL, 0, 0, ast_routine, NULL, NULL);
			if (status != 0)
				printk("dlm: Error orphaning lock %x: %d\n",
				       old_li->li_lksb.sb_lkid, status);

			/* But tidy our references in it */
			release_lockinfo(old_li);
			continue;
		}

		clear_bit(LI_FLAG_COMPLETE, &li.li_flags);

		flags = DLM_LKF_FORCEUNLOCK;
		if (old_li->li_grmode >= DLM_LOCK_PW)
			flags |= DLM_LKF_IVVALBLK;

		status = dlm_unlock(f->fi_ls->ls_lockspace,
				    old_li->li_lksb.sb_lkid, flags,
				    &li.li_lksb, &li);

		/* Must wait for it to complete as the next lock could be its
		 * parent */
		if (status == 0)
			wait_for_ast(&li);

		/* Unlock succeeded, free the lock_info struct. */
		if (status == 0)
			release_lockinfo(old_li);
	}

	remove_wait_queue(&li.li_waitq, &wq);

	/*
	 * If this is the last reference to the lockspace
	 * then free the struct. If it's an AUTOFREE lockspace
	 * then free the whole thing.
	 */
	mutex_lock(&user_ls_lock);
	if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {

		if (lsinfo->ls_lockspace) {
			if (test_bit(LS_FLAG_AUTOFREE, &lsinfo->ls_flags)) {
				unregister_lockspace(lsinfo, 1);
			}
		} else {
			kfree(lsinfo->ls_miscinfo.name);
			kfree(lsinfo);
		}
	}
	mutex_unlock(&user_ls_lock);
	put_file_info(f);

	/* Restore signals */
	sigprocmask(SIG_SETMASK, &tmpsig, NULL);
	recalc_sigpending();

	return 0;
}

static int do_user_create_lockspace(struct file_info *fi, uint8_t cmd,
				    struct dlm_lspace_params *kparams)
{
	int status;
	struct user_ls *lsinfo;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	status = register_lockspace(kparams->name, &lsinfo, kparams->flags);

	/* If it succeeded then return the minor number */
	if (status == 0)
		status = lsinfo->ls_miscinfo.minor;

	return status;
}

static int do_user_remove_lockspace(struct file_info *fi, uint8_t cmd,
				    struct dlm_lspace_params *kparams)
{
	int status;
	int force = 1;
	struct user_ls *lsinfo;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mutex_lock(&user_ls_lock);
	lsinfo = __find_lockspace(kparams->minor);
	if (!lsinfo) {
		mutex_unlock(&user_ls_lock);
		return -EINVAL;
	}

	if (kparams->flags & DLM_USER_LSFLG_FORCEFREE)
		force = 2;

	status = unregister_lockspace(lsinfo, force);
	mutex_unlock(&user_ls_lock);

	return status;
}

/* Read call, might block if no ASTs are waiting.
 * It will only ever return one message at a time, regardless
 * of how many are pending.
 */
static ssize_t dlm_read(struct file *file, char __user *buffer, size_t count,
			loff_t *ppos)
{
	struct file_info *fi = file->private_data;
	struct ast_info *ast;
	int data_size;
	int offset;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(struct dlm_lock_result))
		return -EINVAL;

	spin_lock(&fi->fi_ast_lock);
	if (list_empty(&fi->fi_ast_list)) {

		/* No waiting ASTs.
		 * Return EOF if the lockspace has been deleted.
		 */
		if (test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags)) {
			spin_unlock(&fi->fi_ast_lock);
			return 0;
		}

		if (file->f_flags & O_NONBLOCK) {
			spin_unlock(&fi->fi_ast_lock);
			return -EAGAIN;
		}

		add_wait_queue(&fi->fi_wait, &wait);

	repeat:
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&fi->fi_ast_list) &&
		    !signal_pending(current)) {

			spin_unlock(&fi->fi_ast_lock);
			schedule();
			spin_lock(&fi->fi_ast_lock);
			goto repeat;
		}

		current->state = TASK_RUNNING;
		remove_wait_queue(&fi->fi_wait, &wait);

		if (signal_pending(current)) {
			spin_unlock(&fi->fi_ast_lock);
			return -ERESTARTSYS;
		}
	}

	ast = list_entry(fi->fi_ast_list.next, struct ast_info, list);
	list_del(&ast->list);
	spin_unlock(&fi->fi_ast_lock);

	/* Work out the size of the returned data */
	data_size = sizeof(struct dlm_lock_result);
	if (ast->lvb_updated && ast->result.lksb.sb_lvbptr)
		data_size += DLM_USER_LVB_LEN;

	offset = sizeof(struct dlm_lock_result);

	/* Room for the extended data ? */
	if (count >= data_size) {

		if (ast->lvb_updated && ast->result.lksb.sb_lvbptr) {
			if (copy_to_user(buffer+offset,
					 ast->result.lksb.sb_lvbptr,
					 DLM_USER_LVB_LEN))
				return -EFAULT;
			ast->result.lvb_offset = offset;
			offset += DLM_USER_LVB_LEN;
		}
	}

	ast->result.length = data_size;
	/* Copy the header now it has all the offsets in it */
	if (copy_to_user(buffer, &ast->result, sizeof(struct dlm_lock_result)))
		offset = -EFAULT;

	/* If we only returned a header and there's more to come then put it
	   back on the list */
	if (count < data_size) {
		spin_lock(&fi->fi_ast_lock);
		list_add(&ast->list, &fi->fi_ast_list);
		spin_unlock(&fi->fi_ast_lock);
	} else
		kfree(ast);
	return offset;
}
729
730static unsigned int dlm_poll(struct file *file, poll_table *wait)
731{
732 struct file_info *fi = file->private_data;
733
734 poll_wait(file, &fi->fi_wait, wait);
735
736 spin_lock(&fi->fi_ast_lock);
737 if (!list_empty(&fi->fi_ast_list)) {
738 spin_unlock(&fi->fi_ast_lock);
739 return POLLIN | POLLRDNORM;
740 }
741
742 spin_unlock(&fi->fi_ast_lock);
743 return 0;
744}
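/*
 * Editorial sketch (not part of the commit): a minimal userspace consumer
 * for the dlm_read()/dlm_poll() interface above.  It assumes "fd" is an
 * already-open lockspace device node and that <linux/dlm_device.h>
 * provides struct dlm_lock_result and DLM_USER_LVB_LEN as this file uses
 * them; treat both as assumptions, not guarantees of the header layout.
 */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/dlm_device.h>

static void drain_asts(int fd)
{
	char buf[sizeof(struct dlm_lock_result) + DLM_USER_LVB_LEN];
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		/* one message per read(); a buffer holding only the header
		   gets the header and the message is requeued, as dlm_read()
		   shows above */
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n == 0)
			break;		/* EOF: lockspace was deleted */
		if (n < 0)
			continue;	/* e.g. EINTR, or EAGAIN if O_NONBLOCK */
		struct dlm_lock_result *res = (struct dlm_lock_result *)buf;
		printf("ast: read %zd bytes, message length %u\n",
		       n, (unsigned)res->length);
	}
}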
745
746static struct lock_info *allocate_lockinfo(struct file_info *fi, uint8_t cmd,
747 struct dlm_lock_params *kparams)
748{
749 struct lock_info *li;
750
751 if (!try_module_get(THIS_MODULE))
752 return NULL;
753
754 li = kzalloc(sizeof(struct lock_info), GFP_KERNEL);
755 if (li) {
756 li->li_magic = LOCKINFO_MAGIC;
757 li->li_file = fi;
758 li->li_cmd = cmd;
759 li->li_flags = 0;
760 li->li_grmode = -1;
761 li->li_rqmode = -1;
762 li->li_pend_bastparam = NULL;
763 li->li_pend_bastaddr = NULL;
764 li->li_castaddr = NULL;
765 li->li_castparam = NULL;
766 li->li_lksb.sb_lvbptr = NULL;
767 li->li_bastaddr = kparams->bastaddr;
768 li->li_bastparam = kparams->bastparam;
769
770 get_file_info(fi);
771 }
772 return li;
773}
774
775static int do_user_lock(struct file_info *fi, uint8_t cmd,
776 struct dlm_lock_params *kparams)
777{
778 struct lock_info *li;
779 int status;
780
781 /*
782 * Validate things that we need to have correct.
783 */
784 if (!kparams->castaddr)
785 return -EINVAL;
786
787 if (!kparams->lksb)
788 return -EINVAL;
789
790 /* Persistent child locks are not available yet */
791 if ((kparams->flags & DLM_LKF_PERSISTENT) && kparams->parent)
792 return -EINVAL;
793
794 /* For conversions, there should already be a lockinfo struct,
795 unless we are adopting an orphaned persistent lock */
796 if (kparams->flags & DLM_LKF_CONVERT) {
797
798 li = get_lockinfo(kparams->lkid);
799
800 /* If this is a persistent lock we will have to create a
801 lockinfo again */
802 if (!li && (kparams->flags & DLM_LKF_PERSISTENT)) {
803 li = allocate_lockinfo(fi, cmd, kparams);
804 if (!li)
805 return -ENOMEM;
806
807 li->li_lksb.sb_lkid = kparams->lkid;
808 li->li_castaddr = kparams->castaddr;
809 li->li_castparam = kparams->castparam;
810
811			/* OK, this isn't exactly a FIRSTLOCK but it is the
812 first time we've used this lockinfo, and if things
813 fail we want rid of it */
814 init_MUTEX_LOCKED(&li->li_firstlock);
815 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
816 add_lockinfo(li);
817
818 /* TODO: do a query to get the current state ?? */
819 }
820 if (!li)
821 return -EINVAL;
822
823 if (li->li_magic != LOCKINFO_MAGIC)
824 return -EINVAL;
825
826 /* For conversions don't overwrite the current blocking AST
827 info so that:
828 a) if a blocking AST fires before the conversion is queued
829 it runs the current handler
830 b) if the conversion is cancelled, the original blocking AST
831 declaration is active
832 The pend_ info is made active when the conversion
833 completes.
834 */
835 li->li_pend_bastaddr = kparams->bastaddr;
836 li->li_pend_bastparam = kparams->bastparam;
837 } else {
838 li = allocate_lockinfo(fi, cmd, kparams);
839 if (!li)
840 return -ENOMEM;
841
842 /* semaphore to allow us to complete our work before
843 the AST routine runs. In fact we only need (and use) this
844 when the initial lock fails */
845 init_MUTEX_LOCKED(&li->li_firstlock);
846 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
847 }
848
849 li->li_user_lksb = kparams->lksb;
850 li->li_castaddr = kparams->castaddr;
851 li->li_castparam = kparams->castparam;
852 li->li_lksb.sb_lkid = kparams->lkid;
853 li->li_rqmode = kparams->mode;
854 if (kparams->flags & DLM_LKF_PERSISTENT)
855 set_bit(LI_FLAG_PERSISTENT, &li->li_flags);
856
857 /* Copy in the value block */
858 if (kparams->flags & DLM_LKF_VALBLK) {
859 if (!li->li_lksb.sb_lvbptr) {
860 li->li_lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN,
861 GFP_KERNEL);
862 if (!li->li_lksb.sb_lvbptr) {
863 status = -ENOMEM;
864 goto out_err;
865 }
866 }
867
868 memcpy(li->li_lksb.sb_lvbptr, kparams->lvb, DLM_USER_LVB_LEN);
869 }
870
871 /* Lock it ... */
872 status = dlm_lock(fi->fi_ls->ls_lockspace,
873 kparams->mode, &li->li_lksb,
874 kparams->flags,
875 kparams->name, kparams->namelen,
876 kparams->parent,
877 ast_routine,
878 li,
879 (li->li_pend_bastaddr || li->li_bastaddr) ?
880 bast_routine : NULL);
881 if (status)
882 goto out_err;
883
884 /* If it succeeded (this far) with a new lock then keep track of
885 it on the file's lockinfo list */
886 if (!status && test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags)) {
887
888 spin_lock(&fi->fi_li_lock);
889 list_add(&li->li_ownerqueue, &fi->fi_li_list);
890 set_bit(LI_FLAG_ONLIST, &li->li_flags);
891 spin_unlock(&fi->fi_li_lock);
892 if (add_lockinfo(li))
893 printk(KERN_WARNING "Add lockinfo failed\n");
894
895 up(&li->li_firstlock);
896 }
897
898 /* Return the lockid as the user needs it /now/ */
899 return li->li_lksb.sb_lkid;
900
901 out_err:
902 if (test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags))
903 release_lockinfo(li);
904 return status;
905
906}
907
908static int do_user_unlock(struct file_info *fi, uint8_t cmd,
909 struct dlm_lock_params *kparams)
910{
911 struct lock_info *li;
912 int status;
913 int convert_cancel = 0;
914
915 li = get_lockinfo(kparams->lkid);
916 if (!li) {
917 li = allocate_lockinfo(fi, cmd, kparams);
918 if (!li)
919 return -ENOMEM;
920 spin_lock(&fi->fi_li_lock);
921 list_add(&li->li_ownerqueue, &fi->fi_li_list);
922 set_bit(LI_FLAG_ONLIST, &li->li_flags);
923 spin_unlock(&fi->fi_li_lock);
924 }
925
926 if (li->li_magic != LOCKINFO_MAGIC)
927 return -EINVAL;
928
929 li->li_user_lksb = kparams->lksb;
930 li->li_castparam = kparams->castparam;
931 li->li_cmd = cmd;
932
933 /* Cancelling a conversion doesn't remove the lock...*/
934 if (kparams->flags & DLM_LKF_CANCEL && li->li_grmode != -1)
935 convert_cancel = 1;
936
937 /* Wait until dlm_lock() has completed */
938 if (!test_bit(LI_FLAG_ONLIST, &li->li_flags)) {
939 down(&li->li_firstlock);
940 up(&li->li_firstlock);
941 }
942
943 /* dlm_unlock() passes a 0 for castaddr which means don't overwrite
944 the existing li_castaddr as that's the completion routine for
945 unlocks. dlm_unlock_wait() specifies a new AST routine to be
946 executed when the unlock completes. */
947 if (kparams->castaddr)
948 li->li_castaddr = kparams->castaddr;
949
950 /* Use existing lksb & astparams */
951 status = dlm_unlock(fi->fi_ls->ls_lockspace,
952 kparams->lkid,
953 kparams->flags, &li->li_lksb, li);
954
955 if (!status && !convert_cancel) {
956 spin_lock(&fi->fi_li_lock);
957 list_del(&li->li_ownerqueue);
958 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
959 spin_unlock(&fi->fi_li_lock);
960 }
961
962 return status;
963}
964
965/* Write call, submit a locking request */
966static ssize_t dlm_write(struct file *file, const char __user *buffer,
967 size_t count, loff_t *ppos)
968{
969 struct file_info *fi = file->private_data;
970 struct dlm_write_request *kparams;
971 sigset_t tmpsig;
972 sigset_t allsigs;
973 int status;
974
975 /* -1 because lock name is optional */
976 if (count < sizeof(struct dlm_write_request)-1)
977 return -EINVAL;
978
979	/* Has the lockspace been deleted? */
980 if (fi && test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
981 return -ENOENT;
982
983 kparams = kmalloc(count, GFP_KERNEL);
984 if (!kparams)
985 return -ENOMEM;
986
987 status = -EFAULT;
988 /* Get the command info */
989 if (copy_from_user(kparams, buffer, count))
990 goto out_free;
991
992 status = -EBADE;
993 if (check_version(kparams))
994 goto out_free;
995
996 /* Block signals while we are doing this */
997 sigfillset(&allsigs);
998 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
999
1000 status = -EINVAL;
1001 switch (kparams->cmd)
1002 {
1003 case DLM_USER_LOCK:
1004 if (!fi) goto out_sig;
1005 status = do_user_lock(fi, kparams->cmd, &kparams->i.lock);
1006 break;
1007
1008 case DLM_USER_UNLOCK:
1009 if (!fi) goto out_sig;
1010 status = do_user_unlock(fi, kparams->cmd, &kparams->i.lock);
1011 break;
1012
1013 case DLM_USER_CREATE_LOCKSPACE:
1014 if (fi) goto out_sig;
1015 status = do_user_create_lockspace(fi, kparams->cmd,
1016 &kparams->i.lspace);
1017 break;
1018
1019 case DLM_USER_REMOVE_LOCKSPACE:
1020 if (fi) goto out_sig;
1021 status = do_user_remove_lockspace(fi, kparams->cmd,
1022 &kparams->i.lspace);
1023 break;
1024 default:
1025		printk(KERN_WARNING "Unknown command passed to DLM device: %d\n",
1026		       kparams->cmd);
1027 break;
1028 }
1029
1030 out_sig:
1031 /* Restore signals */
1032 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
1033 recalc_sigpending();
1034
1035 out_free:
1036 kfree(kparams);
1037 if (status == 0)
1038 return count;
1039 else
1040 return status;
1041}
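/*
 * Editorial sketch (not part of the commit): creating a lockspace from
 * userspace by writing a DLM_USER_CREATE_LOCKSPACE request to the control
 * device, matching what dlm_write() above dispatches.  The version[3]
 * field, the DLM_DEVICE_VERSION_* constants checked by check_version(),
 * and the device node path are assumptions about <linux/dlm_device.h>
 * and the udev/devfs naming of misc devices.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm_device.h>

static int create_lockspace(const char *name)
{
	size_t len = sizeof(struct dlm_write_request) + strlen(name) + 1;
	struct dlm_write_request *req = calloc(1, len);
	int fd, minor;

	if (!req)
		return -1;

	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->cmd = DLM_USER_CREATE_LOCKSPACE;
	strcpy(req->i.lspace.name, name);

	fd = open("/dev/misc/dlm-control", O_RDWR);	/* path may vary */
	if (fd < 0) {
		free(req);
		return -1;
	}
	/* on success write() returns the new lockspace's minor number,
	   which do_user_create_lockspace() passes back as status */
	minor = write(fd, req, len);
	close(fd);
	free(req);
	return minor;		/* < 0 on error */
}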
1042
1043static struct file_operations _dlm_fops = {
1044 .open = dlm_open,
1045 .release = dlm_close,
1046 .read = dlm_read,
1047 .write = dlm_write,
1048 .poll = dlm_poll,
1049 .owner = THIS_MODULE,
1050};
1051
1052static struct file_operations _dlm_ctl_fops = {
1053 .open = dlm_ctl_open,
1054 .release = dlm_ctl_close,
1055 .write = dlm_write,
1056 .owner = THIS_MODULE,
1057};
1058
1059/*
1060 * Create control device
1061 */
1062static int __init dlm_device_init(void)
1063{
1064 int r;
1065
1066 INIT_LIST_HEAD(&user_ls_list);
1067 mutex_init(&user_ls_lock);
1068 rwlock_init(&lockinfo_lock);
1069
1070 ctl_device.name = "dlm-control";
1071 ctl_device.fops = &_dlm_ctl_fops;
1072 ctl_device.minor = MISC_DYNAMIC_MINOR;
1073
1074 r = misc_register(&ctl_device);
1075 if (r) {
1076 printk(KERN_ERR "dlm: misc_register failed for control dev\n");
1077 return r;
1078 }
1079
1080 return 0;
1081}
1082
1083static void __exit dlm_device_exit(void)
1084{
1085 misc_deregister(&ctl_device);
1086}
1087
1088MODULE_DESCRIPTION("Distributed Lock Manager device interface");
1089MODULE_AUTHOR("Red Hat, Inc.");
1090MODULE_LICENSE("GPL");
1091
1092module_init(dlm_device_init);
1093module_exit(dlm_device_exit);
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
new file mode 100644
index 000000000000..46754553fdcc
--- /dev/null
+++ b/fs/dlm/dir.c
@@ -0,0 +1,423 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "lowcomms.h"
18#include "rcom.h"
19#include "config.h"
20#include "memory.h"
21#include "recover.h"
22#include "util.h"
23#include "lock.h"
24#include "dir.h"
25
26
27static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de)
28{
29 spin_lock(&ls->ls_recover_list_lock);
30 list_add(&de->list, &ls->ls_recover_list);
31 spin_unlock(&ls->ls_recover_list_lock);
32}
33
34static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
35{
36 int found = 0;
37 struct dlm_direntry *de;
38
39 spin_lock(&ls->ls_recover_list_lock);
40 list_for_each_entry(de, &ls->ls_recover_list, list) {
41 if (de->length == len) {
42 list_del(&de->list);
43 de->master_nodeid = 0;
44 memset(de->name, 0, len);
45 found = 1;
46 break;
47 }
48 }
49 spin_unlock(&ls->ls_recover_list_lock);
50
51 if (!found)
52 de = allocate_direntry(ls, len);
53 return de;
54}
55
56void dlm_clear_free_entries(struct dlm_ls *ls)
57{
58 struct dlm_direntry *de;
59
60 spin_lock(&ls->ls_recover_list_lock);
61 while (!list_empty(&ls->ls_recover_list)) {
62 de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
63 list);
64 list_del(&de->list);
65 free_direntry(de);
66 }
67 spin_unlock(&ls->ls_recover_list_lock);
68}
69
70/*
71 * We use the upper 16 bits of the hash value to select the directory node.
72 * Low bits are used for distribution of rsb's among hash buckets on each node.
73 *
74 * To give the exact range wanted (0 to num_nodes-1), we apply a modulus of
75 * num_nodes to the hash value. This value in the desired range is used as an
76 * offset into the sorted list of nodeid's to give the particular nodeid.
77 */
78
79int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash)
80{
81 struct list_head *tmp;
82 struct dlm_member *memb = NULL;
83 uint32_t node, n = 0;
84 int nodeid;
85
86 if (ls->ls_num_nodes == 1) {
87 nodeid = dlm_our_nodeid();
88 goto out;
89 }
90
91 if (ls->ls_node_array) {
92 node = (hash >> 16) % ls->ls_total_weight;
93 nodeid = ls->ls_node_array[node];
94 goto out;
95 }
96
97 /* make_member_array() failed to kmalloc ls_node_array... */
98
99 node = (hash >> 16) % ls->ls_num_nodes;
100
101 list_for_each(tmp, &ls->ls_nodes) {
102 if (n++ != node)
103 continue;
104 memb = list_entry(tmp, struct dlm_member, list);
105 break;
106 }
107
108	DLM_ASSERT(memb, printk("num_nodes=%u n=%u node=%u\n",
109 ls->ls_num_nodes, n, node););
110 nodeid = memb->nodeid;
111 out:
112 return nodeid;
113}
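/*
 * Editorial sketch (not part of the commit): the two consumers of one
 * hash value, as the comment above describes -- the upper 16 bits choose
 * the directory node, the low bits choose a hash bucket (the tables are
 * sized as powers of two, so masking works).
 */
#include <stdint.h>

static inline int dir_node_index(uint32_t hash, int total_weight)
{
	return (hash >> 16) % total_weight;	/* index into ls_node_array */
}

static inline uint32_t hash_bucket(uint32_t hash, uint32_t table_size)
{
	return hash & (table_size - 1);		/* table_size is 2^n */
}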
114
115int dlm_dir_nodeid(struct dlm_rsb *r)
116{
117 return dlm_hash2nodeid(r->res_ls, r->res_hash);
118}
119
120static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len)
121{
122 uint32_t val;
123
124 val = jhash(name, len, 0);
125 val &= (ls->ls_dirtbl_size - 1);
126
127 return val;
128}
129
130static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de)
131{
132 uint32_t bucket;
133
134 bucket = dir_hash(ls, de->name, de->length);
135 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
136}
137
138static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name,
139 int namelen, uint32_t bucket)
140{
141 struct dlm_direntry *de;
142
143 list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) {
144 if (de->length == namelen && !memcmp(name, de->name, namelen))
145 goto out;
146 }
147 de = NULL;
148 out:
149 return de;
150}
151
152void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen)
153{
154 struct dlm_direntry *de;
155 uint32_t bucket;
156
157 bucket = dir_hash(ls, name, namelen);
158
159 write_lock(&ls->ls_dirtbl[bucket].lock);
160
161 de = search_bucket(ls, name, namelen, bucket);
162
163 if (!de) {
164 log_error(ls, "remove fr %u none", nodeid);
165 goto out;
166 }
167
168 if (de->master_nodeid != nodeid) {
169 log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid);
170 goto out;
171 }
172
173 list_del(&de->list);
174 free_direntry(de);
175 out:
176 write_unlock(&ls->ls_dirtbl[bucket].lock);
177}
178
179void dlm_dir_clear(struct dlm_ls *ls)
180{
181 struct list_head *head;
182 struct dlm_direntry *de;
183 int i;
184
185 DLM_ASSERT(list_empty(&ls->ls_recover_list), );
186
187 for (i = 0; i < ls->ls_dirtbl_size; i++) {
188 write_lock(&ls->ls_dirtbl[i].lock);
189 head = &ls->ls_dirtbl[i].list;
190 while (!list_empty(head)) {
191 de = list_entry(head->next, struct dlm_direntry, list);
192 list_del(&de->list);
193 put_free_de(ls, de);
194 }
195 write_unlock(&ls->ls_dirtbl[i].lock);
196 }
197}
198
199int dlm_recover_directory(struct dlm_ls *ls)
200{
201 struct dlm_member *memb;
202 struct dlm_direntry *de;
203 char *b, *last_name = NULL;
204 int error = -ENOMEM, last_len, count = 0;
205 uint16_t namelen;
206
207 log_debug(ls, "dlm_recover_directory");
208
209 if (dlm_no_directory(ls))
210 goto out_status;
211
212 dlm_dir_clear(ls);
213
214 last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_KERNEL);
215 if (!last_name)
216 goto out;
217
218 list_for_each_entry(memb, &ls->ls_nodes, list) {
219 memset(last_name, 0, DLM_RESNAME_MAXLEN);
220 last_len = 0;
221
222 for (;;) {
223 error = dlm_recovery_stopped(ls);
224 if (error)
225 goto out_free;
226
227 error = dlm_rcom_names(ls, memb->nodeid,
228 last_name, last_len);
229 if (error)
230 goto out_free;
231
232 schedule();
233
234 /*
235 * pick namelen/name pairs out of received buffer
236 */
237
238 b = ls->ls_recover_buf + sizeof(struct dlm_rcom);
239
240 for (;;) {
241 memcpy(&namelen, b, sizeof(uint16_t));
242 namelen = be16_to_cpu(namelen);
243 b += sizeof(uint16_t);
244
245				/* namelen of 0xFFFF marks end of names for
246 this node; namelen of 0 marks end of the
247 buffer */
248
249 if (namelen == 0xFFFF)
250 goto done;
251 if (!namelen)
252 break;
253
254 error = -ENOMEM;
255 de = get_free_de(ls, namelen);
256 if (!de)
257 goto out_free;
258
259 de->master_nodeid = memb->nodeid;
260 de->length = namelen;
261 last_len = namelen;
262 memcpy(de->name, b, namelen);
263 memcpy(last_name, b, namelen);
264 b += namelen;
265
266 add_entry_to_hash(ls, de);
267 count++;
268 }
269 }
270 done:
271 ;
272 }
273
274 out_status:
275 error = 0;
276 dlm_set_recover_status(ls, DLM_RS_DIR);
277 log_debug(ls, "dlm_recover_directory %d entries", count);
278 out_free:
279 kfree(last_name);
280 out:
281 dlm_clear_free_entries(ls);
282 return error;
283}
284
285static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
286 int namelen, int *r_nodeid)
287{
288 struct dlm_direntry *de, *tmp;
289 uint32_t bucket;
290
291 bucket = dir_hash(ls, name, namelen);
292
293 write_lock(&ls->ls_dirtbl[bucket].lock);
294 de = search_bucket(ls, name, namelen, bucket);
295 if (de) {
296 *r_nodeid = de->master_nodeid;
297 write_unlock(&ls->ls_dirtbl[bucket].lock);
298 if (*r_nodeid == nodeid)
299 return -EEXIST;
300 return 0;
301 }
302
303 write_unlock(&ls->ls_dirtbl[bucket].lock);
304
305 de = allocate_direntry(ls, namelen);
306 if (!de)
307 return -ENOMEM;
308
309 de->master_nodeid = nodeid;
310 de->length = namelen;
311 memcpy(de->name, name, namelen);
312
313 write_lock(&ls->ls_dirtbl[bucket].lock);
314 tmp = search_bucket(ls, name, namelen, bucket);
315 if (tmp) {
316 free_direntry(de);
317 de = tmp;
318 } else {
319 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
320 }
321 *r_nodeid = de->master_nodeid;
322 write_unlock(&ls->ls_dirtbl[bucket].lock);
323 return 0;
324}
325
326int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
327 int *r_nodeid)
328{
329 return get_entry(ls, nodeid, name, namelen, r_nodeid);
330}
331
332/* Copy the names of master rsb's into the buffer provided.
333 Only select names whose dir node is the given nodeid. */
334
335void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
336 char *outbuf, int outlen, int nodeid)
337{
338 struct list_head *list;
339 struct dlm_rsb *start_r = NULL, *r = NULL;
340 int offset = 0, start_namelen, error, dir_nodeid;
341 char *start_name;
342 uint16_t be_namelen;
343
344 /*
345 * Find the rsb where we left off (or start again)
346 */
347
348 start_namelen = inlen;
349 start_name = inbuf;
350
351 if (start_namelen > 1) {
352 /*
353 * We could also use a find_rsb_root() function here that
354 * searched the ls_root_list.
355 */
356 error = dlm_find_rsb(ls, start_name, start_namelen, R_MASTER,
357 &start_r);
358 DLM_ASSERT(!error && start_r,
359 printk("error %d\n", error););
360 DLM_ASSERT(!list_empty(&start_r->res_root_list),
361 dlm_print_rsb(start_r););
362 dlm_put_rsb(start_r);
363 }
364
365 /*
366 * Send rsb names for rsb's we're master of and whose directory node
367 * matches the requesting node.
368 */
369
370 down_read(&ls->ls_root_sem);
371 if (start_r)
372 list = start_r->res_root_list.next;
373 else
374 list = ls->ls_root_list.next;
375
376 for (offset = 0; list != &ls->ls_root_list; list = list->next) {
377 r = list_entry(list, struct dlm_rsb, res_root_list);
378 if (r->res_nodeid)
379 continue;
380
381 dir_nodeid = dlm_dir_nodeid(r);
382 if (dir_nodeid != nodeid)
383 continue;
384
385 /*
386 * The block ends when we can't fit the following in the
387 * remaining buffer space:
388 * namelen (uint16_t) +
389 * name (r->res_length) +
390 * end-of-block record 0x0000 (uint16_t)
391 */
392
393 if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) {
394 /* Write end-of-block record */
395 be_namelen = 0;
396 memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
397 offset += sizeof(uint16_t);
398 goto out;
399 }
400
401 be_namelen = cpu_to_be16(r->res_length);
402 memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
403 offset += sizeof(uint16_t);
404 memcpy(outbuf + offset, r->res_name, r->res_length);
405 offset += r->res_length;
406 }
407
408 /*
409 * If we've reached the end of the list (and there's room) write a
410 * terminating record.
411 */
412
413 if ((list == &ls->ls_root_list) &&
414 (offset + sizeof(uint16_t) <= outlen)) {
415 be_namelen = 0xFFFF;
416 memcpy(outbuf + offset, &be_namelen, sizeof(uint16_t));
417 offset += sizeof(uint16_t);
418 }
419
420 out:
421 up_read(&ls->ls_root_sem);
422}
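/*
 * Editorial sketch (not part of the commit): the name-record stream
 * shared by dlm_copy_master_names() (writer) and dlm_recover_directory()
 * (reader).  Each record is a big-endian u16 length followed by the name
 * bytes; 0x0000 means "block full, request more" and 0xFFFF means "no
 * more names from this node".  htons() stands in for cpu_to_be16() in
 * this userspace-style illustration.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* append one record; returns the new offset, or -1 after writing the
   end-of-block marker when the record would not fit */
static int put_name_record(char *buf, int offset, int buflen,
			   const char *name, uint16_t namelen)
{
	uint16_t be;

	if (offset + (int)(2 * sizeof(uint16_t)) + namelen > buflen) {
		be = 0;				/* end-of-block record */
		memcpy(buf + offset, &be, sizeof(be));
		return -1;
	}
	be = htons(namelen);
	memcpy(buf + offset, &be, sizeof(be));
	memcpy(buf + offset + sizeof(be), name, namelen);
	return offset + (int)sizeof(be) + namelen;
}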
423
diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h
new file mode 100644
index 000000000000..0b0eb1267b6e
--- /dev/null
+++ b/fs/dlm/dir.h
@@ -0,0 +1,30 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __DIR_DOT_H__
15#define __DIR_DOT_H__
16
17
18int dlm_dir_nodeid(struct dlm_rsb *rsb);
19int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash);
20void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int len);
21void dlm_dir_clear(struct dlm_ls *ls);
22void dlm_clear_free_entries(struct dlm_ls *ls);
23int dlm_recover_directory(struct dlm_ls *ls);
24int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
25 int *r_nodeid);
26void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
27 char *outbuf, int outlen, int nodeid);
28
29#endif /* __DIR_DOT_H__ */
30
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
new file mode 100644
index 000000000000..c3299020c8f3
--- /dev/null
+++ b/fs/dlm/dlm_internal.h
@@ -0,0 +1,493 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __DLM_INTERNAL_DOT_H__
15#define __DLM_INTERNAL_DOT_H__
16
17/*
18 * This is the main header file to be included in each DLM source file.
19 */
20
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/types.h>
25#include <linux/ctype.h>
26#include <linux/spinlock.h>
27#include <linux/vmalloc.h>
28#include <linux/list.h>
29#include <linux/errno.h>
30#include <linux/random.h>
31#include <linux/delay.h>
32#include <linux/socket.h>
33#include <linux/kthread.h>
34#include <linux/kobject.h>
35#include <linux/kref.h>
36#include <linux/kernel.h>
37#include <linux/jhash.h>
38#include <linux/mutex.h>
39#include <asm/semaphore.h>
40#include <asm/uaccess.h>
41
42#include <linux/dlm.h>
43
44#define DLM_LOCKSPACE_LEN 64
45
46/* Size of the temp buffer midcomms allocates on the stack.
47 We try to make this large enough so most messages fit.
48 FIXME: should sctp make this unnecessary? */
49
50#define DLM_INBUF_LEN 148
51
52struct dlm_ls;
53struct dlm_lkb;
54struct dlm_rsb;
55struct dlm_member;
56struct dlm_lkbtable;
57struct dlm_rsbtable;
58struct dlm_dirtable;
59struct dlm_direntry;
60struct dlm_recover;
61struct dlm_header;
62struct dlm_message;
63struct dlm_rcom;
64struct dlm_mhandle;
65
66#define log_print(fmt, args...) \
67 printk(KERN_ERR "dlm: "fmt"\n" , ##args)
68#define log_error(ls, fmt, args...) \
69 printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
70
71#ifdef DLM_LOG_DEBUG
72#define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args)
73#else
74#define log_debug(ls, fmt, args...)
75#endif
76
77#define DLM_ASSERT(x, do) \
78{ \
79 if (!(x)) \
80 { \
81 printk(KERN_ERR "\nDLM: Assertion failed on line %d of file %s\n" \
82 "DLM: assertion: \"%s\"\n" \
83 "DLM: time = %lu\n", \
84 __LINE__, __FILE__, #x, jiffies); \
85 {do} \
86 printk("\n"); \
87 BUG(); \
88 panic("DLM: Record message above and reboot.\n"); \
89 } \
90}
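/*
 * Editorial note (not part of the commit): the second macro argument is a
 * statement list executed before BUG(), typically a dump of the object
 * under test, as in this use from lock.c:
 *
 *	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
 *
 * An empty second argument is also legal: DLM_ASSERT(list_empty(head), );
 */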
91
92
93struct dlm_direntry {
94 struct list_head list;
95 uint32_t master_nodeid;
96 uint16_t length;
97 char name[1];
98};
99
100struct dlm_dirtable {
101 struct list_head list;
102 rwlock_t lock;
103};
104
105struct dlm_rsbtable {
106 struct list_head list;
107 struct list_head toss;
108 rwlock_t lock;
109};
110
111struct dlm_lkbtable {
112 struct list_head list;
113 rwlock_t lock;
114 uint16_t counter;
115};
116
117/*
118 * Lockspace member (per node in a ls)
119 */
120
121struct dlm_member {
122 struct list_head list;
123 int nodeid;
124 int weight;
125};
126
127/*
128 * Save and manage recovery state for a lockspace.
129 */
130
131struct dlm_recover {
132 struct list_head list;
133 int *nodeids;
134 int node_count;
135 uint64_t seq;
136};
137
138/*
139 * Pass input args to second stage locking function.
140 */
141
142struct dlm_args {
143 uint32_t flags;
144 void *astaddr;
145 long astparam;
146 void *bastaddr;
147 int mode;
148 struct dlm_lksb *lksb;
149};
150
151
152/*
153 * Lock block
154 *
155 * A lock can be one of three types:
156 *
157 * local copy lock is mastered locally
158 * (lkb_nodeid is zero and DLM_LKF_MSTCPY is not set)
159 * process copy lock is mastered on a remote node
160 * (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is not set)
161 * master copy master node's copy of a lock owned by remote node
162 * (lkb_nodeid is non-zero and DLM_LKF_MSTCPY is set)
163 *
164 * lkb_exflags: a copy of the most recent flags arg provided to dlm_lock or
165 * dlm_unlock. The dlm does not modify these or use any private flags in
166 * this field; it only contains DLM_LKF_ flags from dlm.h. These flags
167 * are sent as-is to the remote master when the lock is remote.
168 *
169 * lkb_flags: internal dlm flags (DLM_IFL_ prefix) from dlm_internal.h.
170 * Some internal flags are shared between the master and process nodes;
171 * these shared flags are kept in the lower two bytes. One of these
172 * flags set on the master copy will be propagated to the process copy
173 * and vice versa. Other internal flags are private to the master or process
174 * node (e.g. DLM_IFL_MSTCPY). These are kept in the high two bytes.
175 *
176 * lkb_sbflags: status block flags. These flags are copied directly into
177 * the caller's lksb.sb_flags prior to the dlm_lock/dlm_unlock completion
178 * ast. All defined in dlm.h with DLM_SBF_ prefix.
179 *
180 * lkb_status: the lock status indicates which rsb queue the lock is
181 * on, grant, convert, or wait. DLM_LKSTS_ WAITING/GRANTED/CONVERT
182 *
183 * lkb_wait_type: the dlm message type (DLM_MSG_ prefix) for which a
184 * reply is needed. Only set when the lkb is on the lockspace waiters
185 * list awaiting a reply from a remote node.
186 *
187 * lkb_nodeid: when the lkb is a local copy, nodeid is 0; when the lkb
188 * is a master copy, nodeid specifies the remote lock holder, when the
189 * lkb is a process copy, the nodeid specifies the lock master.
190 */
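/*
 * Editorial illustration (not part of the commit): the three lock types
 * described above, as predicates over (lkb_nodeid, DLM_IFL_MSTCPY);
 * lock.c's is_process_copy()/is_master_copy() encode the same tests.
 *
 *	local copy:	lkb_nodeid == 0 && !(lkb_flags & DLM_IFL_MSTCPY)
 *	process copy:	lkb_nodeid != 0 && !(lkb_flags & DLM_IFL_MSTCPY)
 *	master copy:	lkb_nodeid != 0 &&  (lkb_flags & DLM_IFL_MSTCPY)
 */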
191
192/* lkb_ast_type */
193
194#define AST_COMP 1
195#define AST_BAST 2
196
197/* lkb_status */
198
199#define DLM_LKSTS_WAITING 1
200#define DLM_LKSTS_GRANTED 2
201#define DLM_LKSTS_CONVERT 3
202
203/* lkb_flags */
204
205#define DLM_IFL_MSTCPY 0x00010000
206#define DLM_IFL_RESEND 0x00020000
207
208struct dlm_lkb {
209 struct dlm_rsb *lkb_resource; /* the rsb */
210 struct kref lkb_ref;
211 int lkb_nodeid; /* copied from rsb */
212 int lkb_ownpid; /* pid of lock owner */
213 uint32_t lkb_id; /* our lock ID */
214 uint32_t lkb_remid; /* lock ID on remote partner */
215 uint32_t lkb_exflags; /* external flags from caller */
216 uint32_t lkb_sbflags; /* lksb flags */
217 uint32_t lkb_flags; /* internal flags */
218 uint32_t lkb_lvbseq; /* lvb sequence number */
219
220 int8_t lkb_status; /* granted, waiting, convert */
221 int8_t lkb_rqmode; /* requested lock mode */
222 int8_t lkb_grmode; /* granted lock mode */
223 int8_t lkb_bastmode; /* requested mode */
224 int8_t lkb_highbast; /* highest mode bast sent for */
225
226 int8_t lkb_wait_type; /* type of reply waiting for */
227 int8_t lkb_ast_type; /* type of ast queued for */
228
229 struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
230 struct list_head lkb_statequeue; /* rsb g/c/w list */
231 struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */
232 struct list_head lkb_wait_reply; /* waiting for remote reply */
233 struct list_head lkb_astqueue; /* need ast to be sent */
234
235 char *lkb_lvbptr;
236 struct dlm_lksb *lkb_lksb; /* caller's status block */
237 void *lkb_astaddr; /* caller's ast function */
238 void *lkb_bastaddr; /* caller's bast function */
239 long lkb_astparam; /* caller's ast arg */
240};
241
242
243struct dlm_rsb {
244 struct dlm_ls *res_ls; /* the lockspace */
245 struct kref res_ref;
246 struct mutex res_mutex;
247 unsigned long res_flags;
248 int res_length; /* length of rsb name */
249 int res_nodeid;
250 uint32_t res_lvbseq;
251 uint32_t res_hash;
252 uint32_t res_bucket; /* rsbtbl */
253 unsigned long res_toss_time;
254 uint32_t res_first_lkid;
255 struct list_head res_lookup; /* lkbs waiting on first */
256 struct list_head res_hashchain; /* rsbtbl */
257 struct list_head res_grantqueue;
258 struct list_head res_convertqueue;
259 struct list_head res_waitqueue;
260
261 struct list_head res_root_list; /* used for recovery */
262 struct list_head res_recover_list; /* used for recovery */
263 int res_recover_locks_count;
264
265 char *res_lvbptr;
266 char res_name[1];
267};
268
269/* find_rsb() flags */
270
271#define R_MASTER 1 /* only return rsb if it's a master */
272#define R_CREATE 2 /* create/add rsb if not found */
273
274/* rsb_flags */
275
276enum rsb_flags {
277 RSB_MASTER_UNCERTAIN,
278 RSB_VALNOTVALID,
279 RSB_VALNOTVALID_PREV,
280 RSB_NEW_MASTER,
281 RSB_NEW_MASTER2,
282 RSB_RECOVER_CONVERT,
283};
284
285static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
286{
287 __set_bit(flag, &r->res_flags);
288}
289
290static inline void rsb_clear_flag(struct dlm_rsb *r, enum rsb_flags flag)
291{
292 __clear_bit(flag, &r->res_flags);
293}
294
295static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
296{
297 return test_bit(flag, &r->res_flags);
298}
299
300
301/* dlm_header is first element of all structs sent between nodes */
302
303#define DLM_HEADER_MAJOR 0x00020000
304#define DLM_HEADER_MINOR 0x00000001
305
306#define DLM_MSG 1
307#define DLM_RCOM 2
308
309struct dlm_header {
310 uint32_t h_version;
311 uint32_t h_lockspace;
312 uint32_t h_nodeid; /* nodeid of sender */
313 uint16_t h_length;
314 uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */
315 uint8_t h_pad;
316};
317
318
319#define DLM_MSG_REQUEST 1
320#define DLM_MSG_CONVERT 2
321#define DLM_MSG_UNLOCK 3
322#define DLM_MSG_CANCEL 4
323#define DLM_MSG_REQUEST_REPLY 5
324#define DLM_MSG_CONVERT_REPLY 6
325#define DLM_MSG_UNLOCK_REPLY 7
326#define DLM_MSG_CANCEL_REPLY 8
327#define DLM_MSG_GRANT 9
328#define DLM_MSG_BAST 10
329#define DLM_MSG_LOOKUP 11
330#define DLM_MSG_REMOVE 12
331#define DLM_MSG_LOOKUP_REPLY 13
332
333struct dlm_message {
334 struct dlm_header m_header;
335 uint32_t m_type; /* DLM_MSG_ */
336 uint32_t m_nodeid;
337 uint32_t m_pid;
338 uint32_t m_lkid; /* lkid on sender */
339 uint32_t m_remid; /* lkid on receiver */
340 uint32_t m_parent_lkid;
341 uint32_t m_parent_remid;
342 uint32_t m_exflags;
343 uint32_t m_sbflags;
344 uint32_t m_flags;
345 uint32_t m_lvbseq;
346 uint32_t m_hash;
347 int m_status;
348 int m_grmode;
349 int m_rqmode;
350 int m_bastmode;
351 int m_asts;
352 int m_result; /* 0 or -EXXX */
353 char m_extra[0]; /* name or lvb */
354};
355
356
357#define DLM_RS_NODES 0x00000001
358#define DLM_RS_NODES_ALL 0x00000002
359#define DLM_RS_DIR 0x00000004
360#define DLM_RS_DIR_ALL 0x00000008
361#define DLM_RS_LOCKS 0x00000010
362#define DLM_RS_LOCKS_ALL 0x00000020
363#define DLM_RS_DONE 0x00000040
364#define DLM_RS_DONE_ALL 0x00000080
365
366#define DLM_RCOM_STATUS 1
367#define DLM_RCOM_NAMES 2
368#define DLM_RCOM_LOOKUP 3
369#define DLM_RCOM_LOCK 4
370#define DLM_RCOM_STATUS_REPLY 5
371#define DLM_RCOM_NAMES_REPLY 6
372#define DLM_RCOM_LOOKUP_REPLY 7
373#define DLM_RCOM_LOCK_REPLY 8
374
375struct dlm_rcom {
376 struct dlm_header rc_header;
377 uint32_t rc_type; /* DLM_RCOM_ */
378 int rc_result; /* multi-purpose */
379 uint64_t rc_id; /* match reply with request */
380 char rc_buf[0];
381};
382
383struct rcom_config {
384 uint32_t rf_lvblen;
385 uint32_t rf_lsflags;
386 uint64_t rf_unused;
387};
388
389struct rcom_lock {
390 uint32_t rl_ownpid;
391 uint32_t rl_lkid;
392 uint32_t rl_remid;
393 uint32_t rl_parent_lkid;
394 uint32_t rl_parent_remid;
395 uint32_t rl_exflags;
396 uint32_t rl_flags;
397 uint32_t rl_lvbseq;
398 int rl_result;
399 int8_t rl_rqmode;
400 int8_t rl_grmode;
401 int8_t rl_status;
402 int8_t rl_asts;
403 uint16_t rl_wait_type;
404 uint16_t rl_namelen;
405 char rl_name[DLM_RESNAME_MAXLEN];
406 char rl_lvb[0];
407};
408
409struct dlm_ls {
410 struct list_head ls_list; /* list of lockspaces */
411 uint32_t ls_global_id; /* global unique lockspace ID */
412 uint32_t ls_exflags;
413 int ls_lvblen;
414 int ls_count; /* reference count */
415 unsigned long ls_flags; /* LSFL_ */
416 struct kobject ls_kobj;
417
418 struct dlm_rsbtable *ls_rsbtbl;
419 uint32_t ls_rsbtbl_size;
420
421 struct dlm_lkbtable *ls_lkbtbl;
422 uint32_t ls_lkbtbl_size;
423
424 struct dlm_dirtable *ls_dirtbl;
425 uint32_t ls_dirtbl_size;
426
427 struct mutex ls_waiters_mutex;
428 struct list_head ls_waiters; /* lkbs needing a reply */
429
430 struct list_head ls_nodes; /* current nodes in ls */
431 struct list_head ls_nodes_gone; /* dead node list, recovery */
432 int ls_num_nodes; /* number of nodes in ls */
433 int ls_low_nodeid;
434 int ls_total_weight;
435 int *ls_node_array;
436
437 struct dlm_rsb ls_stub_rsb; /* for returning errors */
438 struct dlm_lkb ls_stub_lkb; /* for returning errors */
439 struct dlm_message ls_stub_ms; /* for faking a reply */
440
441 struct dentry *ls_debug_dentry; /* debugfs */
442
443 wait_queue_head_t ls_uevent_wait; /* user part of join/leave */
444 int ls_uevent_result;
445
446 /* recovery related */
447
448 struct timer_list ls_timer;
449 struct task_struct *ls_recoverd_task;
450 struct mutex ls_recoverd_active;
451 spinlock_t ls_recover_lock;
452 uint32_t ls_recover_status; /* DLM_RS_ */
453 uint64_t ls_recover_seq;
454 struct dlm_recover *ls_recover_args;
455 struct rw_semaphore ls_in_recovery; /* block local requests */
456 struct list_head ls_requestqueue;/* queue remote requests */
457 struct mutex ls_requestqueue_mutex;
458 char *ls_recover_buf;
459 struct list_head ls_recover_list;
460 spinlock_t ls_recover_list_lock;
461 int ls_recover_list_count;
462 wait_queue_head_t ls_wait_general;
463
464 struct list_head ls_root_list; /* root resources */
465 struct rw_semaphore ls_root_sem; /* protect root_list */
466
467 int ls_namelen;
468 char ls_name[1];
469};
470
471#define LSFL_WORK 0
472#define LSFL_RUNNING 1
473#define LSFL_RECOVERY_STOP 2
474#define LSFL_RCOM_READY 3
475#define LSFL_UEVENT_WAIT 4
476
477static inline int dlm_locking_stopped(struct dlm_ls *ls)
478{
479 return !test_bit(LSFL_RUNNING, &ls->ls_flags);
480}
481
482static inline int dlm_recovery_stopped(struct dlm_ls *ls)
483{
484 return test_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
485}
486
487static inline int dlm_no_directory(struct dlm_ls *ls)
488{
489 return (ls->ls_exflags & DLM_LSFL_NODIR) ? 1 : 0;
490}
491
492#endif /* __DLM_INTERNAL_DOT_H__ */
493
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
new file mode 100644
index 000000000000..80487703d582
--- /dev/null
+++ b/fs/dlm/lock.c
@@ -0,0 +1,3525 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13/* Central locking logic has four stages:
14
15 dlm_lock()
16 dlm_unlock()
17
18 request_lock(ls, lkb)
19 convert_lock(ls, lkb)
20 unlock_lock(ls, lkb)
21 cancel_lock(ls, lkb)
22
23 _request_lock(r, lkb)
24 _convert_lock(r, lkb)
25 _unlock_lock(r, lkb)
26 _cancel_lock(r, lkb)
27
28 do_request(r, lkb)
29 do_convert(r, lkb)
30 do_unlock(r, lkb)
31 do_cancel(r, lkb)
32
33 Stage 1 (lock, unlock) is mainly about checking input args and
34 splitting into one of the four main operations:
35
36 dlm_lock = request_lock
37 dlm_lock+CONVERT = convert_lock
38 dlm_unlock = unlock_lock
39 dlm_unlock+CANCEL = cancel_lock
40
41 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
42 provided to the next stage.
43
44 Stage 3, _xxxx_lock(), determines if the operation is local or remote.
45 When remote, it calls send_xxxx(), when local it calls do_xxxx().
46
47 Stage 4, do_xxxx(), is the guts of the operation. It manipulates the
48 given rsb and lkb and queues callbacks.
49
50 For remote operations, send_xxxx() results in the corresponding do_xxxx()
51 function being executed on the remote node. The connecting send/receive
52 calls on local (L) and remote (R) nodes:
53
54 L: send_xxxx() -> R: receive_xxxx()
55 R: do_xxxx()
56 L: receive_xxxx_reply() <- R: send_xxxx_reply()
57*/
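/*
 * Editorial sketch (not part of the commit): what a stage-1 caller looks
 * like from kernel code.  The dlm_lock() argument order mirrors the call
 * made in device.c's do_user_lock(); waiting on a completion for the AST
 * is an illustrative pattern, not something this commit prescribes.
 */
#include <linux/dlm.h>
#include <linux/string.h>
#include <linux/completion.h>

static struct dlm_lksb my_lksb;
static DECLARE_COMPLETION(my_ast_done);

static void my_ast(void *astarg)
{
	complete(&my_ast_done);		/* sb_status now holds the result */
}

static int grab_ex_lock(dlm_lockspace_t *ls, const char *name)
{
	int error;

	error = dlm_lock(ls, DLM_LOCK_EX, &my_lksb, 0,
			 (void *)name, strlen(name), 0 /* no parent */,
			 my_ast, &my_lksb, NULL /* no bast */);
	if (error)
		return error;		/* request was never queued */
	wait_for_completion(&my_ast_done);
	return my_lksb.sb_status;	/* 0 = granted */
}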
58
59#include "dlm_internal.h"
60#include "memory.h"
61#include "lowcomms.h"
62#include "requestqueue.h"
63#include "util.h"
64#include "dir.h"
65#include "member.h"
66#include "lockspace.h"
67#include "ast.h"
68#include "lock.h"
69#include "rcom.h"
70#include "recover.h"
71#include "lvb_table.h"
72#include "config.h"
73
74static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
75static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
76static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
77static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
78static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
79static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
80static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
81static int send_remove(struct dlm_rsb *r);
82static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
83static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
84 struct dlm_message *ms);
85static int receive_extralen(struct dlm_message *ms);
86
87/*
88 * Lock compatibility matrix - thanks Steve
89 * UN = Unlocked state. Not really a state, used as a flag
90 * PD = Padding. Used to make the matrix a nice power of two in size
91 * Other states are the same as the VMS DLM.
92 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
93 */
94
95static const int __dlm_compat_matrix[8][8] = {
96 /* UN NL CR CW PR PW EX PD */
97 {1, 1, 1, 1, 1, 1, 1, 0}, /* UN */
98 {1, 1, 1, 1, 1, 1, 1, 0}, /* NL */
99 {1, 1, 1, 1, 1, 1, 0, 0}, /* CR */
100 {1, 1, 1, 1, 0, 0, 0, 0}, /* CW */
101 {1, 1, 1, 0, 1, 0, 0, 0}, /* PR */
102 {1, 1, 1, 0, 0, 0, 0, 0}, /* PW */
103 {1, 1, 0, 0, 0, 0, 0, 0}, /* EX */
104 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
105};
106
107/*
108 * This defines the direction of transfer of LVB data.
109 * Granted mode is the row; requested mode is the column.
110 * Usage: matrix[grmode+1][rqmode+1]
111 * 1 = LVB is returned to the caller
112 * 0 = LVB is written to the resource
113 * -1 = nothing happens to the LVB
114 */
115
116const int dlm_lvb_operations[8][8] = {
117 /* UN NL CR CW PR PW EX PD*/
118 { -1, 1, 1, 1, 1, 1, 1, -1 }, /* UN */
119 { -1, 1, 1, 1, 1, 1, 1, 0 }, /* NL */
120 { -1, -1, 1, 1, 1, 1, 1, 0 }, /* CR */
121 { -1, -1, -1, 1, 1, 1, 1, 0 }, /* CW */
122 { -1, -1, -1, -1, 1, 1, 1, 0 }, /* PR */
123 { -1, 0, 0, 0, 0, 0, 1, 0 }, /* PW */
124 { -1, 0, 0, 0, 0, 0, 0, 0 }, /* EX */
125 { -1, 0, 0, 0, 0, 0, 0, 0 } /* PD */
126};
127EXPORT_SYMBOL_GPL(dlm_lvb_operations);
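/*
 * Editorial illustration (not part of the commit): reading the table.
 * A PW -> NL down-conversion writes the holder's LVB into the resource
 * (entry 0); an NL -> EX upgrade returns the resource's LVB to the
 * caller (entry 1).  set_lvb_lock() further down acts on these values.
 */
static int __attribute__((unused)) lvb_direction_examples(void)
{
	int down = dlm_lvb_operations[DLM_LOCK_PW + 1][DLM_LOCK_NL + 1];
	int up   = dlm_lvb_operations[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1];

	return down == 0 && up == 1;	/* write to rsb; return to caller */
}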
128
129#define modes_compat(gr, rq) \
130 __dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]
131
132int dlm_modes_compat(int mode1, int mode2)
133{
134 return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
135}
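/*
 * Editorial illustration (not part of the commit): two PR (protected
 * read) holders share a resource, but PR and EX clash, and EX is
 * compatible with nothing stronger than NL.
 */
static void __attribute__((unused)) compat_examples(void)
{
	BUG_ON(!dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_PR));	/* share */
	BUG_ON(dlm_modes_compat(DLM_LOCK_PR, DLM_LOCK_EX));	/* clash */
	BUG_ON(dlm_modes_compat(DLM_LOCK_EX, DLM_LOCK_EX));	/* clash */
}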
136
137/*
138 * Compatibility matrix for conversions with QUECVT set.
139 * Granted mode is the row; requested mode is the column.
140 * Usage: matrix[grmode+1][rqmode+1]
141 */
142
143static const int __quecvt_compat_matrix[8][8] = {
144 /* UN NL CR CW PR PW EX PD */
145 {0, 0, 0, 0, 0, 0, 0, 0}, /* UN */
146 {0, 0, 1, 1, 1, 1, 1, 0}, /* NL */
147 {0, 0, 0, 1, 1, 1, 1, 0}, /* CR */
148 {0, 0, 0, 0, 1, 1, 1, 0}, /* CW */
149 {0, 0, 0, 1, 0, 1, 1, 0}, /* PR */
150 {0, 0, 0, 0, 0, 0, 1, 0}, /* PW */
151 {0, 0, 0, 0, 0, 0, 0, 0}, /* EX */
152 {0, 0, 0, 0, 0, 0, 0, 0} /* PD */
153};
154
155static void dlm_print_lkb(struct dlm_lkb *lkb)
156{
157 printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
158 " status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
159 lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
160 lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
161 lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_ast_type);
162}
163
164void dlm_print_rsb(struct dlm_rsb *r)
165{
166 printk(KERN_ERR "rsb: nodeid %d flags %lx first %x rlc %d name %s\n",
167 r->res_nodeid, r->res_flags, r->res_first_lkid,
168 r->res_recover_locks_count, r->res_name);
169}
170
171/* Threads cannot use the lockspace while it's being recovered */
172
173static inline void lock_recovery(struct dlm_ls *ls)
174{
175 down_read(&ls->ls_in_recovery);
176}
177
178static inline void unlock_recovery(struct dlm_ls *ls)
179{
180 up_read(&ls->ls_in_recovery);
181}
182
183static inline int lock_recovery_try(struct dlm_ls *ls)
184{
185 return down_read_trylock(&ls->ls_in_recovery);
186}
187
188static inline int can_be_queued(struct dlm_lkb *lkb)
189{
190 return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
191}
192
193static inline int force_blocking_asts(struct dlm_lkb *lkb)
194{
195 return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
196}
197
198static inline int is_demoted(struct dlm_lkb *lkb)
199{
200 return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
201}
202
203static inline int is_remote(struct dlm_rsb *r)
204{
205 DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
206 return !!r->res_nodeid;
207}
208
209static inline int is_process_copy(struct dlm_lkb *lkb)
210{
211 return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
212}
213
214static inline int is_master_copy(struct dlm_lkb *lkb)
215{
216 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
217 DLM_ASSERT(lkb->lkb_nodeid, dlm_print_lkb(lkb););
218 return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
219}
220
221static inline int middle_conversion(struct dlm_lkb *lkb)
222{
223 if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
224 (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
225 return 1;
226 return 0;
227}
228
229static inline int down_conversion(struct dlm_lkb *lkb)
230{
231 return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
232}
233
234static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
235{
236 if (is_master_copy(lkb))
237 return;
238
239 DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););
240
241 lkb->lkb_lksb->sb_status = rv;
242 lkb->lkb_lksb->sb_flags = lkb->lkb_sbflags;
243
244 dlm_add_ast(lkb, AST_COMP);
245}
246
247static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
248{
249 if (is_master_copy(lkb))
250 send_bast(r, lkb, rqmode);
251 else {
252 lkb->lkb_bastmode = rqmode;
253 dlm_add_ast(lkb, AST_BAST);
254 }
255}
256
257/*
258 * Basic operations on rsb's and lkb's
259 */
260
261static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
262{
263 struct dlm_rsb *r;
264
265 r = allocate_rsb(ls, len);
266 if (!r)
267 return NULL;
268
269 r->res_ls = ls;
270 r->res_length = len;
271 memcpy(r->res_name, name, len);
272 mutex_init(&r->res_mutex);
273
274 INIT_LIST_HEAD(&r->res_lookup);
275 INIT_LIST_HEAD(&r->res_grantqueue);
276 INIT_LIST_HEAD(&r->res_convertqueue);
277 INIT_LIST_HEAD(&r->res_waitqueue);
278 INIT_LIST_HEAD(&r->res_root_list);
279 INIT_LIST_HEAD(&r->res_recover_list);
280
281 return r;
282}
283
284static int search_rsb_list(struct list_head *head, char *name, int len,
285 unsigned int flags, struct dlm_rsb **r_ret)
286{
287 struct dlm_rsb *r;
288 int error = 0;
289
290 list_for_each_entry(r, head, res_hashchain) {
291 if (len == r->res_length && !memcmp(name, r->res_name, len))
292 goto found;
293 }
294 return -ENOENT;
295
296 found:
297 if (r->res_nodeid && (flags & R_MASTER))
298 error = -ENOTBLK;
299 *r_ret = r;
300 return error;
301}
302
303static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
304 unsigned int flags, struct dlm_rsb **r_ret)
305{
306 struct dlm_rsb *r;
307 int error;
308
309 error = search_rsb_list(&ls->ls_rsbtbl[b].list, name, len, flags, &r);
310 if (!error) {
311 kref_get(&r->res_ref);
312 goto out;
313 }
314 error = search_rsb_list(&ls->ls_rsbtbl[b].toss, name, len, flags, &r);
315 if (error)
316 goto out;
317
318 list_move(&r->res_hashchain, &ls->ls_rsbtbl[b].list);
319
320 if (dlm_no_directory(ls))
321 goto out;
322
323 if (r->res_nodeid == -1) {
324 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
325 r->res_first_lkid = 0;
326 } else if (r->res_nodeid > 0) {
327 rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
328 r->res_first_lkid = 0;
329 } else {
330 DLM_ASSERT(r->res_nodeid == 0, dlm_print_rsb(r););
331 DLM_ASSERT(!rsb_flag(r, RSB_MASTER_UNCERTAIN),);
332 }
333 out:
334 *r_ret = r;
335 return error;
336}
337
338static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
339 unsigned int flags, struct dlm_rsb **r_ret)
340{
341 int error;
342 write_lock(&ls->ls_rsbtbl[b].lock);
343 error = _search_rsb(ls, name, len, b, flags, r_ret);
344 write_unlock(&ls->ls_rsbtbl[b].lock);
345 return error;
346}
347
348/*
349 * Find rsb in rsbtbl and potentially create/add one
350 *
351 * Delaying the release of rsb's has a similar benefit to applications keeping
352 * NL locks on an rsb, but without the guarantee that the cached master value
353 * will still be valid when the rsb is reused. Apps aren't always smart enough
354 * to keep NL locks on an rsb that they may lock again shortly; this can lead
355 * to excessive master lookups and removals if we don't delay the release.
356 *
357 * Searching for an rsb means looking through both the normal list and toss
358 * list. When found on the toss list the rsb is moved to the normal list with
359 * ref count of 1; when found on normal list the ref count is incremented.
360 */
361
362static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
363 unsigned int flags, struct dlm_rsb **r_ret)
364{
365 struct dlm_rsb *r, *tmp;
366 uint32_t hash, bucket;
367 int error = 0;
368
369 if (dlm_no_directory(ls))
370 flags |= R_CREATE;
371
372 hash = jhash(name, namelen, 0);
373 bucket = hash & (ls->ls_rsbtbl_size - 1);
374
375 error = search_rsb(ls, name, namelen, bucket, flags, &r);
376 if (!error)
377 goto out;
378
379 if (error == -ENOENT && !(flags & R_CREATE))
380 goto out;
381
382 /* the rsb was found but wasn't a master copy */
383 if (error == -ENOTBLK)
384 goto out;
385
386 error = -ENOMEM;
387 r = create_rsb(ls, name, namelen);
388 if (!r)
389 goto out;
390
391 r->res_hash = hash;
392 r->res_bucket = bucket;
393 r->res_nodeid = -1;
394 kref_init(&r->res_ref);
395
396 /* With no directory, the master can be set immediately */
397 if (dlm_no_directory(ls)) {
398 int nodeid = dlm_dir_nodeid(r);
399 if (nodeid == dlm_our_nodeid())
400 nodeid = 0;
401 r->res_nodeid = nodeid;
402 }
403
404 write_lock(&ls->ls_rsbtbl[bucket].lock);
405 error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
406 if (!error) {
407 write_unlock(&ls->ls_rsbtbl[bucket].lock);
408 free_rsb(r);
409 r = tmp;
410 goto out;
411 }
412 list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
413 write_unlock(&ls->ls_rsbtbl[bucket].lock);
414 error = 0;
415 out:
416 *r_ret = r;
417 return error;
418}
419
420int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
421 unsigned int flags, struct dlm_rsb **r_ret)
422{
423 return find_rsb(ls, name, namelen, flags, r_ret);
424}
425
426/* This is only called to add a reference when the code already holds
427 a valid reference to the rsb, so there's no need for locking. */
428
429static inline void hold_rsb(struct dlm_rsb *r)
430{
431 kref_get(&r->res_ref);
432}
433
434void dlm_hold_rsb(struct dlm_rsb *r)
435{
436 hold_rsb(r);
437}
438
439static void toss_rsb(struct kref *kref)
440{
441 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
442 struct dlm_ls *ls = r->res_ls;
443
444 DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
445 kref_init(&r->res_ref);
446 list_move(&r->res_hashchain, &ls->ls_rsbtbl[r->res_bucket].toss);
447 r->res_toss_time = jiffies;
448 if (r->res_lvbptr) {
449 free_lvb(r->res_lvbptr);
450 r->res_lvbptr = NULL;
451 }
452}
453
454/* When all references to the rsb are gone it's transferred to
455 the tossed list for later disposal. */
456
457static void put_rsb(struct dlm_rsb *r)
458{
459 struct dlm_ls *ls = r->res_ls;
460 uint32_t bucket = r->res_bucket;
461
462 write_lock(&ls->ls_rsbtbl[bucket].lock);
463 kref_put(&r->res_ref, toss_rsb);
464 write_unlock(&ls->ls_rsbtbl[bucket].lock);
465}
466
467void dlm_put_rsb(struct dlm_rsb *r)
468{
469 put_rsb(r);
470}
471
472/* See comment for unhold_lkb */
473
474static void unhold_rsb(struct dlm_rsb *r)
475{
476 int rv;
477 rv = kref_put(&r->res_ref, toss_rsb);
478 DLM_ASSERT(!rv, dlm_print_rsb(r););
479}
480
481static void kill_rsb(struct kref *kref)
482{
483 struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
484
485 /* All work is done after the return from kref_put() so we
486 can release the write_lock before the remove and free. */
487
488 DLM_ASSERT(list_empty(&r->res_lookup),);
489 DLM_ASSERT(list_empty(&r->res_grantqueue),);
490 DLM_ASSERT(list_empty(&r->res_convertqueue),);
491 DLM_ASSERT(list_empty(&r->res_waitqueue),);
492 DLM_ASSERT(list_empty(&r->res_root_list),);
493 DLM_ASSERT(list_empty(&r->res_recover_list),);
494}
495
496/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
497 The rsb must exist as long as any lkb's for it do. */
498
499static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
500{
501 hold_rsb(r);
502 lkb->lkb_resource = r;
503}
504
505static void detach_lkb(struct dlm_lkb *lkb)
506{
507 if (lkb->lkb_resource) {
508 put_rsb(lkb->lkb_resource);
509 lkb->lkb_resource = NULL;
510 }
511}
512
513static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
514{
515 struct dlm_lkb *lkb, *tmp;
516 uint32_t lkid = 0;
517 uint16_t bucket;
518
519 lkb = allocate_lkb(ls);
520 if (!lkb)
521 return -ENOMEM;
522
523 lkb->lkb_nodeid = -1;
524 lkb->lkb_grmode = DLM_LOCK_IV;
525 kref_init(&lkb->lkb_ref);
526
527 get_random_bytes(&bucket, sizeof(bucket));
528 bucket &= (ls->ls_lkbtbl_size - 1);
529
530 write_lock(&ls->ls_lkbtbl[bucket].lock);
531
532 /* counter can roll over so we must verify lkid is not in use */
533
534 while (lkid == 0) {
535 lkid = bucket | (ls->ls_lkbtbl[bucket].counter++ << 16);
536
537 list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
538 lkb_idtbl_list) {
539 if (tmp->lkb_id != lkid)
540 continue;
541 lkid = 0;
542 break;
543 }
544 }
545
546 lkb->lkb_id = lkid;
547 list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
548 write_unlock(&ls->ls_lkbtbl[bucket].lock);
549
550 *lkb_ret = lkb;
551 return 0;
552}
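/*
 * Editorial sketch (not part of the commit): the lock ID layout that
 * create_lkb() builds and find_lkb() decodes -- low 16 bits carry the
 * lkbtbl bucket, high 16 bits a per-bucket counter.
 */
#include <stdint.h>

static inline uint32_t lkid_make(uint16_t bucket, uint16_t counter)
{
	return (uint32_t)bucket | ((uint32_t)counter << 16);
}

static inline uint16_t lkid_bucket(uint32_t lkid)
{
	return lkid & 0xFFFF;	/* what find_lkb() uses to pick the table */
}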
553
554static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
555{
556 uint16_t bucket = lkid & 0xFFFF;
557 struct dlm_lkb *lkb;
558
559 list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
560 if (lkb->lkb_id == lkid)
561 return lkb;
562 }
563 return NULL;
564}
565
566static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
567{
568 struct dlm_lkb *lkb;
569 uint16_t bucket = lkid & 0xFFFF;
570
571 if (bucket >= ls->ls_lkbtbl_size)
572 return -EBADSLT;
573
574 read_lock(&ls->ls_lkbtbl[bucket].lock);
575 lkb = __find_lkb(ls, lkid);
576 if (lkb)
577 kref_get(&lkb->lkb_ref);
578 read_unlock(&ls->ls_lkbtbl[bucket].lock);
579
580 *lkb_ret = lkb;
581 return lkb ? 0 : -ENOENT;
582}
583
584static void kill_lkb(struct kref *kref)
585{
586 struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);
587
588 /* All work is done after the return from kref_put() so we
589 can release the write_lock before the detach_lkb */
590
591 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
592}
593
594static int put_lkb(struct dlm_lkb *lkb)
595{
596 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
597 uint16_t bucket = lkb->lkb_id & 0xFFFF;
598
599 write_lock(&ls->ls_lkbtbl[bucket].lock);
600 if (kref_put(&lkb->lkb_ref, kill_lkb)) {
601 list_del(&lkb->lkb_idtbl_list);
602 write_unlock(&ls->ls_lkbtbl[bucket].lock);
603
604 detach_lkb(lkb);
605
606 /* for local/process lkbs, lvbptr points to caller's lksb */
607 if (lkb->lkb_lvbptr && is_master_copy(lkb))
608 free_lvb(lkb->lkb_lvbptr);
609 free_lkb(lkb);
610 return 1;
611 } else {
612 write_unlock(&ls->ls_lkbtbl[bucket].lock);
613 return 0;
614 }
615}
616
617int dlm_put_lkb(struct dlm_lkb *lkb)
618{
619 return put_lkb(lkb);
620}
621
622/* This is only called to add a reference when the code already holds
623 a valid reference to the lkb, so there's no need for locking. */
624
625static inline void hold_lkb(struct dlm_lkb *lkb)
626{
627 kref_get(&lkb->lkb_ref);
628}
629
630/* This is called when we need to remove a reference and are certain
631 it's not the last ref. e.g. del_lkb is always called between a
632 find_lkb/put_lkb and is always the inverse of a previous add_lkb.
633 put_lkb would work fine, but would involve unnecessary locking */
634
635static inline void unhold_lkb(struct dlm_lkb *lkb)
636{
637 int rv;
638 rv = kref_put(&lkb->lkb_ref, kill_lkb);
639 DLM_ASSERT(!rv, dlm_print_lkb(lkb););
640}
641
642static void lkb_add_ordered(struct list_head *new, struct list_head *head,
643 int mode)
644{
645 struct dlm_lkb *lkb = NULL;
646
647 list_for_each_entry(lkb, head, lkb_statequeue)
648 if (lkb->lkb_rqmode < mode)
649 break;
650
651	/* if the loop ran off the end, lkb points at head's container, so
652	   &lkb->lkb_statequeue == head and this is list_add_tail(new, head) */
653	__list_add(new, lkb->lkb_statequeue.prev,
654		   &lkb->lkb_statequeue);
655}
656
657/* add/remove lkb to rsb's grant/convert/wait queue */
658
659static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
660{
661 kref_get(&lkb->lkb_ref);
662
663 DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
664
665 lkb->lkb_status = status;
666
667 switch (status) {
668 case DLM_LKSTS_WAITING:
669 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
670 list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
671 else
672 list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
673 break;
674 case DLM_LKSTS_GRANTED:
675 /* convention says granted locks kept in order of grmode */
676 lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
677 lkb->lkb_grmode);
678 break;
679 case DLM_LKSTS_CONVERT:
680 if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
681 list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
682 else
683 list_add_tail(&lkb->lkb_statequeue,
684 &r->res_convertqueue);
685 break;
686 default:
687 DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
688 }
689}
690
691static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
692{
693 lkb->lkb_status = 0;
694 list_del(&lkb->lkb_statequeue);
695 unhold_lkb(lkb);
696}
697
698static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
699{
700 hold_lkb(lkb);
701 del_lkb(r, lkb);
702 add_lkb(r, lkb, sts);
703 unhold_lkb(lkb);
704}
705
706/* add/remove lkb from global waiters list of lkb's waiting for
707 a reply from a remote node */
708
709static void add_to_waiters(struct dlm_lkb *lkb, int mstype)
710{
711 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
712
713 mutex_lock(&ls->ls_waiters_mutex);
714 if (lkb->lkb_wait_type) {
715 log_print("add_to_waiters error %d", lkb->lkb_wait_type);
716 goto out;
717 }
718 lkb->lkb_wait_type = mstype;
719 kref_get(&lkb->lkb_ref);
720 list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
721 out:
722 mutex_unlock(&ls->ls_waiters_mutex);
723}
724
725static int _remove_from_waiters(struct dlm_lkb *lkb)
726{
727 int error = 0;
728
729 if (!lkb->lkb_wait_type) {
730 log_print("remove_from_waiters error");
731 error = -EINVAL;
732 goto out;
733 }
734 lkb->lkb_wait_type = 0;
735 list_del(&lkb->lkb_wait_reply);
736 unhold_lkb(lkb);
737 out:
738 return error;
739}
740
741static int remove_from_waiters(struct dlm_lkb *lkb)
742{
743 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
744 int error;
745
746 mutex_lock(&ls->ls_waiters_mutex);
747 error = _remove_from_waiters(lkb);
748 mutex_unlock(&ls->ls_waiters_mutex);
749 return error;
750}
751
752static void dir_remove(struct dlm_rsb *r)
753{
754 int to_nodeid;
755
756 if (dlm_no_directory(r->res_ls))
757 return;
758
759 to_nodeid = dlm_dir_nodeid(r);
760 if (to_nodeid != dlm_our_nodeid())
761 send_remove(r);
762 else
763 dlm_dir_remove_entry(r->res_ls, to_nodeid,
764 r->res_name, r->res_length);
765}
766
767/* FIXME: shouldn't this be able to exit as soon as one non-due rsb is
768 found since they are in order of newest to oldest? */
769
770static int shrink_bucket(struct dlm_ls *ls, int b)
771{
772 struct dlm_rsb *r;
773 int count = 0, found;
774
775 for (;;) {
776 found = 0;
777 write_lock(&ls->ls_rsbtbl[b].lock);
778 list_for_each_entry_reverse(r, &ls->ls_rsbtbl[b].toss,
779 res_hashchain) {
780 if (!time_after_eq(jiffies, r->res_toss_time +
781 dlm_config.toss_secs * HZ))
782 continue;
783 found = 1;
784 break;
785 }
786
787 if (!found) {
788 write_unlock(&ls->ls_rsbtbl[b].lock);
789 break;
790 }
791
792 if (kref_put(&r->res_ref, kill_rsb)) {
793 list_del(&r->res_hashchain);
794 write_unlock(&ls->ls_rsbtbl[b].lock);
795
796 if (is_master(r))
797 dir_remove(r);
798 free_rsb(r);
799 count++;
800 } else {
801 write_unlock(&ls->ls_rsbtbl[b].lock);
802 log_error(ls, "tossed rsb in use %s", r->res_name);
803 }
804 }
805
806 return count;
807}
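/* Worked example (illustrative): with toss_secs set to 10 and HZ = 250,
   an rsb becomes reclaimable 2500 jiffies after res_toss_time was
   stamped; time_after_eq() keeps the comparison correct across jiffies
   wraparound. */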
808
809void dlm_scan_rsbs(struct dlm_ls *ls)
810{
811 int i;
812
813 if (dlm_locking_stopped(ls))
814 return;
815
816 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
817 shrink_bucket(ls, i);
818 cond_resched();
819 }
820}
821
822/* lkb is master or local copy */
823
824static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
825{
826 int b, len = r->res_ls->ls_lvblen;
827
828 /* b=1 lvb returned to caller
829 b=0 lvb written to rsb or invalidated
830 b=-1 do nothing */
831
832 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
833
834 if (b == 1) {
835 if (!lkb->lkb_lvbptr)
836 return;
837
838 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
839 return;
840
841 if (!r->res_lvbptr)
842 return;
843
844 memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
845 lkb->lkb_lvbseq = r->res_lvbseq;
846
847 } else if (b == 0) {
848 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
849 rsb_set_flag(r, RSB_VALNOTVALID);
850 return;
851 }
852
853 if (!lkb->lkb_lvbptr)
854 return;
855
856 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
857 return;
858
859 if (!r->res_lvbptr)
860 r->res_lvbptr = allocate_lvb(r->res_ls);
861
862 if (!r->res_lvbptr)
863 return;
864
865 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
866 r->res_lvbseq++;
867 lkb->lkb_lvbseq = r->res_lvbseq;
868 rsb_clear_flag(r, RSB_VALNOTVALID);
869 }
870
871 if (rsb_flag(r, RSB_VALNOTVALID))
872 lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
873}
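/* Two worked examples of the b values above (assuming the matrix in
   lvb_table.h follows the usual VMS lvb rules): granting a brand-new EX
   request (grmode IV, rqmode EX) gives b == 1, so the rsb's lvb is
   copied out to the caller; granting a down-conversion from EX to NL
   gives b == 0, so the caller's lvb is written into the rsb and
   res_lvbseq is bumped. */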
874
875static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
876{
877 if (lkb->lkb_grmode < DLM_LOCK_PW)
878 return;
879
880 if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
881 rsb_set_flag(r, RSB_VALNOTVALID);
882 return;
883 }
884
885 if (!lkb->lkb_lvbptr)
886 return;
887
888 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
889 return;
890
891 if (!r->res_lvbptr)
892 r->res_lvbptr = allocate_lvb(r->res_ls);
893
894 if (!r->res_lvbptr)
895 return;
896
897 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
898 r->res_lvbseq++;
899 rsb_clear_flag(r, RSB_VALNOTVALID);
900}
901
902/* lkb is process copy (pc) */
903
904static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
905 struct dlm_message *ms)
906{
907 int b;
908
909 if (!lkb->lkb_lvbptr)
910 return;
911
912 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
913 return;
914
915 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
916 if (b == 1) {
917 int len = receive_extralen(ms);
918 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
919 lkb->lkb_lvbseq = ms->m_lvbseq;
920 }
921}
922
923/* Manipulate lkb's on rsb's convert/granted/waiting queues
924 remove_lock -- used for unlock, removes lkb from granted
925 revert_lock -- used for cancel, moves lkb from convert to granted
926 grant_lock -- used for request and convert, adds lkb to granted or
927 moves lkb from convert or waiting to granted
928
929 Each of these is used for master or local copy lkb's. There is
930 also a _pc() variation used to make the corresponding change on
931 a process copy (pc) lkb. */
932
933static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
934{
935 del_lkb(r, lkb);
936 lkb->lkb_grmode = DLM_LOCK_IV;
937 /* this unhold undoes the original ref from create_lkb()
938 so this leads to the lkb being freed */
939 unhold_lkb(lkb);
940}
941
942static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
943{
944 set_lvb_unlock(r, lkb);
945 _remove_lock(r, lkb);
946}
947
948static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
949{
950 _remove_lock(r, lkb);
951}
952
953static void revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
954{
955 lkb->lkb_rqmode = DLM_LOCK_IV;
956
957 switch (lkb->lkb_status) {
958 case DLM_LKSTS_CONVERT:
959 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
960 break;
961 case DLM_LKSTS_WAITING:
962 del_lkb(r, lkb);
963 lkb->lkb_grmode = DLM_LOCK_IV;
964 /* this unhold undoes the original ref from create_lkb()
965 so this leads to the lkb being freed */
966 unhold_lkb(lkb);
967 break;
968 default:
969 log_print("invalid status for revert %d", lkb->lkb_status);
970 }
971}
972
973static void revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
974{
975 revert_lock(r, lkb);
976}
977
978static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
979{
980 if (lkb->lkb_grmode != lkb->lkb_rqmode) {
981 lkb->lkb_grmode = lkb->lkb_rqmode;
982 if (lkb->lkb_status)
983 move_lkb(r, lkb, DLM_LKSTS_GRANTED);
984 else
985 add_lkb(r, lkb, DLM_LKSTS_GRANTED);
986 }
987
988 lkb->lkb_rqmode = DLM_LOCK_IV;
989}
990
991static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
992{
993 set_lvb_lock(r, lkb);
994 _grant_lock(r, lkb);
995 lkb->lkb_highbast = 0;
996}
997
998static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
999 struct dlm_message *ms)
1000{
1001 set_lvb_lock_pc(r, lkb, ms);
1002 _grant_lock(r, lkb);
1003}
1004
1005/* called by grant_pending_locks() which means an async grant message must
1006 be sent to the requesting node in addition to granting the lock if the
1007 lkb belongs to a remote node. */
1008
1009static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
1010{
1011 grant_lock(r, lkb);
1012 if (is_master_copy(lkb))
1013 send_grant(r, lkb);
1014 else
1015 queue_cast(r, lkb, 0);
1016}
1017
1018static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
1019{
1020 struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
1021 lkb_statequeue);
1022 if (lkb->lkb_id == first->lkb_id)
1023 return 1;
1024
1025 return 0;
1026}
1027
1028/* Check if the given lkb conflicts with another lkb on the queue. */
1029
1030static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
1031{
1032 struct dlm_lkb *this;
1033
1034 list_for_each_entry(this, head, lkb_statequeue) {
1035 if (this == lkb)
1036 continue;
1037 if (!modes_compat(this, lkb))
1038 return 1;
1039 }
1040 return 0;
1041}
1042
1043/*
1044 * "A conversion deadlock arises with a pair of lock requests in the converting
1045 * queue for one resource. The granted mode of each lock blocks the requested
1046 * mode of the other lock."
1047 *
1048 * Part 2: if the granted mode of lkb is preventing the first lkb in the
1049 * convert queue from being granted, then demote lkb (set grmode to NL).
1050 * This second form requires that we check for conv-deadlk even when
1051 * now == 0 in _can_be_granted().
1052 *
1053 * Example:
1054 * Granted Queue: empty
1055 * Convert Queue: NL->EX (first lock)
1056 * PR->EX (second lock)
1057 *
1058 * The first lock can't be granted because of the granted mode of the second
1059 * lock and the second lock can't be granted because it's not first in the
1060 * list. We demote the granted mode of the second lock (the lkb passed to this
1061 * function).
1062 *
1063 * After the resolution, the "grant pending" function needs to go back and try
1064 * to grant locks on the convert queue again since the first lock can now be
1065 * granted.
1066 */
1067
1068static int conversion_deadlock_detect(struct dlm_rsb *rsb, struct dlm_lkb *lkb)
1069{
1070 struct dlm_lkb *this, *first = NULL, *self = NULL;
1071
1072 list_for_each_entry(this, &rsb->res_convertqueue, lkb_statequeue) {
1073 if (!first)
1074 first = this;
1075 if (this == lkb) {
1076 self = lkb;
1077 continue;
1078 }
1079
1080 if (!modes_compat(this, lkb) && !modes_compat(lkb, this))
1081 return 1;
1082 }
1083
1084 /* if lkb is on the convert queue and is preventing the first
1085 from being granted, then there's deadlock and we demote lkb.
1086 multiple converting locks may need to do this before the first
1087 converting lock can be granted. */
1088
1089 if (self && self != first) {
1090 if (!modes_compat(lkb, first) &&
1091 !queue_conflict(&rsb->res_grantqueue, first))
1092 return 1;
1093 }
1094
1095 return 0;
1096}
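/* Walking the comment's example through this function (illustrative):
   for the PR->EX lock, the NL->EX lock is "first" and is compatible in
   one direction (NL grmode vs EX rqmode), so the pairwise test doesn't
   fire; but self != first, PR is incompatible with first's requested
   EX, and the grant queue is empty, so we return 1 and the caller
   demotes the PR->EX lock's grmode to NL. */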
1097
1098/*
1099 * Return 1 if the lock can be granted, 0 otherwise.
1100 * Also detect and resolve conversion deadlocks.
1101 *
1102 * lkb is the lock to be granted
1103 *
1104 * now is 1 if the function is being called in the context of the
1105 * immediate request, it is 0 if called later, after the lock has been
1106 * queued.
1107 *
1108 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
1109 */
1110
1111static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1112{
1113 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
1114
1115 /*
1116 * 6-10: Version 5.4 introduced an option to address the phenomenon of
1117 * a new request for a NL mode lock being blocked.
1118 *
1119 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
1120 * request, then it would be granted. In essence, the use of this flag
1121 * tells the Lock Manager to expedite this request by not considering
1122 * what may be in the CONVERTING or WAITING queues... As of this
1123 * writing, the EXPEDITE flag can be used only with new requests for NL
1124 * mode locks. This flag is not valid for conversion requests.
1125 *
1126 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
1127 * conversion or used with a non-NL requested mode. We also know an
1128 * EXPEDITE request is always granted immediately, so now must always
1129 * be 1. The full condition to grant an expedite request: (now &&
1130 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
1131 * therefore be shortened to just checking the flag.
1132 */
1133
1134 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
1135 return 1;
1136
1137 /*
1138 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
1139 * added to the remaining conditions.
1140 */
1141
1142 if (queue_conflict(&r->res_grantqueue, lkb))
1143 goto out;
1144
1145 /*
1146 * 6-3: By default, a conversion request is immediately granted if the
1147 * requested mode is compatible with the modes of all other granted
1148 * locks
1149 */
1150
1151 if (queue_conflict(&r->res_convertqueue, lkb))
1152 goto out;
1153
1154 /*
1155 * 6-5: But the default algorithm for deciding whether to grant or
1156 * queue conversion requests does not by itself guarantee that such
1157 * requests are serviced on a "first come first serve" basis. This, in
1158 * turn, can lead to a phenomenon known as "indefinite postponement".
1159 *
1160 * 6-7: This issue is dealt with by using the optional QUECVT flag with
1161 * the system service employed to request a lock conversion. This flag
1162 * forces certain conversion requests to be queued, even if they are
1163 * compatible with the granted modes of other locks on the same
1164 * resource. Thus, the use of this flag results in conversion requests
1165 * being ordered on a "first come first serve" basis.
1166 *
1167 * DCT: This condition is all about new conversions being able to occur
1168 * "in place" while the lock remains on the granted queue (assuming
1169 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
1170 * doesn't _have_ to go onto the convert queue where it's processed in
1171 * order. The "now" variable is necessary to distinguish converts
1172 * being received and processed for the first time now, because once a
1173 * convert is moved to the conversion queue the condition below applies
1174 * requiring fifo granting.
1175 */
1176
1177 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
1178 return 1;
1179
1180 /*
1181 * The NOORDER flag is set to avoid the standard VMS rules on grant
1182 * order.
1183 */
1184
1185 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
1186 return 1;
1187
1188 /*
1189 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
1190 * granted until all other conversion requests ahead of it are granted
1191 * and/or canceled.
1192 */
1193
1194 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
1195 return 1;
1196
1197 /*
1198 * 6-4: By default, a new request is immediately granted only if all
1199 * three of the following conditions are satisfied when the request is
1200 * issued:
1201 * - The queue of ungranted conversion requests for the resource is
1202 * empty.
1203 * - The queue of ungranted new requests for the resource is empty.
1204 * - The mode of the new request is compatible with the most
1205 * restrictive mode of all granted locks on the resource.
1206 */
1207
1208 if (now && !conv && list_empty(&r->res_convertqueue) &&
1209 list_empty(&r->res_waitqueue))
1210 return 1;
1211
1212 /*
1213 * 6-4: Once a lock request is in the queue of ungranted new requests,
1214 * it cannot be granted until the queue of ungranted conversion
1215 * requests is empty, all ungranted new requests ahead of it are
1216 * granted and/or canceled, and it is compatible with the granted mode
1217 * of the most restrictive lock granted on the resource.
1218 */
1219
1220 if (!now && !conv && list_empty(&r->res_convertqueue) &&
1221 first_in_list(lkb, &r->res_waitqueue))
1222 return 1;
1223
1224 out:
1225 /*
1226 * The following, enabled by CONVDEADLK, departs from VMS.
1227 */
1228
1229 if (conv && (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) &&
1230 conversion_deadlock_detect(r, lkb)) {
1231 lkb->lkb_grmode = DLM_LOCK_NL;
1232 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
1233 }
1234
1235 return 0;
1236}
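/* A quick grant example (illustrative): a new PR request arrives while
   only a CR lock is granted and both the convert and wait queues are
   empty.  EXPEDITE isn't set, CR/PR don't conflict on either queue
   check, and the 6-4 "now && !conv && both queues empty" rule grants it
   immediately. */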
1237
1238/*
1239 * The ALTPR and ALTCW flags aren't traditional lock manager flags, but are a
1240 * simple way to provide a big optimization to applications that can use them.
1241 */
1242
1243static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now)
1244{
1245 uint32_t flags = lkb->lkb_exflags;
1246 int rv;
1247 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
1248
1249 rv = _can_be_granted(r, lkb, now);
1250 if (rv)
1251 goto out;
1252
1253 if (lkb->lkb_sbflags & DLM_SBF_DEMOTED)
1254 goto out;
1255
1256 if (rqmode != DLM_LOCK_PR && flags & DLM_LKF_ALTPR)
1257 alt = DLM_LOCK_PR;
1258 else if (rqmode != DLM_LOCK_CW && flags & DLM_LKF_ALTCW)
1259 alt = DLM_LOCK_CW;
1260
1261 if (alt) {
1262 lkb->lkb_rqmode = alt;
1263 rv = _can_be_granted(r, lkb, now);
1264 if (rv)
1265 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
1266 else
1267 lkb->lkb_rqmode = rqmode;
1268 }
1269 out:
1270 return rv;
1271}
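/* Example of the ALT flags (illustrative): a CW request carrying
   DLM_LKF_ALTPR that can't be granted as CW is retried as PR; if PR
   can be granted, the lock is granted in the alternate mode and the
   caller sees DLM_SBF_ALTMODE in the status block. */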
1272
1273static int grant_pending_convert(struct dlm_rsb *r, int high)
1274{
1275 struct dlm_lkb *lkb, *s;
1276 int hi, demoted, quit, grant_restart, demote_restart;
1277
1278 quit = 0;
1279 restart:
1280 grant_restart = 0;
1281 demote_restart = 0;
1282 hi = DLM_LOCK_IV;
1283
1284 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
1285 demoted = is_demoted(lkb);
1286 if (can_be_granted(r, lkb, 0)) {
1287 grant_lock_pending(r, lkb);
1288 grant_restart = 1;
1289 } else {
1290 hi = max_t(int, lkb->lkb_rqmode, hi);
1291 if (!demoted && is_demoted(lkb))
1292 demote_restart = 1;
1293 }
1294 }
1295
1296 if (grant_restart)
1297 goto restart;
1298 if (demote_restart && !quit) {
1299 quit = 1;
1300 goto restart;
1301 }
1302
1303 return max_t(int, high, hi);
1304}
1305
1306static int grant_pending_wait(struct dlm_rsb *r, int high)
1307{
1308 struct dlm_lkb *lkb, *s;
1309
1310 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
1311 if (can_be_granted(r, lkb, 0))
1312 grant_lock_pending(r, lkb);
1313 else
1314 high = max_t(int, lkb->lkb_rqmode, high);
1315 }
1316
1317 return high;
1318}
1319
1320static void grant_pending_locks(struct dlm_rsb *r)
1321{
1322 struct dlm_lkb *lkb, *s;
1323 int high = DLM_LOCK_IV;
1324
1325 DLM_ASSERT(is_master(r), dlm_print_rsb(r););
1326
1327 high = grant_pending_convert(r, high);
1328 high = grant_pending_wait(r, high);
1329
1330 if (high == DLM_LOCK_IV)
1331 return;
1332
1333 /*
1334 * If there are locks left on the wait/convert queue then send blocking
1335 * ASTs to granted locks based on the largest requested mode (high)
1336 * found above. FIXME: highbast < high comparison not valid for PR/CW.
1337 */
1338
1339 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
1340 if (lkb->lkb_bastaddr && (lkb->lkb_highbast < high) &&
1341 !__dlm_compat_matrix[lkb->lkb_grmode+1][high+1]) {
1342 queue_bast(r, lkb, high);
1343 lkb->lkb_highbast = high;
1344 }
1345 }
1346}
1347
1348static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
1349 struct dlm_lkb *lkb)
1350{
1351 struct dlm_lkb *gr;
1352
1353 list_for_each_entry(gr, head, lkb_statequeue) {
1354 if (gr->lkb_bastaddr &&
1355 gr->lkb_highbast < lkb->lkb_rqmode &&
1356 !modes_compat(gr, lkb)) {
1357 queue_bast(r, gr, lkb->lkb_rqmode);
1358 gr->lkb_highbast = lkb->lkb_rqmode;
1359 }
1360 }
1361}
1362
1363static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
1364{
1365 send_bast_queue(r, &r->res_grantqueue, lkb);
1366}
1367
1368static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
1369{
1370 send_bast_queue(r, &r->res_grantqueue, lkb);
1371 send_bast_queue(r, &r->res_convertqueue, lkb);
1372}
1373
1374/* set_master(r, lkb) -- set the master nodeid of a resource
1375
1376 The purpose of this function is to set the nodeid field in the given
1377 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
1378 known, it can just be copied to the lkb and the function will return
1379 0. If the rsb's nodeid is _not_ known, it needs to be looked up
1380 before it can be copied to the lkb.
1381
1382 When the rsb nodeid is being looked up remotely, the initial lkb
1383 causing the lookup is kept on the ls_waiters list waiting for the
1384 lookup reply. Other lkb's waiting for the same rsb lookup are kept
1385 on the rsb's res_lookup list until the master is verified.
1386
1387 Return values:
1388 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
1389 1: the rsb master is not available and the lkb has been placed on
1390 a wait queue
1391*/
1392
1393static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
1394{
1395 struct dlm_ls *ls = r->res_ls;
1396 int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
1397
1398 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
1399 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
1400 r->res_first_lkid = lkb->lkb_id;
1401 lkb->lkb_nodeid = r->res_nodeid;
1402 return 0;
1403 }
1404
1405 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
1406 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
1407 return 1;
1408 }
1409
1410 if (r->res_nodeid == 0) {
1411 lkb->lkb_nodeid = 0;
1412 return 0;
1413 }
1414
1415 if (r->res_nodeid > 0) {
1416 lkb->lkb_nodeid = r->res_nodeid;
1417 return 0;
1418 }
1419
1420 DLM_ASSERT(r->res_nodeid == -1, dlm_print_rsb(r););
1421
1422 dir_nodeid = dlm_dir_nodeid(r);
1423
1424 if (dir_nodeid != our_nodeid) {
1425 r->res_first_lkid = lkb->lkb_id;
1426 send_lookup(r, lkb);
1427 return 1;
1428 }
1429
1430 for (;;) {
1431 /* It's possible for dlm_scand to remove an old rsb for
1432 this same resource from the toss list, for us to create
1433 a new one, look up the master locally, and find the entry
1434 still exists just before dlm_scand does the
1435 dir_remove() on the previous rsb, so retry. */
1436
1437 error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
1438 r->res_length, &ret_nodeid);
1439 if (!error)
1440 break;
1441 log_debug(ls, "dir_lookup error %d %s", error, r->res_name);
1442 schedule();
1443 }
1444
1445 if (ret_nodeid == our_nodeid) {
1446 r->res_first_lkid = 0;
1447 r->res_nodeid = 0;
1448 lkb->lkb_nodeid = 0;
1449 } else {
1450 r->res_first_lkid = lkb->lkb_id;
1451 r->res_nodeid = ret_nodeid;
1452 lkb->lkb_nodeid = ret_nodeid;
1453 }
1454 return 0;
1455}
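/* Example flow (illustrative): the first request on this node for a
   resource whose directory node is remote takes the dir_nodeid branch:
   res_first_lkid is set, send_lookup() goes to the directory node, and
   the caller sees 1, parking the lkb on ls_waiters.  Requests for the
   same rsb that arrive before the reply park on res_lookup instead,
   and confirm_master()/process_lookup_list() restart them later. */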
1456
1457static void process_lookup_list(struct dlm_rsb *r)
1458{
1459 struct dlm_lkb *lkb, *safe;
1460
1461 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
1462 list_del(&lkb->lkb_rsb_lookup);
1463 _request_lock(r, lkb);
1464 schedule();
1465 }
1466}
1467
1468/* confirm_master -- confirm (or deny) an rsb's master nodeid */
1469
1470static void confirm_master(struct dlm_rsb *r, int error)
1471{
1472 struct dlm_lkb *lkb;
1473
1474 if (!r->res_first_lkid)
1475 return;
1476
1477 switch (error) {
1478 case 0:
1479 case -EINPROGRESS:
1480 r->res_first_lkid = 0;
1481 process_lookup_list(r);
1482 break;
1483
1484 case -EAGAIN:
1485 /* the remote master didn't queue our NOQUEUE request;
1486 make a waiting lkb the first_lkid */
1487
1488 r->res_first_lkid = 0;
1489
1490 if (!list_empty(&r->res_lookup)) {
1491 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
1492 lkb_rsb_lookup);
1493 list_del(&lkb->lkb_rsb_lookup);
1494 r->res_first_lkid = lkb->lkb_id;
1495 _request_lock(r, lkb);
1496 } else
1497 r->res_nodeid = -1;
1498 break;
1499
1500 default:
1501 log_error(r->res_ls, "confirm_master unknown error %d", error);
1502 }
1503}
1504
1505static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
1506 int namelen, uint32_t parent_lkid, void *ast,
1507 void *astarg, void *bast, struct dlm_args *args)
1508{
1509 int rv = -EINVAL;
1510
1511 /* check for invalid arg usage */
1512
1513 if (mode < 0 || mode > DLM_LOCK_EX)
1514 goto out;
1515
1516 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
1517 goto out;
1518
1519 if (flags & DLM_LKF_CANCEL)
1520 goto out;
1521
1522 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
1523 goto out;
1524
1525 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
1526 goto out;
1527
1528 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
1529 goto out;
1530
1531 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
1532 goto out;
1533
1534 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
1535 goto out;
1536
1537 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
1538 goto out;
1539
1540 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
1541 goto out;
1542
1543 if (!ast || !lksb)
1544 goto out;
1545
1546 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
1547 goto out;
1548
1549 /* parent/child locks not yet supported */
1550 if (parent_lkid)
1551 goto out;
1552
1553 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
1554 goto out;
1555
1556 /* these args will be copied to the lkb in validate_lock_args;
1557 it cannot be done now because, when converting locks, fields in
1558 an active lkb cannot be modified before the rsb is locked */
1559
1560 args->flags = flags;
1561 args->astaddr = ast;
1562 args->astparam = (long) astarg;
1563 args->bastaddr = bast;
1564 args->mode = mode;
1565 args->lksb = lksb;
1566 rv = 0;
1567 out:
1568 return rv;
1569}
1570
1571static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
1572{
1573 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
1574 DLM_LKF_FORCEUNLOCK))
1575 return -EINVAL;
1576
1577 args->flags = flags;
1578 args->astparam = (long) astarg;
1579 return 0;
1580}
1581
1582static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
1583 struct dlm_args *args)
1584{
1585 int rv = -EINVAL;
1586
1587 if (args->flags & DLM_LKF_CONVERT) {
1588 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
1589 goto out;
1590
1591 if (args->flags & DLM_LKF_QUECVT &&
1592 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
1593 goto out;
1594
1595 rv = -EBUSY;
1596 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
1597 goto out;
1598
1599 if (lkb->lkb_wait_type)
1600 goto out;
1601 }
1602
1603 lkb->lkb_exflags = args->flags;
1604 lkb->lkb_sbflags = 0;
1605 lkb->lkb_astaddr = args->astaddr;
1606 lkb->lkb_astparam = args->astparam;
1607 lkb->lkb_bastaddr = args->bastaddr;
1608 lkb->lkb_rqmode = args->mode;
1609 lkb->lkb_lksb = args->lksb;
1610 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
1611 lkb->lkb_ownpid = (int) current->pid;
1612 rv = 0;
1613 out:
1614 return rv;
1615}
1616
1617static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
1618{
1619 int rv = -EINVAL;
1620
1621 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
1622 goto out;
1623
1624 if (args->flags & DLM_LKF_FORCEUNLOCK)
1625 goto out_ok;
1626
1627 if (args->flags & DLM_LKF_CANCEL &&
1628 lkb->lkb_status == DLM_LKSTS_GRANTED)
1629 goto out;
1630
1631 if (!(args->flags & DLM_LKF_CANCEL) &&
1632 lkb->lkb_status != DLM_LKSTS_GRANTED)
1633 goto out;
1634
1635 rv = -EBUSY;
1636 if (lkb->lkb_wait_type)
1637 goto out;
1638
1639 out_ok:
1640 lkb->lkb_exflags = args->flags;
1641 lkb->lkb_sbflags = 0;
1642 lkb->lkb_astparam = args->astparam;
1643
1644 rv = 0;
1645 out:
1646 return rv;
1647}
1648
1649/*
1650 * Four stage 4 varieties:
1651 * do_request(), do_convert(), do_unlock(), do_cancel()
1652 * These are called on the master node for the given lock and
1653 * from the central locking logic.
1654 */
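/*
 * The four stages line up as follows for a new request (the other
 * operations are analogous):
 *
 *   dlm_lock()                                stage 1, API entry
 *     request_lock()                          stage 2, find/lock rsb
 *       _request_lock()                       stage 3, resolve master
 *         do_request()                        stage 4, master is local
 *         send_request() -> receive_request() -> do_request()  (remote)
 */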
1655
1656static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
1657{
1658 int error = 0;
1659
1660 if (can_be_granted(r, lkb, 1)) {
1661 grant_lock(r, lkb);
1662 queue_cast(r, lkb, 0);
1663 goto out;
1664 }
1665
1666 if (can_be_queued(lkb)) {
1667 error = -EINPROGRESS;
1668 add_lkb(r, lkb, DLM_LKSTS_WAITING);
1669 send_blocking_asts(r, lkb);
1670 goto out;
1671 }
1672
1673 error = -EAGAIN;
1674 if (force_blocking_asts(lkb))
1675 send_blocking_asts_all(r, lkb);
1676 queue_cast(r, lkb, -EAGAIN);
1677
1678 out:
1679 return error;
1680}
1681
1682static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
1683{
1684 int error = 0;
1685
1686 /* changing an existing lock may allow others to be granted */
1687
1688 if (can_be_granted(r, lkb, 1)) {
1689 grant_lock(r, lkb);
1690 queue_cast(r, lkb, 0);
1691 grant_pending_locks(r);
1692 goto out;
1693 }
1694
1695 if (can_be_queued(lkb)) {
1696 if (is_demoted(lkb))
1697 grant_pending_locks(r);
1698 error = -EINPROGRESS;
1699 del_lkb(r, lkb);
1700 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
1701 send_blocking_asts(r, lkb);
1702 goto out;
1703 }
1704
1705 error = -EAGAIN;
1706 if (force_blocking_asts(lkb))
1707 send_blocking_asts_all(r, lkb);
1708 queue_cast(r, lkb, -EAGAIN);
1709
1710 out:
1711 return error;
1712}
1713
1714static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1715{
1716 remove_lock(r, lkb);
1717 queue_cast(r, lkb, -DLM_EUNLOCK);
1718 grant_pending_locks(r);
1719 return -DLM_EUNLOCK;
1720}
1721
1722static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
1723{
1724 revert_lock(r, lkb);
1725 queue_cast(r, lkb, -DLM_ECANCEL);
1726 grant_pending_locks(r);
1727 return -DLM_ECANCEL;
1728}
1729
1730/*
1731 * Four stage 3 varieties:
1732 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
1733 */
1734
1735/* add a new lkb to a possibly new rsb, called by requesting process */
1736
1737static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1738{
1739 int error;
1740
1741 /* set_master: sets lkb nodeid from r */
1742
1743 error = set_master(r, lkb);
1744 if (error < 0)
1745 goto out;
1746 if (error) {
1747 error = 0;
1748 goto out;
1749 }
1750
1751 if (is_remote(r))
1752 /* receive_request() calls do_request() on remote node */
1753 error = send_request(r, lkb);
1754 else
1755 error = do_request(r, lkb);
1756 out:
1757 return error;
1758}
1759
1760/* change some property of an existing lkb, e.g. mode */
1761
1762static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1763{
1764 int error;
1765
1766 if (is_remote(r))
1767 /* receive_convert() calls do_convert() on remote node */
1768 error = send_convert(r, lkb);
1769 else
1770 error = do_convert(r, lkb);
1771
1772 return error;
1773}
1774
1775/* remove an existing lkb from the granted queue */
1776
1777static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1778{
1779 int error;
1780
1781 if (is_remote(r))
1782 /* receive_unlock() calls do_unlock() on remote node */
1783 error = send_unlock(r, lkb);
1784 else
1785 error = do_unlock(r, lkb);
1786
1787 return error;
1788}
1789
1790/* remove an existing lkb from the convert or wait queue */
1791
1792static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
1793{
1794 int error;
1795
1796 if (is_remote(r))
1797 /* receive_cancel() calls do_cancel() on remote node */
1798 error = send_cancel(r, lkb);
1799 else
1800 error = do_cancel(r, lkb);
1801
1802 return error;
1803}
1804
1805/*
1806 * Four stage 2 varieties:
1807 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
1808 */
1809
1810static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
1811 int len, struct dlm_args *args)
1812{
1813 struct dlm_rsb *r;
1814 int error;
1815
1816 error = validate_lock_args(ls, lkb, args);
1817 if (error)
1818 goto out;
1819
1820 error = find_rsb(ls, name, len, R_CREATE, &r);
1821 if (error)
1822 goto out;
1823
1824 lock_rsb(r);
1825
1826 attach_lkb(r, lkb);
1827 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
1828
1829 error = _request_lock(r, lkb);
1830
1831 unlock_rsb(r);
1832 put_rsb(r);
1833
1834 out:
1835 return error;
1836}
1837
1838static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1839 struct dlm_args *args)
1840{
1841 struct dlm_rsb *r;
1842 int error;
1843
1844 r = lkb->lkb_resource;
1845
1846 hold_rsb(r);
1847 lock_rsb(r);
1848
1849 error = validate_lock_args(ls, lkb, args);
1850 if (error)
1851 goto out;
1852
1853 error = _convert_lock(r, lkb);
1854 out:
1855 unlock_rsb(r);
1856 put_rsb(r);
1857 return error;
1858}
1859
1860static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1861 struct dlm_args *args)
1862{
1863 struct dlm_rsb *r;
1864 int error;
1865
1866 r = lkb->lkb_resource;
1867
1868 hold_rsb(r);
1869 lock_rsb(r);
1870
1871 error = validate_unlock_args(lkb, args);
1872 if (error)
1873 goto out;
1874
1875 error = _unlock_lock(r, lkb);
1876 out:
1877 unlock_rsb(r);
1878 put_rsb(r);
1879 return error;
1880}
1881
1882static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
1883 struct dlm_args *args)
1884{
1885 struct dlm_rsb *r;
1886 int error;
1887
1888 r = lkb->lkb_resource;
1889
1890 hold_rsb(r);
1891 lock_rsb(r);
1892
1893 error = validate_unlock_args(lkb, args);
1894 if (error)
1895 goto out;
1896
1897 error = _cancel_lock(r, lkb);
1898 out:
1899 unlock_rsb(r);
1900 put_rsb(r);
1901 return error;
1902}
1903
1904/*
1905 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
1906 */
1907
1908int dlm_lock(dlm_lockspace_t *lockspace,
1909 int mode,
1910 struct dlm_lksb *lksb,
1911 uint32_t flags,
1912 void *name,
1913 unsigned int namelen,
1914 uint32_t parent_lkid,
1915 void (*ast) (void *astarg),
1916 void *astarg,
1917 void (*bast) (void *astarg, int mode))
1918{
1919 struct dlm_ls *ls;
1920 struct dlm_lkb *lkb;
1921 struct dlm_args args;
1922 int error, convert = flags & DLM_LKF_CONVERT;
1923
1924 ls = dlm_find_lockspace_local(lockspace);
1925 if (!ls)
1926 return -EINVAL;
1927
1928 lock_recovery(ls);
1929
1930 if (convert)
1931 error = find_lkb(ls, lksb->sb_lkid, &lkb);
1932 else
1933 error = create_lkb(ls, &lkb);
1934
1935 if (error)
1936 goto out;
1937
1938 error = set_lock_args(mode, lksb, flags, namelen, parent_lkid, ast,
1939 astarg, bast, &args);
1940 if (error)
1941 goto out_put;
1942
1943 if (convert)
1944 error = convert_lock(ls, lkb, &args);
1945 else
1946 error = request_lock(ls, lkb, name, namelen, &args);
1947
1948 if (error == -EINPROGRESS)
1949 error = 0;
1950 out_put:
1951 if (convert || error)
1952 put_lkb(lkb);
1953 if (error == -EAGAIN)
1954 error = 0;
1955 out:
1956 unlock_recovery(ls);
1957 dlm_put_lockspace(ls);
1958 return error;
1959}
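/* A hypothetical caller of the API above (illustrative only, not part
   of this file): request an EX lock on the 5-byte name "myres" and
   wait for the completion ast; the final status lands in
   lksb.sb_status. */
#if 0
	static void my_ast(void *arg)
	{
		complete(arg);			/* wake the waiter below */
	}

	...
	struct dlm_lksb lksb;
	struct completion done;
	int error;

	memset(&lksb, 0, sizeof(lksb));
	init_completion(&done);

	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "myres", 5, 0,
			 my_ast, &done, NULL);
	if (!error) {
		wait_for_completion(&done);
		error = lksb.sb_status;		/* 0, -EAGAIN, etc. */
	}
#endif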
1960
1961int dlm_unlock(dlm_lockspace_t *lockspace,
1962 uint32_t lkid,
1963 uint32_t flags,
1964 struct dlm_lksb *lksb,
1965 void *astarg)
1966{
1967 struct dlm_ls *ls;
1968 struct dlm_lkb *lkb;
1969 struct dlm_args args;
1970 int error;
1971
1972 ls = dlm_find_lockspace_local(lockspace);
1973 if (!ls)
1974 return -EINVAL;
1975
1976 lock_recovery(ls);
1977
1978 error = find_lkb(ls, lkid, &lkb);
1979 if (error)
1980 goto out;
1981
1982 error = set_unlock_args(flags, astarg, &args);
1983 if (error)
1984 goto out_put;
1985
1986 if (flags & DLM_LKF_CANCEL)
1987 error = cancel_lock(ls, lkb, &args);
1988 else
1989 error = unlock_lock(ls, lkb, &args);
1990
1991 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
1992 error = 0;
1993 out_put:
1994 put_lkb(lkb);
1995 out:
1996 unlock_recovery(ls);
1997 dlm_put_lockspace(ls);
1998 return error;
1999}
2000
2001/*
2002 * send/receive routines for remote operations and replies
2003 *
2004 * send_args
2005 * send_common
2006 * send_request receive_request
2007 * send_convert receive_convert
2008 * send_unlock receive_unlock
2009 * send_cancel receive_cancel
2010 * send_grant receive_grant
2011 * send_bast receive_bast
2012 * send_lookup receive_lookup
2013 * send_remove receive_remove
2014 *
2015 * send_common_reply
2016 * receive_request_reply send_request_reply
2017 * receive_convert_reply send_convert_reply
2018 * receive_unlock_reply send_unlock_reply
2019 * receive_cancel_reply send_cancel_reply
2020 * receive_lookup_reply send_lookup_reply
2021 */
2022
2023static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
2024 int to_nodeid, int mstype,
2025 struct dlm_message **ms_ret,
2026 struct dlm_mhandle **mh_ret)
2027{
2028 struct dlm_message *ms;
2029 struct dlm_mhandle *mh;
2030 char *mb;
2031 int mb_len = sizeof(struct dlm_message);
2032
2033 switch (mstype) {
2034 case DLM_MSG_REQUEST:
2035 case DLM_MSG_LOOKUP:
2036 case DLM_MSG_REMOVE:
2037 mb_len += r->res_length;
2038 break;
2039 case DLM_MSG_CONVERT:
2040 case DLM_MSG_UNLOCK:
2041 case DLM_MSG_REQUEST_REPLY:
2042 case DLM_MSG_CONVERT_REPLY:
2043 case DLM_MSG_GRANT:
2044 if (lkb && lkb->lkb_lvbptr)
2045 mb_len += r->res_ls->ls_lvblen;
2046 break;
2047 }
2048
2049 /* get_buffer gives us a message handle (mh) that we need to
2050 pass into lowcomms_commit and a message buffer (mb) that we
2051 write our data into */
2052
2053 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
2054 if (!mh)
2055 return -ENOBUFS;
2056
2057 memset(mb, 0, mb_len);
2058
2059 ms = (struct dlm_message *) mb;
2060
2061 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
2062 ms->m_header.h_lockspace = r->res_ls->ls_global_id;
2063 ms->m_header.h_nodeid = dlm_our_nodeid();
2064 ms->m_header.h_length = mb_len;
2065 ms->m_header.h_cmd = DLM_MSG;
2066
2067 ms->m_type = mstype;
2068
2069 *mh_ret = mh;
2070 *ms_ret = ms;
2071 return 0;
2072}
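/* Sizing example (illustrative): a DLM_MSG_REQUEST for a 10-byte
   resource name allocates sizeof(struct dlm_message) + 10; a convert
   whose lkb carries an lvb allocates sizeof(struct dlm_message) +
   ls_lvblen instead.  The extra bytes travel in m_extra. */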
2073
2074/* further lowcomms enhancements or alternate implementations may make
2075 the return value from this function useful at some point */
2076
2077static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
2078{
2079 dlm_message_out(ms);
2080 dlm_lowcomms_commit_buffer(mh);
2081 return 0;
2082}
2083
2084static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
2085 struct dlm_message *ms)
2086{
2087 ms->m_nodeid = lkb->lkb_nodeid;
2088 ms->m_pid = lkb->lkb_ownpid;
2089 ms->m_lkid = lkb->lkb_id;
2090 ms->m_remid = lkb->lkb_remid;
2091 ms->m_exflags = lkb->lkb_exflags;
2092 ms->m_sbflags = lkb->lkb_sbflags;
2093 ms->m_flags = lkb->lkb_flags;
2094 ms->m_lvbseq = lkb->lkb_lvbseq;
2095 ms->m_status = lkb->lkb_status;
2096 ms->m_grmode = lkb->lkb_grmode;
2097 ms->m_rqmode = lkb->lkb_rqmode;
2098 ms->m_hash = r->res_hash;
2099
2100 /* m_result and m_bastmode are set from function args,
2101 not from lkb fields */
2102
2103 if (lkb->lkb_bastaddr)
2104 ms->m_asts |= AST_BAST;
2105 if (lkb->lkb_astaddr)
2106 ms->m_asts |= AST_COMP;
2107
2108 if (ms->m_type == DLM_MSG_REQUEST || ms->m_type == DLM_MSG_LOOKUP)
2109 memcpy(ms->m_extra, r->res_name, r->res_length);
2110
2111 else if (lkb->lkb_lvbptr)
2112 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
2113
2114}
2115
2116static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
2117{
2118 struct dlm_message *ms;
2119 struct dlm_mhandle *mh;
2120 int to_nodeid, error;
2121
2122 add_to_waiters(lkb, mstype);
2123
2124 to_nodeid = r->res_nodeid;
2125
2126 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2127 if (error)
2128 goto fail;
2129
2130 send_args(r, lkb, ms);
2131
2132 error = send_message(mh, ms);
2133 if (error)
2134 goto fail;
2135 return 0;
2136
2137 fail:
2138 remove_from_waiters(lkb);
2139 return error;
2140}
2141
2142static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
2143{
2144 return send_common(r, lkb, DLM_MSG_REQUEST);
2145}
2146
2147static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
2148{
2149 int error;
2150
2151 error = send_common(r, lkb, DLM_MSG_CONVERT);
2152
2153 /* down conversions go without a reply from the master */
2154 if (!error && down_conversion(lkb)) {
2155 remove_from_waiters(lkb);
2156 r->res_ls->ls_stub_ms.m_result = 0;
2157 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
2158 }
2159
2160 return error;
2161}
2162
2163/* FIXME: if this lkb is the only lock we hold on the rsb, then set
2164 MASTER_UNCERTAIN to force the next request on the rsb to confirm
2165 that the master is still correct. */
2166
2167static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
2168{
2169 return send_common(r, lkb, DLM_MSG_UNLOCK);
2170}
2171
2172static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
2173{
2174 return send_common(r, lkb, DLM_MSG_CANCEL);
2175}
2176
2177static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
2178{
2179 struct dlm_message *ms;
2180 struct dlm_mhandle *mh;
2181 int to_nodeid, error;
2182
2183 to_nodeid = lkb->lkb_nodeid;
2184
2185 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
2186 if (error)
2187 goto out;
2188
2189 send_args(r, lkb, ms);
2190
2191 ms->m_result = 0;
2192
2193 error = send_message(mh, ms);
2194 out:
2195 return error;
2196}
2197
2198static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
2199{
2200 struct dlm_message *ms;
2201 struct dlm_mhandle *mh;
2202 int to_nodeid, error;
2203
2204 to_nodeid = lkb->lkb_nodeid;
2205
2206 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
2207 if (error)
2208 goto out;
2209
2210 send_args(r, lkb, ms);
2211
2212 ms->m_bastmode = mode;
2213
2214 error = send_message(mh, ms);
2215 out:
2216 return error;
2217}
2218
2219static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
2220{
2221 struct dlm_message *ms;
2222 struct dlm_mhandle *mh;
2223 int to_nodeid, error;
2224
2225 add_to_waiters(lkb, DLM_MSG_LOOKUP);
2226
2227 to_nodeid = dlm_dir_nodeid(r);
2228
2229 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
2230 if (error)
2231 goto fail;
2232
2233 send_args(r, lkb, ms);
2234
2235 error = send_message(mh, ms);
2236 if (error)
2237 goto fail;
2238 return 0;
2239
2240 fail:
2241 remove_from_waiters(lkb);
2242 return error;
2243}
2244
2245static int send_remove(struct dlm_rsb *r)
2246{
2247 struct dlm_message *ms;
2248 struct dlm_mhandle *mh;
2249 int to_nodeid, error;
2250
2251 to_nodeid = dlm_dir_nodeid(r);
2252
2253 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
2254 if (error)
2255 goto out;
2256
2257 memcpy(ms->m_extra, r->res_name, r->res_length);
2258 ms->m_hash = r->res_hash;
2259
2260 error = send_message(mh, ms);
2261 out:
2262 return error;
2263}
2264
2265static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2266 int mstype, int rv)
2267{
2268 struct dlm_message *ms;
2269 struct dlm_mhandle *mh;
2270 int to_nodeid, error;
2271
2272 to_nodeid = lkb->lkb_nodeid;
2273
2274 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
2275 if (error)
2276 goto out;
2277
2278 send_args(r, lkb, ms);
2279
2280 ms->m_result = rv;
2281
2282 error = send_message(mh, ms);
2283 out:
2284 return error;
2285}
2286
2287static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2288{
2289 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
2290}
2291
2292static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2293{
2294 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
2295}
2296
2297static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2298{
2299 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
2300}
2301
2302static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
2303{
2304 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
2305}
2306
2307static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
2308 int ret_nodeid, int rv)
2309{
2310 struct dlm_rsb *r = &ls->ls_stub_rsb;
2311 struct dlm_message *ms;
2312 struct dlm_mhandle *mh;
2313 int error, nodeid = ms_in->m_header.h_nodeid;
2314
2315 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
2316 if (error)
2317 goto out;
2318
2319 ms->m_lkid = ms_in->m_lkid;
2320 ms->m_result = rv;
2321 ms->m_nodeid = ret_nodeid;
2322
2323 error = send_message(mh, ms);
2324 out:
2325 return error;
2326}
2327
2328/* which args we save from a received message depends heavily on the type
2329 of message, unlike the send side where we can safely send everything about
2330 the lkb for any type of message */
2331
2332static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
2333{
2334 lkb->lkb_exflags = ms->m_exflags;
2335 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2336 (ms->m_flags & 0x0000FFFF);
2337}
2338
2339static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2340{
2341 lkb->lkb_sbflags = ms->m_sbflags;
2342 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
2343 (ms->m_flags & 0x0000FFFF);
2344}
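/* The 0xFFFF split above reflects the flag layout: only the low 16
   bits of lkb_flags are defined to travel in messages, while the high
   bits (e.g. DLM_IFL_MSTCPY) are node-local state, so they are
   preserved across the mask. */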
2345
2346static int receive_extralen(struct dlm_message *ms)
2347{
2348 return (ms->m_header.h_length - sizeof(struct dlm_message));
2349}
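/* Example (illustrative): for a request carrying the name "myres",
   h_length is sizeof(struct dlm_message) + 5, so receive_extralen()
   recovers the 5-byte name length used by receive_request(). */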
2350
2351static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
2352 struct dlm_message *ms)
2353{
2354 int len;
2355
2356 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
2357 if (!lkb->lkb_lvbptr)
2358 lkb->lkb_lvbptr = allocate_lvb(ls);
2359 if (!lkb->lkb_lvbptr)
2360 return -ENOMEM;
2361 len = receive_extralen(ms);
2362 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
2363 }
2364 return 0;
2365}
2366
2367static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2368 struct dlm_message *ms)
2369{
2370 lkb->lkb_nodeid = ms->m_header.h_nodeid;
2371 lkb->lkb_ownpid = ms->m_pid;
2372 lkb->lkb_remid = ms->m_lkid;
2373 lkb->lkb_grmode = DLM_LOCK_IV;
2374 lkb->lkb_rqmode = ms->m_rqmode;
2375 lkb->lkb_bastaddr = (void *) (long) (ms->m_asts & AST_BAST);
2376 lkb->lkb_astaddr = (void *) (long) (ms->m_asts & AST_COMP);
2377
2378 DLM_ASSERT(is_master_copy(lkb), dlm_print_lkb(lkb););
2379
2380 if (receive_lvb(ls, lkb, ms))
2381 return -ENOMEM;
2382
2383 return 0;
2384}
2385
2386static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2387 struct dlm_message *ms)
2388{
2389 if (lkb->lkb_nodeid != ms->m_header.h_nodeid) {
2390 log_error(ls, "convert_args nodeid %d %d lkid %x %x",
2391 lkb->lkb_nodeid, ms->m_header.h_nodeid,
2392 lkb->lkb_id, lkb->lkb_remid);
2393 return -EINVAL;
2394 }
2395
2396 if (!is_master_copy(lkb))
2397 return -EINVAL;
2398
2399 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2400 return -EBUSY;
2401
2402 if (receive_lvb(ls, lkb, ms))
2403 return -ENOMEM;
2404
2405 lkb->lkb_rqmode = ms->m_rqmode;
2406 lkb->lkb_lvbseq = ms->m_lvbseq;
2407
2408 return 0;
2409}
2410
2411static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2412 struct dlm_message *ms)
2413{
2414 if (!is_master_copy(lkb))
2415 return -EINVAL;
2416 if (receive_lvb(ls, lkb, ms))
2417 return -ENOMEM;
2418 return 0;
2419}
2420
2421/* We fill in the stub-lkb fields with the info that send_xxxx_reply()
2422 uses to send a reply and that the remote end uses to process the reply. */
2423
2424static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
2425{
2426 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
2427 lkb->lkb_nodeid = ms->m_header.h_nodeid;
2428 lkb->lkb_remid = ms->m_lkid;
2429}
2430
2431static void receive_request(struct dlm_ls *ls, struct dlm_message *ms)
2432{
2433 struct dlm_lkb *lkb;
2434 struct dlm_rsb *r;
2435 int error, namelen;
2436
2437 error = create_lkb(ls, &lkb);
2438 if (error)
2439 goto fail;
2440
2441 receive_flags(lkb, ms);
2442 lkb->lkb_flags |= DLM_IFL_MSTCPY;
2443 error = receive_request_args(ls, lkb, ms);
2444 if (error) {
2445 put_lkb(lkb);
2446 goto fail;
2447 }
2448
2449 namelen = receive_extralen(ms);
2450
2451 error = find_rsb(ls, ms->m_extra, namelen, R_MASTER, &r);
2452 if (error) {
2453 put_lkb(lkb);
2454 goto fail;
2455 }
2456
2457 lock_rsb(r);
2458
2459 attach_lkb(r, lkb);
2460 error = do_request(r, lkb);
2461 send_request_reply(r, lkb, error);
2462
2463 unlock_rsb(r);
2464 put_rsb(r);
2465
2466 if (error == -EINPROGRESS)
2467 error = 0;
2468 if (error)
2469 put_lkb(lkb);
2470 return;
2471
2472 fail:
2473 setup_stub_lkb(ls, ms);
2474 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2475}
2476
2477static void receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
2478{
2479 struct dlm_lkb *lkb;
2480 struct dlm_rsb *r;
2481 int error, reply = 1;
2482
2483 error = find_lkb(ls, ms->m_remid, &lkb);
2484 if (error)
2485 goto fail;
2486
2487 r = lkb->lkb_resource;
2488
2489 hold_rsb(r);
2490 lock_rsb(r);
2491
2492 receive_flags(lkb, ms);
2493 error = receive_convert_args(ls, lkb, ms);
2494 if (error)
2495 goto out;
2496 reply = !down_conversion(lkb);
2497
2498 error = do_convert(r, lkb);
2499 out:
2500 if (reply)
2501 send_convert_reply(r, lkb, error);
2502
2503 unlock_rsb(r);
2504 put_rsb(r);
2505 put_lkb(lkb);
2506 return;
2507
2508 fail:
2509 setup_stub_lkb(ls, ms);
2510 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2511}
2512
2513static void receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
2514{
2515 struct dlm_lkb *lkb;
2516 struct dlm_rsb *r;
2517 int error;
2518
2519 error = find_lkb(ls, ms->m_remid, &lkb);
2520 if (error)
2521 goto fail;
2522
2523 r = lkb->lkb_resource;
2524
2525 hold_rsb(r);
2526 lock_rsb(r);
2527
2528 receive_flags(lkb, ms);
2529 error = receive_unlock_args(ls, lkb, ms);
2530 if (error)
2531 goto out;
2532
2533 error = do_unlock(r, lkb);
2534 out:
2535 send_unlock_reply(r, lkb, error);
2536
2537 unlock_rsb(r);
2538 put_rsb(r);
2539 put_lkb(lkb);
2540 return;
2541
2542 fail:
2543 setup_stub_lkb(ls, ms);
2544 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2545}
2546
2547static void receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
2548{
2549 struct dlm_lkb *lkb;
2550 struct dlm_rsb *r;
2551 int error;
2552
2553 error = find_lkb(ls, ms->m_remid, &lkb);
2554 if (error)
2555 goto fail;
2556
2557 receive_flags(lkb, ms);
2558
2559 r = lkb->lkb_resource;
2560
2561 hold_rsb(r);
2562 lock_rsb(r);
2563
2564 error = do_cancel(r, lkb);
2565 send_cancel_reply(r, lkb, error);
2566
2567 unlock_rsb(r);
2568 put_rsb(r);
2569 put_lkb(lkb);
2570 return;
2571
2572 fail:
2573 setup_stub_lkb(ls, ms);
2574 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
2575}
2576
2577static void receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
2578{
2579 struct dlm_lkb *lkb;
2580 struct dlm_rsb *r;
2581 int error;
2582
2583 error = find_lkb(ls, ms->m_remid, &lkb);
2584 if (error) {
2585 log_error(ls, "receive_grant no lkb");
2586 return;
2587 }
2588 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2589
2590 r = lkb->lkb_resource;
2591
2592 hold_rsb(r);
2593 lock_rsb(r);
2594
2595 receive_flags_reply(lkb, ms);
2596 grant_lock_pc(r, lkb, ms);
2597 queue_cast(r, lkb, 0);
2598
2599 unlock_rsb(r);
2600 put_rsb(r);
2601 put_lkb(lkb);
2602}
2603
2604static void receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
2605{
2606 struct dlm_lkb *lkb;
2607 struct dlm_rsb *r;
2608 int error;
2609
2610 error = find_lkb(ls, ms->m_remid, &lkb);
2611 if (error) {
2612 log_error(ls, "receive_bast no lkb");
2613 return;
2614 }
2615 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2616
2617 r = lkb->lkb_resource;
2618
2619 hold_rsb(r);
2620 lock_rsb(r);
2621
2622 queue_bast(r, lkb, ms->m_bastmode);
2623
2624 unlock_rsb(r);
2625 put_rsb(r);
2626 put_lkb(lkb);
2627}
2628
2629static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
2630{
2631 int len, error, ret_nodeid, dir_nodeid, from_nodeid, our_nodeid;
2632
2633 from_nodeid = ms->m_header.h_nodeid;
2634 our_nodeid = dlm_our_nodeid();
2635
2636 len = receive_extralen(ms);
2637
2638 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
2639 if (dir_nodeid != our_nodeid) {
2640 log_error(ls, "lookup dir_nodeid %d from %d",
2641 dir_nodeid, from_nodeid);
2642 error = -EINVAL;
2643 ret_nodeid = -1;
2644 goto out;
2645 }
2646
2647 error = dlm_dir_lookup(ls, from_nodeid, ms->m_extra, len, &ret_nodeid);
2648
2649 /* Optimization: we're the master, so treat the lookup as a request */
2650 if (!error && ret_nodeid == our_nodeid) {
2651 receive_request(ls, ms);
2652 return;
2653 }
2654 out:
2655 send_lookup_reply(ls, ms, ret_nodeid, error);
2656}
2657
2658static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
2659{
2660 int len, dir_nodeid, from_nodeid;
2661
2662 from_nodeid = ms->m_header.h_nodeid;
2663
2664 len = receive_extralen(ms);
2665
2666 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
2667 if (dir_nodeid != dlm_our_nodeid()) {
2668 log_error(ls, "remove dir entry dir_nodeid %d from %d",
2669 dir_nodeid, from_nodeid);
2670 return;
2671 }
2672
2673 dlm_dir_remove_entry(ls, from_nodeid, ms->m_extra, len);
2674}
2675
2676static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
2677{
2678 struct dlm_lkb *lkb;
2679 struct dlm_rsb *r;
2680 int error, mstype;
2681
2682 error = find_lkb(ls, ms->m_remid, &lkb);
2683 if (error) {
2684 log_error(ls, "receive_request_reply no lkb");
2685 return;
2686 }
2687 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2688
2689 mstype = lkb->lkb_wait_type;
2690 error = remove_from_waiters(lkb);
2691 if (error) {
2692 log_error(ls, "receive_request_reply not on waiters");
2693 goto out;
2694 }
2695
2696 /* this is the value returned from do_request() on the master */
2697 error = ms->m_result;
2698
2699 r = lkb->lkb_resource;
2700 hold_rsb(r);
2701 lock_rsb(r);
2702
2703 /* Optimization: the dir node was also the master, so it took our
2704 lookup as a request and sent a request reply instead of a lookup reply */
2705 if (mstype == DLM_MSG_LOOKUP) {
2706 r->res_nodeid = ms->m_header.h_nodeid;
2707 lkb->lkb_nodeid = r->res_nodeid;
2708 }
2709
2710 switch (error) {
2711 case -EAGAIN:
2712 /* request would block (be queued) on remote master;
2713 the unhold undoes the original ref from create_lkb()
2714 so it leads to the lkb being freed */
2715 queue_cast(r, lkb, -EAGAIN);
2716 confirm_master(r, -EAGAIN);
2717 unhold_lkb(lkb);
2718 break;
2719
2720 case -EINPROGRESS:
2721 case 0:
2722 /* request was queued or granted on remote master */
2723 receive_flags_reply(lkb, ms);
2724 lkb->lkb_remid = ms->m_lkid;
2725 if (error)
2726 add_lkb(r, lkb, DLM_LKSTS_WAITING);
2727 else {
2728 grant_lock_pc(r, lkb, ms);
2729 queue_cast(r, lkb, 0);
2730 }
2731 confirm_master(r, error);
2732 break;
2733
2734 case -ENOENT:
2735 case -ENOTBLK:
2736 /* find_rsb failed to find rsb or rsb wasn't master */
2737 r->res_nodeid = -1;
2738 lkb->lkb_nodeid = -1;
2739 _request_lock(r, lkb);
2740 break;
2741
2742 default:
2743 log_error(ls, "receive_request_reply error %d", error);
2744 }
2745
2746 unlock_rsb(r);
2747 put_rsb(r);
2748 out:
2749 put_lkb(lkb);
2750}
2751
2752static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
2753 struct dlm_message *ms)
2754{
2755 int error = ms->m_result;
2756
2757 /* this is the value returned from do_convert() on the master */
2758
2759 switch (error) {
2760 case -EAGAIN:
2761 /* convert would block (be queued) on remote master */
2762 queue_cast(r, lkb, -EAGAIN);
2763 break;
2764
2765 case -EINPROGRESS:
2766 /* convert was queued on remote master */
2767 del_lkb(r, lkb);
2768 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
2769 break;
2770
2771 case 0:
2772 /* convert was granted on remote master */
2773 receive_flags_reply(lkb, ms);
2774 grant_lock_pc(r, lkb, ms);
2775 queue_cast(r, lkb, 0);
2776 break;
2777
2778 default:
2779 log_error(r->res_ls, "receive_convert_reply error %d", error);
2780 }
2781}
2782
2783static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2784{
2785 struct dlm_rsb *r = lkb->lkb_resource;
2786
2787 hold_rsb(r);
2788 lock_rsb(r);
2789
2790 __receive_convert_reply(r, lkb, ms);
2791
2792 unlock_rsb(r);
2793 put_rsb(r);
2794}
2795
2796static void receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
2797{
2798 struct dlm_lkb *lkb;
2799 int error;
2800
2801 error = find_lkb(ls, ms->m_remid, &lkb);
2802 if (error) {
2803 log_error(ls, "receive_convert_reply no lkb");
2804 return;
2805 }
2806 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2807
2808 error = remove_from_waiters(lkb);
2809 if (error) {
2810 log_error(ls, "receive_convert_reply not on waiters");
2811 goto out;
2812 }
2813
2814 _receive_convert_reply(lkb, ms);
2815 out:
2816 put_lkb(lkb);
2817}
2818
2819static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2820{
2821 struct dlm_rsb *r = lkb->lkb_resource;
2822 int error = ms->m_result;
2823
2824 hold_rsb(r);
2825 lock_rsb(r);
2826
2827 /* this is the value returned from do_unlock() on the master */
2828
2829 switch (error) {
2830 case -DLM_EUNLOCK:
2831 receive_flags_reply(lkb, ms);
2832 remove_lock_pc(r, lkb);
2833 queue_cast(r, lkb, -DLM_EUNLOCK);
2834 break;
2835 default:
2836 log_error(r->res_ls, "receive_unlock_reply error %d", error);
2837 }
2838
2839 unlock_rsb(r);
2840 put_rsb(r);
2841}
2842
2843static void receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
2844{
2845 struct dlm_lkb *lkb;
2846 int error;
2847
2848 error = find_lkb(ls, ms->m_remid, &lkb);
2849 if (error) {
2850 log_error(ls, "receive_unlock_reply no lkb");
2851 return;
2852 }
2853 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2854
2855 error = remove_from_waiters(lkb);
2856 if (error) {
2857 log_error(ls, "receive_unlock_reply not on waiters");
2858 goto out;
2859 }
2860
2861 _receive_unlock_reply(lkb, ms);
2862 out:
2863 put_lkb(lkb);
2864}
2865
2866static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
2867{
2868 struct dlm_rsb *r = lkb->lkb_resource;
2869 int error = ms->m_result;
2870
2871 hold_rsb(r);
2872 lock_rsb(r);
2873
2874 /* this is the value returned from do_cancel() on the master */
2875
2876 switch (error) {
2877 case -DLM_ECANCEL:
2878 receive_flags_reply(lkb, ms);
2879 revert_lock_pc(r, lkb);
2880 queue_cast(r, lkb, -DLM_ECANCEL);
2881 break;
2882 default:
2883 log_error(r->res_ls, "receive_cancel_reply error %d", error);
2884 }
2885
2886 unlock_rsb(r);
2887 put_rsb(r);
2888}
2889
2890static void receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
2891{
2892 struct dlm_lkb *lkb;
2893 int error;
2894
2895 error = find_lkb(ls, ms->m_remid, &lkb);
2896 if (error) {
2897 log_error(ls, "receive_cancel_reply no lkb");
2898 return;
2899 }
2900 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
2901
2902 error = remove_from_waiters(lkb);
2903 if (error) {
2904 log_error(ls, "receive_cancel_reply not on waiters");
2905 goto out;
2906 }
2907
2908 _receive_cancel_reply(lkb, ms);
2909 out:
2910 put_lkb(lkb);
2911}
2912
2913static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
2914{
2915 struct dlm_lkb *lkb;
2916 struct dlm_rsb *r;
2917 int error, ret_nodeid;
2918
2919 error = find_lkb(ls, ms->m_lkid, &lkb);
2920 if (error) {
2921 log_error(ls, "receive_lookup_reply no lkb");
2922 return;
2923 }
2924
2925 error = remove_from_waiters(lkb);
2926 if (error) {
2927 log_error(ls, "receive_lookup_reply not on waiters");
2928 goto out;
2929 }
2930
2931 /* this is the value returned by dlm_dir_lookup on dir node
2932 FIXME: will a non-zero error ever be returned? */
2933 error = ms->m_result;
2934
2935 r = lkb->lkb_resource;
2936 hold_rsb(r);
2937 lock_rsb(r);
2938
2939 ret_nodeid = ms->m_nodeid;
2940 if (ret_nodeid == dlm_our_nodeid()) {
2941 r->res_nodeid = 0;
2942 ret_nodeid = 0;
2943 r->res_first_lkid = 0;
2944 } else {
2945 /* set_master() will copy res_nodeid to lkb_nodeid */
2946 r->res_nodeid = ret_nodeid;
2947 }
2948
2949 _request_lock(r, lkb);
2950
2951 if (!ret_nodeid)
2952 process_lookup_list(r);
2953
2954 unlock_rsb(r);
2955 put_rsb(r);
2956 out:
2957 put_lkb(lkb);
2958}
2959
2960int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery)
2961{
2962 struct dlm_message *ms = (struct dlm_message *) hd;
2963 struct dlm_ls *ls;
2964 int error;
2965
2966 if (!recovery)
2967 dlm_message_in(ms);
2968
2969 ls = dlm_find_lockspace_global(hd->h_lockspace);
2970 if (!ls) {
2971 log_print("drop message %d from %d for unknown lockspace %d",
2972 ms->m_type, nodeid, hd->h_lockspace);
2973 return -EINVAL;
2974 }
2975
2976 /* recovery may have just ended leaving a bunch of backed-up requests
2977 in the requestqueue; wait while dlm_recoverd clears them */
2978
2979 if (!recovery)
2980 dlm_wait_requestqueue(ls);
2981
2982 /* recovery may have just started while there were a bunch of
2983 in-flight requests -- save them in requestqueue to be processed
2984 after recovery. we can't let dlm_recvd block on the recovery
2985 lock. if dlm_recoverd is calling this function to clear the
2986 requestqueue, it needs to be interrupted (-EINTR) if another
2987 recovery operation is starting. */
2988
2989 while (1) {
2990 if (dlm_locking_stopped(ls)) {
2991 if (!recovery)
2992 dlm_add_requestqueue(ls, nodeid, hd);
2993 error = -EINTR;
2994 goto out;
2995 }
2996
2997 if (lock_recovery_try(ls))
2998 break;
2999 schedule();
3000 }
3001
3002 switch (ms->m_type) {
3003
3004 /* messages sent to a master node */
3005
3006 case DLM_MSG_REQUEST:
3007 receive_request(ls, ms);
3008 break;
3009
3010 case DLM_MSG_CONVERT:
3011 receive_convert(ls, ms);
3012 break;
3013
3014 case DLM_MSG_UNLOCK:
3015 receive_unlock(ls, ms);
3016 break;
3017
3018 case DLM_MSG_CANCEL:
3019 receive_cancel(ls, ms);
3020 break;
3021
3022 /* messages sent from a master node (replies to above) */
3023
3024 case DLM_MSG_REQUEST_REPLY:
3025 receive_request_reply(ls, ms);
3026 break;
3027
3028 case DLM_MSG_CONVERT_REPLY:
3029 receive_convert_reply(ls, ms);
3030 break;
3031
3032 case DLM_MSG_UNLOCK_REPLY:
3033 receive_unlock_reply(ls, ms);
3034 break;
3035
3036 case DLM_MSG_CANCEL_REPLY:
3037 receive_cancel_reply(ls, ms);
3038 break;
3039
3040 /* messages sent from a master node (only two types of async msg) */
3041
3042 case DLM_MSG_GRANT:
3043 receive_grant(ls, ms);
3044 break;
3045
3046 case DLM_MSG_BAST:
3047 receive_bast(ls, ms);
3048 break;
3049
3050 /* messages sent to a dir node */
3051
3052 case DLM_MSG_LOOKUP:
3053 receive_lookup(ls, ms);
3054 break;
3055
3056 case DLM_MSG_REMOVE:
3057 receive_remove(ls, ms);
3058 break;
3059
3060 /* messages sent from a dir node (remove has no reply) */
3061
3062 case DLM_MSG_LOOKUP_REPLY:
3063 receive_lookup_reply(ls, ms);
3064 break;
3065
3066 default:
3067 log_error(ls, "unknown message type %d", ms->m_type);
3068 }
3069
3070 unlock_recovery(ls);
3071 out:
3072 dlm_put_lockspace(ls);
3073 dlm_astd_wake();
3074 return 0;
3075}
3076
3077
3078/*
3079 * Recovery related
3080 */
3081
3082static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb)
3083{
3084 if (middle_conversion(lkb)) {
3085 hold_lkb(lkb);
3086 ls->ls_stub_ms.m_result = -EINPROGRESS;
3087 _remove_from_waiters(lkb);
3088 _receive_convert_reply(lkb, &ls->ls_stub_ms);
3089
3090 /* Same special case as in receive_rcom_lock_args() */
3091 lkb->lkb_grmode = DLM_LOCK_IV;
3092 rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
3093 unhold_lkb(lkb);
3094
3095 } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
3096 lkb->lkb_flags |= DLM_IFL_RESEND;
3097 }
3098
3099 /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
3100 conversions are async; there's no reply from the remote master */
3101}
3102
3103/* A waiting lkb needs recovery if the master node has failed, or
3104 the master node is changing (only when no directory is used) */
3105
3106static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb)
3107{
3108 if (dlm_is_removed(ls, lkb->lkb_nodeid))
3109 return 1;
3110
3111 if (!dlm_no_directory(ls))
3112 return 0;
3113
3114 if (dlm_dir_nodeid(lkb->lkb_resource) != lkb->lkb_nodeid)
3115 return 1;
3116
3117 return 0;
3118}
3119
3120/* Recovery for locks that are waiting for replies from nodes that are now
3121 gone. We can just complete unlocks and cancels by faking a reply from the
3122 dead node. Requests and up-conversions we flag to be resent after
3123 recovery. Down-conversions can just be completed with a fake reply like
3124 unlocks. Conversions between PR and CW need special attention. */
3125
3126void dlm_recover_waiters_pre(struct dlm_ls *ls)
3127{
3128 struct dlm_lkb *lkb, *safe;
3129
3130 mutex_lock(&ls->ls_waiters_mutex);
3131
3132 list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {
3133 log_debug(ls, "pre recover waiter lkid %x type %d flags %x",
3134 lkb->lkb_id, lkb->lkb_wait_type, lkb->lkb_flags);
3135
3136		/* all outstanding lookups, regardless of destination, will be
3137 resent after recovery is done */
3138
3139 if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
3140 lkb->lkb_flags |= DLM_IFL_RESEND;
3141 continue;
3142 }
3143
3144 if (!waiter_needs_recovery(ls, lkb))
3145 continue;
3146
3147 switch (lkb->lkb_wait_type) {
3148
3149 case DLM_MSG_REQUEST:
3150 lkb->lkb_flags |= DLM_IFL_RESEND;
3151 break;
3152
3153 case DLM_MSG_CONVERT:
3154 recover_convert_waiter(ls, lkb);
3155 break;
3156
3157 case DLM_MSG_UNLOCK:
3158 hold_lkb(lkb);
3159 ls->ls_stub_ms.m_result = -DLM_EUNLOCK;
3160 _remove_from_waiters(lkb);
3161 _receive_unlock_reply(lkb, &ls->ls_stub_ms);
3162 put_lkb(lkb);
3163 break;
3164
3165 case DLM_MSG_CANCEL:
3166 hold_lkb(lkb);
3167 ls->ls_stub_ms.m_result = -DLM_ECANCEL;
3168 _remove_from_waiters(lkb);
3169 _receive_cancel_reply(lkb, &ls->ls_stub_ms);
3170 put_lkb(lkb);
3171 break;
3172
3173 default:
3174 log_error(ls, "invalid lkb wait_type %d",
3175 lkb->lkb_wait_type);
3176 }
3177 }
3178 mutex_unlock(&ls->ls_waiters_mutex);
3179}
3180
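/* Take one lkb flagged RESEND off the waiters list.  Returns the message
   type the lkb was waiting on (non-zero for any queued waiter), with the
   lkb itself in *lkb_ret, or 0 when no flagged entries remain. */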
3181static int remove_resend_waiter(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
3182{
3183 struct dlm_lkb *lkb;
3184 int rv = 0;
3185
3186 mutex_lock(&ls->ls_waiters_mutex);
3187 list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
3188 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3189 rv = lkb->lkb_wait_type;
3190 _remove_from_waiters(lkb);
3191 lkb->lkb_flags &= ~DLM_IFL_RESEND;
3192 break;
3193 }
3194 }
3195 mutex_unlock(&ls->ls_waiters_mutex);
3196
3197 if (!rv)
3198 lkb = NULL;
3199 *lkb_ret = lkb;
3200 return rv;
3201}
3202
3203/* Deal with lookups and lkb's marked RESEND from _pre. We may now be the
3204 master or dir-node for r. Processing the lkb may result in it being placed
3205 back on waiters. */
3206
3207int dlm_recover_waiters_post(struct dlm_ls *ls)
3208{
3209 struct dlm_lkb *lkb;
3210 struct dlm_rsb *r;
3211 int error = 0, mstype;
3212
3213 while (1) {
3214 if (dlm_locking_stopped(ls)) {
3215 log_debug(ls, "recover_waiters_post aborted");
3216 error = -EINTR;
3217 break;
3218 }
3219
3220 mstype = remove_resend_waiter(ls, &lkb);
3221 if (!mstype)
3222 break;
3223
3224 r = lkb->lkb_resource;
3225
3226 log_debug(ls, "recover_waiters_post %x type %d flags %x %s",
3227 lkb->lkb_id, mstype, lkb->lkb_flags, r->res_name);
3228
3229 switch (mstype) {
3230
3231 case DLM_MSG_LOOKUP:
3232 hold_rsb(r);
3233 lock_rsb(r);
3234 _request_lock(r, lkb);
3235 if (is_master(r))
3236 confirm_master(r, 0);
3237 unlock_rsb(r);
3238 put_rsb(r);
3239 break;
3240
3241 case DLM_MSG_REQUEST:
3242 hold_rsb(r);
3243 lock_rsb(r);
3244 _request_lock(r, lkb);
3245 unlock_rsb(r);
3246 put_rsb(r);
3247 break;
3248
3249 case DLM_MSG_CONVERT:
3250 hold_rsb(r);
3251 lock_rsb(r);
3252 _convert_lock(r, lkb);
3253 unlock_rsb(r);
3254 put_rsb(r);
3255 break;
3256
3257 default:
3258 log_error(ls, "recover_waiters_post type %d", mstype);
3259 }
3260 }
3261
3262 return error;
3263}
3264
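/* Remove from "queue" every lkb for which test() returns true.  Used with
   purge_dead_test (master copies held for nodes that have left) and
   purge_mstcpy_test (all master copies). */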
3265static void purge_queue(struct dlm_rsb *r, struct list_head *queue,
3266 int (*test)(struct dlm_ls *ls, struct dlm_lkb *lkb))
3267{
3268 struct dlm_ls *ls = r->res_ls;
3269 struct dlm_lkb *lkb, *safe;
3270
3271 list_for_each_entry_safe(lkb, safe, queue, lkb_statequeue) {
3272 if (test(ls, lkb)) {
3273 del_lkb(r, lkb);
3274 /* this put should free the lkb */
3275 if (!put_lkb(lkb))
3276 log_error(ls, "purged lkb not released");
3277 }
3278 }
3279}
3280
3281static int purge_dead_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3282{
3283 return (is_master_copy(lkb) && dlm_is_removed(ls, lkb->lkb_nodeid));
3284}
3285
3286static int purge_mstcpy_test(struct dlm_ls *ls, struct dlm_lkb *lkb)
3287{
3288 return is_master_copy(lkb);
3289}
3290
3291static void purge_dead_locks(struct dlm_rsb *r)
3292{
3293 purge_queue(r, &r->res_grantqueue, &purge_dead_test);
3294 purge_queue(r, &r->res_convertqueue, &purge_dead_test);
3295 purge_queue(r, &r->res_waitqueue, &purge_dead_test);
3296}
3297
3298void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
3299{
3300 purge_queue(r, &r->res_grantqueue, &purge_mstcpy_test);
3301 purge_queue(r, &r->res_convertqueue, &purge_mstcpy_test);
3302 purge_queue(r, &r->res_waitqueue, &purge_mstcpy_test);
3303}
3304
3305/* Get rid of locks held by nodes that are gone. */
3306
3307int dlm_purge_locks(struct dlm_ls *ls)
3308{
3309 struct dlm_rsb *r;
3310
3311 log_debug(ls, "dlm_purge_locks");
3312
3313 down_write(&ls->ls_root_sem);
3314 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
3315 hold_rsb(r);
3316 lock_rsb(r);
3317 if (is_master(r))
3318 purge_dead_locks(r);
3319 unlock_rsb(r);
3320 unhold_rsb(r);
3321
3322 schedule();
3323 }
3324 up_write(&ls->ls_root_sem);
3325
3326 return 0;
3327}
3328
3329int dlm_grant_after_purge(struct dlm_ls *ls)
3330{
3331 struct dlm_rsb *r;
3332 int i;
3333
3334 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
3335 read_lock(&ls->ls_rsbtbl[i].lock);
3336 list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
3337 hold_rsb(r);
3338 lock_rsb(r);
3339 if (is_master(r)) {
3340 grant_pending_locks(r);
3341 confirm_master(r, 0);
3342 }
3343 unlock_rsb(r);
3344 put_rsb(r);
3345 }
3346 read_unlock(&ls->ls_rsbtbl[i].lock);
3347 }
3348
3349 return 0;
3350}
3351
3352static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
3353 uint32_t remid)
3354{
3355 struct dlm_lkb *lkb;
3356
3357 list_for_each_entry(lkb, head, lkb_statequeue) {
3358 if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
3359 return lkb;
3360 }
3361 return NULL;
3362}
3363
3364static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
3365 uint32_t remid)
3366{
3367 struct dlm_lkb *lkb;
3368
3369 lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
3370 if (lkb)
3371 return lkb;
3372 lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
3373 if (lkb)
3374 return lkb;
3375 lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
3376 if (lkb)
3377 return lkb;
3378 return NULL;
3379}
3380
3381static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3382 struct dlm_rsb *r, struct dlm_rcom *rc)
3383{
3384 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3385 int lvblen;
3386
3387 lkb->lkb_nodeid = rc->rc_header.h_nodeid;
3388 lkb->lkb_ownpid = rl->rl_ownpid;
3389 lkb->lkb_remid = rl->rl_lkid;
3390 lkb->lkb_exflags = rl->rl_exflags;
3391 lkb->lkb_flags = rl->rl_flags & 0x0000FFFF;
3392 lkb->lkb_flags |= DLM_IFL_MSTCPY;
3393 lkb->lkb_lvbseq = rl->rl_lvbseq;
3394 lkb->lkb_rqmode = rl->rl_rqmode;
3395 lkb->lkb_grmode = rl->rl_grmode;
3396 /* don't set lkb_status because add_lkb wants to itself */
3397
3398 lkb->lkb_bastaddr = (void *) (long) (rl->rl_asts & AST_BAST);
3399 lkb->lkb_astaddr = (void *) (long) (rl->rl_asts & AST_COMP);
3400
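	/* an lvb, if present, is appended to the rcom_lock struct in the
	   message body; its length is whatever remains of the rcom buffer */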
3401 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3402 lkb->lkb_lvbptr = allocate_lvb(ls);
3403 if (!lkb->lkb_lvbptr)
3404 return -ENOMEM;
3405 lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
3406 sizeof(struct rcom_lock);
3407 memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
3408 }
3409
3410 /* Conversions between PR and CW (middle modes) need special handling.
3411 The real granted mode of these converting locks cannot be determined
3412 until all locks have been rebuilt on the rsb (recover_conversion) */
3413
3414 if (rl->rl_wait_type == DLM_MSG_CONVERT && middle_conversion(lkb)) {
3415 rl->rl_status = DLM_LKSTS_CONVERT;
3416 lkb->lkb_grmode = DLM_LOCK_IV;
3417 rsb_set_flag(r, RSB_RECOVER_CONVERT);
3418 }
3419
3420 return 0;
3421}
3422
3423/* This lkb may have been recovered in a previous aborted recovery so we need
3424 to check if the rsb already has an lkb with the given remote nodeid/lkid.
3425 If so we just send back a standard reply. If not, we create a new lkb with
3426 the given values and send back our lkid. We send back our lkid by sending
3427 back the rcom_lock struct we got but with the remid field filled in. */
3428
3429int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3430{
3431 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3432 struct dlm_rsb *r;
3433 struct dlm_lkb *lkb;
3434 int error;
3435
3436 if (rl->rl_parent_lkid) {
3437 error = -EOPNOTSUPP;
3438 goto out;
3439 }
3440
3441 error = find_rsb(ls, rl->rl_name, rl->rl_namelen, R_MASTER, &r);
3442 if (error)
3443 goto out;
3444
3445 lock_rsb(r);
3446
3447 lkb = search_remid(r, rc->rc_header.h_nodeid, rl->rl_lkid);
3448 if (lkb) {
3449 error = -EEXIST;
3450 goto out_remid;
3451 }
3452
3453 error = create_lkb(ls, &lkb);
3454 if (error)
3455 goto out_unlock;
3456
3457 error = receive_rcom_lock_args(ls, lkb, r, rc);
3458 if (error) {
3459 put_lkb(lkb);
3460 goto out_unlock;
3461 }
3462
3463 attach_lkb(r, lkb);
3464 add_lkb(r, lkb, rl->rl_status);
3465 error = 0;
3466
3467 out_remid:
3468 /* this is the new value returned to the lock holder for
3469 saving in its process-copy lkb */
3470 rl->rl_remid = lkb->lkb_id;
3471
3472 out_unlock:
3473 unlock_rsb(r);
3474 put_rsb(r);
3475 out:
3476 if (error)
3477 log_print("recover_master_copy %d %x", error, rl->rl_lkid);
3478 rl->rl_result = error;
3479 return error;
3480}
3481
3482int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3483{
3484 struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
3485 struct dlm_rsb *r;
3486 struct dlm_lkb *lkb;
3487 int error;
3488
3489 error = find_lkb(ls, rl->rl_lkid, &lkb);
3490 if (error) {
3491 log_error(ls, "recover_process_copy no lkid %x", rl->rl_lkid);
3492 return error;
3493 }
3494
3495 DLM_ASSERT(is_process_copy(lkb), dlm_print_lkb(lkb););
3496
3497 error = rl->rl_result;
3498
3499 r = lkb->lkb_resource;
3500 hold_rsb(r);
3501 lock_rsb(r);
3502
3503 switch (error) {
3504 case -EEXIST:
3505 log_debug(ls, "master copy exists %x", lkb->lkb_id);
3506 /* fall through */
3507 case 0:
3508 lkb->lkb_remid = rl->rl_remid;
3509 break;
3510 default:
3511 log_error(ls, "dlm_recover_process_copy unknown error %d %x",
3512 error, lkb->lkb_id);
3513 }
3514
3515 /* an ack for dlm_recover_locks() which waits for replies from
3516 all the locks it sends to new masters */
3517 dlm_recovered_lock(r);
3518
3519 unlock_rsb(r);
3520 put_rsb(r);
3521 put_lkb(lkb);
3522
3523 return 0;
3524}
3525
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
new file mode 100644
index 000000000000..bffab9c88b1d
--- /dev/null
+++ b/fs/dlm/lock.h
@@ -0,0 +1,50 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __LOCK_DOT_H__
14#define __LOCK_DOT_H__
15
16void dlm_print_rsb(struct dlm_rsb *r);
17int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
18int dlm_modes_compat(int mode1, int mode2);
19int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
20 unsigned int flags, struct dlm_rsb **r_ret);
21void dlm_put_rsb(struct dlm_rsb *r);
22void dlm_hold_rsb(struct dlm_rsb *r);
23int dlm_put_lkb(struct dlm_lkb *lkb);
24void dlm_scan_rsbs(struct dlm_ls *ls);
25
26int dlm_purge_locks(struct dlm_ls *ls);
27void dlm_purge_mstcpy_locks(struct dlm_rsb *r);
28int dlm_grant_after_purge(struct dlm_ls *ls);
29int dlm_recover_waiters_post(struct dlm_ls *ls);
30void dlm_recover_waiters_pre(struct dlm_ls *ls);
31int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
32int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
33
34static inline int is_master(struct dlm_rsb *r)
35{
36 return !r->res_nodeid;
37}
38
39static inline void lock_rsb(struct dlm_rsb *r)
40{
41 mutex_lock(&r->res_mutex);
42}
43
44static inline void unlock_rsb(struct dlm_rsb *r)
45{
46 mutex_unlock(&r->res_mutex);
47}
48
49#endif
50
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
new file mode 100644
index 000000000000..d2ff505d51cd
--- /dev/null
+++ b/fs/dlm/lockspace.c
@@ -0,0 +1,665 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "recoverd.h"
18#include "ast.h"
19#include "dir.h"
20#include "lowcomms.h"
21#include "config.h"
22#include "memory.h"
23#include "lock.h"
24
25#ifdef CONFIG_DLM_DEBUG
26int dlm_create_debug_file(struct dlm_ls *ls);
27void dlm_delete_debug_file(struct dlm_ls *ls);
28#else
29static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
30static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
31#endif
32
33static int ls_count;
34static struct mutex ls_lock;
35static struct list_head lslist;
36static spinlock_t lslist_lock;
37static struct task_struct *scand_task;
38
39
40static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
41{
42 ssize_t ret = len;
43 int n = simple_strtol(buf, NULL, 0);
44
45 switch (n) {
46 case 0:
47 dlm_ls_stop(ls);
48 break;
49 case 1:
50 dlm_ls_start(ls);
51 break;
52 default:
53 ret = -EINVAL;
54 }
55 return ret;
56}
57
58static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
59{
60 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
61 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
62 wake_up(&ls->ls_uevent_wait);
63 return len;
64}
65
66static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
67{
68 return sprintf(buf, "%u\n", ls->ls_global_id);
69}
70
71static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
72{
73 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
74 return len;
75}
76
77struct dlm_attr {
78 struct attribute attr;
79 ssize_t (*show)(struct dlm_ls *, char *);
80 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
81};
82
83static struct dlm_attr dlm_attr_control = {
84 .attr = {.name = "control", .mode = S_IWUSR},
85 .store = dlm_control_store
86};
87
88static struct dlm_attr dlm_attr_event = {
89 .attr = {.name = "event_done", .mode = S_IWUSR},
90 .store = dlm_event_store
91};
92
93static struct dlm_attr dlm_attr_id = {
94 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
95 .show = dlm_id_show,
96 .store = dlm_id_store
97};
98
99static struct attribute *dlm_attrs[] = {
100 &dlm_attr_control.attr,
101 &dlm_attr_event.attr,
102 &dlm_attr_id.attr,
103 NULL,
104};
105
106static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
107 char *buf)
108{
109 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
110 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
111 return a->show ? a->show(ls, buf) : 0;
112}
113
114static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
115 const char *buf, size_t len)
116{
117 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
118 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
119 return a->store ? a->store(ls, buf, len) : len;
120}
121
122static struct sysfs_ops dlm_attr_ops = {
123 .show = dlm_attr_show,
124 .store = dlm_attr_store,
125};
126
127static struct kobj_type dlm_ktype = {
128 .default_attrs = dlm_attrs,
129 .sysfs_ops = &dlm_attr_ops,
130};
131
132static struct kset dlm_kset = {
133 .subsys = &kernel_subsys,
134 .kobj = {.name = "dlm",},
135 .ktype = &dlm_ktype,
136};
137
138static int kobject_setup(struct dlm_ls *ls)
139{
140 char lsname[DLM_LOCKSPACE_LEN];
141 int error;
142
143 memset(lsname, 0, DLM_LOCKSPACE_LEN);
144 snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
145
146 error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
147 if (error)
148 return error;
149
150 ls->ls_kobj.kset = &dlm_kset;
151 ls->ls_kobj.ktype = &dlm_ktype;
152 return 0;
153}
154
155static int do_uevent(struct dlm_ls *ls, int in)
156{
157 int error;
158
159 if (in)
160 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
161 else
162 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
163
164 error = wait_event_interruptible(ls->ls_uevent_wait,
165 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
166 if (error)
167 goto out;
168
169 error = ls->ls_uevent_result;
170 out:
171 return error;
172}
173
174
175int dlm_lockspace_init(void)
176{
177 int error;
178
179 ls_count = 0;
180 mutex_init(&ls_lock);
181 INIT_LIST_HEAD(&lslist);
182 spin_lock_init(&lslist_lock);
183
184 error = kset_register(&dlm_kset);
185 if (error)
186 printk("dlm_lockspace_init: cannot register kset %d\n", error);
187 return error;
188}
189
190void dlm_lockspace_exit(void)
191{
192 kset_unregister(&dlm_kset);
193}
194
195static int dlm_scand(void *data)
196{
197 struct dlm_ls *ls;
198
199 while (!kthread_should_stop()) {
200 list_for_each_entry(ls, &lslist, ls_list)
201 dlm_scan_rsbs(ls);
202 schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
203 }
204 return 0;
205}
206
207static int dlm_scand_start(void)
208{
209 struct task_struct *p;
210 int error = 0;
211
212 p = kthread_run(dlm_scand, NULL, "dlm_scand");
213 if (IS_ERR(p))
214 error = PTR_ERR(p);
215 else
216 scand_task = p;
217 return error;
218}
219
220static void dlm_scand_stop(void)
221{
222 kthread_stop(scand_task);
223}
224
225static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
226{
227 struct dlm_ls *ls;
228
229 spin_lock(&lslist_lock);
230
231 list_for_each_entry(ls, &lslist, ls_list) {
232 if (ls->ls_namelen == namelen &&
233 memcmp(ls->ls_name, name, namelen) == 0)
234 goto out;
235 }
236 ls = NULL;
237 out:
238 spin_unlock(&lslist_lock);
239 return ls;
240}
241
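/* dlm_find_lockspace_global() and dlm_find_lockspace_local() take a
   reference (ls_count) under lslist_lock; remove_lockspace() waits for
   the count to reach zero, so each successful find must be paired with
   dlm_put_lockspace(). */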
242struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
243{
244 struct dlm_ls *ls;
245
246 spin_lock(&lslist_lock);
247
248 list_for_each_entry(ls, &lslist, ls_list) {
249 if (ls->ls_global_id == id) {
250 ls->ls_count++;
251 goto out;
252 }
253 }
254 ls = NULL;
255 out:
256 spin_unlock(&lslist_lock);
257 return ls;
258}
259
260struct dlm_ls *dlm_find_lockspace_local(void *id)
261{
262 struct dlm_ls *ls = id;
263
264 spin_lock(&lslist_lock);
265 ls->ls_count++;
266 spin_unlock(&lslist_lock);
267 return ls;
268}
269
270void dlm_put_lockspace(struct dlm_ls *ls)
271{
272 spin_lock(&lslist_lock);
273 ls->ls_count--;
274 spin_unlock(&lslist_lock);
275}
276
277static void remove_lockspace(struct dlm_ls *ls)
278{
279 for (;;) {
280 spin_lock(&lslist_lock);
281 if (ls->ls_count == 0) {
282 list_del(&ls->ls_list);
283 spin_unlock(&lslist_lock);
284 return;
285 }
286 spin_unlock(&lslist_lock);
287 ssleep(1);
288 }
289}
290
291static int threads_start(void)
292{
293 int error;
294
295	/* Thread which processes lock requests for all lockspaces */
296 error = dlm_astd_start();
297 if (error) {
298 log_print("cannot start dlm_astd thread %d", error);
299 goto fail;
300 }
301
302 error = dlm_scand_start();
303 if (error) {
304 log_print("cannot start dlm_scand thread %d", error);
305 goto astd_fail;
306 }
307
308	/* Thread for sending/receiving messages for all lockspaces */
309 error = dlm_lowcomms_start();
310 if (error) {
311 log_print("cannot start dlm lowcomms %d", error);
312 goto scand_fail;
313 }
314
315 return 0;
316
317 scand_fail:
318 dlm_scand_stop();
319 astd_fail:
320 dlm_astd_stop();
321 fail:
322 return error;
323}
324
325static void threads_stop(void)
326{
327 dlm_scand_stop();
328 dlm_lowcomms_stop();
329 dlm_astd_stop();
330}
331
332static int new_lockspace(char *name, int namelen, void **lockspace,
333 uint32_t flags, int lvblen)
334{
335 struct dlm_ls *ls;
336 int i, size, error = -ENOMEM;
337
338 if (namelen > DLM_LOCKSPACE_LEN)
339 return -EINVAL;
340
341 if (!lvblen || (lvblen % 8))
342 return -EINVAL;
343
344 if (!try_module_get(THIS_MODULE))
345 return -EINVAL;
346
347 ls = dlm_find_lockspace_name(name, namelen);
348 if (ls) {
349 *lockspace = ls;
350 module_put(THIS_MODULE);
351 return -EEXIST;
352 }
353
354 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
355 if (!ls)
356 goto out;
357 memcpy(ls->ls_name, name, namelen);
358 ls->ls_namelen = namelen;
359 ls->ls_exflags = flags;
360 ls->ls_lvblen = lvblen;
361 ls->ls_count = 0;
362 ls->ls_flags = 0;
363
364 size = dlm_config.rsbtbl_size;
365 ls->ls_rsbtbl_size = size;
366
367 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
368 if (!ls->ls_rsbtbl)
369 goto out_lsfree;
370 for (i = 0; i < size; i++) {
371 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
372 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
373 rwlock_init(&ls->ls_rsbtbl[i].lock);
374 }
375
376 size = dlm_config.lkbtbl_size;
377 ls->ls_lkbtbl_size = size;
378
379 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
380 if (!ls->ls_lkbtbl)
381 goto out_rsbfree;
382 for (i = 0; i < size; i++) {
383 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
384 rwlock_init(&ls->ls_lkbtbl[i].lock);
385 ls->ls_lkbtbl[i].counter = 1;
386 }
387
388 size = dlm_config.dirtbl_size;
389 ls->ls_dirtbl_size = size;
390
391 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
392 if (!ls->ls_dirtbl)
393 goto out_lkbfree;
394 for (i = 0; i < size; i++) {
395 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
396 rwlock_init(&ls->ls_dirtbl[i].lock);
397 }
398
399 INIT_LIST_HEAD(&ls->ls_waiters);
400 mutex_init(&ls->ls_waiters_mutex);
401
402 INIT_LIST_HEAD(&ls->ls_nodes);
403 INIT_LIST_HEAD(&ls->ls_nodes_gone);
404 ls->ls_num_nodes = 0;
405 ls->ls_low_nodeid = 0;
406 ls->ls_total_weight = 0;
407 ls->ls_node_array = NULL;
408
409 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
410 ls->ls_stub_rsb.res_ls = ls;
411
412 ls->ls_debug_dentry = NULL;
413
414 init_waitqueue_head(&ls->ls_uevent_wait);
415 ls->ls_uevent_result = 0;
416
417 ls->ls_recoverd_task = NULL;
418 mutex_init(&ls->ls_recoverd_active);
419 spin_lock_init(&ls->ls_recover_lock);
420 ls->ls_recover_status = 0;
421 ls->ls_recover_seq = 0;
422 ls->ls_recover_args = NULL;
423 init_rwsem(&ls->ls_in_recovery);
424 INIT_LIST_HEAD(&ls->ls_requestqueue);
425 mutex_init(&ls->ls_requestqueue_mutex);
426
427 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
428 if (!ls->ls_recover_buf)
429 goto out_dirfree;
430
431 INIT_LIST_HEAD(&ls->ls_recover_list);
432 spin_lock_init(&ls->ls_recover_list_lock);
433 ls->ls_recover_list_count = 0;
434 init_waitqueue_head(&ls->ls_wait_general);
435 INIT_LIST_HEAD(&ls->ls_root_list);
436 init_rwsem(&ls->ls_root_sem);
437
438 down_write(&ls->ls_in_recovery);
439
440 error = dlm_recoverd_start(ls);
441 if (error) {
442 log_error(ls, "can't start dlm_recoverd %d", error);
443 goto out_rcomfree;
444 }
445
446 spin_lock(&lslist_lock);
447 list_add(&ls->ls_list, &lslist);
448 spin_unlock(&lslist_lock);
449
450 dlm_create_debug_file(ls);
451
452 error = kobject_setup(ls);
453 if (error)
454 goto out_del;
455
456 error = kobject_register(&ls->ls_kobj);
457 if (error)
458 goto out_del;
459
460 error = do_uevent(ls, 1);
461 if (error)
462 goto out_unreg;
463
464 *lockspace = ls;
465 return 0;
466
467 out_unreg:
468 kobject_unregister(&ls->ls_kobj);
469 out_del:
470 dlm_delete_debug_file(ls);
471 spin_lock(&lslist_lock);
472 list_del(&ls->ls_list);
473 spin_unlock(&lslist_lock);
474 dlm_recoverd_stop(ls);
475 out_rcomfree:
476 kfree(ls->ls_recover_buf);
477 out_dirfree:
478 kfree(ls->ls_dirtbl);
479 out_lkbfree:
480 kfree(ls->ls_lkbtbl);
481 out_rsbfree:
482 kfree(ls->ls_rsbtbl);
483 out_lsfree:
484 kfree(ls);
485 out:
486 module_put(THIS_MODULE);
487 return error;
488}
489
490int dlm_new_lockspace(char *name, int namelen, void **lockspace,
491 uint32_t flags, int lvblen)
492{
493 int error = 0;
494
495 mutex_lock(&ls_lock);
496 if (!ls_count)
497 error = threads_start();
498 if (error)
499 goto out;
500
501 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
502 if (!error)
503 ls_count++;
504 out:
505 mutex_unlock(&ls_lock);
506 return error;
507}
508
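/* Typical usage, as a sketch (the lockspace name "example" and lvblen 32
 * are illustrative; lvblen just needs to be a non-zero multiple of 8):
 *
 *	void *ls;
 *	int error = dlm_new_lockspace("example", 7, &ls, 0, 32);
 *	if (!error)
 *		error = dlm_release_lockspace(ls, 0);
 */
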
509/* Return 1 if the lockspace still has active remote locks,
510 * 2 if the lockspace still has active local locks.
511 */
512static int lockspace_busy(struct dlm_ls *ls)
513{
514 int i, lkb_found = 0;
515 struct dlm_lkb *lkb;
516
517	/* NOTE: We check the lkbtbl here rather than the resource table.
518 This is because there may be LKBs queued as ASTs that have been
519 unlinked from their RSBs and are pending deletion once the AST has
520 been delivered */
521
522 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
523 read_lock(&ls->ls_lkbtbl[i].lock);
524 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
525 lkb_found = 1;
526 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
527 lkb_idtbl_list) {
528 if (!lkb->lkb_nodeid) {
529 read_unlock(&ls->ls_lkbtbl[i].lock);
530 return 2;
531 }
532 }
533 }
534 read_unlock(&ls->ls_lkbtbl[i].lock);
535 }
536 return lkb_found;
537}
538
539static int release_lockspace(struct dlm_ls *ls, int force)
540{
541 struct dlm_lkb *lkb;
542 struct dlm_rsb *rsb;
543 struct list_head *head;
544 int i;
545 int busy = lockspace_busy(ls);
546
547 if (busy > force)
548 return -EBUSY;
549
550 if (force < 3)
551 do_uevent(ls, 0);
552
553 dlm_recoverd_stop(ls);
554
555 remove_lockspace(ls);
556
557 dlm_delete_debug_file(ls);
558
559 dlm_astd_suspend();
560
561 kfree(ls->ls_recover_buf);
562
563 /*
564 * Free direntry structs.
565 */
566
567 dlm_dir_clear(ls);
568 kfree(ls->ls_dirtbl);
569
570 /*
571 * Free all lkb's on lkbtbl[] lists.
572 */
573
574 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
575 head = &ls->ls_lkbtbl[i].list;
576 while (!list_empty(head)) {
577 lkb = list_entry(head->next, struct dlm_lkb,
578 lkb_idtbl_list);
579
580 list_del(&lkb->lkb_idtbl_list);
581
582 dlm_del_ast(lkb);
583
584 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
585 free_lvb(lkb->lkb_lvbptr);
586
587 free_lkb(lkb);
588 }
589 }
590 dlm_astd_resume();
591
592 kfree(ls->ls_lkbtbl);
593
594 /*
595 * Free all rsb's on rsbtbl[] lists
596 */
597
598 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
599 head = &ls->ls_rsbtbl[i].list;
600 while (!list_empty(head)) {
601 rsb = list_entry(head->next, struct dlm_rsb,
602 res_hashchain);
603
604 list_del(&rsb->res_hashchain);
605 free_rsb(rsb);
606 }
607
608 head = &ls->ls_rsbtbl[i].toss;
609 while (!list_empty(head)) {
610 rsb = list_entry(head->next, struct dlm_rsb,
611 res_hashchain);
612 list_del(&rsb->res_hashchain);
613 free_rsb(rsb);
614 }
615 }
616
617 kfree(ls->ls_rsbtbl);
618
619 /*
620 * Free structures on any other lists
621 */
622
623 kfree(ls->ls_recover_args);
624 dlm_clear_free_entries(ls);
625 dlm_clear_members(ls);
626 dlm_clear_members_gone(ls);
627 kfree(ls->ls_node_array);
628 kobject_unregister(&ls->ls_kobj);
629 kfree(ls);
630
631 mutex_lock(&ls_lock);
632 ls_count--;
633 if (!ls_count)
634 threads_stop();
635 mutex_unlock(&ls_lock);
636
637 module_put(THIS_MODULE);
638 return 0;
639}
640
641/*
642 * Called when a system has released all its locks and is not going to use the
643 * lockspace any longer. We free everything we're managing for this lockspace.
644 * Remaining nodes will go through the recovery process as if we'd died. The
645 * lockspace must continue to function as usual, participating in recoveries,
646 * until this returns.
647 *
648 * Force has 4 possible values:
649 * 0 - don't destroy lockspace if it has any LKBs
650 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
651 * 2 - destroy lockspace regardless of LKBs
652 * 3 - destroy lockspace as part of a forced shutdown
653 */
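
/* e.g. dlm_release_lockspace(ls, 0) returns -EBUSY while the lockspace
   still holds any LKBs, whereas dlm_release_lockspace(ls, 2) tears it
   down regardless. */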
654
655int dlm_release_lockspace(void *lockspace, int force)
656{
657 struct dlm_ls *ls;
658
659 ls = dlm_find_lockspace_local(lockspace);
660 if (!ls)
661 return -EINVAL;
662 dlm_put_lockspace(ls);
663 return release_lockspace(ls, force);
664}
665
diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
new file mode 100644
index 000000000000..17bd3ba863a9
--- /dev/null
+++ b/fs/dlm/lockspace.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __LOCKSPACE_DOT_H__
15#define __LOCKSPACE_DOT_H__
16
17int dlm_lockspace_init(void);
18void dlm_lockspace_exit(void);
19struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
20struct dlm_ls *dlm_find_lockspace_local(void *id);
21void dlm_put_lockspace(struct dlm_ls *ls);
22
23#endif /* __LOCKSPACE_DOT_H__ */
24
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
new file mode 100644
index 000000000000..09b0124f7fc4
--- /dev/null
+++ b/fs/dlm/lowcomms.c
@@ -0,0 +1,1218 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * lowcomms.c
16 *
17 * This is the "low-level" comms layer.
18 *
19 * It is responsible for sending/receiving messages
20 * from other nodes in the cluster.
21 *
22 * Cluster nodes are referred to by their nodeids. nodeids are
23 * simply 32 bit numbers to the locking module - if they need to
24 * be expanded for the cluster infrastructure then that is its
25 * responsibility. It is this layer's
26 * responsibility to resolve these into IP addresses or
27 * whatever it needs for inter-node communication.
28 *
29 * The comms level consists of two kernel threads: a receive thread that
30 * deals mainly with receiving messages from other nodes and passing them
31 * up to the mid-level comms layer (which understands the
32 * message format) for execution by the locking core, and
33 * a send thread which does all the setting up of connections
34 * to remote nodes and the sending of data. Other threads are not allowed
35 * to send their own data because it may cause them to wait in times
36 * of high load. Also, this way, the sending thread can collect together
37 * messages bound for one node and send them in one block.
38 *
39 * I don't see any problem with the recv thread executing the locking
40 * code on behalf of remote processes as the locking code is
41 * short, efficient and never (well, hardly ever) waits.
42 *
43 */
44
45#include <asm/ioctls.h>
46#include <net/sock.h>
47#include <net/tcp.h>
48#include <net/sctp/user.h>
49#include <linux/pagemap.h>
50#include <linux/socket.h>
51#include <linux/idr.h>
52
53#include "dlm_internal.h"
54#include "lowcomms.h"
55#include "config.h"
56#include "midcomms.h"
57
58static struct sockaddr_storage *local_addr[DLM_MAX_ADDR_COUNT];
59static int local_count;
60static int local_nodeid;
61
62/* One of these per connected node */
63
64#define NI_INIT_PENDING 1
65#define NI_WRITE_PENDING 2
66
67struct nodeinfo {
68 spinlock_t lock;
69 sctp_assoc_t assoc_id;
70 unsigned long flags;
71 struct list_head write_list; /* nodes with pending writes */
72 struct list_head writequeue; /* outgoing writequeue_entries */
73 spinlock_t writequeue_lock;
74 int nodeid;
75};
76
77static DEFINE_IDR(nodeinfo_idr);
78static struct rw_semaphore nodeinfo_lock;
79static int max_nodeid;
80
81struct cbuf {
82 unsigned base;
83 unsigned len;
84 unsigned mask;
85};
86
87/* Just the one of these, now. But this struct keeps
88 the connection-specific variables together */
89
90#define CF_READ_PENDING 1
91
92struct connection {
93 struct socket *sock;
94 unsigned long flags;
95 struct page *rx_page;
96 atomic_t waiting_requests;
97 struct cbuf cb;
98 int eagain_flag;
99};
100
101/* An entry waiting to be sent */
102
103struct writequeue_entry {
104 struct list_head list;
105 struct page *page;
106 int offset;
107 int len;
108 int end;
109 int users;
110 struct nodeinfo *ni;
111};
112
113#define CBUF_ADD(cb, n) do { (cb)->len += n; } while(0)
114#define CBUF_EMPTY(cb) ((cb)->len == 0)
115#define CBUF_MAY_ADD(cb, n) (((cb)->len + (n)) < ((cb)->mask + 1))
116#define CBUF_DATA(cb) (((cb)->base + (cb)->len) & (cb)->mask)
117
118#define CBUF_INIT(cb, size) \
119do { \
120 (cb)->base = (cb)->len = 0; \
121 (cb)->mask = ((size)-1); \
122} while(0)
123
124#define CBUF_EAT(cb, n) \
125do { \
126 (cb)->len -= (n); \
127 (cb)->base += (n); \
128 (cb)->base &= (cb)->mask; \
129} while(0)
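
/* Example: after CBUF_INIT(&cb, 4096), CBUF_ADD(&cb, 100) leaves
   CBUF_DATA(&cb) == 100 (the next free byte), and CBUF_EAT(&cb, 60) then
   consumes 60 bytes from the front, leaving base 60 and len 40; the mask
   makes both indices wrap at the buffer boundary. */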
130
131
132/* List of nodes which have writes pending */
133static struct list_head write_nodes;
134static spinlock_t write_nodes_lock;
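
/* A nodeinfo is put on write_nodes at most once - the NI_WRITE_PENDING
   bit guards against duplicate entries - and the send thread drains the
   list. */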
135
136/* Maximum number of incoming messages to process before
137 * doing a schedule()
138 */
139#define MAX_RX_MSG_COUNT 25
140
141/* Manage daemons */
142static struct task_struct *recv_task;
143static struct task_struct *send_task;
144static wait_queue_head_t lowcomms_recv_wait;
145static atomic_t accepting;
146
147/* The SCTP connection */
148static struct connection sctp_con;
149
150
151static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
152{
153 struct sockaddr_storage addr;
154 int error;
155
156 if (!local_count)
157 return -1;
158
159 error = dlm_nodeid_to_addr(nodeid, &addr);
160 if (error)
161 return error;
162
163 if (local_addr[0]->ss_family == AF_INET) {
164 struct sockaddr_in *in4 = (struct sockaddr_in *) &addr;
165 struct sockaddr_in *ret4 = (struct sockaddr_in *) retaddr;
166 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
167 } else {
168 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
169 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
170 memcpy(&ret6->sin6_addr, &in6->sin6_addr,
171 sizeof(in6->sin6_addr));
172 }
173
174 return 0;
175}
176
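/* Look up the nodeinfo for nodeid, creating it when "alloc" (a gfp mask)
   is non-zero.  The lookup is repeated under the write lock because two
   callers can race to allocate; if the idr hands back an id other than
   the requested nodeid, the new slot is backed out. */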
177static struct nodeinfo *nodeid2nodeinfo(int nodeid, int alloc)
178{
179 struct nodeinfo *ni;
180 int r;
181 int n;
182
183 down_read(&nodeinfo_lock);
184 ni = idr_find(&nodeinfo_idr, nodeid);
185 up_read(&nodeinfo_lock);
186
187 if (!ni && alloc) {
188 down_write(&nodeinfo_lock);
189
190 ni = idr_find(&nodeinfo_idr, nodeid);
191 if (ni)
192 goto out_up;
193
194 r = idr_pre_get(&nodeinfo_idr, alloc);
195 if (!r)
196 goto out_up;
197
198 ni = kmalloc(sizeof(struct nodeinfo), alloc);
199 if (!ni)
200 goto out_up;
201
202 r = idr_get_new_above(&nodeinfo_idr, ni, nodeid, &n);
203 if (r) {
204 kfree(ni);
205 ni = NULL;
206 goto out_up;
207 }
208 if (n != nodeid) {
209 idr_remove(&nodeinfo_idr, n);
210 kfree(ni);
211 ni = NULL;
212 goto out_up;
213 }
214 memset(ni, 0, sizeof(struct nodeinfo));
215 spin_lock_init(&ni->lock);
216 INIT_LIST_HEAD(&ni->writequeue);
217 spin_lock_init(&ni->writequeue_lock);
218 ni->nodeid = nodeid;
219
220 if (nodeid > max_nodeid)
221 max_nodeid = nodeid;
222 out_up:
223 up_write(&nodeinfo_lock);
224 }
225
226 return ni;
227}
228
229/* Linear search over every known nodeid, so don't call this too often... */
230static struct nodeinfo *assoc2nodeinfo(sctp_assoc_t assoc)
231{
232 int i;
233 struct nodeinfo *ni;
234
235 for (i=1; i<=max_nodeid; i++) {
236 ni = nodeid2nodeinfo(i, 0);
237 if (ni && ni->assoc_id == assoc)
238 return ni;
239 }
240 return NULL;
241}
242
243/* Data or notification available on socket */
244static void lowcomms_data_ready(struct sock *sk, int count_unused)
245{
246 atomic_inc(&sctp_con.waiting_requests);
247 if (test_and_set_bit(CF_READ_PENDING, &sctp_con.flags))
248 return;
249
250 wake_up_interruptible(&lowcomms_recv_wait);
251}
252
253
254/* Add the port number to an IPv6 or IPv4 sockaddr and return the address length.
255   Also pad out the struct with zeros to make comparisons meaningful */
256
257static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
258 int *addr_len)
259{
260 struct sockaddr_in *local4_addr;
261 struct sockaddr_in6 *local6_addr;
262
263 if (!local_count)
264 return;
265
266 if (!port) {
267 if (local_addr[0]->ss_family == AF_INET) {
268 local4_addr = (struct sockaddr_in *)local_addr[0];
269 port = be16_to_cpu(local4_addr->sin_port);
270 } else {
271 local6_addr = (struct sockaddr_in6 *)local_addr[0];
272 port = be16_to_cpu(local6_addr->sin6_port);
273 }
274 }
275
276 saddr->ss_family = local_addr[0]->ss_family;
277 if (local_addr[0]->ss_family == AF_INET) {
278 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
279 in4_addr->sin_port = cpu_to_be16(port);
280 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
281 memset(in4_addr+1, 0, sizeof(struct sockaddr_storage) -
282 sizeof(struct sockaddr_in));
283 *addr_len = sizeof(struct sockaddr_in);
284 } else {
285 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
286 in6_addr->sin6_port = cpu_to_be16(port);
287 memset(in6_addr+1, 0, sizeof(struct sockaddr_storage) -
288 sizeof(struct sockaddr_in6));
289 *addr_len = sizeof(struct sockaddr_in6);
290 }
291}
292
293/* Close the connection and tidy up */
294static void close_connection(void)
295{
296 if (sctp_con.sock) {
297 sock_release(sctp_con.sock);
298 sctp_con.sock = NULL;
299 }
300
301 if (sctp_con.rx_page) {
302 __free_page(sctp_con.rx_page);
303 sctp_con.rx_page = NULL;
304 }
305}
306
307/* We only send shutdown messages to nodes that are not part of the cluster */
308static void send_shutdown(sctp_assoc_t associd)
309{
310 static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
311 struct msghdr outmessage;
312 struct cmsghdr *cmsg;
313 struct sctp_sndrcvinfo *sinfo;
314 int ret;
315
316 outmessage.msg_name = NULL;
317 outmessage.msg_namelen = 0;
318 outmessage.msg_control = outcmsg;
319 outmessage.msg_controllen = sizeof(outcmsg);
320 outmessage.msg_flags = MSG_EOR;
321
322 cmsg = CMSG_FIRSTHDR(&outmessage);
323 cmsg->cmsg_level = IPPROTO_SCTP;
324 cmsg->cmsg_type = SCTP_SNDRCV;
325 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
326 outmessage.msg_controllen = cmsg->cmsg_len;
327 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
328 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
329
330 sinfo->sinfo_flags |= MSG_EOF;
331 sinfo->sinfo_assoc_id = associd;
332
333 ret = kernel_sendmsg(sctp_con.sock, &outmessage, NULL, 0, 0);
334
335 if (ret != 0)
336 log_print("send EOF to node failed: %d", ret);
337}
338
339
340/* INIT failed but we don't know which node...
341 restart INIT on all pending nodes */
342static void init_failed(void)
343{
344 int i;
345 struct nodeinfo *ni;
346
347 for (i=1; i<=max_nodeid; i++) {
348 ni = nodeid2nodeinfo(i, 0);
349 if (!ni)
350 continue;
351
352 if (test_and_clear_bit(NI_INIT_PENDING, &ni->flags)) {
353 ni->assoc_id = 0;
354 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
355 spin_lock_bh(&write_nodes_lock);
356 list_add_tail(&ni->write_list, &write_nodes);
357 spin_unlock_bh(&write_nodes_lock);
358 }
359 }
360 }
361 wake_up_process(send_task);
362}
363
364/* Something happened to an association */
365static void process_sctp_notification(struct msghdr *msg, char *buf)
366{
367 union sctp_notification *sn = (union sctp_notification *)buf;
368
369 if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
370 switch (sn->sn_assoc_change.sac_state) {
371
372 case SCTP_COMM_UP:
373 case SCTP_RESTART:
374 {
375 /* Check that the new node is in the lockspace */
376 struct sctp_prim prim;
377 mm_segment_t fs;
378 int nodeid;
379 int prim_len, ret;
380 int addr_len;
381 struct nodeinfo *ni;
382
383			/* This seems to happen when we receive a connection
384 * too early... or something... anyway, it happens but
385 * we always seem to get a real message too, see
386 * receive_from_sock */
387
388 if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) {
389 log_print("COMM_UP for invalid assoc ID %d",
390 (int)sn->sn_assoc_change.sac_assoc_id);
391 init_failed();
392 return;
393 }
394 memset(&prim, 0, sizeof(struct sctp_prim));
395 prim_len = sizeof(struct sctp_prim);
396 prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id;
397
398 fs = get_fs();
399 set_fs(get_ds());
400 ret = sctp_con.sock->ops->getsockopt(sctp_con.sock,
401 IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
402 (char*)&prim, &prim_len);
403 set_fs(fs);
404 if (ret < 0) {
405 struct nodeinfo *ni;
406
407 log_print("getsockopt/sctp_primary_addr on "
408 "new assoc %d failed : %d",
409 (int)sn->sn_assoc_change.sac_assoc_id, ret);
410
411 /* Retry INIT later */
412 ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
413 if (ni)
414 clear_bit(NI_INIT_PENDING, &ni->flags);
415 return;
416 }
417 make_sockaddr(&prim.ssp_addr, 0, &addr_len);
418 if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
419 log_print("reject connect from unknown addr");
420 send_shutdown(prim.ssp_assoc_id);
421 return;
422 }
423
424 ni = nodeid2nodeinfo(nodeid, GFP_KERNEL);
425 if (!ni)
426 return;
427
428 /* Save the assoc ID */
429 spin_lock(&ni->lock);
430 ni->assoc_id = sn->sn_assoc_change.sac_assoc_id;
431 spin_unlock(&ni->lock);
432
433 log_print("got new/restarted association %d nodeid %d",
434 (int)sn->sn_assoc_change.sac_assoc_id, nodeid);
435
436 /* Send any pending writes */
437 clear_bit(NI_INIT_PENDING, &ni->flags);
438 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
439 spin_lock_bh(&write_nodes_lock);
440 list_add_tail(&ni->write_list, &write_nodes);
441 spin_unlock_bh(&write_nodes_lock);
442 }
443 wake_up_process(send_task);
444 }
445 break;
446
447 case SCTP_COMM_LOST:
448 case SCTP_SHUTDOWN_COMP:
449 {
450 struct nodeinfo *ni;
451
452 ni = assoc2nodeinfo(sn->sn_assoc_change.sac_assoc_id);
453 if (ni) {
454 spin_lock(&ni->lock);
455 ni->assoc_id = 0;
456 spin_unlock(&ni->lock);
457 }
458 }
459 break;
460
461		/* We don't know which INIT failed, so clear the PENDING flags
462		 * on them all.  If assoc_id is zero the INIT will be
463		 * retried */
464
465 case SCTP_CANT_STR_ASSOC:
466 {
467 log_print("Can't start SCTP association - retrying");
468 init_failed();
469 }
470 break;
471
472 default:
473 log_print("unexpected SCTP assoc change id=%d state=%d",
474 (int)sn->sn_assoc_change.sac_assoc_id,
475 sn->sn_assoc_change.sac_state);
476 }
477 }
478}
479
480/* Data received from remote end */
481static int receive_from_sock(void)
482{
483 int ret = 0;
484 struct msghdr msg;
485 struct kvec iov[2];
486 unsigned len;
487 int r;
488 struct sctp_sndrcvinfo *sinfo;
489 struct cmsghdr *cmsg;
490 struct nodeinfo *ni;
491
492 /* These two are marginally too big for stack allocation, but this
493 * function is (currently) only called by dlm_recvd so static should be
494 * OK.
495 */
496 static struct sockaddr_storage msgname;
497 static char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
498
499 if (sctp_con.sock == NULL)
500 goto out;
501
502 if (sctp_con.rx_page == NULL) {
503 /*
504 * This doesn't need to be atomic, but I think it should
505 * improve performance if it is.
506 */
507 sctp_con.rx_page = alloc_page(GFP_ATOMIC);
508 if (sctp_con.rx_page == NULL)
509 goto out_resched;
510 CBUF_INIT(&sctp_con.cb, PAGE_CACHE_SIZE);
511 }
512
513 memset(&incmsg, 0, sizeof(incmsg));
514 memset(&msgname, 0, sizeof(msgname));
515
517 msg.msg_name = &msgname;
518 msg.msg_namelen = sizeof(msgname);
519 msg.msg_flags = 0;
520 msg.msg_control = incmsg;
521 msg.msg_controllen = sizeof(incmsg);
522
523 /* I don't see why this circular buffer stuff is necessary for SCTP
524 * which is a packet-based protocol, but the whole thing breaks under
525 * load without it! The overhead is minimal (and is in the TCP lowcomms
526 * anyway, of course) so I'll leave it in until I can figure out what's
527 * really happening.
528 */
529
530 /*
531 * iov[0] is the bit of the circular buffer between the current end
532 * point (cb.base + cb.len) and the end of the buffer.
533 */
534 iov[0].iov_len = sctp_con.cb.base - CBUF_DATA(&sctp_con.cb);
535 iov[0].iov_base = page_address(sctp_con.rx_page) +
536 CBUF_DATA(&sctp_con.cb);
537 iov[1].iov_len = 0;
538
539 /*
540 * iov[1] is the bit of the circular buffer between the start of the
541 * buffer and the start of the currently used section (cb.base)
542 */
543 if (CBUF_DATA(&sctp_con.cb) >= sctp_con.cb.base) {
544 iov[0].iov_len = PAGE_CACHE_SIZE - CBUF_DATA(&sctp_con.cb);
545 iov[1].iov_len = sctp_con.cb.base;
546 iov[1].iov_base = page_address(sctp_con.rx_page);
547 msg.msg_iovlen = 2;
548 }
549 len = iov[0].iov_len + iov[1].iov_len;
550
551 r = ret = kernel_recvmsg(sctp_con.sock, &msg, iov, 1, len,
552 MSG_NOSIGNAL | MSG_DONTWAIT);
553 if (ret <= 0)
554 goto out_close;
555
556 msg.msg_control = incmsg;
557 msg.msg_controllen = sizeof(incmsg);
558 cmsg = CMSG_FIRSTHDR(&msg);
559 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
560
561 if (msg.msg_flags & MSG_NOTIFICATION) {
562 process_sctp_notification(&msg, page_address(sctp_con.rx_page));
563 return 0;
564 }
565
566 /* Is this a new association ? */
567 ni = nodeid2nodeinfo(le32_to_cpu(sinfo->sinfo_ppid), GFP_KERNEL);
568 if (ni) {
569 ni->assoc_id = sinfo->sinfo_assoc_id;
570 if (test_and_clear_bit(NI_INIT_PENDING, &ni->flags)) {
571
572 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
573 spin_lock_bh(&write_nodes_lock);
574 list_add_tail(&ni->write_list, &write_nodes);
575 spin_unlock_bh(&write_nodes_lock);
576 }
577 wake_up_process(send_task);
578 }
579 }
580
581 /* INIT sends a message with length of 1 - ignore it */
582 if (r == 1)
583 return 0;
584
585 CBUF_ADD(&sctp_con.cb, ret);
586 ret = dlm_process_incoming_buffer(cpu_to_le32(sinfo->sinfo_ppid),
587 page_address(sctp_con.rx_page),
588 sctp_con.cb.base, sctp_con.cb.len,
589 PAGE_CACHE_SIZE);
590 if (ret < 0)
591 goto out_close;
592 CBUF_EAT(&sctp_con.cb, ret);
593
594 out:
595 ret = 0;
596 goto out_ret;
597
598 out_resched:
599 lowcomms_data_ready(sctp_con.sock->sk, 0);
600 ret = 0;
601 schedule();
602 goto out_ret;
603
604 out_close:
605 if (ret != -EAGAIN)
606 log_print("error reading from sctp socket: %d", ret);
607 out_ret:
608 return ret;
609}
610
611/* Bind to an IP address. SCTP allows multiple addresses so it can do multi-homing */
612static int add_bind_addr(struct sockaddr_storage *addr, int addr_len, int num)
613{
614 mm_segment_t fs;
615 int result = 0;
616
617 fs = get_fs();
618 set_fs(get_ds());
619 if (num == 1)
620 result = sctp_con.sock->ops->bind(sctp_con.sock,
621 (struct sockaddr *) addr, addr_len);
622 else
623 result = sctp_con.sock->ops->setsockopt(sctp_con.sock, SOL_SCTP,
624 SCTP_SOCKOPT_BINDX_ADD, (char *)addr, addr_len);
625 set_fs(fs);
626
627 if (result < 0)
628 log_print("Can't bind to port %d addr number %d",
629 dlm_config.tcp_port, num);
630
631 return result;
632}
633
634static void init_local(void)
635{
636 struct sockaddr_storage sas, *addr;
637 int i;
638
639 local_nodeid = dlm_our_nodeid();
640
641 for (i = 0; i < DLM_MAX_ADDR_COUNT - 1; i++) {
642 if (dlm_our_addr(&sas, i))
643 break;
644
645 addr = kmalloc(sizeof(*addr), GFP_KERNEL);
646 if (!addr)
647 break;
648 memcpy(addr, &sas, sizeof(*addr));
649 local_addr[local_count++] = addr;
650 }
651}
652
653/* Initialise SCTP socket and bind to all interfaces */
654static int init_sock(void)
655{
656 mm_segment_t fs;
657 struct socket *sock = NULL;
658 struct sockaddr_storage localaddr;
659 struct sctp_event_subscribe subscribe;
660 int result = -EINVAL, num = 1, i, addr_len;
661
662 if (!local_count) {
663 init_local();
664 if (!local_count) {
665 log_print("no local IP address has been set");
666 goto out;
667 }
668 }
669
670 result = sock_create_kern(local_addr[0]->ss_family, SOCK_SEQPACKET,
671 IPPROTO_SCTP, &sock);
672 if (result < 0) {
673 log_print("Can't create comms socket, check SCTP is loaded");
674 goto out;
675 }
676
677 /* Listen for events */
678 memset(&subscribe, 0, sizeof(subscribe));
679 subscribe.sctp_data_io_event = 1;
680 subscribe.sctp_association_event = 1;
681 subscribe.sctp_send_failure_event = 1;
682 subscribe.sctp_shutdown_event = 1;
683 subscribe.sctp_partial_delivery_event = 1;
684
685 fs = get_fs();
686 set_fs(get_ds());
687 result = sock->ops->setsockopt(sock, SOL_SCTP, SCTP_EVENTS,
688 (char *)&subscribe, sizeof(subscribe));
689 set_fs(fs);
690
691 if (result < 0) {
692 log_print("Failed to set SCTP_EVENTS on socket: result=%d",
693 result);
694 goto create_delsock;
695 }
696
697 /* Init con struct */
698 sock->sk->sk_user_data = &sctp_con;
699 sctp_con.sock = sock;
700 sctp_con.sock->sk->sk_data_ready = lowcomms_data_ready;
701
702 /* Bind to all interfaces. */
703 for (i = 0; i < local_count; i++) {
704 memcpy(&localaddr, local_addr[i], sizeof(localaddr));
705 make_sockaddr(&localaddr, dlm_config.tcp_port, &addr_len);
706
707 result = add_bind_addr(&localaddr, addr_len, num);
708 if (result)
709 goto create_delsock;
710 ++num;
711 }
712
713 result = sock->ops->listen(sock, 5);
714 if (result < 0) {
715 log_print("Can't set socket listening");
716 goto create_delsock;
717 }
718
719 return 0;
720
721 create_delsock:
722 sock_release(sock);
723 sctp_con.sock = NULL;
724 out:
725 return result;
726}
727
728
729static struct writequeue_entry *new_writequeue_entry(int allocation)
730{
731 struct writequeue_entry *entry;
732
733 entry = kmalloc(sizeof(struct writequeue_entry), allocation);
734 if (!entry)
735 return NULL;
736
737 entry->page = alloc_page(allocation);
738 if (!entry->page) {
739 kfree(entry);
740 return NULL;
741 }
742
743 entry->offset = 0;
744 entry->len = 0;
745 entry->end = 0;
746 entry->users = 0;
747
748 return entry;
749}
750
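/* Reserve len bytes at the tail of nodeid's writequeue, starting a fresh
   page-backed entry when the current one cannot hold len.  The first user
   kmaps the page; the mapping is dropped in dlm_lowcomms_commit_buffer()
   once the last concurrent writer commits. */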
751void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc)
752{
753 struct writequeue_entry *e;
754 int offset = 0;
755 int users = 0;
756 struct nodeinfo *ni;
757
758 if (!atomic_read(&accepting))
759 return NULL;
760
761 ni = nodeid2nodeinfo(nodeid, allocation);
762 if (!ni)
763 return NULL;
764
765 spin_lock(&ni->writequeue_lock);
766 e = list_entry(ni->writequeue.prev, struct writequeue_entry, list);
767 if (((struct list_head *) e == &ni->writequeue) ||
768 (PAGE_CACHE_SIZE - e->end < len)) {
769 e = NULL;
770 } else {
771 offset = e->end;
772 e->end += len;
773 users = e->users++;
774 }
775 spin_unlock(&ni->writequeue_lock);
776
777 if (e) {
778 got_one:
779 if (users == 0)
780 kmap(e->page);
781 *ppc = page_address(e->page) + offset;
782 return e;
783 }
784
785 e = new_writequeue_entry(allocation);
786 if (e) {
787 spin_lock(&ni->writequeue_lock);
788 offset = e->end;
789 e->end += len;
790 e->ni = ni;
791 users = e->users++;
792 list_add_tail(&e->list, &ni->writequeue);
793 spin_unlock(&ni->writequeue_lock);
794 goto got_one;
795 }
796 return NULL;
797}
798
799void dlm_lowcomms_commit_buffer(void *arg)
800{
801 struct writequeue_entry *e = (struct writequeue_entry *) arg;
802 int users;
803 struct nodeinfo *ni = e->ni;
804
805 if (!atomic_read(&accepting))
806 return;
807
808 spin_lock(&ni->writequeue_lock);
809 users = --e->users;
810 if (users)
811 goto out;
812 e->len = e->end - e->offset;
813 kunmap(e->page);
814 spin_unlock(&ni->writequeue_lock);
815
816 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
817 spin_lock_bh(&write_nodes_lock);
818 list_add_tail(&ni->write_list, &write_nodes);
819 spin_unlock_bh(&write_nodes_lock);
820 wake_up_process(send_task);
821 }
822 return;
823
824 out:
825 spin_unlock(&ni->writequeue_lock);
826 return;
827}
828
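The two functions above form lowcomms' send-side API: dlm_lowcomms_get_buffer() reserves len bytes in the tail page of a node's write queue (kmapping the page for its first user, or allocating a fresh writequeue_entry when the current page is full), and dlm_lowcomms_commit_buffer() drops the user count and, once no writers remain, queues the node on write_nodes and wakes dlm_sendd. A minimal hypothetical caller would look like this (send_hello and its payload are illustrative only; create_rcom() in rcom.c below is a real user):

    static int send_hello(int nodeid)
    {
        void *mh;
        char *p;

        mh = dlm_lowcomms_get_buffer(nodeid, 5, GFP_KERNEL, &p);
        if (!mh)
            return -ENOBUFS;
        memcpy(p, "hello", 5);          /* build the message in place */
        dlm_lowcomms_commit_buffer(mh); /* hand it to dlm_sendd */
        return 0;
    }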
829static void free_entry(struct writequeue_entry *e)
830{
831 __free_page(e->page);
832 kfree(e);
833}
834
835/* Initiate an SCTP association. In theory we could just use sendmsg() on
836 the first IP address and it should work, but this allows us to set up the
837 association before sending any valuable data that we can't afford to lose.
838 It also keeps the send path clean as it can now always use the association ID */
839static void initiate_association(int nodeid)
840{
841 struct sockaddr_storage rem_addr;
842 static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
843 struct msghdr outmessage;
844 struct cmsghdr *cmsg;
845 struct sctp_sndrcvinfo *sinfo;
846 int ret;
847 int addrlen;
848 char buf[1];
849 struct kvec iov[1];
850 struct nodeinfo *ni;
851
852 log_print("Initiating association with node %d", nodeid);
853
854 ni = nodeid2nodeinfo(nodeid, GFP_KERNEL);
855 if (!ni)
856 return;
857
858 if (nodeid_to_addr(nodeid, (struct sockaddr *)&rem_addr)) {
859 log_print("no address for nodeid %d", nodeid);
860 return;
861 }
862
863 make_sockaddr(&rem_addr, dlm_config.tcp_port, &addrlen);
864
865 outmessage.msg_name = &rem_addr;
866 outmessage.msg_namelen = addrlen;
867 outmessage.msg_control = outcmsg;
868 outmessage.msg_controllen = sizeof(outcmsg);
869 outmessage.msg_flags = MSG_EOR;
870
871 iov[0].iov_base = buf;
872 iov[0].iov_len = 1;
873
874 /* Real INIT messages seem to cause trouble. Just send a 1-byte message
875 we can afford to lose */
876 cmsg = CMSG_FIRSTHDR(&outmessage);
877 cmsg->cmsg_level = IPPROTO_SCTP;
878 cmsg->cmsg_type = SCTP_SNDRCV;
879 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
880 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
881 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
882 sinfo->sinfo_ppid = cpu_to_le32(local_nodeid);
883
884 outmessage.msg_controllen = cmsg->cmsg_len;
885 ret = kernel_sendmsg(sctp_con.sock, &outmessage, iov, 1, 1);
886 if (ret < 0) {
887 log_print("send INIT to node failed: %d", ret);
888 /* Try again later */
889 clear_bit(NI_INIT_PENDING, &ni->flags);
890 }
891}
892
893/* Send a message */
894static int send_to_sock(struct nodeinfo *ni)
895{
896 int ret = 0;
897 struct writequeue_entry *e;
898 int len, offset;
899 struct msghdr outmsg;
900 static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
901 struct cmsghdr *cmsg;
902 struct sctp_sndrcvinfo *sinfo;
903 struct kvec iov;
904
905 /* See if we need to init an association before we start
906 sending precious messages */
907 spin_lock(&ni->lock);
908 if (!ni->assoc_id && !test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
909 spin_unlock(&ni->lock);
910 initiate_association(ni->nodeid);
911 return 0;
912 }
913 spin_unlock(&ni->lock);
914
915 outmsg.msg_name = NULL; /* We use assoc_id */
916 outmsg.msg_namelen = 0;
917 outmsg.msg_control = outcmsg;
918 outmsg.msg_controllen = sizeof(outcmsg);
919 outmsg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL | MSG_EOR;
920
921 cmsg = CMSG_FIRSTHDR(&outmsg);
922 cmsg->cmsg_level = IPPROTO_SCTP;
923 cmsg->cmsg_type = SCTP_SNDRCV;
924 cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
925 sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
926 memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo));
927 sinfo->sinfo_ppid = cpu_to_le32(local_nodeid);
928 sinfo->sinfo_assoc_id = ni->assoc_id;
929 outmsg.msg_controllen = cmsg->cmsg_len;
930
931 spin_lock(&ni->writequeue_lock);
932 for (;;) {
933 if (list_empty(&ni->writequeue))
934 break;
935 e = list_entry(ni->writequeue.next, struct writequeue_entry,
936 list);
937 kmap(e->page);
938 len = e->len;
939 offset = e->offset;
940 BUG_ON(len == 0 && e->users == 0);
941 spin_unlock(&ni->writequeue_lock);
942
943 ret = 0;
944 if (len) {
945 iov.iov_base = page_address(e->page)+offset;
946 iov.iov_len = len;
947
948 ret = kernel_sendmsg(sctp_con.sock, &outmsg, &iov, 1,
949 len);
950 if (ret == -EAGAIN) {
951 sctp_con.eagain_flag = 1;
952 goto out;
953 } else if (ret < 0)
954 goto send_error;
955 } else {
956 /* Don't starve people filling buffers */
957 schedule();
958 }
959
960 spin_lock(&ni->writequeue_lock);
961 e->offset += ret;
962 e->len -= ret;
963
964 if (e->len == 0 && e->users == 0) {
965 list_del(&e->list);
966 free_entry(e);
967 continue;
968 }
969 }
970 spin_unlock(&ni->writequeue_lock);
971 out:
972 return ret;
973
974 send_error:
975 log_print("Error sending to node %d %d", ni->nodeid, ret);
976 spin_lock(&ni->lock);
977 if (!test_and_set_bit(NI_INIT_PENDING, &ni->flags)) {
978 ni->assoc_id = 0;
979 spin_unlock(&ni->lock);
980 initiate_association(ni->nodeid);
981 } else
982 spin_unlock(&ni->lock);
983
984 return ret;
985}
986
987/* Try to send any messages that are pending */
988static void process_output_queue(void)
989{
990 struct list_head *list;
991 struct list_head *temp;
992
993 spin_lock_bh(&write_nodes_lock);
994 list_for_each_safe(list, temp, &write_nodes) {
995 struct nodeinfo *ni =
996 list_entry(list, struct nodeinfo, write_list);
997 clear_bit(NI_WRITE_PENDING, &ni->flags);
998 list_del(&ni->write_list);
999
1000 spin_unlock_bh(&write_nodes_lock);
1001
1002 send_to_sock(ni);
1003 spin_lock_bh(&write_nodes_lock);
1004 }
1005 spin_unlock_bh(&write_nodes_lock);
1006}
1007
1008/* Called after we've had -EAGAIN and been woken up */
1009static void refill_write_queue(void)
1010{
1011 int i;
1012
1013 for (i=1; i<=max_nodeid; i++) {
1014 struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
1015
1016 if (ni) {
1017 if (!test_and_set_bit(NI_WRITE_PENDING, &ni->flags)) {
1018 spin_lock_bh(&write_nodes_lock);
1019 list_add_tail(&ni->write_list, &write_nodes);
1020 spin_unlock_bh(&write_nodes_lock);
1021 }
1022 }
1023 }
1024}
1025
1026static void clean_one_writequeue(struct nodeinfo *ni)
1027{
1028 struct list_head *list;
1029 struct list_head *temp;
1030
1031 spin_lock(&ni->writequeue_lock);
1032 list_for_each_safe(list, temp, &ni->writequeue) {
1033 struct writequeue_entry *e =
1034 list_entry(list, struct writequeue_entry, list);
1035 list_del(&e->list);
1036 free_entry(e);
1037 }
1038 spin_unlock(&ni->writequeue_lock);
1039}
1040
1041static void clean_writequeues(void)
1042{
1043 int i;
1044
1045 for (i=1; i<=max_nodeid; i++) {
1046 struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
1047 if (ni)
1048 clean_one_writequeue(ni);
1049 }
1050}
1051
1052
1053static void dealloc_nodeinfo(void)
1054{
1055 int i;
1056
1057 for (i=1; i<=max_nodeid; i++) {
1058 struct nodeinfo *ni = nodeid2nodeinfo(i, 0);
1059 if (ni) {
1060 idr_remove(&nodeinfo_idr, i);
1061 kfree(ni);
1062 }
1063 }
1064}
1065
1066static int write_list_empty(void)
1067{
1068 int status;
1069
1070 spin_lock_bh(&write_nodes_lock);
1071 status = list_empty(&write_nodes);
1072 spin_unlock_bh(&write_nodes_lock);
1073
1074 return status;
1075}
1076
1077static int dlm_recvd(void *data)
1078{
1079 DECLARE_WAITQUEUE(wait, current);
1080
1081 while (!kthread_should_stop()) {
1082 int count = 0;
1083
1084 set_current_state(TASK_INTERRUPTIBLE);
1085 add_wait_queue(&lowcomms_recv_wait, &wait);
1086 if (!test_bit(CF_READ_PENDING, &sctp_con.flags))
1087 schedule();
1088 remove_wait_queue(&lowcomms_recv_wait, &wait);
1089 set_current_state(TASK_RUNNING);
1090
1091 if (test_and_clear_bit(CF_READ_PENDING, &sctp_con.flags)) {
1092 int ret;
1093
1094 do {
1095 ret = receive_from_sock();
1096
1097 /* Don't starve out everyone else */
1098 if (++count >= MAX_RX_MSG_COUNT) {
1099 schedule();
1100 count = 0;
1101 }
1102 } while (!kthread_should_stop() && ret >= 0);
1103 }
1104 schedule();
1105 }
1106
1107 return 0;
1108}
1109
1110static int dlm_sendd(void *data)
1111{
1112 DECLARE_WAITQUEUE(wait, current);
1113
1114 add_wait_queue(sctp_con.sock->sk->sk_sleep, &wait);
1115
1116 while (!kthread_should_stop()) {
1117 set_current_state(TASK_INTERRUPTIBLE);
1118 if (write_list_empty())
1119 schedule();
1120 set_current_state(TASK_RUNNING);
1121
1122 if (sctp_con.eagain_flag) {
1123 sctp_con.eagain_flag = 0;
1124 refill_write_queue();
1125 }
1126 process_output_queue();
1127 }
1128
1129 remove_wait_queue(sctp_con.sock->sk->sk_sleep, &wait);
1130
1131 return 0;
1132}
1133
1134static void daemons_stop(void)
1135{
1136 kthread_stop(recv_task);
1137 kthread_stop(send_task);
1138}
1139
1140static int daemons_start(void)
1141{
1142 struct task_struct *p;
1143 int error;
1144
1145 p = kthread_run(dlm_recvd, NULL, "dlm_recvd");
1146 if (IS_ERR(p)) {
1147 error = PTR_ERR(p);
1148 log_print("can't start dlm_recvd %d", error);
1149 return error;
1150 }
1151 recv_task = p;
1152
1153 p = kthread_run(dlm_sendd, NULL, "dlm_sendd");
1154 if (IS_ERR(p)) {
1155 error = PTR_ERR(p);
1156 log_print("can't start dlm_sendd %d", error);
1157 kthread_stop(recv_task);
1158 return error;
1159 }
1160 send_task = p;
1161
1162 return 0;
1163}
1164
1165/*
1166 * This is quite likely to sleep...
1167 */
1168int dlm_lowcomms_start(void)
1169{
1170 int error;
1171
1172 spin_lock_init(&write_nodes_lock);
1173 INIT_LIST_HEAD(&write_nodes);
1174 init_rwsem(&nodeinfo_lock);
1175
1176 error = init_sock();
1177 if (error)
1178 goto fail_sock;
1179 error = daemons_start();
1180 if (error)
1181 goto fail_sock;
1182 atomic_set(&accepting, 1);
1183 return 0;
1184
1185 fail_sock:
1186 close_connection();
1187 return error;
1188}
1189
1190/* Set all the activity flags to prevent any socket activity. */
1191
1192void dlm_lowcomms_stop(void)
1193{
1194 atomic_set(&accepting, 0);
1195 sctp_con.flags = 0x7;
1196 daemons_stop();
1197 clean_writequeues();
1198 close_connection();
1199 dealloc_nodeinfo();
1200 max_nodeid = 0;
1201}
1202
1203int dlm_lowcomms_init(void)
1204{
1205 init_waitqueue_head(&lowcomms_recv_wait);
1206 return 0;
1207}
1208
1209void dlm_lowcomms_exit(void)
1210{
1211 int i;
1212
1213 for (i = 0; i < local_count; i++)
1214 kfree(local_addr[i]);
1215 local_count = 0;
1216 local_nodeid = 0;
1217}
1218
diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h
new file mode 100644
index 000000000000..3af8035ff12f
--- /dev/null
+++ b/fs/dlm/lowcomms.h
@@ -0,0 +1,25 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __LOWCOMMS_DOT_H__
15#define __LOWCOMMS_DOT_H__
16
17int dlm_lowcomms_init(void);
18void dlm_lowcomms_exit(void);
19int dlm_lowcomms_start(void);
20void dlm_lowcomms_stop(void);
21void *dlm_lowcomms_get_buffer(int nodeid, int len, int allocation, char **ppc);
22void dlm_lowcomms_commit_buffer(void *mh);
23
24#endif /* __LOWCOMMS_DOT_H__ */
25
diff --git a/fs/dlm/lvb_table.h b/fs/dlm/lvb_table.h
new file mode 100644
index 000000000000..cc3e92f3feef
--- /dev/null
+++ b/fs/dlm/lvb_table.h
@@ -0,0 +1,18 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __LVB_TABLE_DOT_H__
14#define __LVB_TABLE_DOT_H__
15
16extern const int dlm_lvb_operations[8][8];
17
18#endif
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
new file mode 100644
index 000000000000..81bf4cb22033
--- /dev/null
+++ b/fs/dlm/main.c
@@ -0,0 +1,89 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "lock.h"
17#include "memory.h"
18#include "lowcomms.h"
19#include "config.h"
20
21#ifdef CONFIG_DLM_DEBUG
22int dlm_register_debugfs(void);
23void dlm_unregister_debugfs(void);
24#else
25static inline int dlm_register_debugfs(void) { return 0; }
26static inline void dlm_unregister_debugfs(void) { }
27#endif
28
29static int __init init_dlm(void)
30{
31 int error;
32
33 error = dlm_memory_init();
34 if (error)
35 goto out;
36
37 error = dlm_lockspace_init();
38 if (error)
39 goto out_mem;
40
41 error = dlm_config_init();
42 if (error)
43 goto out_lockspace;
44
45 error = dlm_register_debugfs();
46 if (error)
47 goto out_config;
48
49 error = dlm_lowcomms_init();
50 if (error)
51 goto out_debug;
52
53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
54
55 return 0;
56
57 out_debug:
58 dlm_unregister_debugfs();
59 out_config:
60 dlm_config_exit();
61 out_lockspace:
62 dlm_lockspace_exit();
63 out_mem:
64 dlm_memory_exit();
65 out:
66 return error;
67}
68
69static void __exit exit_dlm(void)
70{
71 dlm_lowcomms_exit();
72 dlm_config_exit();
73 dlm_memory_exit();
74 dlm_lockspace_exit();
75 dlm_unregister_debugfs();
76}
77
78module_init(init_dlm);
79module_exit(exit_dlm);
80
81MODULE_DESCRIPTION("Distributed Lock Manager");
82MODULE_AUTHOR("Red Hat, Inc.");
83MODULE_LICENSE("GPL");
84
85EXPORT_SYMBOL_GPL(dlm_new_lockspace);
86EXPORT_SYMBOL_GPL(dlm_release_lockspace);
87EXPORT_SYMBOL_GPL(dlm_lock);
88EXPORT_SYMBOL_GPL(dlm_unlock);
89
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
new file mode 100644
index 000000000000..926cd0cb6bff
--- /dev/null
+++ b/fs/dlm/member.c
@@ -0,0 +1,313 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include "dlm_internal.h"
14#include "lockspace.h"
15#include "member.h"
16#include "recoverd.h"
17#include "recover.h"
18#include "lowcomms.h"
19#include "rcom.h"
20#include "config.h"
21
22/*
23 * Following called by dlm_recoverd thread
24 */
25
26static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
27{
28 struct dlm_member *memb = NULL;
29 struct list_head *tmp;
30 struct list_head *newlist = &new->list;
31 struct list_head *head = &ls->ls_nodes;
32
33 list_for_each(tmp, head) {
34 memb = list_entry(tmp, struct dlm_member, list);
35 if (new->nodeid < memb->nodeid)
36 break;
37 }
38
39 if (!memb)
40 list_add_tail(newlist, head);
41 else {
42 /* FIXME: this is just list_add_tail(newlist, tmp) open-coded */
43 newlist->prev = tmp->prev;
44 newlist->next = tmp;
45 tmp->prev->next = newlist;
46 tmp->prev = newlist;
47 }
48}
49
50static int dlm_add_member(struct dlm_ls *ls, int nodeid)
51{
52 struct dlm_member *memb;
53 int w;
54
55 w = dlm_node_weight(ls->ls_name, nodeid);
56 if (w < 0)
57 return w;
58
59 memb = kzalloc(sizeof(struct dlm_member), GFP_KERNEL);
60 if (!memb)
61 return -ENOMEM;
62
63 memb->nodeid = nodeid;
64 memb->weight = w;
65 add_ordered_member(ls, memb);
66 ls->ls_num_nodes++;
67 return 0;
68}
69
70static void dlm_remove_member(struct dlm_ls *ls, struct dlm_member *memb)
71{
72 list_move(&memb->list, &ls->ls_nodes_gone);
73 ls->ls_num_nodes--;
74}
75
76static int dlm_is_member(struct dlm_ls *ls, int nodeid)
77{
78 struct dlm_member *memb;
79
80 list_for_each_entry(memb, &ls->ls_nodes, list) {
81 if (memb->nodeid == nodeid)
82 return 1;
83 }
84 return 0;
85}
86
87int dlm_is_removed(struct dlm_ls *ls, int nodeid)
88{
89 struct dlm_member *memb;
90
91 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
92 if (memb->nodeid == nodeid)
93 return 1;
94 }
95 return 0;
96}
97
98static void clear_memb_list(struct list_head *head)
99{
100 struct dlm_member *memb;
101
102 while (!list_empty(head)) {
103 memb = list_entry(head->next, struct dlm_member, list);
104 list_del(&memb->list);
105 kfree(memb);
106 }
107}
108
109void dlm_clear_members(struct dlm_ls *ls)
110{
111 clear_memb_list(&ls->ls_nodes);
112 ls->ls_num_nodes = 0;
113}
114
115void dlm_clear_members_gone(struct dlm_ls *ls)
116{
117 clear_memb_list(&ls->ls_nodes_gone);
118}
119
120static void make_member_array(struct dlm_ls *ls)
121{
122 struct dlm_member *memb;
123 int i, w, x = 0, total = 0, all_zero = 0, *array;
124
125 kfree(ls->ls_node_array);
126 ls->ls_node_array = NULL;
127
128 list_for_each_entry(memb, &ls->ls_nodes, list) {
129 if (memb->weight)
130 total += memb->weight;
131 }
132
133 /* all nodes revert to weight of 1 if all have weight 0 */
134
135 if (!total) {
136 total = ls->ls_num_nodes;
137 all_zero = 1;
138 }
139
140 ls->ls_total_weight = total;
141
142 array = kmalloc(sizeof(int) * total, GFP_KERNEL);
143 if (!array)
144 return;
145
146 list_for_each_entry(memb, &ls->ls_nodes, list) {
147 if (!all_zero && !memb->weight)
148 continue;
149
150 if (all_zero)
151 w = 1;
152 else
153 w = memb->weight;
154
155 DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););
156
157 for (i = 0; i < w; i++)
158 array[x++] = memb->nodeid;
159 }
160
161 ls->ls_node_array = array;
162}
163
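To make the weighting concrete (hypothetical values): with members {nodeid 1, weight 2}, {nodeid 2, weight 0} and {nodeid 3, weight 1}, ls_total_weight becomes 3 and ls_node_array becomes [1, 1, 3], so node 1 takes twice the share of node 3 and node 2 takes none; only when every member has weight 0 does each node fall back to a single slot.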
164/* send a status request to all members just to establish comms connections */
165
166static void ping_members(struct dlm_ls *ls)
167{
168 struct dlm_member *memb;
169 list_for_each_entry(memb, &ls->ls_nodes, list)
170 dlm_rcom_status(ls, memb->nodeid);
171}
172
173int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
174{
175 struct dlm_member *memb, *safe;
176 int i, error, found, pos = 0, neg = 0, low = -1;
177
178 /* move departed members from ls_nodes to ls_nodes_gone */
179
180 list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
181 found = 0;
182 for (i = 0; i < rv->node_count; i++) {
183 if (memb->nodeid == rv->nodeids[i]) {
184 found = 1;
185 break;
186 }
187 }
188
189 if (!found) {
190 neg++;
191 dlm_remove_member(ls, memb);
192 log_debug(ls, "remove member %d", memb->nodeid);
193 }
194 }
195
196 /* add new members to ls_nodes */
197
198 for (i = 0; i < rv->node_count; i++) {
199 if (dlm_is_member(ls, rv->nodeids[i]))
200 continue;
201 dlm_add_member(ls, rv->nodeids[i]);
202 pos++;
203 log_debug(ls, "add member %d", rv->nodeids[i]);
204 }
205
206 list_for_each_entry(memb, &ls->ls_nodes, list) {
207 if (low == -1 || memb->nodeid < low)
208 low = memb->nodeid;
209 }
210 ls->ls_low_nodeid = low;
211
212 make_member_array(ls);
213 dlm_set_recover_status(ls, DLM_RS_NODES);
214 *neg_out = neg;
215
216 ping_members(ls);
217
218 error = dlm_recover_members_wait(ls);
219 log_debug(ls, "total members %d", ls->ls_num_nodes);
220 return error;
221}
222
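A hypothetical membership change makes the bookkeeping above concrete: if ls_nodes holds {1, 2, 3} and rv->nodeids is {1, 3, 4}, then node 2 is moved to ls_nodes_gone (neg = 1), node 4 is added (pos = 1), ls_low_nodeid stays 1, and *neg_out returns 1 so the caller knows that resources mastered on a departed node need new masters.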
223/*
224 * Following called from lockspace.c
225 */
226
227int dlm_ls_stop(struct dlm_ls *ls)
228{
229 int new;
230
231 /*
232 * A stop cancels any recovery that's in progress (see RECOVERY_STOP,
233 * dlm_recovery_stopped()) and prevents any new locks from being
234 * processed (see RUNNING, dlm_locking_stopped()).
235 */
236
237 spin_lock(&ls->ls_recover_lock);
238 set_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
239 new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
240 ls->ls_recover_seq++;
241 spin_unlock(&ls->ls_recover_lock);
242
243 /*
244 * This in_recovery lock does two things:
245 *
246 * 1) Keeps this function from returning until all threads are out
247 * of locking routines and locking is truly stopped.
248 * 2) Keeps any new requests from being processed until it's unlocked
249 * when recovery is complete.
250 */
251
252 if (new)
253 down_write(&ls->ls_in_recovery);
254
255 /*
256 * The recoverd suspend/resume makes sure that dlm_recoverd (if
257 * running) has noticed the clearing of RUNNING above and quit
258 * processing the previous recovery. This will be true for all nodes
259 * before any nodes start the new recovery.
260 */
261
262 dlm_recoverd_suspend(ls);
263 ls->ls_recover_status = 0;
264 dlm_recoverd_resume(ls);
265 return 0;
266}
267
268int dlm_ls_start(struct dlm_ls *ls)
269{
270 struct dlm_recover *rv = NULL, *rv_old;
271 int *ids = NULL;
272 int error, count;
273
274 rv = kzalloc(sizeof(struct dlm_recover), GFP_KERNEL);
275 if (!rv)
276 return -ENOMEM;
277
278 error = count = dlm_nodeid_list(ls->ls_name, &ids);
279 if (error <= 0)
280 goto fail;
281
282 spin_lock(&ls->ls_recover_lock);
283
284 /* the lockspace needs to be stopped before it can be started */
285
286 if (!dlm_locking_stopped(ls)) {
287 spin_unlock(&ls->ls_recover_lock);
288 log_error(ls, "start ignored: lockspace running");
289 error = -EINVAL;
290 goto fail;
291 }
292
293 rv->nodeids = ids;
294 rv->node_count = count;
295 rv->seq = ++ls->ls_recover_seq;
296 rv_old = ls->ls_recover_args;
297 ls->ls_recover_args = rv;
298 spin_unlock(&ls->ls_recover_lock);
299
300 if (rv_old) {
301 kfree(rv_old->nodeids);
302 kfree(rv_old);
303 }
304
305 dlm_recoverd_kick(ls);
306 return 0;
307
308 fail:
309 kfree(rv);
310 kfree(ids);
311 return error;
312}
313
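Together, these two entry points define the stop/start contract for membership changes. A sketch of the sequence the cluster manager is expected to drive (illustrative only, not code from this patch):

    /* run on every node; no node may call dlm_ls_start() before
       all nodes have called dlm_ls_stop() */
    static void example_membership_change(struct dlm_ls *ls)
    {
        dlm_ls_stop(ls);    /* clears RUNNING, blocks new lock requests */
        /* ... new member list agreed cluster-wide ... */
        dlm_ls_start(ls);   /* passes the nodeid list to dlm_recoverd */
    }

dlm_ls_start() enforces the ordering itself: if the lockspace was not stopped first it fails with -EINVAL ("start ignored: lockspace running").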
diff --git a/fs/dlm/member.h b/fs/dlm/member.h
new file mode 100644
index 000000000000..927c08c19214
--- /dev/null
+++ b/fs/dlm/member.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __MEMBER_DOT_H__
14#define __MEMBER_DOT_H__
15
16int dlm_ls_stop(struct dlm_ls *ls);
17int dlm_ls_start(struct dlm_ls *ls);
18void dlm_clear_members(struct dlm_ls *ls);
19void dlm_clear_members_gone(struct dlm_ls *ls);
20int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out);
21int dlm_is_removed(struct dlm_ls *ls, int nodeid);
22
23#endif /* __MEMBER_DOT_H__ */
24
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
new file mode 100644
index 000000000000..f7cf4589fae8
--- /dev/null
+++ b/fs/dlm/memory.c
@@ -0,0 +1,106 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "config.h"
16#include "memory.h"
17
18static kmem_cache_t *lkb_cache;
19
20
21int dlm_memory_init(void)
22{
23 int ret = 0;
24
25 lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb),
26 __alignof__(struct dlm_lkb), 0, NULL, NULL);
27 if (!lkb_cache)
28 ret = -ENOMEM;
29 return ret;
30}
31
32void dlm_memory_exit(void)
33{
34 if (lkb_cache)
35 kmem_cache_destroy(lkb_cache);
36}
37
38char *allocate_lvb(struct dlm_ls *ls)
39{
40 char *p;
41
42 p = kmalloc(ls->ls_lvblen, GFP_KERNEL);
43 if (p)
44 memset(p, 0, ls->ls_lvblen);
45 return p;
46}
47
48void free_lvb(char *p)
49{
50 kfree(p);
51}
52
53/* FIXME: have some minimal space built-in to rsb for the name and
54 kmalloc a separate name if needed, like dentries are done */
55
56struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen)
57{
58 struct dlm_rsb *r;
59
60 DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
61
62 r = kmalloc(sizeof(*r) + namelen, GFP_KERNEL);
63 if (r)
64 memset(r, 0, sizeof(*r) + namelen);
65 return r;
66}
67
68void free_rsb(struct dlm_rsb *r)
69{
70 if (r->res_lvbptr)
71 free_lvb(r->res_lvbptr);
72 kfree(r);
73}
74
75struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
76{
77 struct dlm_lkb *lkb;
78
79 lkb = kmem_cache_alloc(lkb_cache, GFP_KERNEL);
80 if (lkb)
81 memset(lkb, 0, sizeof(*lkb));
82 return lkb;
83}
84
85void free_lkb(struct dlm_lkb *lkb)
86{
87 kmem_cache_free(lkb_cache, lkb);
88}
89
90struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen)
91{
92 struct dlm_direntry *de;
93
94 DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
95
96 de = kmalloc(sizeof(*de) + namelen, GFP_KERNEL);
97 if (de)
98 memset(de, 0, sizeof(*de) + namelen);
99 return de;
100}
101
102void free_direntry(struct dlm_direntry *de)
103{
104 kfree(de);
105}
106
diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
new file mode 100644
index 000000000000..6ead158ccc5c
--- /dev/null
+++ b/fs/dlm/memory.h
@@ -0,0 +1,29 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __MEMORY_DOT_H__
15#define __MEMORY_DOT_H__
16
17int dlm_memory_init(void);
18void dlm_memory_exit(void);
19struct dlm_rsb *allocate_rsb(struct dlm_ls *ls, int namelen);
20void free_rsb(struct dlm_rsb *r);
21struct dlm_lkb *allocate_lkb(struct dlm_ls *ls);
22void free_lkb(struct dlm_lkb *l);
23struct dlm_direntry *allocate_direntry(struct dlm_ls *ls, int namelen);
24void free_direntry(struct dlm_direntry *de);
25char *allocate_lvb(struct dlm_ls *ls);
26void free_lvb(char *l);
27
28#endif /* __MEMORY_DOT_H__ */
29
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
new file mode 100644
index 000000000000..c9b1c3d535f4
--- /dev/null
+++ b/fs/dlm/midcomms.c
@@ -0,0 +1,140 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * midcomms.c
16 *
17 * This is the appallingly named "mid-level" comms layer.
18 *
19 * Its purpose is to take buffers from the "real" comms layer,
20 * split them up into complete messages and pass them to the
21 * interested part of the locking mechanism.
22 *
23 * It also takes messages from the locking layer, formats them
24 * into packets and sends them to the comms layer.
25 */
26
27#include "dlm_internal.h"
28#include "lowcomms.h"
29#include "config.h"
30#include "rcom.h"
31#include "lock.h"
32#include "midcomms.h"
33
34
35static void copy_from_cb(void *dst, const void *base, unsigned offset,
36 unsigned len, unsigned limit)
37{
38 unsigned copy = len;
39
40 if ((copy + offset) > limit)
41 copy = limit - offset;
42 memcpy(dst, base + offset, copy);
43 len -= copy;
44 if (len)
45 memcpy(dst + copy, base, len);
46}
47
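A worked example with hypothetical numbers: for limit 8, offset 6 and len 4, the first memcpy takes bytes 6-7 from the tail of the ring and the second wraps around to take bytes 0-1 from its head. The consumer below advances its position with offset &= (limit - 1), so the receive buffer size must be a power of two for the wrap to work.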
48/*
49 * Called from the low-level comms layer to process a buffer of
50 * commands.
51 *
52 * Only complete messages are processed here, any "spare" bytes from
53 * the end of a buffer are saved and tacked onto the front of the next
54 * message that comes in. I doubt this will happen very often, but we
55 * need to be able to cope with it, and I don't want the task to be
56 * waiting for packets to come in when there is useful work to be done.
57 */
58
59int dlm_process_incoming_buffer(int nodeid, const void *base,
60 unsigned offset, unsigned len, unsigned limit)
61{
62 unsigned char __tmp[DLM_INBUF_LEN];
63 struct dlm_header *msg = (struct dlm_header *) __tmp;
64 int ret = 0;
65 int err = 0;
66 uint16_t msglen;
67 uint32_t lockspace;
68
69 while (len > sizeof(struct dlm_header)) {
70
71 /* Copy just the header to check the total length. The
72 message may wrap around the end of the buffer back to the
73 start, so we need to use a temp buffer and copy_from_cb. */
74
75 copy_from_cb(msg, base, offset, sizeof(struct dlm_header),
76 limit);
77
78 msglen = le16_to_cpu(msg->h_length);
79 lockspace = msg->h_lockspace;
80
81 err = -EINVAL;
82 if (msglen < sizeof(struct dlm_header))
83 break;
84 err = -E2BIG;
85 if (msglen > dlm_config.buffer_size) {
86 log_print("message size %d from %d too big, buf len %d",
87 msglen, nodeid, len);
88 break;
89 }
90 err = 0;
91
92 /* If only part of the full message is contained in this
93 buffer, then do nothing and wait for lowcomms to call
94 us again later with more data. We return 0 meaning
95 we've consumed none of the input buffer. */
96
97 if (msglen > len)
98 break;
99
100 /* Allocate a larger temp buffer if the full message won't fit
101 in the buffer on the stack (which should work for most
102 ordinary messages). */
103
104 if (msglen > sizeof(__tmp) &&
105 msg == (struct dlm_header *) __tmp) {
106 msg = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
107 if (msg == NULL)
108 return ret;
109 }
110
111 copy_from_cb(msg, base, offset, msglen, limit);
112
113 BUG_ON(lockspace != msg->h_lockspace);
114
115 ret += msglen;
116 offset += msglen;
117 offset &= (limit - 1);
118 len -= msglen;
119
120 switch (msg->h_cmd) {
121 case DLM_MSG:
122 dlm_receive_message(msg, nodeid, 0);
123 break;
124
125 case DLM_RCOM:
126 dlm_receive_rcom(msg, nodeid);
127 break;
128
129 default:
130 log_print("unknown msg type %x from %u: %u %u %u %u",
131 msg->h_cmd, nodeid, msglen, len, offset, ret);
132 }
133 }
134
135 if (msg != (struct dlm_header *) __tmp)
136 kfree(msg);
137
138 return err ? err : ret;
139}
140
diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h
new file mode 100644
index 000000000000..95852a5f111d
--- /dev/null
+++ b/fs/dlm/midcomms.h
@@ -0,0 +1,21 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __MIDCOMMS_DOT_H__
15#define __MIDCOMMS_DOT_H__
16
17int dlm_process_incoming_buffer(int nodeid, const void *base, unsigned offset,
18 unsigned len, unsigned limit);
19
20#endif /* __MIDCOMMS_DOT_H__ */
21
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
new file mode 100644
index 000000000000..55fbe313340e
--- /dev/null
+++ b/fs/dlm/rcom.c
@@ -0,0 +1,457 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "lowcomms.h"
18#include "midcomms.h"
19#include "rcom.h"
20#include "recover.h"
21#include "dir.h"
22#include "config.h"
23#include "memory.h"
24#include "lock.h"
25#include "util.h"
26
27
28static int rcom_response(struct dlm_ls *ls)
29{
30 return test_bit(LSFL_RCOM_READY, &ls->ls_flags);
31}
32
33static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
34 struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret)
35{
36 struct dlm_rcom *rc;
37 struct dlm_mhandle *mh;
38 char *mb;
39 int mb_len = sizeof(struct dlm_rcom) + len;
40
41 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_KERNEL, &mb);
42 if (!mh) {
43 log_print("create_rcom to %d type %d len %d ENOBUFS",
44 to_nodeid, type, len);
45 return -ENOBUFS;
46 }
47 memset(mb, 0, mb_len);
48
49 rc = (struct dlm_rcom *) mb;
50
51 rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
52 rc->rc_header.h_lockspace = ls->ls_global_id;
53 rc->rc_header.h_nodeid = dlm_our_nodeid();
54 rc->rc_header.h_length = mb_len;
55 rc->rc_header.h_cmd = DLM_RCOM;
56
57 rc->rc_type = type;
58
59 *mh_ret = mh;
60 *rc_ret = rc;
61 return 0;
62}
63
64static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh,
65 struct dlm_rcom *rc)
66{
67 dlm_rcom_out(rc);
68 dlm_lowcomms_commit_buffer(mh);
69}
70
71/* When replying to a status request, a node also sends back its
72 configuration values. The requesting node then checks that the remote
73 node is configured the same way as itself. */
74
75static void make_config(struct dlm_ls *ls, struct rcom_config *rf)
76{
77 rf->rf_lvblen = ls->ls_lvblen;
78 rf->rf_lsflags = ls->ls_exflags;
79}
80
81static int check_config(struct dlm_ls *ls, struct rcom_config *rf, int nodeid)
82{
83 if (rf->rf_lvblen != ls->ls_lvblen ||
84 rf->rf_lsflags != ls->ls_exflags) {
85 log_error(ls, "config mismatch: %d,%x nodeid %d: %d,%x",
86 ls->ls_lvblen, ls->ls_exflags,
87 nodeid, rf->rf_lvblen, rf->rf_lsflags);
88 return -EINVAL;
89 }
90 return 0;
91}
92
93int dlm_rcom_status(struct dlm_ls *ls, int nodeid)
94{
95 struct dlm_rcom *rc;
96 struct dlm_mhandle *mh;
97 int error = 0;
98
99 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
100
101 if (nodeid == dlm_our_nodeid()) {
102 rc = (struct dlm_rcom *) ls->ls_recover_buf;
103 rc->rc_result = dlm_recover_status(ls);
104 goto out;
105 }
106
107 error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, 0, &rc, &mh);
108 if (error)
109 goto out;
110
111 send_rcom(ls, mh, rc);
112
113 error = dlm_wait_function(ls, &rcom_response);
114 clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
115 if (error)
116 goto out;
117
118 rc = (struct dlm_rcom *) ls->ls_recover_buf;
119
120 if (rc->rc_result == -ESRCH) {
121 /* we pretend the remote lockspace exists with 0 status */
122 log_debug(ls, "remote node %d not ready", nodeid);
123 rc->rc_result = 0;
124 } else
125 error = check_config(ls, (struct rcom_config *) rc->rc_buf,
126 nodeid);
127 /* the caller looks at rc_result for the remote recovery status */
128 out:
129 return error;
130}
131
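The round trip above is synchronous: the requester sends DLM_RCOM_STATUS, then sleeps in dlm_wait_function() until receive_rcom_status_reply() below copies the reply into ls_recover_buf, sets LSFL_RCOM_READY and wakes ls_wait_general, at which point the requester reads the remote status out of rc_result. dlm_rcom_names() follows the same pattern, with ls_recover_buf receiving the returned block of resource names.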
132static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
133{
134 struct dlm_rcom *rc;
135 struct dlm_mhandle *mh;
136 int error, nodeid = rc_in->rc_header.h_nodeid;
137
138 error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY,
139 sizeof(struct rcom_config), &rc, &mh);
140 if (error)
141 return;
142 rc->rc_result = dlm_recover_status(ls);
143 make_config(ls, (struct rcom_config *) rc->rc_buf);
144
145 send_rcom(ls, mh, rc);
146}
147
148static void receive_rcom_status_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
149{
150 memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
151 set_bit(LSFL_RCOM_READY, &ls->ls_flags);
152 wake_up(&ls->ls_wait_general);
153}
154
155int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len)
156{
157 struct dlm_rcom *rc;
158 struct dlm_mhandle *mh;
159 int error = 0, len = sizeof(struct dlm_rcom);
160
161 memset(ls->ls_recover_buf, 0, dlm_config.buffer_size);
162
163 if (nodeid == dlm_our_nodeid()) {
164 dlm_copy_master_names(ls, last_name, last_len,
165 ls->ls_recover_buf + len,
166 dlm_config.buffer_size - len, nodeid);
167 goto out;
168 }
169
170 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh);
171 if (error)
172 goto out;
173 memcpy(rc->rc_buf, last_name, last_len);
174
175 send_rcom(ls, mh, rc);
176
177 error = dlm_wait_function(ls, &rcom_response);
178 clear_bit(LSFL_RCOM_READY, &ls->ls_flags);
179 out:
180 return error;
181}
182
183static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
184{
185 struct dlm_rcom *rc;
186 struct dlm_mhandle *mh;
187 int error, inlen, outlen;
188 int nodeid = rc_in->rc_header.h_nodeid;
189 uint32_t status = dlm_recover_status(ls);
190
191 /*
192 * We can't run dlm_dir_rebuild_send (which uses ls_nodes) while
193 * dlm_recoverd is running ls_nodes_reconfig (which changes ls_nodes).
194 * It could only happen in rare cases where we get a late NAMES
195 * message from a previous instance of recovery.
196 */
197
198 if (!(status & DLM_RS_NODES)) {
199 log_debug(ls, "ignoring RCOM_NAMES from %u", nodeid);
200 return;
201 }
202
203 nodeid = rc_in->rc_header.h_nodeid;
204 inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
205 outlen = dlm_config.buffer_size - sizeof(struct dlm_rcom);
206
207 error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh);
208 if (error)
209 return;
210
211 dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen,
212 nodeid);
213 send_rcom(ls, mh, rc);
214}
215
216static void receive_rcom_names_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
217{
218 memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
219 set_bit(LSFL_RCOM_READY, &ls->ls_flags);
220 wake_up(&ls->ls_wait_general);
221}
222
223int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
224{
225 struct dlm_rcom *rc;
226 struct dlm_mhandle *mh;
227 struct dlm_ls *ls = r->res_ls;
228 int error;
229
230 error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length,
231 &rc, &mh);
232 if (error)
233 goto out;
234 memcpy(rc->rc_buf, r->res_name, r->res_length);
235 rc->rc_id = (unsigned long) r;
236
237 send_rcom(ls, mh, rc);
238 out:
239 return error;
240}
241
242static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
243{
244 struct dlm_rcom *rc;
245 struct dlm_mhandle *mh;
246 int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
247 int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
248
249 error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh);
250 if (error)
251 return;
252
253 error = dlm_dir_lookup(ls, nodeid, rc_in->rc_buf, len, &ret_nodeid);
254 if (error)
255 ret_nodeid = error;
256 rc->rc_result = ret_nodeid;
257 rc->rc_id = rc_in->rc_id;
258
259 send_rcom(ls, mh, rc);
260}
261
262static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
263{
264 dlm_recover_master_reply(ls, rc_in);
265}
266
267static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb,
268 struct rcom_lock *rl)
269{
270 memset(rl, 0, sizeof(*rl));
271
272 rl->rl_ownpid = lkb->lkb_ownpid;
273 rl->rl_lkid = lkb->lkb_id;
274 rl->rl_exflags = lkb->lkb_exflags;
275 rl->rl_flags = lkb->lkb_flags;
276 rl->rl_lvbseq = lkb->lkb_lvbseq;
277 rl->rl_rqmode = lkb->lkb_rqmode;
278 rl->rl_grmode = lkb->lkb_grmode;
279 rl->rl_status = lkb->lkb_status;
280 rl->rl_wait_type = lkb->lkb_wait_type;
281
282 if (lkb->lkb_bastaddr)
283 rl->rl_asts |= AST_BAST;
284 if (lkb->lkb_astaddr)
285 rl->rl_asts |= AST_COMP;
286
287 rl->rl_namelen = r->res_length;
288 memcpy(rl->rl_name, r->res_name, r->res_length);
289
290 /* FIXME: might we have an lvb without DLM_LKF_VALBLK set ?
291 If so, receive_rcom_lock_args() won't take this copy. */
292
293 if (lkb->lkb_lvbptr)
294 memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
295}
296
297int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
298{
299 struct dlm_ls *ls = r->res_ls;
300 struct dlm_rcom *rc;
301 struct dlm_mhandle *mh;
302 struct rcom_lock *rl;
303 int error, len = sizeof(struct rcom_lock);
304
305 if (lkb->lkb_lvbptr)
306 len += ls->ls_lvblen;
307
308 error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh);
309 if (error)
310 goto out;
311
312 rl = (struct rcom_lock *) rc->rc_buf;
313 pack_rcom_lock(r, lkb, rl);
314 rc->rc_id = (unsigned long) r;
315
316 send_rcom(ls, mh, rc);
317 out:
318 return error;
319}
320
321static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
322{
323 struct dlm_rcom *rc;
324 struct dlm_mhandle *mh;
325 int error, nodeid = rc_in->rc_header.h_nodeid;
326
327 dlm_recover_master_copy(ls, rc_in);
328
329 error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY,
330 sizeof(struct rcom_lock), &rc, &mh);
331 if (error)
332 return;
333
334 /* We send back the same rcom_lock struct we received, but
335 dlm_recover_master_copy() has filled in rl_remid and rl_result */
336
337 memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock));
338 rc->rc_id = rc_in->rc_id;
339
340 send_rcom(ls, mh, rc);
341}
342
343static void receive_rcom_lock_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
344{
345 uint32_t status = dlm_recover_status(ls);
346
347 if (!(status & DLM_RS_DIR)) {
348 log_debug(ls, "ignoring RCOM_LOCK_REPLY from %u",
349 rc_in->rc_header.h_nodeid);
350 return;
351 }
352
353 dlm_recover_process_copy(ls, rc_in);
354}
355
356static int send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
357{
358 struct dlm_rcom *rc;
359 struct dlm_mhandle *mh;
360 char *mb;
361 int mb_len = sizeof(struct dlm_rcom);
362
363 mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_KERNEL, &mb);
364 if (!mh)
365 return -ENOBUFS;
366 memset(mb, 0, mb_len);
367
368 rc = (struct dlm_rcom *) mb;
369
370 rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
371 rc->rc_header.h_lockspace = rc_in->rc_header.h_lockspace;
372 rc->rc_header.h_nodeid = dlm_our_nodeid();
373 rc->rc_header.h_length = mb_len;
374 rc->rc_header.h_cmd = DLM_RCOM;
375
376 rc->rc_type = DLM_RCOM_STATUS_REPLY;
377 rc->rc_result = -ESRCH;
378
379 dlm_rcom_out(rc);
380 dlm_lowcomms_commit_buffer(mh);
381
382 return 0;
383}
384
385/* Called by dlm_recvd; corresponds to dlm_receive_message() but special
386 recovery-only comms are sent through here. */
387
388void dlm_receive_rcom(struct dlm_header *hd, int nodeid)
389{
390 struct dlm_rcom *rc = (struct dlm_rcom *) hd;
391 struct dlm_ls *ls;
392
393 dlm_rcom_in(rc);
394
395 /* If the lockspace doesn't exist then still send a status message
396 back; it's possible that it just doesn't have its global_id yet. */
397
398 ls = dlm_find_lockspace_global(hd->h_lockspace);
399 if (!ls) {
400 log_print("lockspace %x from %d not found",
401 hd->h_lockspace, nodeid);
402 send_ls_not_ready(nodeid, rc);
403 return;
404 }
405
406 if (dlm_recovery_stopped(ls) && (rc->rc_type != DLM_RCOM_STATUS)) {
407 log_error(ls, "ignoring recovery message %x from %d",
408 rc->rc_type, nodeid);
409 goto out;
410 }
411
412 if (nodeid != rc->rc_header.h_nodeid) {
413 log_error(ls, "bad rcom nodeid %d from %d",
414 rc->rc_header.h_nodeid, nodeid);
415 goto out;
416 }
417
418 switch (rc->rc_type) {
419 case DLM_RCOM_STATUS:
420 receive_rcom_status(ls, rc);
421 break;
422
423 case DLM_RCOM_NAMES:
424 receive_rcom_names(ls, rc);
425 break;
426
427 case DLM_RCOM_LOOKUP:
428 receive_rcom_lookup(ls, rc);
429 break;
430
431 case DLM_RCOM_LOCK:
432 receive_rcom_lock(ls, rc);
433 break;
434
435 case DLM_RCOM_STATUS_REPLY:
436 receive_rcom_status_reply(ls, rc);
437 break;
438
439 case DLM_RCOM_NAMES_REPLY:
440 receive_rcom_names_reply(ls, rc);
441 break;
442
443 case DLM_RCOM_LOOKUP_REPLY:
444 receive_rcom_lookup_reply(ls, rc);
445 break;
446
447 case DLM_RCOM_LOCK_REPLY:
448 receive_rcom_lock_reply(ls, rc);
449 break;
450
451 default:
452 DLM_ASSERT(0, printk("rc_type=%x\n", rc->rc_type););
453 }
454 out:
455 dlm_put_lockspace(ls);
456}
457
diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h
new file mode 100644
index 000000000000..d7984321ff41
--- /dev/null
+++ b/fs/dlm/rcom.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __RCOM_DOT_H__
15#define __RCOM_DOT_H__
16
17int dlm_rcom_status(struct dlm_ls *ls, int nodeid);
18int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len);
19int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid);
20int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
21void dlm_receive_rcom(struct dlm_header *hd, int nodeid);
22
23#endif
24
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
new file mode 100644
index 000000000000..b036ee7dcb32
--- /dev/null
+++ b/fs/dlm/recover.c
@@ -0,0 +1,762 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "dir.h"
17#include "config.h"
18#include "ast.h"
19#include "memory.h"
20#include "rcom.h"
21#include "lock.h"
22#include "lowcomms.h"
23#include "member.h"
24#include "recover.h"
25
26
27/*
28 * Recovery waiting routines: these functions wait for a particular reply from
29 * a remote node, or for the remote node to report a certain status. They need
30 * to abort if the lockspace is stopped, which indicates a node has failed
31 * (perhaps the one being waited for).
32 */
33
34/*
35 * Wait until given function returns non-zero or lockspace is stopped
36 * (LSFL_RECOVERY_STOP set due to failure of a node in ls_nodes). When another
37 * function thinks it could have completed the waited-on task, it should wake
38 * up ls_wait_general to get an immediate response rather than waiting for the
39 * timer to detect the result. A timer wakes us up periodically while waiting
40 * to see if we should abort due to a node failure. This should only be called
41 * by the dlm_recoverd thread.
42 */
43
44static void dlm_wait_timer_fn(unsigned long data)
45{
46 struct dlm_ls *ls = (struct dlm_ls *) data;
47 mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ));
48 wake_up(&ls->ls_wait_general);
49}
50
51int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
52{
53 int error = 0;
54
55 init_timer(&ls->ls_timer);
56 ls->ls_timer.function = dlm_wait_timer_fn;
57 ls->ls_timer.data = (long) ls;
58 ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ);
59 add_timer(&ls->ls_timer);
60
61 wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
62 del_timer_sync(&ls->ls_timer);
63
64 if (dlm_recovery_stopped(ls)) {
65 log_debug(ls, "dlm_wait_function aborted");
66 error = -EINTR;
67 }
68 return error;
69}
70
71/*
72 * An efficient way for all nodes to wait for all others to have a certain
73 * status. The node with the lowest nodeid polls all the others for their
74 * status (wait_status_all) and all the others poll the node with the low id
75 * for its accumulated result (wait_status_low). When all nodes have set
76 * status flag X, then status flag X_ALL will be set on the low nodeid.
77 */
78
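An illustrative run with three nodes: once nodes 1, 2 and 3 have each set status X (say DLM_RS_NODES), node 1, holding the lowest nodeid, polls the other two in wait_status_all() until both report X and then sets X_ALL (status << 1, see wait_status() below); nodes 2 and 3 meanwhile poll node 1 in wait_status_low() until X_ALL appears. The polling backs off in 20 ms steps up to a one-second interval.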
79uint32_t dlm_recover_status(struct dlm_ls *ls)
80{
81 uint32_t status;
82 spin_lock(&ls->ls_recover_lock);
83 status = ls->ls_recover_status;
84 spin_unlock(&ls->ls_recover_lock);
85 return status;
86}
87
88void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
89{
90 spin_lock(&ls->ls_recover_lock);
91 ls->ls_recover_status |= status;
92 spin_unlock(&ls->ls_recover_lock);
93}
94
95static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
96{
97 struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
98 struct dlm_member *memb;
99 int error = 0, delay;
100
101 list_for_each_entry(memb, &ls->ls_nodes, list) {
102 delay = 0;
103 for (;;) {
104 if (dlm_recovery_stopped(ls)) {
105 error = -EINTR;
106 goto out;
107 }
108
109 error = dlm_rcom_status(ls, memb->nodeid);
110 if (error)
111 goto out;
112
113 if (rc->rc_result & wait_status)
114 break;
115 if (delay < 1000)
116 delay += 20;
117 msleep(delay);
118 }
119 }
120 out:
121 return error;
122}
123
124static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
125{
126 struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
127 int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
128
129 for (;;) {
130 if (dlm_recovery_stopped(ls)) {
131 error = -EINTR;
132 goto out;
133 }
134
135 error = dlm_rcom_status(ls, nodeid);
136 if (error)
137 break;
138
139 if (rc->rc_result & wait_status)
140 break;
141 if (delay < 1000)
142 delay += 20;
143 msleep(delay);
144 }
145 out:
146 return error;
147}
148
149static int wait_status(struct dlm_ls *ls, uint32_t status)
150{
151 uint32_t status_all = status << 1;
152 int error;
153
154 if (ls->ls_low_nodeid == dlm_our_nodeid()) {
155 error = wait_status_all(ls, status);
156 if (!error)
157 dlm_set_recover_status(ls, status_all);
158 } else
159 error = wait_status_low(ls, status_all);
160
161 return error;
162}
163
164int dlm_recover_members_wait(struct dlm_ls *ls)
165{
166 return wait_status(ls, DLM_RS_NODES);
167}
168
169int dlm_recover_directory_wait(struct dlm_ls *ls)
170{
171 return wait_status(ls, DLM_RS_DIR);
172}
173
174int dlm_recover_locks_wait(struct dlm_ls *ls)
175{
176 return wait_status(ls, DLM_RS_LOCKS);
177}
178
179int dlm_recover_done_wait(struct dlm_ls *ls)
180{
181 return wait_status(ls, DLM_RS_DONE);
182}
183
184/*
185 * The recover_list contains all the rsb's for which we've requested the new
186 * master nodeid. As replies are returned from the resource directories the
187 * rsb's are removed from the list. When the list is empty we're done.
188 *
189 * The recover_list is later similarly used for all rsb's for which we've sent
190 * new lkb's and need to receive new corresponding lkid's.
191 *
192 * We use the address of the rsb struct as a simple local identifier for the
193 * rsb so we can match an rcom reply with the rsb it was sent for.
194 */
195
196static int recover_list_empty(struct dlm_ls *ls)
197{
198 int empty;
199
200 spin_lock(&ls->ls_recover_list_lock);
201 empty = list_empty(&ls->ls_recover_list);
202 spin_unlock(&ls->ls_recover_list_lock);
203
204 return empty;
205}
206
207static void recover_list_add(struct dlm_rsb *r)
208{
209 struct dlm_ls *ls = r->res_ls;
210
211 spin_lock(&ls->ls_recover_list_lock);
212 if (list_empty(&r->res_recover_list)) {
213 list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
214 ls->ls_recover_list_count++;
215 dlm_hold_rsb(r);
216 }
217 spin_unlock(&ls->ls_recover_list_lock);
218}
219
220static void recover_list_del(struct dlm_rsb *r)
221{
222 struct dlm_ls *ls = r->res_ls;
223
224 spin_lock(&ls->ls_recover_list_lock);
225 list_del_init(&r->res_recover_list);
226 ls->ls_recover_list_count--;
227 spin_unlock(&ls->ls_recover_list_lock);
228
229 dlm_put_rsb(r);
230}
231
232static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
233{
234 struct dlm_rsb *r = NULL;
235
236 spin_lock(&ls->ls_recover_list_lock);
237
238 list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
239 if (id == (unsigned long) r)
240 goto out;
241 }
242 r = NULL;
243 out:
244 spin_unlock(&ls->ls_recover_list_lock);
245 return r;
246}
247
248static void recover_list_clear(struct dlm_ls *ls)
249{
250 struct dlm_rsb *r, *s;
251
252 spin_lock(&ls->ls_recover_list_lock);
253 list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
254 list_del_init(&r->res_recover_list);
255 dlm_put_rsb(r);
256 ls->ls_recover_list_count--;
257 }
258
259 if (ls->ls_recover_list_count != 0) {
260 log_error(ls, "warning: recover_list_count %d",
261 ls->ls_recover_list_count);
262 ls->ls_recover_list_count = 0;
263 }
264 spin_unlock(&ls->ls_recover_list_lock);
265}
266
267
268/* Master recovery: find new master node for rsb's that were
269 mastered on nodes that have been removed.
270
271 dlm_recover_masters
272 recover_master
273 dlm_send_rcom_lookup -> receive_rcom_lookup
274 dlm_dir_lookup
275 receive_rcom_lookup_reply <-
276 dlm_recover_master_reply
277 set_new_master
278 set_master_lkbs
279 set_lock_master
280*/
281
282/*
283 * Set the lock master for all LKBs in a lock queue
284 * If we are the new master of the rsb, we may have received new
285 * MSTCPY locks from other nodes already which we need to ignore
286 * when setting the new nodeid.
287 */
288
289static void set_lock_master(struct list_head *queue, int nodeid)
290{
291 struct dlm_lkb *lkb;
292
293 list_for_each_entry(lkb, queue, lkb_statequeue)
294 if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
295 lkb->lkb_nodeid = nodeid;
296}
297
298static void set_master_lkbs(struct dlm_rsb *r)
299{
300 set_lock_master(&r->res_grantqueue, r->res_nodeid);
301 set_lock_master(&r->res_convertqueue, r->res_nodeid);
302 set_lock_master(&r->res_waitqueue, r->res_nodeid);
303}
304
305/*
306 * Propagate the new master nodeid to locks.
307 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
308 * The NEW_MASTER2 flag tells recover_lvb() which rsb's to consider.
309 */
310
311static void set_new_master(struct dlm_rsb *r, int nodeid)
312{
313 lock_rsb(r);
314 r->res_nodeid = nodeid;
315 set_master_lkbs(r);
316 rsb_set_flag(r, RSB_NEW_MASTER);
317 rsb_set_flag(r, RSB_NEW_MASTER2);
318 unlock_rsb(r);
319}
320
321/*
322 * We do async lookups on rsb's that need new masters. The rsb's
323 * waiting for a lookup reply are kept on the recover_list.
324 */
325
326static int recover_master(struct dlm_rsb *r)
327{
328 struct dlm_ls *ls = r->res_ls;
329 int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();
330
331 dir_nodeid = dlm_dir_nodeid(r);
332
333 if (dir_nodeid == our_nodeid) {
334 error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
335 r->res_length, &ret_nodeid);
336 if (error)
337 log_error(ls, "recover dir lookup error %d", error);
338
339 if (ret_nodeid == our_nodeid)
340 ret_nodeid = 0;
341 set_new_master(r, ret_nodeid);
342 } else {
343 recover_list_add(r);
344 error = dlm_send_rcom_lookup(r, dir_nodeid);
345 }
346
347 return error;
348}
349
350/*
351 * When not using a directory, most resource names will hash to a new static
352 * master nodeid and the resource will need to be remastered.
353 */
354
355static int recover_master_static(struct dlm_rsb *r)
356{
357 int master = dlm_dir_nodeid(r);
358
359 if (master == dlm_our_nodeid())
360 master = 0;
361
362 if (r->res_nodeid != master) {
363 if (is_master(r))
364 dlm_purge_mstcpy_locks(r);
365 set_new_master(r, master);
366 return 1;
367 }
368 return 0;
369}
370
371/*
372 * Go through local root resources and for each rsb which has a master which
373 * has departed, get the new master nodeid from the directory. The dir will
374 * assign mastery to the first node to look up the new master. That means
375 * we'll discover in this lookup if we're the new master of any rsb's.
376 *
377 * We fire off all the dir lookup requests individually and asynchronously to
378 * the correct dir node.
379 */
380
381int dlm_recover_masters(struct dlm_ls *ls)
382{
383 struct dlm_rsb *r;
384 int error = 0, count = 0;
385
386 log_debug(ls, "dlm_recover_masters");
387
388 down_read(&ls->ls_root_sem);
389 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
390 if (dlm_recovery_stopped(ls)) {
391 up_read(&ls->ls_root_sem);
392 error = -EINTR;
393 goto out;
394 }
395
396 if (dlm_no_directory(ls))
397 count += recover_master_static(r);
398 else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) {
399 recover_master(r);
400 count++;
401 }
402
403 schedule();
404 }
405 up_read(&ls->ls_root_sem);
406
407 log_debug(ls, "dlm_recover_masters %d resources", count);
408
409 error = dlm_wait_function(ls, &recover_list_empty);
410 out:
411 if (error)
412 recover_list_clear(ls);
413 return error;
414}
415
416int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
417{
418 struct dlm_rsb *r;
419 int nodeid;
420
421 r = recover_list_find(ls, rc->rc_id);
422 if (!r) {
423 log_error(ls, "dlm_recover_master_reply no id %llx",
424			  (unsigned long long) rc->rc_id);
425 goto out;
426 }
427
428 nodeid = rc->rc_result;
429 if (nodeid == dlm_our_nodeid())
430 nodeid = 0;
431
432 set_new_master(r, nodeid);
433 recover_list_del(r);
434
435 if (recover_list_empty(ls))
436 wake_up(&ls->ls_wait_general);
437 out:
438 return 0;
439}
440
441
442/* Lock recovery: rebuild the process-copy locks we hold on a
443 remastered rsb on the new rsb master.
444
445 dlm_recover_locks
446 recover_locks
447 recover_locks_queue
448 dlm_send_rcom_lock -> receive_rcom_lock
449 dlm_recover_master_copy
450 receive_rcom_lock_reply <-
451 dlm_recover_process_copy
452*/
453
454
455/*
456 * keep a count of the number of lkb's we send to the new master; when we get
457 * an equal number of replies then recovery for the rsb is done
458 */
459
460static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
461{
462 struct dlm_lkb *lkb;
463 int error = 0;
464
465 list_for_each_entry(lkb, head, lkb_statequeue) {
466 error = dlm_send_rcom_lock(r, lkb);
467 if (error)
468 break;
469 r->res_recover_locks_count++;
470 }
471
472 return error;
473}
474
475static int all_queues_empty(struct dlm_rsb *r)
476{
477 if (!list_empty(&r->res_grantqueue) ||
478 !list_empty(&r->res_convertqueue) ||
479 !list_empty(&r->res_waitqueue))
480 return 0;
481 return 1;
482}
483
484static int recover_locks(struct dlm_rsb *r)
485{
486 int error = 0;
487
488 lock_rsb(r);
489 if (all_queues_empty(r))
490 goto out;
491
492 DLM_ASSERT(!r->res_recover_locks_count, dlm_print_rsb(r););
493
494 error = recover_locks_queue(r, &r->res_grantqueue);
495 if (error)
496 goto out;
497 error = recover_locks_queue(r, &r->res_convertqueue);
498 if (error)
499 goto out;
500 error = recover_locks_queue(r, &r->res_waitqueue);
501 if (error)
502 goto out;
503
504 if (r->res_recover_locks_count)
505 recover_list_add(r);
506 else
507 rsb_clear_flag(r, RSB_NEW_MASTER);
508 out:
509 unlock_rsb(r);
510 return error;
511}
512
513int dlm_recover_locks(struct dlm_ls *ls)
514{
515 struct dlm_rsb *r;
516 int error, count = 0;
517
518 log_debug(ls, "dlm_recover_locks");
519
520 down_read(&ls->ls_root_sem);
521 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
522 if (is_master(r)) {
523 rsb_clear_flag(r, RSB_NEW_MASTER);
524 continue;
525 }
526
527 if (!rsb_flag(r, RSB_NEW_MASTER))
528 continue;
529
530 if (dlm_recovery_stopped(ls)) {
531 error = -EINTR;
532 up_read(&ls->ls_root_sem);
533 goto out;
534 }
535
536 error = recover_locks(r);
537 if (error) {
538 up_read(&ls->ls_root_sem);
539 goto out;
540 }
541
542 count += r->res_recover_locks_count;
543 }
544 up_read(&ls->ls_root_sem);
545
546 log_debug(ls, "dlm_recover_locks %d locks", count);
547
548 error = dlm_wait_function(ls, &recover_list_empty);
549 out:
550 if (error)
551 recover_list_clear(ls);
552 else
553 dlm_set_recover_status(ls, DLM_RS_LOCKS);
554 return error;
555}
556
557void dlm_recovered_lock(struct dlm_rsb *r)
558{
559 DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_print_rsb(r););
560
561 r->res_recover_locks_count--;
562 if (!r->res_recover_locks_count) {
563 rsb_clear_flag(r, RSB_NEW_MASTER);
564 recover_list_del(r);
565 }
566
567 if (recover_list_empty(r->res_ls))
568 wake_up(&r->res_ls->ls_wait_general);
569}
570
571/*
572 * The lvb needs to be recovered on all master rsb's. This includes setting
573 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
574 * based on the lvb's of the locks held on the rsb.
575 *
576 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb. If it
577 * was already set prior to recovery, it's not cleared, regardless of locks.
578 *
579 * The LVB contents are only considered for changing when this is a new master
580 * of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with
581 * mode > CR. If no lkb's exist with mode above CR, the lvb contents are taken
582 * from the lkb with the largest lvb sequence number.
583 */
584
585static void recover_lvb(struct dlm_rsb *r)
586{
587 struct dlm_lkb *lkb, *high_lkb = NULL;
588 uint32_t high_seq = 0;
589 int lock_lvb_exists = 0;
590 int big_lock_exists = 0;
591 int lvblen = r->res_ls->ls_lvblen;
592
593 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
594 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
595 continue;
596
597 lock_lvb_exists = 1;
598
599 if (lkb->lkb_grmode > DLM_LOCK_CR) {
600 big_lock_exists = 1;
601 goto setflag;
602 }
603
604 if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
605 high_lkb = lkb;
606 high_seq = lkb->lkb_lvbseq;
607 }
608 }
609
610 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
611 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
612 continue;
613
614 lock_lvb_exists = 1;
615
616 if (lkb->lkb_grmode > DLM_LOCK_CR) {
617 big_lock_exists = 1;
618 goto setflag;
619 }
620
621 if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
622 high_lkb = lkb;
623 high_seq = lkb->lkb_lvbseq;
624 }
625 }
626
627 setflag:
628 if (!lock_lvb_exists)
629 goto out;
630
631 if (!big_lock_exists)
632 rsb_set_flag(r, RSB_VALNOTVALID);
633
634 /* don't mess with the lvb unless we're the new master */
635 if (!rsb_flag(r, RSB_NEW_MASTER2))
636 goto out;
637
638 if (!r->res_lvbptr) {
639 r->res_lvbptr = allocate_lvb(r->res_ls);
640 if (!r->res_lvbptr)
641 goto out;
642 }
643
644 if (big_lock_exists) {
645 r->res_lvbseq = lkb->lkb_lvbseq;
646 memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
647 } else if (high_lkb) {
648 r->res_lvbseq = high_lkb->lkb_lvbseq;
649 memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
650 } else {
651 r->res_lvbseq = 0;
652 memset(r->res_lvbptr, 0, lvblen);
653 }
654 out:
655 return;
656}
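
/*
 * The ((int)lkb->lkb_lvbseq - (int)high_seq) >= 0 test above is the usual
 * wraparound-tolerant sequence comparison (same idea as the kernel's
 * time_after() family).  A standalone sketch of the idiom, not part of the
 * patch, and the helper name is made up:
 *
 *	static inline int lvbseq_newer_or_eq(uint32_t a, uint32_t b)
 *	{
 *		return (int32_t)(a - b) >= 0;
 *	}
 *
 * The unsigned subtraction wraps modulo 2^32, so a sequence number that
 * recently wrapped past zero still compares as newer.
 */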
657
658/* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks
659 converting PR->CW or CW->PR need to have their lkb_grmode set. */
660
661static void recover_conversion(struct dlm_rsb *r)
662{
663 struct dlm_lkb *lkb;
664 int grmode = -1;
665
666 list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
667 if (lkb->lkb_grmode == DLM_LOCK_PR ||
668 lkb->lkb_grmode == DLM_LOCK_CW) {
669 grmode = lkb->lkb_grmode;
670 break;
671 }
672 }
673
674 list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
675 if (lkb->lkb_grmode != DLM_LOCK_IV)
676 continue;
677 if (grmode == -1)
678 lkb->lkb_grmode = lkb->lkb_rqmode;
679 else
680 lkb->lkb_grmode = grmode;
681 }
682}
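
/* For example (illustrative only): a master copy of an in-flight PR->CW
 * conversion can arrive with lkb_grmode still IV.  If some granted lock on
 * the rsb holds PR or CW, the loop above repairs the converting lock's
 * grmode to that mode; if no PR/CW holder exists, the lock's own rqmode
 * is used as its granted mode. */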
683
684void dlm_recover_rsbs(struct dlm_ls *ls)
685{
686 struct dlm_rsb *r;
687 int count = 0;
688
689 log_debug(ls, "dlm_recover_rsbs");
690
691 down_read(&ls->ls_root_sem);
692 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
693 lock_rsb(r);
694 if (is_master(r)) {
695 if (rsb_flag(r, RSB_RECOVER_CONVERT))
696 recover_conversion(r);
697 recover_lvb(r);
698 count++;
699 }
700 rsb_clear_flag(r, RSB_RECOVER_CONVERT);
701 unlock_rsb(r);
702 }
703 up_read(&ls->ls_root_sem);
704
705 log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
706}
707
708/* Create a single list of all root rsb's to be used during recovery */
709
710int dlm_create_root_list(struct dlm_ls *ls)
711{
712 struct dlm_rsb *r;
713 int i, error = 0;
714
715 down_write(&ls->ls_root_sem);
716 if (!list_empty(&ls->ls_root_list)) {
717 log_error(ls, "root list not empty");
718 error = -EINVAL;
719 goto out;
720 }
721
722 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
723 read_lock(&ls->ls_rsbtbl[i].lock);
724 list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
725 list_add(&r->res_root_list, &ls->ls_root_list);
726 dlm_hold_rsb(r);
727 }
728 read_unlock(&ls->ls_rsbtbl[i].lock);
729 }
730 out:
731 up_write(&ls->ls_root_sem);
732 return error;
733}
734
735void dlm_release_root_list(struct dlm_ls *ls)
736{
737 struct dlm_rsb *r, *safe;
738
739 down_write(&ls->ls_root_sem);
740 list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
741 list_del_init(&r->res_root_list);
742 dlm_put_rsb(r);
743 }
744 up_write(&ls->ls_root_sem);
745}
746
747void dlm_clear_toss_list(struct dlm_ls *ls)
748{
749 struct dlm_rsb *r, *safe;
750 int i;
751
752 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
753 write_lock(&ls->ls_rsbtbl[i].lock);
754 list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
755 res_hashchain) {
756 list_del(&r->res_hashchain);
757 free_rsb(r);
758 }
759 write_unlock(&ls->ls_rsbtbl[i].lock);
760 }
761}
762
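Both dlm_recover_masters() and dlm_recover_locks() above share one shape:
scatter asynchronous rcom requests, park each rsb that still awaits a reply
on ls_recover_list, then gather by waiting for the reply handlers to empty
the list.  A compressed sketch of that shape (illustrative only;
for_each_pending_rsb() and send_async_request() are hypothetical stand-ins
for the per-rsb loops in the real functions):

	for_each_pending_rsb(ls, r) {
		recover_list_add(r);		/* hold rsb, bump count */
		send_async_request(r);		/* reply does recover_list_del() */
	}
	error = dlm_wait_function(ls, &recover_list_empty);
	if (error)
		recover_list_clear(ls);		/* recovery aborted: drop refs */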
diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h
new file mode 100644
index 000000000000..ebd0363f1e08
--- /dev/null
+++ b/fs/dlm/recover.h
@@ -0,0 +1,34 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __RECOVER_DOT_H__
15#define __RECOVER_DOT_H__
16
17int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls));
18uint32_t dlm_recover_status(struct dlm_ls *ls);
19void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status);
20int dlm_recover_members_wait(struct dlm_ls *ls);
21int dlm_recover_directory_wait(struct dlm_ls *ls);
22int dlm_recover_locks_wait(struct dlm_ls *ls);
23int dlm_recover_done_wait(struct dlm_ls *ls);
24int dlm_recover_masters(struct dlm_ls *ls);
25int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc);
26int dlm_recover_locks(struct dlm_ls *ls);
27void dlm_recovered_lock(struct dlm_rsb *r);
28int dlm_create_root_list(struct dlm_ls *ls);
29void dlm_release_root_list(struct dlm_ls *ls);
30void dlm_clear_toss_list(struct dlm_ls *ls);
31void dlm_recover_rsbs(struct dlm_ls *ls);
32
33#endif /* __RECOVER_DOT_H__ */
34
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
new file mode 100644
index 000000000000..70103533677d
--- /dev/null
+++ b/fs/dlm/recoverd.c
@@ -0,0 +1,285 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "dir.h"
18#include "ast.h"
19#include "recover.h"
20#include "lowcomms.h"
21#include "lock.h"
22#include "requestqueue.h"
23#include "recoverd.h"
24
25
26/* If the start for which we're re-enabling locking (seq) has been superseded
27 by a newer stop (ls_recover_seq), we need to leave locking disabled. */
28
29static int enable_locking(struct dlm_ls *ls, uint64_t seq)
30{
31 int error = -EINTR;
32
33 spin_lock(&ls->ls_recover_lock);
34 if (ls->ls_recover_seq == seq) {
35 set_bit(LSFL_RUNNING, &ls->ls_flags);
36 up_write(&ls->ls_in_recovery);
37 error = 0;
38 }
39 spin_unlock(&ls->ls_recover_lock);
40 return error;
41}
42
43static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
44{
45 unsigned long start;
46 int error, neg = 0;
47
48	log_debug(ls, "recover %llx", (unsigned long long) rv->seq);
49
50 mutex_lock(&ls->ls_recoverd_active);
51
52 /*
53 * Suspending and resuming dlm_astd ensures that no lkb's from this ls
54 * will be processed by dlm_astd during recovery.
55 */
56
57 dlm_astd_suspend();
58 dlm_astd_resume();
59
60 /*
61 * This list of root rsb's will be the basis of most of the recovery
62 * routines.
63 */
64
65 dlm_create_root_list(ls);
66
67 /*
68 * Free all the tossed rsb's so we don't have to recover them.
69 */
70
71 dlm_clear_toss_list(ls);
72
73 /*
74 * Add or remove nodes from the lockspace's ls_nodes list.
75 * Also waits for all nodes to complete dlm_recover_members.
76 */
77
78 error = dlm_recover_members(ls, rv, &neg);
79 if (error) {
80 log_error(ls, "recover_members failed %d", error);
81 goto fail;
82 }
83 start = jiffies;
84
85 /*
86 * Rebuild our own share of the directory by collecting from all other
87 * nodes their master rsb names that hash to us.
88 */
89
90 error = dlm_recover_directory(ls);
91 if (error) {
92 log_error(ls, "recover_directory failed %d", error);
93 goto fail;
94 }
95
96 /*
97 * Purge directory-related requests that are saved in requestqueue.
98 * All dir requests from before recovery are invalid now due to the dir
99 * rebuild and will be resent by the requesting nodes.
100 */
101
102 dlm_purge_requestqueue(ls);
103
104 /*
105 * Wait for all nodes to complete directory rebuild.
106 */
107
108 error = dlm_recover_directory_wait(ls);
109 if (error) {
110 log_error(ls, "recover_directory_wait failed %d", error);
111 goto fail;
112 }
113
114 /*
115 * We may have outstanding operations that are waiting for a reply from
116 * a failed node. Mark these to be resent after recovery. Unlock and
117 * cancel ops can just be completed.
118 */
119
120 dlm_recover_waiters_pre(ls);
121
122 error = dlm_recovery_stopped(ls);
123 if (error)
124 goto fail;
125
126 if (neg || dlm_no_directory(ls)) {
127 /*
128 * Clear lkb's for departed nodes.
129 */
130
131 dlm_purge_locks(ls);
132
133 /*
134 * Get new master nodeid's for rsb's that were mastered on
135 * departed nodes.
136 */
137
138 error = dlm_recover_masters(ls);
139 if (error) {
140 log_error(ls, "recover_masters failed %d", error);
141 goto fail;
142 }
143
144 /*
145 * Send our locks on remastered rsb's to the new masters.
146 */
147
148 error = dlm_recover_locks(ls);
149 if (error) {
150 log_error(ls, "recover_locks failed %d", error);
151 goto fail;
152 }
153
154 error = dlm_recover_locks_wait(ls);
155 if (error) {
156 log_error(ls, "recover_locks_wait failed %d", error);
157 goto fail;
158 }
159
160 /*
161 * Finalize state in master rsb's now that all locks can be
162 * checked. This includes conversion resolution and lvb
163 * settings.
164 */
165
166 dlm_recover_rsbs(ls);
167 }
168
169 dlm_release_root_list(ls);
170
171 dlm_set_recover_status(ls, DLM_RS_DONE);
172 error = dlm_recover_done_wait(ls);
173 if (error) {
174 log_error(ls, "recover_done_wait failed %d", error);
175 goto fail;
176 }
177
178 dlm_clear_members_gone(ls);
179
180 error = enable_locking(ls, rv->seq);
181 if (error) {
182 log_error(ls, "enable_locking failed %d", error);
183 goto fail;
184 }
185
186 error = dlm_process_requestqueue(ls);
187 if (error) {
188 log_error(ls, "process_requestqueue failed %d", error);
189 goto fail;
190 }
191
192 error = dlm_recover_waiters_post(ls);
193 if (error) {
194 log_error(ls, "recover_waiters_post failed %d", error);
195 goto fail;
196 }
197
198 dlm_grant_after_purge(ls);
199
200 dlm_astd_wake();
201
202	log_debug(ls, "recover %llx done: %u ms", (unsigned long long) rv->seq,
203		  jiffies_to_msecs(jiffies - start));
204 mutex_unlock(&ls->ls_recoverd_active);
205
206 return 0;
207
208 fail:
209 dlm_release_root_list(ls);
210	log_debug(ls, "recover %llx error %d", (unsigned long long) rv->seq,
211		  error);
211 mutex_unlock(&ls->ls_recoverd_active);
212 return error;
213}
214
215static void do_ls_recovery(struct dlm_ls *ls)
216{
217 struct dlm_recover *rv = NULL;
218
219 spin_lock(&ls->ls_recover_lock);
220 rv = ls->ls_recover_args;
221 ls->ls_recover_args = NULL;
222 clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
223 spin_unlock(&ls->ls_recover_lock);
224
225 if (rv) {
226 ls_recover(ls, rv);
227 kfree(rv->nodeids);
228 kfree(rv);
229 }
230}
231
232static int dlm_recoverd(void *arg)
233{
234 struct dlm_ls *ls;
235
236 ls = dlm_find_lockspace_local(arg);
237
238 while (!kthread_should_stop()) {
239 set_current_state(TASK_INTERRUPTIBLE);
240 if (!test_bit(LSFL_WORK, &ls->ls_flags))
241 schedule();
242 set_current_state(TASK_RUNNING);
243
244 if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
245 do_ls_recovery(ls);
246 }
247
248 dlm_put_lockspace(ls);
249 return 0;
250}
251
252void dlm_recoverd_kick(struct dlm_ls *ls)
253{
254 set_bit(LSFL_WORK, &ls->ls_flags);
255 wake_up_process(ls->ls_recoverd_task);
256}
257
258int dlm_recoverd_start(struct dlm_ls *ls)
259{
260 struct task_struct *p;
261 int error = 0;
262
263 p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
264 if (IS_ERR(p))
265 error = PTR_ERR(p);
266 else
267 ls->ls_recoverd_task = p;
268 return error;
269}
270
271void dlm_recoverd_stop(struct dlm_ls *ls)
272{
273 kthread_stop(ls->ls_recoverd_task);
274}
275
276void dlm_recoverd_suspend(struct dlm_ls *ls)
277{
278 mutex_lock(&ls->ls_recoverd_active);
279}
280
281void dlm_recoverd_resume(struct dlm_ls *ls)
282{
283 mutex_unlock(&ls->ls_recoverd_active);
284}
285
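The expected lifecycle of the per-lockspace recovery thread (a sketch of
the call sites, which live outside this file, presumably in the lockspace
setup and member-change paths):

	error = dlm_recoverd_start(ls);	/* lockspace creation: spawn kthread */
	...
	dlm_recoverd_kick(ls);		/* membership changed: set LSFL_WORK, wake */
	...
	dlm_recoverd_stop(ls);		/* lockspace release: kthread_stop() */

dlm_recoverd_suspend()/dlm_recoverd_resume() simply hold ls_recoverd_active
so callers can keep a recovery pass from running concurrently.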
diff --git a/fs/dlm/recoverd.h b/fs/dlm/recoverd.h
new file mode 100644
index 000000000000..866657c5d69d
--- /dev/null
+++ b/fs/dlm/recoverd.h
@@ -0,0 +1,24 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#ifndef __RECOVERD_DOT_H__
15#define __RECOVERD_DOT_H__
16
17void dlm_recoverd_kick(struct dlm_ls *ls);
18void dlm_recoverd_stop(struct dlm_ls *ls);
19int dlm_recoverd_start(struct dlm_ls *ls);
20void dlm_recoverd_suspend(struct dlm_ls *ls);
21void dlm_recoverd_resume(struct dlm_ls *ls);
22
23#endif /* __RECOVERD_DOT_H__ */
24
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
new file mode 100644
index 000000000000..7b2b089634a2
--- /dev/null
+++ b/fs/dlm/requestqueue.c
@@ -0,0 +1,184 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include "dlm_internal.h"
14#include "member.h"
15#include "lock.h"
16#include "dir.h"
17#include "config.h"
18#include "requestqueue.h"
19
20struct rq_entry {
21 struct list_head list;
22 int nodeid;
23 char request[1];
24};
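
/* request[1] is the old-style flexible-array trick: each entry is
 * allocated as sizeof(struct rq_entry) + message length (see
 * dlm_add_requestqueue() below) and the saved dlm_header is copied in
 * starting at request[0]. */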
25
26/*
27 * Requests received while the lockspace is in recovery get added to the
28 * request queue and processed when recovery is complete. This happens when
29 * the lockspace is suspended on some nodes before it is on others, or the
30 * lockspace is enabled on some while still suspended on others.
31 */
32
33void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
34{
35 struct rq_entry *e;
36 int length = hd->h_length;
37
38 if (dlm_is_removed(ls, nodeid))
39 return;
40
41 e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
42 if (!e) {
43		log_print("dlm_add_requestqueue: out of memory");
44 return;
45 }
46
47 e->nodeid = nodeid;
48 memcpy(e->request, hd, length);
49
50 mutex_lock(&ls->ls_requestqueue_mutex);
51 list_add_tail(&e->list, &ls->ls_requestqueue);
52 mutex_unlock(&ls->ls_requestqueue_mutex);
53}
54
55int dlm_process_requestqueue(struct dlm_ls *ls)
56{
57 struct rq_entry *e;
58 struct dlm_header *hd;
59 int error = 0;
60
61 mutex_lock(&ls->ls_requestqueue_mutex);
62
63 for (;;) {
64 if (list_empty(&ls->ls_requestqueue)) {
65 mutex_unlock(&ls->ls_requestqueue_mutex);
66 error = 0;
67 break;
68 }
69 e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
70 mutex_unlock(&ls->ls_requestqueue_mutex);
71
72 hd = (struct dlm_header *) e->request;
73 error = dlm_receive_message(hd, e->nodeid, 1);
74
75 if (error == -EINTR) {
76 /* entry is left on requestqueue */
77 log_debug(ls, "process_requestqueue abort eintr");
78 break;
79 }
80
81 mutex_lock(&ls->ls_requestqueue_mutex);
82 list_del(&e->list);
83 kfree(e);
84
85 if (dlm_locking_stopped(ls)) {
86 log_debug(ls, "process_requestqueue abort running");
87 mutex_unlock(&ls->ls_requestqueue_mutex);
88 error = -EINTR;
89 break;
90 }
91 schedule();
92 }
93
94 return error;
95}
96
97/*
98 * After recovery is done, locking is resumed and dlm_recoverd takes all the
99 * saved requests and processes them as they would have been by dlm_recvd. At
100 * the same time, dlm_recvd will start receiving new requests from remote
101 * nodes. We want to delay dlm_recvd processing new requests until
102 * dlm_recoverd has finished processing the old saved requests.
103 */
104
105void dlm_wait_requestqueue(struct dlm_ls *ls)
106{
107 for (;;) {
108 mutex_lock(&ls->ls_requestqueue_mutex);
109 if (list_empty(&ls->ls_requestqueue))
110 break;
111 if (dlm_locking_stopped(ls))
112 break;
113 mutex_unlock(&ls->ls_requestqueue_mutex);
114 schedule();
115 }
116 mutex_unlock(&ls->ls_requestqueue_mutex);
117}
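
/* Locking shape above: both break paths leave ls_requestqueue_mutex held,
 * and the single unlock after the loop releases it; only the
 * sleep-and-retry path drops the mutex inside the loop. */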
118
119static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
120{
121 uint32_t type = ms->m_type;
122
123 if (dlm_is_removed(ls, nodeid))
124 return 1;
125
126 /* directory operations are always purged because the directory is
127 always rebuilt during recovery and the lookups resent */
128
129 if (type == DLM_MSG_REMOVE ||
130 type == DLM_MSG_LOOKUP ||
131 type == DLM_MSG_LOOKUP_REPLY)
132 return 1;
133
134 if (!dlm_no_directory(ls))
135 return 0;
136
137 /* with no directory, the master is likely to change as a part of
138 recovery; requests to/from the defunct master need to be purged */
139
140 switch (type) {
141 case DLM_MSG_REQUEST:
142 case DLM_MSG_CONVERT:
143 case DLM_MSG_UNLOCK:
144 case DLM_MSG_CANCEL:
145 /* we're no longer the master of this resource, the sender
146 will resend to the new master (see waiter_needs_recovery) */
147
148 if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
149 return 1;
150 break;
151
152 case DLM_MSG_REQUEST_REPLY:
153 case DLM_MSG_CONVERT_REPLY:
154 case DLM_MSG_UNLOCK_REPLY:
155 case DLM_MSG_CANCEL_REPLY:
156 case DLM_MSG_GRANT:
157 /* this reply is from the former master of the resource,
158 we'll resend to the new master if needed */
159
160 if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
161 return 1;
162 break;
163 }
164
165 return 0;
166}
167
168void dlm_purge_requestqueue(struct dlm_ls *ls)
169{
170 struct dlm_message *ms;
171 struct rq_entry *e, *safe;
172
173 mutex_lock(&ls->ls_requestqueue_mutex);
174 list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
175 ms = (struct dlm_message *) e->request;
176
177 if (purge_request(ls, ms, e->nodeid)) {
178 list_del(&e->list);
179 kfree(e);
180 }
181 }
182 mutex_unlock(&ls->ls_requestqueue_mutex);
183}
184
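A worked example of purge_request() (illustrative): suppose node 3 has
failed and the lockspace runs with no directory.  Anything saved from node
3 itself is purged by the dlm_is_removed() check.  A saved DLM_MSG_LOOKUP
is always purged, since the directory is rebuilt and lookups resent.  A
saved DLM_MSG_REQUEST for a resource whose name now hashes to node 5
rather than to us is purged because the sender will resend it to node 5.
A saved DLM_MSG_CONVERT_REPLY from a surviving node that is no longer the
resource's static master is likewise dropped.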
diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h
new file mode 100644
index 000000000000..349f0d292d95
--- /dev/null
+++ b/fs/dlm/requestqueue.h
@@ -0,0 +1,22 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __REQUESTQUEUE_DOT_H__
14#define __REQUESTQUEUE_DOT_H__
15
16void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd);
17int dlm_process_requestqueue(struct dlm_ls *ls);
18void dlm_wait_requestqueue(struct dlm_ls *ls);
19void dlm_purge_requestqueue(struct dlm_ls *ls);
20
21#endif
22
diff --git a/fs/dlm/util.c b/fs/dlm/util.c
new file mode 100644
index 000000000000..767197db9944
--- /dev/null
+++ b/fs/dlm/util.c
@@ -0,0 +1,161 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#include "dlm_internal.h"
14#include "rcom.h"
15#include "util.h"
16
17static void header_out(struct dlm_header *hd)
18{
19 hd->h_version = cpu_to_le32(hd->h_version);
20 hd->h_lockspace = cpu_to_le32(hd->h_lockspace);
21 hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
22 hd->h_length = cpu_to_le16(hd->h_length);
23}
24
25static void header_in(struct dlm_header *hd)
26{
27 hd->h_version = le32_to_cpu(hd->h_version);
28 hd->h_lockspace = le32_to_cpu(hd->h_lockspace);
29 hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
30 hd->h_length = le16_to_cpu(hd->h_length);
31}
32
33void dlm_message_out(struct dlm_message *ms)
34{
35 struct dlm_header *hd = (struct dlm_header *) ms;
36
37 header_out(hd);
38
39 ms->m_type = cpu_to_le32(ms->m_type);
40 ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
41 ms->m_pid = cpu_to_le32(ms->m_pid);
42 ms->m_lkid = cpu_to_le32(ms->m_lkid);
43 ms->m_remid = cpu_to_le32(ms->m_remid);
44 ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid);
45 ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid);
46 ms->m_exflags = cpu_to_le32(ms->m_exflags);
47 ms->m_sbflags = cpu_to_le32(ms->m_sbflags);
48 ms->m_flags = cpu_to_le32(ms->m_flags);
49 ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq);
50 ms->m_hash = cpu_to_le32(ms->m_hash);
51 ms->m_status = cpu_to_le32(ms->m_status);
52 ms->m_grmode = cpu_to_le32(ms->m_grmode);
53 ms->m_rqmode = cpu_to_le32(ms->m_rqmode);
54 ms->m_bastmode = cpu_to_le32(ms->m_bastmode);
55 ms->m_asts = cpu_to_le32(ms->m_asts);
56 ms->m_result = cpu_to_le32(ms->m_result);
57}
58
59void dlm_message_in(struct dlm_message *ms)
60{
61 struct dlm_header *hd = (struct dlm_header *) ms;
62
63 header_in(hd);
64
65 ms->m_type = le32_to_cpu(ms->m_type);
66 ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
67 ms->m_pid = le32_to_cpu(ms->m_pid);
68 ms->m_lkid = le32_to_cpu(ms->m_lkid);
69 ms->m_remid = le32_to_cpu(ms->m_remid);
70 ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
71 ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
72 ms->m_exflags = le32_to_cpu(ms->m_exflags);
73 ms->m_sbflags = le32_to_cpu(ms->m_sbflags);
74 ms->m_flags = le32_to_cpu(ms->m_flags);
75 ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
76 ms->m_hash = le32_to_cpu(ms->m_hash);
77 ms->m_status = le32_to_cpu(ms->m_status);
78 ms->m_grmode = le32_to_cpu(ms->m_grmode);
79 ms->m_rqmode = le32_to_cpu(ms->m_rqmode);
80 ms->m_bastmode = le32_to_cpu(ms->m_bastmode);
81 ms->m_asts = le32_to_cpu(ms->m_asts);
82 ms->m_result = le32_to_cpu(ms->m_result);
83}
84
85static void rcom_lock_out(struct rcom_lock *rl)
86{
87 rl->rl_ownpid = cpu_to_le32(rl->rl_ownpid);
88 rl->rl_lkid = cpu_to_le32(rl->rl_lkid);
89 rl->rl_remid = cpu_to_le32(rl->rl_remid);
90 rl->rl_parent_lkid = cpu_to_le32(rl->rl_parent_lkid);
91 rl->rl_parent_remid = cpu_to_le32(rl->rl_parent_remid);
92 rl->rl_exflags = cpu_to_le32(rl->rl_exflags);
93 rl->rl_flags = cpu_to_le32(rl->rl_flags);
94 rl->rl_lvbseq = cpu_to_le32(rl->rl_lvbseq);
95 rl->rl_result = cpu_to_le32(rl->rl_result);
96 rl->rl_wait_type = cpu_to_le16(rl->rl_wait_type);
97 rl->rl_namelen = cpu_to_le16(rl->rl_namelen);
98}
99
100static void rcom_lock_in(struct rcom_lock *rl)
101{
102 rl->rl_ownpid = le32_to_cpu(rl->rl_ownpid);
103 rl->rl_lkid = le32_to_cpu(rl->rl_lkid);
104 rl->rl_remid = le32_to_cpu(rl->rl_remid);
105 rl->rl_parent_lkid = le32_to_cpu(rl->rl_parent_lkid);
106 rl->rl_parent_remid = le32_to_cpu(rl->rl_parent_remid);
107 rl->rl_exflags = le32_to_cpu(rl->rl_exflags);
108 rl->rl_flags = le32_to_cpu(rl->rl_flags);
109 rl->rl_lvbseq = le32_to_cpu(rl->rl_lvbseq);
110 rl->rl_result = le32_to_cpu(rl->rl_result);
111 rl->rl_wait_type = le16_to_cpu(rl->rl_wait_type);
112 rl->rl_namelen = le16_to_cpu(rl->rl_namelen);
113}
114
115static void rcom_config_out(struct rcom_config *rf)
116{
117 rf->rf_lvblen = cpu_to_le32(rf->rf_lvblen);
118 rf->rf_lsflags = cpu_to_le32(rf->rf_lsflags);
119}
120
121static void rcom_config_in(struct rcom_config *rf)
122{
123 rf->rf_lvblen = le32_to_cpu(rf->rf_lvblen);
124 rf->rf_lsflags = le32_to_cpu(rf->rf_lsflags);
125}
126
127void dlm_rcom_out(struct dlm_rcom *rc)
128{
129 struct dlm_header *hd = (struct dlm_header *) rc;
130 int type = rc->rc_type;
131
132 header_out(hd);
133
134 rc->rc_type = cpu_to_le32(rc->rc_type);
135 rc->rc_result = cpu_to_le32(rc->rc_result);
136 rc->rc_id = cpu_to_le64(rc->rc_id);
137
138 if (type == DLM_RCOM_LOCK)
139 rcom_lock_out((struct rcom_lock *) rc->rc_buf);
140
141 else if (type == DLM_RCOM_STATUS_REPLY)
142 rcom_config_out((struct rcom_config *) rc->rc_buf);
143}
144
145void dlm_rcom_in(struct dlm_rcom *rc)
146{
147 struct dlm_header *hd = (struct dlm_header *) rc;
148
149 header_in(hd);
150
151 rc->rc_type = le32_to_cpu(rc->rc_type);
152 rc->rc_result = le32_to_cpu(rc->rc_result);
153 rc->rc_id = le64_to_cpu(rc->rc_id);
154
155 if (rc->rc_type == DLM_RCOM_LOCK)
156 rcom_lock_in((struct rcom_lock *) rc->rc_buf);
157
158 else if (rc->rc_type == DLM_RCOM_STATUS_REPLY)
159 rcom_config_in((struct rcom_config *) rc->rc_buf);
160}
161
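The helpers are deliberately symmetric and convert in place on the
send/receive buffer.  The intended round trip looks like this (a sketch;
send_buffer()/recv_buffer() are hypothetical placeholders for the lowcomms
path, and len for the message length):

	/* sender: convert just before the bytes hit the wire */
	dlm_message_out(ms);
	send_buffer(ms, len);

	/* receiver: convert straight after the bytes arrive */
	recv_buffer(ms, len);
	dlm_message_in(ms);

dlm_rcom_out() and dlm_rcom_in() follow the same pattern, additionally
converting the embedded rcom_lock or rcom_config payload according to
rc_type.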
diff --git a/fs/dlm/util.h b/fs/dlm/util.h
new file mode 100644
index 000000000000..2b25915161c0
--- /dev/null
+++ b/fs/dlm/util.h
@@ -0,0 +1,22 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) 2005 Red Hat, Inc. All rights reserved.
5**
6** This copyrighted material is made available to anyone wishing to use,
7** modify, copy, or redistribute it subject to the terms and conditions
8** of the GNU General Public License v.2.
9**
10*******************************************************************************
11******************************************************************************/
12
13#ifndef __UTIL_DOT_H__
14#define __UTIL_DOT_H__
15
16void dlm_message_out(struct dlm_message *ms);
17void dlm_message_in(struct dlm_message *ms);
18void dlm_rcom_out(struct dlm_rcom *rc);
19void dlm_rcom_in(struct dlm_rcom *rc);
20
21#endif
22