author    David Teigland <teigland@redhat.com>    2006-07-12 17:44:04 -0400
committer Steven Whitehouse <swhiteho@redhat.com> 2006-07-13 09:25:34 -0400
commit    597d0cae0f99f62501e229bed50e8149604015bb (patch)
tree      b6cab09ff6fe2246740848164c0a52d5c03136a0 /fs/dlm
parent    2eb168ca94aba3bcae350ad9b31870955174a218 (diff)
[DLM] dlm: user locks
This changes the way the dlm handles user locks. The core dlm is now aware of
user locks so they can be dealt with more efficiently. There is no more
dlm_device module which previously managed its own duplicate copy of every
user lock.

Signed-off-by: Patrick Caulfield <pcaulfie@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
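For orientation only (this example is not part of the patch): after this change, userspace reaches the dlm through misc character devices served by the new fs/dlm/user.c instead of through the removed dlm_device module. The sketch below shows roughly how a caller could submit a lock request by writing a struct dlm_write_request to a lockspace device. Real applications should use libdlm; the device node name (/dev/misc/dlm_example) is an assumption that depends on udev, and the structure layout is taken from <linux/dlm_device.h> as referenced by the code in this patch.

/* Illustrative userspace sketch only -- not part of the patch.  Assumes a
 * lockspace named "example" already exists and that udev exposed its misc
 * device as /dev/misc/dlm_example (hypothetical path). */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

static struct dlm_lksb lksb;		/* status/lkid come back via the device
					   read(); libdlm copies them in here */

static void ast_func(void *arg)		/* invoked by libdlm once it reads the result */
{
}

int main(void)
{
	const char *res = "myresource";
	size_t len = sizeof(struct dlm_write_request) + strlen(res);
	struct dlm_write_request *req = calloc(1, len);
	int fd = open("/dev/misc/dlm_example", O_RDWR);

	if (fd < 0 || !req)
		return 1;

	req->version[0] = DLM_DEVICE_VERSION_MAJOR;
	req->version[1] = DLM_DEVICE_VERSION_MINOR;
	req->version[2] = DLM_DEVICE_VERSION_PATCH;
	req->is64bit = (sizeof(long) == 8);	/* tells the kernel whether compat
						   translation is needed */
	req->cmd = DLM_USER_LOCK;

	req->i.lock.mode = DLM_LOCK_EX;
	req->i.lock.flags = 0;
	req->i.lock.namelen = strlen(res);
	req->i.lock.lksb = &lksb;		/* kernel keeps these userspace pointers */
	req->i.lock.castaddr = (void *)ast_func;
	memcpy(req->i.lock.name, res, strlen(res));

	if (write(fd, req, len) < 0)		/* the lock request enters the core dlm */
		perror("dlm lock request");

	close(fd);
	free(req);
	return 0;
}

libdlm wraps roughly this write()/read() protocol: it writes requests, polls the device, reads back a struct dlm_lock_result, and then invokes the saved castaddr callback in the caller's context.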
Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/Kconfig         |    8
-rw-r--r--  fs/dlm/Makefile        |    4
-rw-r--r--  fs/dlm/ast.c           |    7
-rw-r--r--  fs/dlm/device.c        | 1239
-rw-r--r--  fs/dlm/dlm_internal.h  |   44
-rw-r--r--  fs/dlm/lock.c          |  304
-rw-r--r--  fs/dlm/lock.h          |   11
-rw-r--r--  fs/dlm/lockspace.c     |   32
-rw-r--r--  fs/dlm/lockspace.h     |    1
-rw-r--r--  fs/dlm/main.c          |    8
-rw-r--r--  fs/dlm/memory.c        |    9
-rw-r--r--  fs/dlm/user.c          |  769
-rw-r--r--  fs/dlm/user.h          |   16
13 files changed, 1192 insertions, 1260 deletions
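The new fs/dlm/user.c (769 lines in the diffstat) is not reproduced in this excerpt. As a hedged sketch of how that device layer is expected to hand a userspace request to the core, using only interfaces visible in this patch (struct dlm_user_proc and struct dlm_user_args from dlm_internal.h, dlm_find_lockspace_local() from lockspace.h, dlm_user_request() from lock.h): the function name example_device_lock() is invented for illustration and error handling is trimmed.

/* Hypothetical sketch, not the actual fs/dlm/user.c.  Assumes it is built
 * inside fs/dlm so the private headers below are available. */
#include <linux/dlm_device.h>
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"

static int example_device_lock(struct dlm_user_proc *proc,
			       struct dlm_lock_params *params)
{
	struct dlm_ls *ls;
	struct dlm_user_args *ua;
	int error;

	ls = dlm_find_lockspace_local(proc->lockspace);
	if (!ls)
		return -ENOENT;

	ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
	if (!ua) {
		dlm_put_lockspace(ls);
		return -ENOMEM;
	}

	/* remember where the results and callbacks live in userspace;
	   the core hands this back when it queues an ast for the lock */
	ua->proc = proc;
	ua->user_lksb = params->lksb;
	ua->castparam = params->castparam;
	ua->castaddr = params->castaddr;
	ua->bastparam = params->bastparam;
	ua->bastaddr = params->bastaddr;

	error = dlm_user_request(ls, ua, params->mode, params->flags,
				 params->name, params->namelen,
				 params->parent);
	dlm_put_lockspace(ls);

	/* on success the core has attached ua to the new lkb and filled
	   in the lock id */
	return error ? error : ua->lksb.sb_lkid;
}

On success the dlm_user_args stays attached to the lkb, which is how later ASTs find their way back to the right process, and dlm_clear_proc_locks() (added to lock.c below) walks proc->locks at device close to unlock or orphan whatever the process left behind.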
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index 09e78bf6e7a4..490f85b3fa59 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -10,14 +10,6 @@ config DLM
 	  A general purpose distributed lock manager for kernel or userspace
 	  applications.
 
-config DLM_DEVICE
-	tristate "DLM device for userspace access"
-	depends on DLM
-	help
-	  This module creates a misc device through which the dlm lockspace
-	  and locking functions become available to userspace applications
-	  (usually through the libdlm library).
-
 config DLM_DEBUG
 	bool "DLM debugging"
 	depends on DLM
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index 1e6232e7d8e5..1832e0297f7d 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -1,6 +1,4 @@
 obj-$(CONFIG_DLM) += dlm.o
-obj-$(CONFIG_DLM_DEVICE) += dlm_device.o
-
 dlm-y := ast.o \
 	config.o \
 	dir.o \
@@ -15,7 +13,7 @@ dlm-y := ast.o \
 	recover.o \
 	recoverd.o \
 	requestqueue.o \
+	user.o \
 	util.o
 dlm-$(CONFIG_DLM_DEBUG) += debug_fs.o
 
-dlm_device-y := device.o
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 57bdf09b520a..a211330cbc42 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -13,7 +13,7 @@
 
 #include "dlm_internal.h"
 #include "lock.h"
-#include "ast.h"
+#include "user.h"
 
 #define WAKE_ASTS 0
 
@@ -34,6 +34,11 @@ void dlm_del_ast(struct dlm_lkb *lkb)
 
 void dlm_add_ast(struct dlm_lkb *lkb, int type)
 {
+	if (lkb->lkb_flags & DLM_IFL_USER) {
+		dlm_user_add_ast(lkb, type);
+		return;
+	}
+
 	spin_lock(&ast_queue_lock);
 	if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
 		kref_get(&lkb->lkb_ref);
diff --git a/fs/dlm/device.c b/fs/dlm/device.c
deleted file mode 100644
index 825bbc0a09c0..000000000000
--- a/fs/dlm/device.c
+++ /dev/null
@@ -1,1239 +0,0 @@
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5** Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14/*
15 * device.c
16 *
17 * This is the userland interface to the DLM.
18 *
19 * The locking is done via a misc char device (find the
20 * registered minor number in /proc/misc).
21 *
22 * User code should not use this interface directly but
23 * call the library routines in libdlm.a instead.
24 *
25 */
26
27#include <linux/miscdevice.h>
28#include <linux/init.h>
29#include <linux/wait.h>
30#include <linux/module.h>
31#include <linux/file.h>
32#include <linux/fs.h>
33#include <linux/poll.h>
34#include <linux/signal.h>
35#include <linux/spinlock.h>
36#include <linux/idr.h>
37
38#include <linux/dlm.h>
39#include <linux/dlm_device.h>
40
41#include "lvb_table.h"
42
43static struct file_operations _dlm_fops;
44static const char *name_prefix="dlm";
45static struct list_head user_ls_list;
46static struct mutex user_ls_lock;
47
48/* Flags in li_flags */
49#define LI_FLAG_COMPLETE 1
50#define LI_FLAG_FIRSTLOCK 2
51#define LI_FLAG_PERSISTENT 3
52#define LI_FLAG_ONLIST 4
53
54/* flags in ls_flags*/
55#define LS_FLAG_DELETED 1
56#define LS_FLAG_AUTOFREE 2
57
58/* flags in ls_flags*/
59#define FI_FLAG_OPEN 1
60#define FI_FLAG_COMPAT 2
61
62#define LOCKINFO_MAGIC 0x53595324
63
64struct lock_info {
65 uint32_t li_magic;
66 uint8_t li_cmd;
67 int8_t li_grmode;
68 int8_t li_rqmode;
69 struct dlm_lksb li_lksb;
70 wait_queue_head_t li_waitq;
71 unsigned long li_flags;
72 void __user *li_castparam;
73 void __user *li_castaddr;
74 void __user *li_bastparam;
75 void __user *li_bastaddr;
76 void __user *li_pend_bastparam;
77 void __user *li_pend_bastaddr;
78 struct list_head li_ownerqueue;
79 struct file_info *li_file;
80 struct dlm_lksb __user *li_user_lksb;
81 struct completion li_firstcomp;
82};
83
84/* A queued AST no less */
85struct ast_info {
86 struct dlm_lock_result result;
87 struct list_head list;
88 uint32_t lvb_updated;
89 uint32_t progress; /* How much has been read */
90};
91
92/* One of these per userland lockspace */
93struct user_ls {
94 void *ls_lockspace;
95 atomic_t ls_refcnt;
96 long ls_flags;
97
98 /* Lock infos are stored in here indexed by lock ID */
99 struct idr lockinfo_idr;
100 rwlock_t lockinfo_lock;
101
102 /* Passed into misc_register() */
103 struct miscdevice ls_miscinfo;
104 struct list_head ls_list;
105};
106
107/* misc_device info for the control device */
108static struct miscdevice ctl_device;
109
110/*
111 * Stuff we hang off the file struct.
112 * The first two are to cope with unlocking all the
113 * locks help by a process when it dies.
114 */
115struct file_info {
116 struct list_head fi_li_list; /* List of active lock_infos */
117 spinlock_t fi_li_lock;
118 struct list_head fi_ast_list; /* Queue of ASTs to be delivered */
119 spinlock_t fi_ast_lock;
120 wait_queue_head_t fi_wait;
121 struct user_ls *fi_ls;
122 atomic_t fi_refcnt; /* Number of users */
123 unsigned long fi_flags;
124};
125
126#ifdef CONFIG_COMPAT
127
128struct dlm_lock_params32 {
129 __u8 mode;
130 __u8 namelen;
131 __u16 flags;
132 __u32 lkid;
133 __u32 parent;
134
135 __u32 castparam;
136 __u32 castaddr;
137 __u32 bastparam;
138 __u32 bastaddr;
139 __u32 lksb;
140
141 char lvb[DLM_USER_LVB_LEN];
142 char name[0];
143};
144
145struct dlm_write_request32 {
146 __u32 version[3];
147 __u8 cmd;
148 __u8 is64bit;
149 __u8 unused[2];
150
151 union {
152 struct dlm_lock_params32 lock;
153 struct dlm_lspace_params lspace;
154 } i;
155};
156
157struct dlm_lksb32 {
158 __u32 sb_status;
159 __u32 sb_lkid;
160 __u8 sb_flags;
161 __u32 sb_lvbptr;
162};
163
164struct dlm_lock_result32 {
165 __u32 length;
166 __u32 user_astaddr;
167 __u32 user_astparam;
168 __u32 user_lksb;
169 struct dlm_lksb32 lksb;
170 __u8 bast_mode;
171 __u8 unused[3];
172 /* Offsets may be zero if no data is present */
173 __u32 lvb_offset;
174};
175
176
177static void compat_input(struct dlm_write_request *kparams, struct dlm_write_request32 *k32params)
178{
179
180 kparams->version[0] = k32params->version[0];
181 kparams->version[1] = k32params->version[1];
182 kparams->version[2] = k32params->version[2];
183
184 kparams->cmd = k32params->cmd;
185 kparams->is64bit = k32params->is64bit;
186 if (kparams->cmd == DLM_USER_CREATE_LOCKSPACE ||
187 kparams->cmd == DLM_USER_REMOVE_LOCKSPACE) {
188
189 kparams->i.lspace.flags = k32params->i.lspace.flags;
190 kparams->i.lspace.minor = k32params->i.lspace.minor;
191 strcpy(kparams->i.lspace.name, k32params->i.lspace.name);
192 }
193 else {
194 kparams->i.lock.mode = k32params->i.lock.mode;
195 kparams->i.lock.namelen = k32params->i.lock.namelen;
196 kparams->i.lock.flags = k32params->i.lock.flags;
197 kparams->i.lock.lkid = k32params->i.lock.lkid;
198 kparams->i.lock.parent = k32params->i.lock.parent;
199 kparams->i.lock.castparam = (void *)(long)k32params->i.lock.castparam;
200 kparams->i.lock.castaddr = (void *)(long)k32params->i.lock.castaddr;
201 kparams->i.lock.bastparam = (void *)(long)k32params->i.lock.bastparam;
202 kparams->i.lock.bastaddr = (void *)(long)k32params->i.lock.bastaddr;
203 kparams->i.lock.lksb = (void *)(long)k32params->i.lock.lksb;
204 memcpy(kparams->i.lock.lvb, k32params->i.lock.lvb, DLM_USER_LVB_LEN);
205 memcpy(kparams->i.lock.name, k32params->i.lock.name, kparams->i.lock.namelen);
206 }
207}
208
209void compat_output(struct dlm_lock_result *res, struct dlm_lock_result32 *res32)
210{
211 res32->length = res->length - (sizeof(struct dlm_lock_result) - sizeof(struct dlm_lock_result32));
212 res32->user_astaddr = (__u32)(long)res->user_astaddr;
213 res32->user_astparam = (__u32)(long)res->user_astparam;
214 res32->user_lksb = (__u32)(long)res->user_lksb;
215 res32->bast_mode = res->bast_mode;
216
217 res32->lvb_offset = res->lvb_offset;
218 res32->length = res->length;
219
220 res32->lksb.sb_status = res->lksb.sb_status;
221 res32->lksb.sb_flags = res->lksb.sb_flags;
222 res32->lksb.sb_lkid = res->lksb.sb_lkid;
223 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
224}
225#endif
226
227
228/* get and put ops for file_info.
229 Actually I don't really like "get" and "put", but everyone
230 else seems to use them and I can't think of anything
231 nicer at the moment */
232static void get_file_info(struct file_info *f)
233{
234 atomic_inc(&f->fi_refcnt);
235}
236
237static void put_file_info(struct file_info *f)
238{
239 if (atomic_dec_and_test(&f->fi_refcnt))
240 kfree(f);
241}
242
243static void release_lockinfo(struct user_ls *ls, struct lock_info *li)
244{
245 put_file_info(li->li_file);
246
247 write_lock(&ls->lockinfo_lock);
248 idr_remove(&ls->lockinfo_idr, li->li_lksb.sb_lkid);
249 write_unlock(&ls->lockinfo_lock);
250
251 if (li->li_lksb.sb_lvbptr)
252 kfree(li->li_lksb.sb_lvbptr);
253 kfree(li);
254
255 module_put(THIS_MODULE);
256}
257
258static struct lock_info *get_lockinfo(struct user_ls *ls, uint32_t lockid)
259{
260 struct lock_info *li;
261
262 read_lock(&ls->lockinfo_lock);
263 li = idr_find(&ls->lockinfo_idr, lockid);
264 read_unlock(&ls->lockinfo_lock);
265
266 return li;
267}
268
269static int add_lockinfo(struct user_ls *ls, struct lock_info *li)
270{
271 int n;
272 int r;
273 int ret = -EINVAL;
274
275 write_lock(&ls->lockinfo_lock);
276
277 if (idr_find(&ls->lockinfo_idr, li->li_lksb.sb_lkid))
278 goto out_up;
279
280 ret = -ENOMEM;
281 r = idr_pre_get(&ls->lockinfo_idr, GFP_KERNEL);
282 if (!r)
283 goto out_up;
284
285 r = idr_get_new_above(&ls->lockinfo_idr, li, li->li_lksb.sb_lkid, &n);
286 if (r)
287 goto out_up;
288
289 if (n != li->li_lksb.sb_lkid) {
290 idr_remove(&ls->lockinfo_idr, n);
291 goto out_up;
292 }
293
294 ret = 0;
295
296 out_up:
297 write_unlock(&ls->lockinfo_lock);
298
299 return ret;
300}
301
302
303static struct user_ls *__find_lockspace(int minor)
304{
305 struct user_ls *lsinfo;
306
307 list_for_each_entry(lsinfo, &user_ls_list, ls_list) {
308 if (lsinfo->ls_miscinfo.minor == minor)
309 return lsinfo;
310 }
311 return NULL;
312}
313
314/* Find a lockspace struct given the device minor number */
315static struct user_ls *find_lockspace(int minor)
316{
317 struct user_ls *lsinfo;
318
319 mutex_lock(&user_ls_lock);
320 lsinfo = __find_lockspace(minor);
321 mutex_unlock(&user_ls_lock);
322
323 return lsinfo;
324}
325
326static void add_lockspace_to_list(struct user_ls *lsinfo)
327{
328 mutex_lock(&user_ls_lock);
329 list_add(&lsinfo->ls_list, &user_ls_list);
330 mutex_unlock(&user_ls_lock);
331}
332
333/* Register a lockspace with the DLM and create a misc
334 device for userland to access it */
335static int register_lockspace(char *name, struct user_ls **ls, int flags)
336{
337 struct user_ls *newls;
338 int status;
339 int namelen;
340
341 namelen = strlen(name)+strlen(name_prefix)+2;
342
343 newls = kzalloc(sizeof(struct user_ls), GFP_KERNEL);
344 if (!newls)
345 return -ENOMEM;
346
347 newls->ls_miscinfo.name = kzalloc(namelen, GFP_KERNEL);
348 if (!newls->ls_miscinfo.name) {
349 kfree(newls);
350 return -ENOMEM;
351 }
352
353 status = dlm_new_lockspace(name, strlen(name), &newls->ls_lockspace, 0,
354 DLM_USER_LVB_LEN);
355 if (status != 0) {
356 kfree(newls->ls_miscinfo.name);
357 kfree(newls);
358 return status;
359 }
360
361 idr_init(&newls->lockinfo_idr);
362 rwlock_init(&newls->lockinfo_lock);
363
364 snprintf((char*)newls->ls_miscinfo.name, namelen, "%s_%s",
365 name_prefix, name);
366
367 newls->ls_miscinfo.fops = &_dlm_fops;
368 newls->ls_miscinfo.minor = MISC_DYNAMIC_MINOR;
369
370 status = misc_register(&newls->ls_miscinfo);
371 if (status) {
372 printk(KERN_ERR "dlm: misc register failed for %s\n", name);
373 dlm_release_lockspace(newls->ls_lockspace, 0);
374 kfree(newls->ls_miscinfo.name);
375 kfree(newls);
376 return status;
377 }
378
379 if (flags & DLM_USER_LSFLG_AUTOFREE)
380 set_bit(LS_FLAG_AUTOFREE, &newls->ls_flags);
381
382 add_lockspace_to_list(newls);
383 *ls = newls;
384 return 0;
385}
386
387/* Called with the user_ls_lock mutex held */
388static int unregister_lockspace(struct user_ls *lsinfo, int force)
389{
390 int status;
391
392 status = dlm_release_lockspace(lsinfo->ls_lockspace, force);
393 if (status)
394 return status;
395
396 status = misc_deregister(&lsinfo->ls_miscinfo);
397 if (status)
398 return status;
399
400 list_del(&lsinfo->ls_list);
401 set_bit(LS_FLAG_DELETED, &lsinfo->ls_flags);
402 lsinfo->ls_lockspace = NULL;
403 if (atomic_read(&lsinfo->ls_refcnt) == 0) {
404 kfree(lsinfo->ls_miscinfo.name);
405 kfree(lsinfo);
406 }
407
408 return 0;
409}
410
411/* Add it to userland's AST queue */
412static void add_to_astqueue(struct lock_info *li, void *astaddr, void *astparam,
413 int lvb_updated)
414{
415 struct ast_info *ast = kzalloc(sizeof(struct ast_info), GFP_KERNEL);
416 if (!ast)
417 return;
418
419 ast->result.user_astparam = astparam;
420 ast->result.user_astaddr = astaddr;
421 ast->result.user_lksb = li->li_user_lksb;
422 memcpy(&ast->result.lksb, &li->li_lksb, sizeof(struct dlm_lksb));
423 ast->lvb_updated = lvb_updated;
424
425 spin_lock(&li->li_file->fi_ast_lock);
426 list_add_tail(&ast->list, &li->li_file->fi_ast_list);
427 spin_unlock(&li->li_file->fi_ast_lock);
428 wake_up_interruptible(&li->li_file->fi_wait);
429}
430
431static void bast_routine(void *param, int mode)
432{
433 struct lock_info *li = param;
434
435 if (li && li->li_bastaddr)
436 add_to_astqueue(li, li->li_bastaddr, li->li_bastparam, 0);
437}
438
439/*
440 * This is the kernel's AST routine.
441 * All lock, unlock & query operations complete here.
442 * The only syncronous ops are those done during device close.
443 */
444static void ast_routine(void *param)
445{
446 struct lock_info *li = param;
447
448 /* Param may be NULL if a persistent lock is unlocked by someone else */
449 if (!li)
450 return;
451
452 /* If this is a succesful conversion then activate the blocking ast
453 * args from the conversion request */
454 if (!test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
455 li->li_lksb.sb_status == 0) {
456
457 li->li_bastparam = li->li_pend_bastparam;
458 li->li_bastaddr = li->li_pend_bastaddr;
459 li->li_pend_bastaddr = NULL;
460 }
461
462 /* If it's an async request then post data to the user's AST queue. */
463 if (li->li_castaddr) {
464 int lvb_updated = 0;
465
466 /* See if the lvb has been updated */
467 if (dlm_lvb_operations[li->li_grmode+1][li->li_rqmode+1] == 1)
468 lvb_updated = 1;
469
470 if (li->li_lksb.sb_status == 0)
471 li->li_grmode = li->li_rqmode;
472
473 /* Only queue AST if the device is still open */
474 if (test_bit(FI_FLAG_OPEN, &li->li_file->fi_flags))
475 add_to_astqueue(li, li->li_castaddr, li->li_castparam,
476 lvb_updated);
477
478 /* If it's a new lock operation that failed, then
479 * remove it from the owner queue and free the
480 * lock_info.
481 */
482 if (test_and_clear_bit(LI_FLAG_FIRSTLOCK, &li->li_flags) &&
483 li->li_lksb.sb_status != 0) {
484
485 /* Wait till dlm_lock() has finished */
486 wait_for_completion(&li->li_firstcomp);
487
488 spin_lock(&li->li_file->fi_li_lock);
489 list_del(&li->li_ownerqueue);
490 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
491 spin_unlock(&li->li_file->fi_li_lock);
492 release_lockinfo(li->li_file->fi_ls, li);
493 return;
494 }
495 /* Free unlocks & queries */
496 if (li->li_lksb.sb_status == -DLM_EUNLOCK ||
497 li->li_cmd == DLM_USER_QUERY) {
498 release_lockinfo(li->li_file->fi_ls, li);
499 }
500 } else {
501 /* Synchronous request, just wake up the caller */
502 set_bit(LI_FLAG_COMPLETE, &li->li_flags);
503 wake_up_interruptible(&li->li_waitq);
504 }
505}
506
507/*
508 * Wait for the lock op to complete and return the status.
509 */
510static int wait_for_ast(struct lock_info *li)
511{
512 /* Wait for the AST routine to complete */
513 set_task_state(current, TASK_INTERRUPTIBLE);
514 while (!test_bit(LI_FLAG_COMPLETE, &li->li_flags))
515 schedule();
516
517 set_task_state(current, TASK_RUNNING);
518
519 return li->li_lksb.sb_status;
520}
521
522
523/* Open on control device */
524static int dlm_ctl_open(struct inode *inode, struct file *file)
525{
526 file->private_data = NULL;
527 return 0;
528}
529
530/* Close on control device */
531static int dlm_ctl_close(struct inode *inode, struct file *file)
532{
533 return 0;
534}
535
536/* Open on lockspace device */
537static int dlm_open(struct inode *inode, struct file *file)
538{
539 struct file_info *f;
540 struct user_ls *lsinfo;
541
542 lsinfo = find_lockspace(iminor(inode));
543 if (!lsinfo)
544 return -ENOENT;
545
546 f = kzalloc(sizeof(struct file_info), GFP_KERNEL);
547 if (!f)
548 return -ENOMEM;
549
550 atomic_inc(&lsinfo->ls_refcnt);
551 INIT_LIST_HEAD(&f->fi_li_list);
552 INIT_LIST_HEAD(&f->fi_ast_list);
553 spin_lock_init(&f->fi_li_lock);
554 spin_lock_init(&f->fi_ast_lock);
555 init_waitqueue_head(&f->fi_wait);
556 f->fi_ls = lsinfo;
557 f->fi_flags = 0;
558 get_file_info(f);
559 set_bit(FI_FLAG_OPEN, &f->fi_flags);
560
561 file->private_data = f;
562
563 return 0;
564}
565
566/* Check the user's version matches ours */
567static int check_version(struct dlm_write_request *req)
568{
569 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
570 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
571 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
572
573 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
574 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
575 current->comm,
576 current->pid,
577 req->version[0],
578 req->version[1],
579 req->version[2],
580 DLM_DEVICE_VERSION_MAJOR,
581 DLM_DEVICE_VERSION_MINOR,
582 DLM_DEVICE_VERSION_PATCH);
583 return -EINVAL;
584 }
585 return 0;
586}
587
588/* Close on lockspace device */
589static int dlm_close(struct inode *inode, struct file *file)
590{
591 struct file_info *f = file->private_data;
592 struct lock_info li;
593 struct lock_info *old_li, *safe;
594 sigset_t tmpsig;
595 sigset_t allsigs;
596 struct user_ls *lsinfo;
597 DECLARE_WAITQUEUE(wq, current);
598
599 lsinfo = find_lockspace(iminor(inode));
600 if (!lsinfo)
601 return -ENOENT;
602
603 /* Mark this closed so that ASTs will not be delivered any more */
604 clear_bit(FI_FLAG_OPEN, &f->fi_flags);
605
606 /* Block signals while we are doing this */
607 sigfillset(&allsigs);
608 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
609
610 /* We use our own lock_info struct here, so that any
611 * outstanding "real" ASTs will be delivered with the
612 * corresponding "real" params, thus freeing the lock_info
613 * that belongs the lock. This catches the corner case where
614 * a lock is BUSY when we try to unlock it here
615 */
616 memset(&li, 0, sizeof(li));
617 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
618 init_waitqueue_head(&li.li_waitq);
619 add_wait_queue(&li.li_waitq, &wq);
620
621 /*
622 * Free any outstanding locks, they are on the
623 * list in LIFO order so there should be no problems
624 * about unlocking parents before children.
625 */
626 list_for_each_entry_safe(old_li, safe, &f->fi_li_list, li_ownerqueue) {
627 int status;
628 int flags = 0;
629
630 /* Don't unlock persistent locks, just mark them orphaned */
631 if (test_bit(LI_FLAG_PERSISTENT, &old_li->li_flags)) {
632 list_del(&old_li->li_ownerqueue);
633
634 /* Update master copy */
635 /* TODO: Check locking core updates the local and
636 remote ORPHAN flags */
637 li.li_lksb.sb_lkid = old_li->li_lksb.sb_lkid;
638 status = dlm_lock(f->fi_ls->ls_lockspace,
639 old_li->li_grmode, &li.li_lksb,
640 DLM_LKF_CONVERT|DLM_LKF_ORPHAN,
641 NULL, 0, 0, ast_routine, NULL, NULL);
642 if (status != 0)
643 printk("dlm: Error orphaning lock %x: %d\n",
644 old_li->li_lksb.sb_lkid, status);
645
646 /* But tidy our references in it */
647 release_lockinfo(old_li->li_file->fi_ls, old_li);
648 continue;
649 }
650
651 clear_bit(LI_FLAG_COMPLETE, &li.li_flags);
652
653 flags = DLM_LKF_FORCEUNLOCK;
654 if (old_li->li_grmode >= DLM_LOCK_PW)
655 flags |= DLM_LKF_IVVALBLK;
656
657 status = dlm_unlock(f->fi_ls->ls_lockspace,
658 old_li->li_lksb.sb_lkid, flags,
659 &li.li_lksb, &li);
660
661 /* Must wait for it to complete as the next lock could be its
662 * parent */
663 if (status == 0)
664 wait_for_ast(&li);
665
666 /* Unlock suceeded, free the lock_info struct. */
667 if (status == 0)
668 release_lockinfo(old_li->li_file->fi_ls, old_li);
669 }
670
671 remove_wait_queue(&li.li_waitq, &wq);
672
673 /*
674 * If this is the last reference to the lockspace
675 * then free the struct. If it's an AUTOFREE lockspace
676 * then free the whole thing.
677 */
678 mutex_lock(&user_ls_lock);
679 if (atomic_dec_and_test(&lsinfo->ls_refcnt)) {
680
681 if (lsinfo->ls_lockspace) {
682 if (test_bit(LS_FLAG_AUTOFREE, &lsinfo->ls_flags)) {
683 unregister_lockspace(lsinfo, 1);
684 }
685 } else {
686 kfree(lsinfo->ls_miscinfo.name);
687 kfree(lsinfo);
688 }
689 }
690 mutex_unlock(&user_ls_lock);
691 put_file_info(f);
692
693 /* Restore signals */
694 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
695 recalc_sigpending();
696
697 return 0;
698}
699
700static int do_user_create_lockspace(struct file_info *fi, uint8_t cmd,
701 struct dlm_lspace_params *kparams)
702{
703 int status;
704 struct user_ls *lsinfo;
705
706 if (!capable(CAP_SYS_ADMIN))
707 return -EPERM;
708
709 status = register_lockspace(kparams->name, &lsinfo, kparams->flags);
710
711 /* If it succeeded then return the minor number */
712 if (status == 0)
713 status = lsinfo->ls_miscinfo.minor;
714
715 return status;
716}
717
718static int do_user_remove_lockspace(struct file_info *fi, uint8_t cmd,
719 struct dlm_lspace_params *kparams)
720{
721 int status;
722 int force = 1;
723 struct user_ls *lsinfo;
724
725 if (!capable(CAP_SYS_ADMIN))
726 return -EPERM;
727
728 mutex_lock(&user_ls_lock);
729 lsinfo = __find_lockspace(kparams->minor);
730 if (!lsinfo) {
731 mutex_unlock(&user_ls_lock);
732 return -EINVAL;
733 }
734
735 if (kparams->flags & DLM_USER_LSFLG_FORCEFREE)
736 force = 3;
737
738 status = unregister_lockspace(lsinfo, force);
739 mutex_unlock(&user_ls_lock);
740
741 return status;
742}
743
744/* Read call, might block if no ASTs are waiting.
745 * It will only ever return one message at a time, regardless
746 * of how many are pending.
747 */
748static ssize_t dlm_read(struct file *file, char __user *buffer, size_t count,
749 loff_t *ppos)
750{
751 struct file_info *fi = file->private_data;
752 struct ast_info *ast;
753 void *data;
754 int data_size;
755 int struct_size;
756 int offset;
757 DECLARE_WAITQUEUE(wait, current);
758#ifdef CONFIG_COMPAT
759 struct dlm_lock_result32 result32;
760
761 if (count < sizeof(struct dlm_lock_result32))
762#else
763 if (count < sizeof(struct dlm_lock_result))
764#endif
765 return -EINVAL;
766
767 spin_lock(&fi->fi_ast_lock);
768 if (list_empty(&fi->fi_ast_list)) {
769
770 /* No waiting ASTs.
771 * Return EOF if the lockspace been deleted.
772 */
773 if (test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
774 return 0;
775
776 if (file->f_flags & O_NONBLOCK) {
777 spin_unlock(&fi->fi_ast_lock);
778 return -EAGAIN;
779 }
780
781 add_wait_queue(&fi->fi_wait, &wait);
782
783 repeat:
784 set_current_state(TASK_INTERRUPTIBLE);
785 if (list_empty(&fi->fi_ast_list) &&
786 !signal_pending(current)) {
787
788 spin_unlock(&fi->fi_ast_lock);
789 schedule();
790 spin_lock(&fi->fi_ast_lock);
791 goto repeat;
792 }
793
794 current->state = TASK_RUNNING;
795 remove_wait_queue(&fi->fi_wait, &wait);
796
797 if (signal_pending(current)) {
798 spin_unlock(&fi->fi_ast_lock);
799 return -ERESTARTSYS;
800 }
801 }
802
803 ast = list_entry(fi->fi_ast_list.next, struct ast_info, list);
804 list_del(&ast->list);
805 spin_unlock(&fi->fi_ast_lock);
806
807 /* Work out the size of the returned data */
808#ifdef CONFIG_COMPAT
809 if (test_bit(FI_FLAG_COMPAT, &fi->fi_flags)) {
810 data_size = struct_size = sizeof(struct dlm_lock_result32);
811 data = &result32;
812 }
813 else
814#endif
815 {
816 data_size = struct_size = sizeof(struct dlm_lock_result);
817 data = &ast->result;
818 }
819 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr)
820 data_size += DLM_USER_LVB_LEN;
821
822 offset = struct_size;
823
824 /* Room for the extended data ? */
825 if (count >= data_size) {
826
827 if (ast->lvb_updated && ast->result.lksb.sb_lvbptr) {
828 if (copy_to_user(buffer+offset,
829 ast->result.lksb.sb_lvbptr,
830 DLM_USER_LVB_LEN))
831 return -EFAULT;
832 ast->result.lvb_offset = offset;
833 offset += DLM_USER_LVB_LEN;
834 }
835 }
836
837 ast->result.length = data_size;
838
839#ifdef CONFIG_COMPAT
840 compat_output(&ast->result, &result32);
841#endif
842
843 /* Copy the header now it has all the offsets in it */
844 if (copy_to_user(buffer, data, struct_size))
845 offset = -EFAULT;
846
847 /* If we only returned a header and there's more to come then put it
848 back on the list */
849 if (count < data_size) {
850 spin_lock(&fi->fi_ast_lock);
851 list_add(&ast->list, &fi->fi_ast_list);
852 spin_unlock(&fi->fi_ast_lock);
853 } else
854 kfree(ast);
855 return offset;
856}
857
858static unsigned int dlm_poll(struct file *file, poll_table *wait)
859{
860 struct file_info *fi = file->private_data;
861
862 poll_wait(file, &fi->fi_wait, wait);
863
864 spin_lock(&fi->fi_ast_lock);
865 if (!list_empty(&fi->fi_ast_list)) {
866 spin_unlock(&fi->fi_ast_lock);
867 return POLLIN | POLLRDNORM;
868 }
869
870 spin_unlock(&fi->fi_ast_lock);
871 return 0;
872}
873
874static struct lock_info *allocate_lockinfo(struct file_info *fi, uint8_t cmd,
875 struct dlm_lock_params *kparams)
876{
877 struct lock_info *li;
878
879 if (!try_module_get(THIS_MODULE))
880 return NULL;
881
882 li = kzalloc(sizeof(struct lock_info), GFP_KERNEL);
883 if (li) {
884 li->li_magic = LOCKINFO_MAGIC;
885 li->li_file = fi;
886 li->li_cmd = cmd;
887 li->li_flags = 0;
888 li->li_grmode = -1;
889 li->li_rqmode = -1;
890 li->li_pend_bastparam = NULL;
891 li->li_pend_bastaddr = NULL;
892 li->li_castaddr = NULL;
893 li->li_castparam = NULL;
894 li->li_lksb.sb_lvbptr = NULL;
895 li->li_bastaddr = kparams->bastaddr;
896 li->li_bastparam = kparams->bastparam;
897
898 get_file_info(fi);
899 }
900 return li;
901}
902
903static int do_user_lock(struct file_info *fi, uint8_t cmd,
904 struct dlm_lock_params *kparams)
905{
906 struct lock_info *li;
907 int status;
908
909 /*
910 * Validate things that we need to have correct.
911 */
912 if (!kparams->castaddr)
913 return -EINVAL;
914
915 if (!kparams->lksb)
916 return -EINVAL;
917
918 /* Persistent child locks are not available yet */
919 if ((kparams->flags & DLM_LKF_PERSISTENT) && kparams->parent)
920 return -EINVAL;
921
922 /* For conversions, there should already be a lockinfo struct,
923 unless we are adopting an orphaned persistent lock */
924 if (kparams->flags & DLM_LKF_CONVERT) {
925
926 li = get_lockinfo(fi->fi_ls, kparams->lkid);
927
928 /* If this is a persistent lock we will have to create a
929 lockinfo again */
930 if (!li && (kparams->flags & DLM_LKF_PERSISTENT)) {
931 li = allocate_lockinfo(fi, cmd, kparams);
932 if (!li)
933 return -ENOMEM;
934
935 li->li_lksb.sb_lkid = kparams->lkid;
936 li->li_castaddr = kparams->castaddr;
937 li->li_castparam = kparams->castparam;
938
939 /* OK, this isn't exactly a FIRSTLOCK but it is the
940 first time we've used this lockinfo, and if things
941 fail we want rid of it */
942 init_completion(&li->li_firstcomp);
943 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
944 add_lockinfo(fi->fi_ls, li);
945
946 /* TODO: do a query to get the current state ?? */
947 }
948 if (!li)
949 return -EINVAL;
950
951 if (li->li_magic != LOCKINFO_MAGIC)
952 return -EINVAL;
953
954 /* For conversions don't overwrite the current blocking AST
955 info so that:
956 a) if a blocking AST fires before the conversion is queued
957 it runs the current handler
958 b) if the conversion is cancelled, the original blocking AST
959 declaration is active
960 The pend_ info is made active when the conversion
961 completes.
962 */
963 li->li_pend_bastaddr = kparams->bastaddr;
964 li->li_pend_bastparam = kparams->bastparam;
965 } else {
966 li = allocate_lockinfo(fi, cmd, kparams);
967 if (!li)
968 return -ENOMEM;
969
970 /* Allow us to complete our work before
971 the AST routine runs. In fact we only need (and use) this
972 when the initial lock fails */
973 init_completion(&li->li_firstcomp);
974 set_bit(LI_FLAG_FIRSTLOCK, &li->li_flags);
975 }
976
977 li->li_user_lksb = kparams->lksb;
978 li->li_castaddr = kparams->castaddr;
979 li->li_castparam = kparams->castparam;
980 li->li_lksb.sb_lkid = kparams->lkid;
981 li->li_rqmode = kparams->mode;
982 if (kparams->flags & DLM_LKF_PERSISTENT)
983 set_bit(LI_FLAG_PERSISTENT, &li->li_flags);
984
985 /* Copy in the value block */
986 if (kparams->flags & DLM_LKF_VALBLK) {
987 if (!li->li_lksb.sb_lvbptr) {
988 li->li_lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN,
989 GFP_KERNEL);
990 if (!li->li_lksb.sb_lvbptr) {
991 status = -ENOMEM;
992 goto out_err;
993 }
994 }
995
996 memcpy(li->li_lksb.sb_lvbptr, kparams->lvb, DLM_USER_LVB_LEN);
997 }
998
999 /* Lock it ... */
1000 status = dlm_lock(fi->fi_ls->ls_lockspace,
1001 kparams->mode, &li->li_lksb,
1002 kparams->flags,
1003 kparams->name, kparams->namelen,
1004 kparams->parent,
1005 ast_routine,
1006 li,
1007 (li->li_pend_bastaddr || li->li_bastaddr) ?
1008 bast_routine : NULL);
1009 if (status)
1010 goto out_err;
1011
1012 /* If it succeeded (this far) with a new lock then keep track of
1013 it on the file's lockinfo list */
1014 if (!status && test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags)) {
1015
1016 spin_lock(&fi->fi_li_lock);
1017 list_add(&li->li_ownerqueue, &fi->fi_li_list);
1018 set_bit(LI_FLAG_ONLIST, &li->li_flags);
1019 spin_unlock(&fi->fi_li_lock);
1020 if (add_lockinfo(fi->fi_ls, li))
1021 printk(KERN_WARNING "Add lockinfo failed\n");
1022
1023 complete(&li->li_firstcomp);
1024 }
1025
1026 /* Return the lockid as the user needs it /now/ */
1027 return li->li_lksb.sb_lkid;
1028
1029 out_err:
1030 if (test_bit(LI_FLAG_FIRSTLOCK, &li->li_flags))
1031 release_lockinfo(fi->fi_ls, li);
1032 return status;
1033
1034}
1035
1036static int do_user_unlock(struct file_info *fi, uint8_t cmd,
1037 struct dlm_lock_params *kparams)
1038{
1039 struct lock_info *li;
1040 int status;
1041 int convert_cancel = 0;
1042
1043 li = get_lockinfo(fi->fi_ls, kparams->lkid);
1044 if (!li) {
1045 li = allocate_lockinfo(fi, cmd, kparams);
1046 if (!li)
1047 return -ENOMEM;
1048 spin_lock(&fi->fi_li_lock);
1049 list_add(&li->li_ownerqueue, &fi->fi_li_list);
1050 set_bit(LI_FLAG_ONLIST, &li->li_flags);
1051 spin_unlock(&fi->fi_li_lock);
1052 }
1053
1054 if (li->li_magic != LOCKINFO_MAGIC)
1055 return -EINVAL;
1056
1057 li->li_user_lksb = kparams->lksb;
1058 li->li_castparam = kparams->castparam;
1059 li->li_cmd = cmd;
1060
1061 /* Cancelling a conversion doesn't remove the lock...*/
1062 if (kparams->flags & DLM_LKF_CANCEL && li->li_grmode != -1)
1063 convert_cancel = 1;
1064
1065 /* Wait until dlm_lock() has completed */
1066 if (!test_bit(LI_FLAG_ONLIST, &li->li_flags)) {
1067 wait_for_completion(&li->li_firstcomp);
1068 }
1069
1070 /* dlm_unlock() passes a 0 for castaddr which means don't overwrite
1071 the existing li_castaddr as that's the completion routine for
1072 unlocks. dlm_unlock_wait() specifies a new AST routine to be
1073 executed when the unlock completes. */
1074 if (kparams->castaddr)
1075 li->li_castaddr = kparams->castaddr;
1076
1077 /* Use existing lksb & astparams */
1078 status = dlm_unlock(fi->fi_ls->ls_lockspace,
1079 kparams->lkid,
1080 kparams->flags, &li->li_lksb, li);
1081
1082 if (!status && !convert_cancel) {
1083 spin_lock(&fi->fi_li_lock);
1084 list_del(&li->li_ownerqueue);
1085 clear_bit(LI_FLAG_ONLIST, &li->li_flags);
1086 spin_unlock(&fi->fi_li_lock);
1087 }
1088
1089 return status;
1090}
1091
1092/* Write call, submit a locking request */
1093static ssize_t dlm_write(struct file *file, const char __user *buffer,
1094 size_t count, loff_t *ppos)
1095{
1096 struct file_info *fi = file->private_data;
1097 struct dlm_write_request *kparams;
1098 sigset_t tmpsig;
1099 sigset_t allsigs;
1100 int status;
1101
1102#ifdef CONFIG_COMPAT
1103 if (count < sizeof(struct dlm_write_request32))
1104#else
1105 if (count < sizeof(struct dlm_write_request))
1106#endif
1107 return -EINVAL;
1108
1109 if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN)
1110 return -EINVAL;
1111
1112 /* Has the lockspace been deleted */
1113 if (fi && test_bit(LS_FLAG_DELETED, &fi->fi_ls->ls_flags))
1114 return -ENOENT;
1115
1116 kparams = kmalloc(count, GFP_KERNEL);
1117 if (!kparams)
1118 return -ENOMEM;
1119
1120 status = -EFAULT;
1121 /* Get the command info */
1122 if (copy_from_user(kparams, buffer, count))
1123 goto out_free;
1124
1125 status = -EBADE;
1126 if (check_version(kparams))
1127 goto out_free;
1128
1129#ifdef CONFIG_COMPAT
1130 if (!kparams->is64bit) {
1131 struct dlm_write_request32 *k32params = (struct dlm_write_request32 *)kparams;
1132 kparams = kmalloc(count + (sizeof(struct dlm_write_request) - sizeof(struct dlm_write_request32)), GFP_KERNEL);
1133 if (!kparams)
1134 return -ENOMEM;
1135
1136 if (fi)
1137 set_bit(FI_FLAG_COMPAT, &fi->fi_flags);
1138 compat_input(kparams, k32params);
1139 kfree(k32params);
1140 }
1141#endif
1142
1143 /* Block signals while we are doing this */
1144 sigfillset(&allsigs);
1145 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
1146
1147 status = -EINVAL;
1148 switch (kparams->cmd)
1149 {
1150 case DLM_USER_LOCK:
1151 if (!fi) goto out_sig;
1152 status = do_user_lock(fi, kparams->cmd, &kparams->i.lock);
1153 break;
1154
1155 case DLM_USER_UNLOCK:
1156 if (!fi) goto out_sig;
1157 status = do_user_unlock(fi, kparams->cmd, &kparams->i.lock);
1158 break;
1159
1160 case DLM_USER_CREATE_LOCKSPACE:
1161 if (fi) goto out_sig;
1162 status = do_user_create_lockspace(fi, kparams->cmd,
1163 &kparams->i.lspace);
1164 break;
1165
1166 case DLM_USER_REMOVE_LOCKSPACE:
1167 if (fi) goto out_sig;
1168 status = do_user_remove_lockspace(fi, kparams->cmd,
1169 &kparams->i.lspace);
1170 break;
1171 default:
1172 printk("Unknown command passed to DLM device : %d\n",
1173 kparams->cmd);
1174 break;
1175 }
1176
1177 out_sig:
1178 /* Restore signals */
1179 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
1180 recalc_sigpending();
1181
1182 out_free:
1183 kfree(kparams);
1184 if (status == 0)
1185 return count;
1186 else
1187 return status;
1188}
1189
1190static struct file_operations _dlm_fops = {
1191 .open = dlm_open,
1192 .release = dlm_close,
1193 .read = dlm_read,
1194 .write = dlm_write,
1195 .poll = dlm_poll,
1196 .owner = THIS_MODULE,
1197};
1198
1199static struct file_operations _dlm_ctl_fops = {
1200 .open = dlm_ctl_open,
1201 .release = dlm_ctl_close,
1202 .write = dlm_write,
1203 .owner = THIS_MODULE,
1204};
1205
1206/*
1207 * Create control device
1208 */
1209static int __init dlm_device_init(void)
1210{
1211 int r;
1212
1213 INIT_LIST_HEAD(&user_ls_list);
1214 mutex_init(&user_ls_lock);
1215
1216 ctl_device.name = "dlm-control";
1217 ctl_device.fops = &_dlm_ctl_fops;
1218 ctl_device.minor = MISC_DYNAMIC_MINOR;
1219
1220 r = misc_register(&ctl_device);
1221 if (r) {
1222 printk(KERN_ERR "dlm: misc_register failed for control dev\n");
1223 return r;
1224 }
1225
1226 return 0;
1227}
1228
1229static void __exit dlm_device_exit(void)
1230{
1231 misc_deregister(&ctl_device);
1232}
1233
1234MODULE_DESCRIPTION("Distributed Lock Manager device interface");
1235MODULE_AUTHOR("Red Hat, Inc.");
1236MODULE_LICENSE("GPL");
1237
1238module_init(dlm_device_init);
1239module_exit(dlm_device_exit);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 149106f2b80f..db080de2a7e9 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -35,6 +35,7 @@
 #include <linux/kref.h>
 #include <linux/kernel.h>
 #include <linux/jhash.h>
+#include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <asm/semaphore.h>
 #include <asm/uaccess.h>
@@ -68,6 +69,7 @@ struct dlm_mhandle;
 #define log_error(ls, fmt, args...) \
 	printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
 
+#define DLM_LOG_DEBUG
 #ifdef DLM_LOG_DEBUG
 #define log_debug(ls, fmt, args...) log_error(ls, fmt, ##args)
 #else
@@ -204,6 +206,9 @@ struct dlm_args {
 
 #define DLM_IFL_MSTCPY		0x00010000
 #define DLM_IFL_RESEND		0x00020000
+#define DLM_IFL_DEAD		0x00040000
+#define DLM_IFL_USER		0x00000001
+#define DLM_IFL_ORPHAN		0x00000002
 
 struct dlm_lkb {
 	struct dlm_rsb		*lkb_resource;	/* the rsb */
@@ -231,6 +236,7 @@ struct dlm_lkb {
 	struct list_head	lkb_rsb_lookup;	/* waiting for rsb lookup */
 	struct list_head	lkb_wait_reply;	/* waiting for remote reply */
 	struct list_head	lkb_astqueue;	/* need ast to be sent */
+	struct list_head	lkb_ownqueue;	/* list of locks for a process */
 
 	char			*lkb_lvbptr;
 	struct dlm_lksb		*lkb_lksb;	/* caller's status block */
@@ -409,6 +415,7 @@ struct rcom_lock {
 
 struct dlm_ls {
 	struct list_head	ls_list;	/* list of lockspaces */
+	dlm_lockspace_t		*ls_local_handle;
 	uint32_t		ls_global_id;	/* global unique lockspace ID */
 	uint32_t		ls_exflags;
 	int			ls_lvblen;
@@ -444,6 +451,8 @@ struct dlm_ls {
 	wait_queue_head_t	ls_uevent_wait;	/* user part of join/leave */
 	int			ls_uevent_result;
 
+	struct miscdevice	ls_device;
+
 	/* recovery related */
 
 	struct timer_list	ls_timer;
@@ -461,6 +470,7 @@ struct dlm_ls {
 	spinlock_t		ls_recover_list_lock;
 	int			ls_recover_list_count;
 	wait_queue_head_t	ls_wait_general;
+	struct mutex		ls_clear_proc_locks;
 
 	struct list_head	ls_root_list;	/* root resources */
 	struct rw_semaphore	ls_root_sem;	/* protect root_list */
@@ -475,6 +485,40 @@ struct dlm_ls {
 #define LSFL_RCOM_READY		3
 #define LSFL_UEVENT_WAIT	4
 
+/* much of this is just saving user space pointers associated with the
+   lock that we pass back to the user lib with an ast */
+
+struct dlm_user_args {
+	struct dlm_user_proc	*proc; /* each process that opens the lockspace
+					  device has private data
+					  (dlm_user_proc) on the struct file,
+					  the process's locks point back to it*/
+	struct dlm_lksb		lksb;
+	int			old_mode;
+	int			update_user_lvb;
+	struct dlm_lksb __user	*user_lksb;
+	void __user		*castparam;
+	void __user		*castaddr;
+	void __user		*bastparam;
+	void __user		*bastaddr;
+};
+
+#define DLM_PROC_FLAGS_CLOSING 1
+#define DLM_PROC_FLAGS_COMPAT  2
+
+/* locks list is kept so we can remove all a process's locks when it
+   exits (or orphan those that are persistent) */
+
+struct dlm_user_proc {
+	dlm_lockspace_t		*lockspace;
+	unsigned long		flags; /* DLM_PROC_FLAGS */
+	struct list_head	asts;
+	spinlock_t		asts_spin;
+	struct list_head	locks;
+	spinlock_t		locks_spin;
+	wait_queue_head_t	wait;
+};
+
 static inline int dlm_locking_stopped(struct dlm_ls *ls)
 {
 	return !test_bit(LSFL_RUNNING, &ls->ls_flags);
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 5f6963904107..4e222f873b6c 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -55,8 +55,9 @@
    R: do_xxxx()
    L: receive_xxxx_reply() <- R: send_xxxx_reply()
 */
-
+#include <linux/types.h>
 #include "dlm_internal.h"
+#include <linux/dlm_device.h>
 #include "memory.h"
 #include "lowcomms.h"
 #include "requestqueue.h"
@@ -69,6 +70,7 @@
69#include "rcom.h" 70#include "rcom.h"
70#include "recover.h" 71#include "recover.h"
71#include "lvb_table.h" 72#include "lvb_table.h"
73#include "user.h"
72#include "config.h" 74#include "config.h"
73 75
74static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb); 76static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
@@ -84,6 +86,8 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
 				    struct dlm_message *ms);
 static int receive_extralen(struct dlm_message *ms);
 
+#define FAKE_USER_AST (void*)0xff00ff00
+
 /*
  * Lock compatibilty matrix - thanks Steve
  * UN = Unlocked state. Not really a state, used as a flag
@@ -152,7 +156,7 @@ static const int __quecvt_compat_matrix[8][8] = {
 	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
 };
 
-static void dlm_print_lkb(struct dlm_lkb *lkb)
+void dlm_print_lkb(struct dlm_lkb *lkb)
 {
 	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x\n"
 	       " status %d rqmode %d grmode %d wait_type %d ast_type %d\n",
@@ -291,7 +295,7 @@ static int search_rsb_list(struct list_head *head, char *name, int len,
 		if (len == r->res_length && !memcmp(name, r->res_name, len))
 			goto found;
 	}
-	return -ENOENT;
+	return -EBADR;
 
  found:
 	if (r->res_nodeid && (flags & R_MASTER))
@@ -376,7 +380,7 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
 	if (!error)
 		goto out;
 
-	if (error == -ENOENT && !(flags & R_CREATE))
+	if (error == -EBADR && !(flags & R_CREATE))
 		goto out;
 
 	/* the rsb was found but wasn't a master copy */
@@ -920,7 +924,7 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
920 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) 924 if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
921 return; 925 return;
922 926
923 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1]; 927 b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
924 if (b == 1) { 928 if (b == 1) {
925 int len = receive_extralen(ms); 929 int len = receive_extralen(ms);
926 memcpy(lkb->lkb_lvbptr, ms->m_extra, len); 930 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
@@ -963,6 +967,8 @@ static void revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
 	lkb->lkb_rqmode = DLM_LOCK_IV;
 
 	switch (lkb->lkb_status) {
+	case DLM_LKSTS_GRANTED:
+		break;
 	case DLM_LKSTS_CONVERT:
 		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
 		break;
@@ -1727,6 +1733,11 @@ static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
 	return -DLM_EUNLOCK;
 }
 
+/* FIXME: if revert_lock() finds that the lkb is granted, we should
+   skip the queue_cast(ECANCEL).  It indicates that the request/convert
+   completed (and queued a normal ast) just before the cancel; we don't
+   want to clobber the sb_result for the normal ast with ECANCEL. */
+
 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
 {
 	revert_lock(r, lkb);
@@ -2739,7 +2750,7 @@ static void receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
 		confirm_master(r, error);
 		break;
 
-	case -ENOENT:
+	case -EBADR:
 	case -ENOTBLK:
 		/* find_rsb failed to find rsb or rsb wasn't master */
 		r->res_nodeid = -1;
@@ -3545,3 +3556,284 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
3545 return 0; 3556 return 0;
3546} 3557}
3547 3558
3559int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
3560 int mode, uint32_t flags, void *name, unsigned int namelen,
3561 uint32_t parent_lkid)
3562{
3563 struct dlm_lkb *lkb;
3564 struct dlm_args args;
3565 int error;
3566
3567 lock_recovery(ls);
3568
3569 error = create_lkb(ls, &lkb);
3570 if (error) {
3571 kfree(ua);
3572 goto out;
3573 }
3574
3575 if (flags & DLM_LKF_VALBLK) {
3576 ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3577 if (!ua->lksb.sb_lvbptr) {
3578 kfree(ua);
3579 __put_lkb(ls, lkb);
3580 error = -ENOMEM;
3581 goto out;
3582 }
3583 }
3584
3585 /* After ua is attached to lkb it will be freed by free_lkb().
3586 When DLM_IFL_USER is set, the dlm knows that this is a userspace
3587 lock and that lkb_astparam is the dlm_user_args structure. */
3588
3589 error = set_lock_args(mode, &ua->lksb, flags, namelen, parent_lkid,
3590 FAKE_USER_AST, ua, FAKE_USER_AST, &args);
3591 lkb->lkb_flags |= DLM_IFL_USER;
3592 ua->old_mode = DLM_LOCK_IV;
3593
3594 if (error) {
3595 __put_lkb(ls, lkb);
3596 goto out;
3597 }
3598
3599 error = request_lock(ls, lkb, name, namelen, &args);
3600
3601 switch (error) {
3602 case 0:
3603 break;
3604 case -EINPROGRESS:
3605 error = 0;
3606 break;
3607 case -EAGAIN:
3608 error = 0;
3609 /* fall through */
3610 default:
3611 __put_lkb(ls, lkb);
3612 goto out;
3613 }
3614
3615 /* add this new lkb to the per-process list of locks */
3616 spin_lock(&ua->proc->locks_spin);
3617 kref_get(&lkb->lkb_ref);
3618 list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
3619 spin_unlock(&ua->proc->locks_spin);
3620 out:
3621 unlock_recovery(ls);
3622 return error;
3623}
3624
3625int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3626 int mode, uint32_t flags, uint32_t lkid, char *lvb_in)
3627{
3628 struct dlm_lkb *lkb;
3629 struct dlm_args args;
3630 struct dlm_user_args *ua;
3631 int error;
3632
3633 lock_recovery(ls);
3634
3635 error = find_lkb(ls, lkid, &lkb);
3636 if (error)
3637 goto out;
3638
3639 /* user can change the params on its lock when it converts it, or
3640 add an lvb that didn't exist before */
3641
3642 ua = (struct dlm_user_args *)lkb->lkb_astparam;
3643
3644 if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
3645 ua->lksb.sb_lvbptr = kmalloc(DLM_USER_LVB_LEN, GFP_KERNEL);
3646 if (!ua->lksb.sb_lvbptr) {
3647 error = -ENOMEM;
3648 goto out_put;
3649 }
3650 }
3651 if (lvb_in && ua->lksb.sb_lvbptr)
3652 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
3653
3654 ua->castparam = ua_tmp->castparam;
3655 ua->castaddr = ua_tmp->castaddr;
3656 ua->bastparam = ua_tmp->bastparam;
3657 ua->bastaddr = ua_tmp->bastaddr;
3658 ua->old_mode = lkb->lkb_grmode;
3659
3660 error = set_lock_args(mode, &ua->lksb, flags, 0, 0, FAKE_USER_AST, ua,
3661 FAKE_USER_AST, &args);
3662 if (error)
3663 goto out_put;
3664
3665 error = convert_lock(ls, lkb, &args);
3666
3667 if (error == -EINPROGRESS || error == -EAGAIN)
3668 error = 0;
3669 out_put:
3670 dlm_put_lkb(lkb);
3671 out:
3672 unlock_recovery(ls);
3673 kfree(ua_tmp);
3674 return error;
3675}
3676
3677int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3678 uint32_t flags, uint32_t lkid, char *lvb_in)
3679{
3680 struct dlm_lkb *lkb;
3681 struct dlm_args args;
3682 struct dlm_user_args *ua;
3683 int error;
3684
3685 lock_recovery(ls);
3686
3687 error = find_lkb(ls, lkid, &lkb);
3688 if (error)
3689 goto out;
3690
3691 ua = (struct dlm_user_args *)lkb->lkb_astparam;
3692
3693 if (lvb_in && ua->lksb.sb_lvbptr)
3694 memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
3695 ua->castparam = ua_tmp->castparam;
3696
3697 error = set_unlock_args(flags, ua, &args);
3698 if (error)
3699 goto out_put;
3700
3701 error = unlock_lock(ls, lkb, &args);
3702
3703 if (error == -DLM_EUNLOCK)
3704 error = 0;
3705 if (error)
3706 goto out_put;
3707
3708 spin_lock(&ua->proc->locks_spin);
3709 list_del(&lkb->lkb_ownqueue);
3710 spin_unlock(&ua->proc->locks_spin);
3711
3712 /* this removes the reference for the proc->locks list added by
3713 dlm_user_request */
3714 unhold_lkb(lkb);
3715 out_put:
3716 dlm_put_lkb(lkb);
3717 out:
3718 unlock_recovery(ls);
3719 return error;
3720}
3721
3722int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
3723 uint32_t flags, uint32_t lkid)
3724{
3725 struct dlm_lkb *lkb;
3726 struct dlm_args args;
3727 struct dlm_user_args *ua;
3728 int error;
3729
3730 lock_recovery(ls);
3731
3732 error = find_lkb(ls, lkid, &lkb);
3733 if (error)
3734 goto out;
3735
3736 ua = (struct dlm_user_args *)lkb->lkb_astparam;
3737 ua->castparam = ua_tmp->castparam;
3738
3739 error = set_unlock_args(flags, ua, &args);
3740 if (error)
3741 goto out_put;
3742
3743 error = cancel_lock(ls, lkb, &args);
3744
3745 if (error == -DLM_ECANCEL)
3746 error = 0;
3747 if (error)
3748 goto out_put;
3749
3750 /* this lkb was removed from the WAITING queue */
3751 if (lkb->lkb_grmode == DLM_LOCK_IV) {
3752 spin_lock(&ua->proc->locks_spin);
3753 list_del(&lkb->lkb_ownqueue);
3754 spin_unlock(&ua->proc->locks_spin);
3755 unhold_lkb(lkb);
3756 }
3757 out_put:
3758 dlm_put_lkb(lkb);
3759 out:
3760 unlock_recovery(ls);
3761 return error;
3762}
3763
3764static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
3765{
3766 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
3767
3768 if (ua->lksb.sb_lvbptr)
3769 kfree(ua->lksb.sb_lvbptr);
3770 kfree(ua);
3771 lkb->lkb_astparam = (long)NULL;
3772
3773 /* TODO: propogate to master if needed */
3774 return 0;
3775}
3776
3777/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
3778 Regardless of what rsb queue the lock is on, it's removed and freed. */
3779
3780static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
3781{
3782 struct dlm_user_args *ua = (struct dlm_user_args *)lkb->lkb_astparam;
3783 struct dlm_args args;
3784 int error;
3785
3786 /* FIXME: we need to handle the case where the lkb is in limbo
3787 while the rsb is being looked up, currently we assert in
3788 _unlock_lock/is_remote because rsb nodeid is -1. */
3789
3790 set_unlock_args(DLM_LKF_FORCEUNLOCK, ua, &args);
3791
3792 error = unlock_lock(ls, lkb, &args);
3793 if (error == -DLM_EUNLOCK)
3794 error = 0;
3795 return error;
3796}
3797
3798/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which
3799 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
3800 which we clear here. */
3801
3802/* proc CLOSING flag is set so no more device_reads should look at proc->asts
3803 list, and no more device_writes should add lkb's to proc->locks list; so we
3804 shouldn't need to take asts_spin or locks_spin here. this assumes that
3805 device reads/writes/closes are serialized -- FIXME: we may need to serialize
3806 them ourself. */
3807
3808void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
3809{
3810 struct dlm_lkb *lkb, *safe;
3811
3812 lock_recovery(ls);
3813 mutex_lock(&ls->ls_clear_proc_locks);
3814
3815 list_for_each_entry_safe(lkb, safe, &proc->locks, lkb_ownqueue) {
3816 if (lkb->lkb_ast_type) {
3817 list_del(&lkb->lkb_astqueue);
3818 unhold_lkb(lkb);
3819 }
3820
3821 list_del(&lkb->lkb_ownqueue);
3822
3823 if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
3824 lkb->lkb_flags |= DLM_IFL_ORPHAN;
3825 orphan_proc_lock(ls, lkb);
3826 } else {
3827 lkb->lkb_flags |= DLM_IFL_DEAD;
3828 unlock_proc_lock(ls, lkb);
3829 }
3830
3831 /* this removes the reference for the proc->locks list
3832 added by dlm_user_request, it may result in the lkb
3833 being freed */
3834
3835 dlm_put_lkb(lkb);
3836 }
3837 mutex_unlock(&ls->ls_clear_proc_locks);
3838 unlock_recovery(ls);
3839}
diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h
index 56cdc073b1f6..8d2660f0ab10 100644
--- a/fs/dlm/lock.h
+++ b/fs/dlm/lock.h
@@ -14,6 +14,7 @@
 #define __LOCK_DOT_H__
 
 void dlm_print_rsb(struct dlm_rsb *r);
+void dlm_print_lkb(struct dlm_lkb *lkb);
 int dlm_receive_message(struct dlm_header *hd, int nodeid, int recovery);
 int dlm_modes_compat(int mode1, int mode2);
 int dlm_find_rsb(struct dlm_ls *ls, char *name, int namelen,
@@ -31,6 +32,16 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls);
 int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
 int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc);
 
+int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode,
+	uint32_t flags, void *name, unsigned int namelen, uint32_t parent_lkid);
+int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+	int mode, uint32_t flags, uint32_t lkid, char *lvb_in);
+int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+	uint32_t flags, uint32_t lkid, char *lvb_in);
+int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
+	uint32_t flags, uint32_t lkid);
+void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc);
+
 static inline int is_master(struct dlm_rsb *r)
 {
 	return !r->res_nodeid;
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 9ed4b70348fb..3f6cb422ac4b 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -270,12 +270,36 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
270 return ls; 270 return ls;
271} 271}
272 272
273struct dlm_ls *dlm_find_lockspace_local(void *id) 273struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
274{ 274{
275 struct dlm_ls *ls = id; 275 struct dlm_ls *ls;
276 276
277 spin_lock(&lslist_lock); 277 spin_lock(&lslist_lock);
278 ls->ls_count++; 278 list_for_each_entry(ls, &lslist, ls_list) {
279 if (ls->ls_local_handle == lockspace) {
280 ls->ls_count++;
281 goto out;
282 }
283 }
284 ls = NULL;
285 out:
286 spin_unlock(&lslist_lock);
287 return ls;
288}
289
290struct dlm_ls *dlm_find_lockspace_device(int minor)
291{
292 struct dlm_ls *ls;
293
294 spin_lock(&lslist_lock);
295 list_for_each_entry(ls, &lslist, ls_list) {
296 if (ls->ls_device.minor == minor) {
297 ls->ls_count++;
298 goto out;
299 }
300 }
301 ls = NULL;
302 out:
279 spin_unlock(&lslist_lock); 303 spin_unlock(&lslist_lock);
280 return ls; 304 return ls;
281} 305}
@@ -436,6 +460,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
436 init_rwsem(&ls->ls_in_recovery); 460 init_rwsem(&ls->ls_in_recovery);
437 INIT_LIST_HEAD(&ls->ls_requestqueue); 461 INIT_LIST_HEAD(&ls->ls_requestqueue);
438 mutex_init(&ls->ls_requestqueue_mutex); 462 mutex_init(&ls->ls_requestqueue_mutex);
463 mutex_init(&ls->ls_clear_proc_locks);
439 464
440 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL); 465 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
441 if (!ls->ls_recover_buf) 466 if (!ls->ls_recover_buf)
@@ -444,6 +469,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
444 INIT_LIST_HEAD(&ls->ls_recover_list); 469 INIT_LIST_HEAD(&ls->ls_recover_list);
445 spin_lock_init(&ls->ls_recover_list_lock); 470 spin_lock_init(&ls->ls_recover_list_lock);
446 ls->ls_recover_list_count = 0; 471 ls->ls_recover_list_count = 0;
472 ls->ls_local_handle = ls;
447 init_waitqueue_head(&ls->ls_wait_general); 473 init_waitqueue_head(&ls->ls_wait_general);
448 INIT_LIST_HEAD(&ls->ls_root_list); 474 INIT_LIST_HEAD(&ls->ls_root_list);
449 init_rwsem(&ls->ls_root_sem); 475 init_rwsem(&ls->ls_root_sem);
diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h
index 17bd3ba863a9..891eabbdd021 100644
--- a/fs/dlm/lockspace.h
+++ b/fs/dlm/lockspace.h
@@ -18,6 +18,7 @@ int dlm_lockspace_init(void);
18void dlm_lockspace_exit(void); 18void dlm_lockspace_exit(void);
19struct dlm_ls *dlm_find_lockspace_global(uint32_t id); 19struct dlm_ls *dlm_find_lockspace_global(uint32_t id);
20struct dlm_ls *dlm_find_lockspace_local(void *id); 20struct dlm_ls *dlm_find_lockspace_local(void *id);
21struct dlm_ls *dlm_find_lockspace_device(int minor);
21void dlm_put_lockspace(struct dlm_ls *ls); 22void dlm_put_lockspace(struct dlm_ls *ls);
22 23
23#endif /* __LOCKSPACE_DOT_H__ */ 24#endif /* __LOCKSPACE_DOT_H__ */
diff --git a/fs/dlm/main.c b/fs/dlm/main.c
index 81bf4cb22033..a8da8dc36b2e 100644
--- a/fs/dlm/main.c
+++ b/fs/dlm/main.c
@@ -14,6 +14,7 @@
14#include "dlm_internal.h" 14#include "dlm_internal.h"
15#include "lockspace.h" 15#include "lockspace.h"
16#include "lock.h" 16#include "lock.h"
17#include "user.h"
17#include "memory.h" 18#include "memory.h"
18#include "lowcomms.h" 19#include "lowcomms.h"
19#include "config.h" 20#include "config.h"
@@ -50,10 +51,16 @@ static int __init init_dlm(void)
50 if (error) 51 if (error)
51 goto out_debug; 52 goto out_debug;
52 53
54 error = dlm_user_init();
55 if (error)
56 goto out_lowcomms;
57
53 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); 58 printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
54 59
55 return 0; 60 return 0;
56 61
62 out_lowcomms:
63 dlm_lowcomms_exit();
57 out_debug: 64 out_debug:
58 dlm_unregister_debugfs(); 65 dlm_unregister_debugfs();
59 out_config: 66 out_config:
@@ -68,6 +75,7 @@ static int __init init_dlm(void)
68 75
69static void __exit exit_dlm(void) 76static void __exit exit_dlm(void)
70{ 77{
78 dlm_user_exit();
71 dlm_lowcomms_exit(); 79 dlm_lowcomms_exit();
72 dlm_config_exit(); 80 dlm_config_exit();
73 dlm_memory_exit(); 81 dlm_memory_exit();
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index f7cf4589fae8..48dfc27861f4 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -84,6 +84,15 @@ struct dlm_lkb *allocate_lkb(struct dlm_ls *ls)
84 84
85void free_lkb(struct dlm_lkb *lkb) 85void free_lkb(struct dlm_lkb *lkb)
86{ 86{
87 if (lkb->lkb_flags & DLM_IFL_USER) {
88 struct dlm_user_args *ua;
89 ua = (struct dlm_user_args *)lkb->lkb_astparam;
90 if (ua) {
91 if (ua->lksb.sb_lvbptr)
92 kfree(ua->lksb.sb_lvbptr);
93 kfree(ua);
94 }
95 }
87 kmem_cache_free(lkb_cache, lkb); 96 kmem_cache_free(lkb_cache, lkb);
88} 97}
89 98
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
new file mode 100644
index 000000000000..1f05960a916f
--- /dev/null
+++ b/fs/dlm/user.c
@@ -0,0 +1,769 @@
1/*
2 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#include <linux/miscdevice.h>
10#include <linux/init.h>
11#include <linux/wait.h>
12#include <linux/module.h>
13#include <linux/file.h>
14#include <linux/fs.h>
15#include <linux/poll.h>
16#include <linux/signal.h>
17#include <linux/spinlock.h>
18#include <linux/dlm.h>
19#include <linux/dlm_device.h>
20
21#include "dlm_internal.h"
22#include "lockspace.h"
23#include "lock.h"
24#include "lvb_table.h"
25
26static const char *name_prefix="dlm";
27static struct miscdevice ctl_device;
28static struct file_operations device_fops;
29
30#ifdef CONFIG_COMPAT
31
32struct dlm_lock_params32 {
33 __u8 mode;
34 __u8 namelen;
35 __u16 flags;
36 __u32 lkid;
37 __u32 parent;
38
39 __u32 castparam;
40 __u32 castaddr;
41 __u32 bastparam;
42 __u32 bastaddr;
43 __u32 lksb;
44
45 char lvb[DLM_USER_LVB_LEN];
46 char name[0];
47};
48
49struct dlm_write_request32 {
50 __u32 version[3];
51 __u8 cmd;
52 __u8 is64bit;
53 __u8 unused[2];
54
55 union {
56 struct dlm_lock_params32 lock;
57 struct dlm_lspace_params lspace;
58 } i;
59};
60
61struct dlm_lksb32 {
62 __u32 sb_status;
63 __u32 sb_lkid;
64 __u8 sb_flags;
65 __u32 sb_lvbptr;
66};
67
68struct dlm_lock_result32 {
69 __u32 length;
70 __u32 user_astaddr;
71 __u32 user_astparam;
72 __u32 user_lksb;
73 struct dlm_lksb32 lksb;
74 __u8 bast_mode;
75 __u8 unused[3];
76 /* Offsets may be zero if no data is present */
77 __u32 lvb_offset;
78};
79
80static void compat_input(struct dlm_write_request *kb,
81 struct dlm_write_request32 *kb32)
82{
83 kb->version[0] = kb32->version[0];
84 kb->version[1] = kb32->version[1];
85 kb->version[2] = kb32->version[2];
86
87 kb->cmd = kb32->cmd;
88 kb->is64bit = kb32->is64bit;
89 if (kb->cmd == DLM_USER_CREATE_LOCKSPACE ||
90 kb->cmd == DLM_USER_REMOVE_LOCKSPACE) {
91 kb->i.lspace.flags = kb32->i.lspace.flags;
92 kb->i.lspace.minor = kb32->i.lspace.minor;
93 strcpy(kb->i.lspace.name, kb32->i.lspace.name);
94 } else {
95 kb->i.lock.mode = kb32->i.lock.mode;
96 kb->i.lock.namelen = kb32->i.lock.namelen;
97 kb->i.lock.flags = kb32->i.lock.flags;
98 kb->i.lock.lkid = kb32->i.lock.lkid;
99 kb->i.lock.parent = kb32->i.lock.parent;
100 kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
101 kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
102 kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
103 kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
104 kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
105 memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
106 memcpy(kb->i.lock.name, kb32->i.lock.name, kb->i.lock.namelen);
107 }
108}
109
110static void compat_output(struct dlm_lock_result *res,
111 struct dlm_lock_result32 *res32)
112{
113 res32->length = res->length - (sizeof(struct dlm_lock_result) -
114 sizeof(struct dlm_lock_result32));
115 res32->user_astaddr = (__u32)(long)res->user_astaddr;
116 res32->user_astparam = (__u32)(long)res->user_astparam;
117 res32->user_lksb = (__u32)(long)res->user_lksb;
118 res32->bast_mode = res->bast_mode;
119
120 res32->lvb_offset = res->lvb_offset;
121 res32->length = res->length;
122
123 res32->lksb.sb_status = res->lksb.sb_status;
124 res32->lksb.sb_flags = res->lksb.sb_flags;
125 res32->lksb.sb_lkid = res->lksb.sb_lkid;
126 res32->lksb.sb_lvbptr = (__u32)(long)res->lksb.sb_lvbptr;
127}
128#endif
129
130
131void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
132{
133 struct dlm_ls *ls;
134 struct dlm_user_args *ua;
135 struct dlm_user_proc *proc;
136
137 /* dlm_clear_proc_locks() sets ORPHAN/DEAD flag on each
138 lkb before dealing with it. We need to check this
139 flag before taking ls_clear_proc_locks mutex because if
140 it's set, dlm_clear_proc_locks() holds the mutex. */
141
142 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) {
143 /* log_print("user_add_ast skip1 %x", lkb->lkb_flags); */
144 return;
145 }
146
147 ls = lkb->lkb_resource->res_ls;
148 mutex_lock(&ls->ls_clear_proc_locks);
149
150 /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast
151 can't be delivered. For ORPHANs, dlm_clear_proc_locks() freed
152 lkb->ua so we can't try to use it. */
153
154 if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) {
155 /* log_print("user_add_ast skip2 %x", lkb->lkb_flags); */
156 goto out;
157 }
158
159 DLM_ASSERT(lkb->lkb_astparam, dlm_print_lkb(lkb););
160 ua = (struct dlm_user_args *)lkb->lkb_astparam;
161 proc = ua->proc;
162
163 if (type == AST_BAST && ua->bastaddr == NULL)
164 goto out;
165
166 spin_lock(&proc->asts_spin);
167 if (!(lkb->lkb_ast_type & (AST_COMP | AST_BAST))) {
168 kref_get(&lkb->lkb_ref);
169 list_add_tail(&lkb->lkb_astqueue, &proc->asts);
170 lkb->lkb_ast_type |= type;
171 wake_up_interruptible(&proc->wait);
172 }
173
174 /* We want to copy the lvb to userspace when the completion
175 ast is read if the status is 0, the lock has an lvb and
176 lvb_ops says we should. We could probably have set_lvb_lock()
177 set update_user_lvb instead and not need old_mode */
178
179 if ((lkb->lkb_ast_type & AST_COMP) &&
180 (lkb->lkb_lksb->sb_status == 0) &&
181 lkb->lkb_lksb->sb_lvbptr &&
182 dlm_lvb_operations[ua->old_mode + 1][lkb->lkb_grmode + 1])
183 ua->update_user_lvb = 1;
184 else
185 ua->update_user_lvb = 0;
186
187 spin_unlock(&proc->asts_spin);
188 out:
189 mutex_unlock(&ls->ls_clear_proc_locks);
190}
191
192static int device_user_lock(struct dlm_user_proc *proc,
193 struct dlm_lock_params *params)
194{
195 struct dlm_ls *ls;
196 struct dlm_user_args *ua;
197 int error = -ENOMEM;
198
199 ls = dlm_find_lockspace_local(proc->lockspace);
200 if (!ls)
201 return -ENOENT;
202
203 if (!params->castaddr || !params->lksb) {
204 error = -EINVAL;
205 goto out;
206 }
207
208 ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
209 if (!ua)
210 goto out;
211 ua->proc = proc;
212 ua->user_lksb = params->lksb;
213 ua->castparam = params->castparam;
214 ua->castaddr = params->castaddr;
215 ua->bastparam = params->bastparam;
216 ua->bastaddr = params->bastaddr;
217
218 if (params->flags & DLM_LKF_CONVERT)
219 error = dlm_user_convert(ls, ua,
220 params->mode, params->flags,
221 params->lkid, params->lvb);
222 else {
223 error = dlm_user_request(ls, ua,
224 params->mode, params->flags,
225 params->name, params->namelen,
226 params->parent);
227 if (!error)
228 error = ua->lksb.sb_lkid;
229 }
230 out:
231 dlm_put_lockspace(ls);
232 return error;
233}
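
For illustration only (not part of the patch), a minimal userspace sketch of driving this request path. The command constant, struct dlm_write_request and its i.lock fields come from linux/dlm_device.h; the callback signature and the choice of mode are assumptions about how a caller such as libdlm would use castaddr/castparam. ls_fd is an open fd on the per-lockspace misc device ("dlm_<name>") registered by device_create_lockspace() further down. Note that device_user_lock() above rejects a NULL castaddr or lksb with -EINVAL, and that for a new request the lkid comes back as the write() result.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

/* completion callback; delivered later through a read() on ls_fd */
static void compl_ast(void *astparam)
{
}

static int request_lock(int ls_fd, const char *name, struct dlm_lksb *lksb)
{
        size_t len = sizeof(struct dlm_write_request) + strlen(name);
        struct dlm_write_request *req = calloc(1, len);
        int lkid;

        if (!req)
                return -1;

        req->version[0] = DLM_DEVICE_VERSION_MAJOR;
        req->version[1] = DLM_DEVICE_VERSION_MINOR;
        req->version[2] = DLM_DEVICE_VERSION_PATCH;
        req->is64bit = (sizeof(long) == 8);
        req->cmd = DLM_USER_LOCK;

        req->i.lock.mode = DLM_LOCK_EX;
        req->i.lock.namelen = strlen(name);
        memcpy(req->i.lock.name, name, req->i.lock.namelen);
        req->i.lock.castaddr = (void *)compl_ast;  /* must not be NULL */
        req->i.lock.castparam = lksb;
        req->i.lock.lksb = lksb;                   /* must not be NULL */

        /* dispatched by device_write() -> device_user_lock(); for a new
           request the write() return value is the lkid on success */
        lkid = write(ls_fd, req, len);
        free(req);
        return lkid;
}

A conversion is the same write with DLM_LKF_CONVERT set in i.lock.flags and the existing lkid filled in, which device_user_lock() above routes to dlm_user_convert().
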
234
235static int device_user_unlock(struct dlm_user_proc *proc,
236 struct dlm_lock_params *params)
237{
238 struct dlm_ls *ls;
239 struct dlm_user_args *ua;
240 int error = -ENOMEM;
241
242 ls = dlm_find_lockspace_local(proc->lockspace);
243 if (!ls)
244 return -ENOENT;
245
246 ua = kzalloc(sizeof(struct dlm_user_args), GFP_KERNEL);
247 if (!ua)
248 goto out;
249 ua->proc = proc;
250 ua->user_lksb = params->lksb;
251 ua->castparam = params->castparam;
252 ua->castaddr = params->castaddr;
253
254 if (params->flags & DLM_LKF_CANCEL)
255 error = dlm_user_cancel(ls, ua, params->flags, params->lkid);
256 else
257 error = dlm_user_unlock(ls, ua, params->flags, params->lkid,
258 params->lvb);
259 out:
260 dlm_put_lockspace(ls);
261 return error;
262}
263
264static int device_create_lockspace(struct dlm_lspace_params *params)
265{
266 dlm_lockspace_t *lockspace;
267 struct dlm_ls *ls;
268 int error, len;
269
270 if (!capable(CAP_SYS_ADMIN))
271 return -EPERM;
272
273 error = dlm_new_lockspace(params->name, strlen(params->name),
274 &lockspace, 0, DLM_USER_LVB_LEN);
275 if (error)
276 return error;
277
278 ls = dlm_find_lockspace_local(lockspace);
279 if (!ls)
280 return -ENOENT;
281
282 error = -ENOMEM;
283 len = strlen(params->name) + strlen(name_prefix) + 2;
284 ls->ls_device.name = kzalloc(len, GFP_KERNEL);
285 if (!ls->ls_device.name)
286 goto fail;
287 snprintf((char *)ls->ls_device.name, len, "%s_%s", name_prefix,
288 params->name);
289 ls->ls_device.fops = &device_fops;
290 ls->ls_device.minor = MISC_DYNAMIC_MINOR;
291
292 error = misc_register(&ls->ls_device);
293 if (error) {
294 kfree(ls->ls_device.name);
295 goto fail;
296 }
297
298 error = ls->ls_device.minor;
299 dlm_put_lockspace(ls);
300 return error;
301
302 fail:
303 dlm_put_lockspace(ls);
304 dlm_release_lockspace(lockspace, 0);
305 return error;
306}
307
308static int device_remove_lockspace(struct dlm_lspace_params *params)
309{
310 dlm_lockspace_t *lockspace;
311 struct dlm_ls *ls;
312 int error;
313
314 if (!capable(CAP_SYS_ADMIN))
315 return -EPERM;
316
317 ls = dlm_find_lockspace_device(params->minor);
318 if (!ls)
319 return -ENOENT;
320
321 error = misc_deregister(&ls->ls_device);
322 if (error) {
323 dlm_put_lockspace(ls);
324 goto out;
325 }
326 kfree(ls->ls_device.name);
327
328 lockspace = ls->ls_local_handle;
329
330 /* dlm_release_lockspace waits for references to go to zero,
331 so all processes will need to close their device for the ls
332 before the release will proceed */
333
334 dlm_put_lockspace(ls);
335 error = dlm_release_lockspace(lockspace, 0);
336out:
337 return error;
338}
339
340/* Check the user's version matches ours */
341static int check_version(struct dlm_write_request *req)
342{
343 if (req->version[0] != DLM_DEVICE_VERSION_MAJOR ||
344 (req->version[0] == DLM_DEVICE_VERSION_MAJOR &&
345 req->version[1] > DLM_DEVICE_VERSION_MINOR)) {
346
347 printk(KERN_DEBUG "dlm: process %s (%d) version mismatch "
348 "user (%d.%d.%d) kernel (%d.%d.%d)\n",
349 current->comm,
350 current->pid,
351 req->version[0],
352 req->version[1],
353 req->version[2],
354 DLM_DEVICE_VERSION_MAJOR,
355 DLM_DEVICE_VERSION_MINOR,
356 DLM_DEVICE_VERSION_PATCH);
357 return -EINVAL;
358 }
359 return 0;
360}
361
362/*
363 * device_write
364 *
365 * device_user_lock
366 * dlm_user_request -> request_lock
367 * dlm_user_convert -> convert_lock
368 *
369 * device_user_unlock
370 * dlm_user_unlock -> unlock_lock
371 * dlm_user_cancel -> cancel_lock
372 *
373 * device_create_lockspace
374 * dlm_new_lockspace
375 *
376 * device_remove_lockspace
377 * dlm_release_lockspace
378 */
379
380/* a write to a lockspace device is a lock or unlock request; a write
381 to the control device is to create/remove a lockspace */
382
383static ssize_t device_write(struct file *file, const char __user *buf,
384 size_t count, loff_t *ppos)
385{
386 struct dlm_user_proc *proc = file->private_data;
387 struct dlm_write_request *kbuf;
388 sigset_t tmpsig, allsigs;
389 int error;
390
391#ifdef CONFIG_COMPAT
392 if (count < sizeof(struct dlm_write_request32))
393#else
394 if (count < sizeof(struct dlm_write_request))
395#endif
396 return -EINVAL;
397
398 kbuf = kmalloc(count, GFP_KERNEL);
399 if (!kbuf)
400 return -ENOMEM;
401
402 if (copy_from_user(kbuf, buf, count)) {
403 error = -EFAULT;
404 goto out_free;
405 }
406
407 if (check_version(kbuf)) {
408 error = -EBADE;
409 goto out_free;
410 }
411
412#ifdef CONFIG_COMPAT
413 if (!kbuf->is64bit) {
414 struct dlm_write_request32 *k32buf;
415 k32buf = (struct dlm_write_request32 *)kbuf;
416 kbuf = kmalloc(count + (sizeof(struct dlm_write_request) -
417 sizeof(struct dlm_write_request32)), GFP_KERNEL);
418 if (!kbuf)
419 return -ENOMEM;
420
421 if (proc)
422 set_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags);
423 compat_input(kbuf, k32buf);
424 kfree(k32buf);
425 }
426#endif
427
428 /* do we really need this? can a write happen after a close? */
429 if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
430 test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
431 return -EINVAL;
432
433 sigfillset(&allsigs);
434 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
435
436 error = -EINVAL;
437
438 switch (kbuf->cmd)
439 {
440 case DLM_USER_LOCK:
441 if (!proc) {
442 log_print("no locking on control device");
443 goto out_sig;
444 }
445 error = device_user_lock(proc, &kbuf->i.lock);
446 break;
447
448 case DLM_USER_UNLOCK:
449 if (!proc) {
450 log_print("no locking on control device");
451 goto out_sig;
452 }
453 error = device_user_unlock(proc, &kbuf->i.lock);
454 break;
455
456 case DLM_USER_CREATE_LOCKSPACE:
457 if (proc) {
458 log_print("create/remove only on control device");
459 goto out_sig;
460 }
461 error = device_create_lockspace(&kbuf->i.lspace);
462 break;
463
464 case DLM_USER_REMOVE_LOCKSPACE:
465 if (proc) {
466 log_print("create/remove only on control device");
467 goto out_sig;
468 }
469 error = device_remove_lockspace(&kbuf->i.lspace);
470 break;
471
472 default:
473 log_print("Unknown command passed to DLM device : %d\n",
474 kbuf->cmd);
475 }
476
477 out_sig:
478 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
479 recalc_sigpending();
480 out_free:
481 kfree(kbuf);
482 return error;
483}
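
To show the control-device half of the dispatch above, a hedged sketch (not part of the patch) of creating a lockspace from userspace. The command constants and struct layout come from linux/dlm_device.h; the /dev path is an assumption since it depends on udev, and the caller needs CAP_SYS_ADMIN. On success the write() return value is the misc minor of the new per-lockspace device, which is also what a later DLM_USER_REMOVE_LOCKSPACE write expects in i.lspace.minor.

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/dlm_device.h>

static int create_lockspace(const char *name)
{
        /* room for the NUL that device_create_lockspace()'s strlen() needs */
        size_t len = sizeof(struct dlm_write_request) + strlen(name) + 1;
        struct dlm_write_request *req = calloc(1, len);
        int fd, minor;

        if (!req)
                return -1;

        req->version[0] = DLM_DEVICE_VERSION_MAJOR;
        req->version[1] = DLM_DEVICE_VERSION_MINOR;
        req->version[2] = DLM_DEVICE_VERSION_PATCH;
        req->is64bit = (sizeof(long) == 8);
        req->cmd = DLM_USER_CREATE_LOCKSPACE;
        strcpy(req->i.lspace.name, name);

        fd = open("/dev/misc/dlm-control", O_WRONLY);   /* assumed path */
        if (fd < 0) {
                free(req);
                return -1;
        }

        /* device_write() -> device_create_lockspace(); the result is the
           misc minor of the newly registered "dlm_<name>" device */
        minor = write(fd, req, len);

        close(fd);
        free(req);
        return minor;
}
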
484
485/* Every process that opens the lockspace device has its own "proc" structure
486 hanging off the open file that's used to keep track of locks owned by the
487 process and asts that need to be delivered to the process. */
488
489static int device_open(struct inode *inode, struct file *file)
490{
491 struct dlm_user_proc *proc;
492 struct dlm_ls *ls;
493
494 ls = dlm_find_lockspace_device(iminor(inode));
495 if (!ls)
496 return -ENOENT;
497
498 proc = kzalloc(sizeof(struct dlm_user_proc), GFP_KERNEL);
499 if (!proc) {
500 dlm_put_lockspace(ls);
501 return -ENOMEM;
502 }
503
504 proc->lockspace = ls->ls_local_handle;
505 INIT_LIST_HEAD(&proc->asts);
506 INIT_LIST_HEAD(&proc->locks);
507 spin_lock_init(&proc->asts_spin);
508 spin_lock_init(&proc->locks_spin);
509 init_waitqueue_head(&proc->wait);
510 file->private_data = proc;
511
512 return 0;
513}
514
515static int device_close(struct inode *inode, struct file *file)
516{
517 struct dlm_user_proc *proc = file->private_data;
518 struct dlm_ls *ls;
519 sigset_t tmpsig, allsigs;
520
521 ls = dlm_find_lockspace_local(proc->lockspace);
522 if (!ls)
523 return -ENOENT;
524
525 sigfillset(&allsigs);
526 sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
527
528 set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
529
530 dlm_clear_proc_locks(ls, proc);
531
532 /* at this point no more lkb's should exist for this lockspace,
533 so there's no chance of dlm_user_add_ast() being called and
534 looking for lkb->ua->proc */
535
536 kfree(proc);
537 file->private_data = NULL;
538
539 dlm_put_lockspace(ls);
540 dlm_put_lockspace(ls); /* for the find in device_open() */
541
542 /* FIXME: AUTOFREE: if this ls is no longer used do
543 device_remove_lockspace() */
544
545 sigprocmask(SIG_SETMASK, &tmpsig, NULL);
546 recalc_sigpending();
547
548 return 0;
549}
550
551static int copy_result_to_user(struct dlm_user_args *ua, int compat, int type,
552 int bmode, char __user *buf, size_t count)
553{
554#ifdef CONFIG_COMPAT
555 struct dlm_lock_result32 result32;
556#endif
557 struct dlm_lock_result result;
558 void *resultptr;
559 int error=0;
560 int len;
561 int struct_len;
562
563 memset(&result, 0, sizeof(struct dlm_lock_result));
564 memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
565 result.user_lksb = ua->user_lksb;
566
567 /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
568 in a conversion unless the conversion is successful. See code
569 in dlm_user_convert() for updating ua from ua_tmp. OpenVMS, though,
570 notes that a new blocking AST address and parameter are set even if
571 the conversion fails, so maybe we should just do that. */
572
573 if (type == AST_BAST) {
574 result.user_astaddr = ua->bastaddr;
575 result.user_astparam = ua->bastparam;
576 result.bast_mode = bmode;
577 } else {
578 result.user_astaddr = ua->castaddr;
579 result.user_astparam = ua->castparam;
580 }
581
582#ifdef CONFIG_COMPAT
583 if (compat)
584 len = sizeof(struct dlm_lock_result32);
585 else
586#endif
587 len = sizeof(struct dlm_lock_result);
588 struct_len = len;
589
590 /* copy lvb to userspace if there is one, it's been updated, and
591 the user buffer has space for it */
592
593 if (ua->update_user_lvb && ua->lksb.sb_lvbptr &&
594 count >= len + DLM_USER_LVB_LEN) {
595 if (copy_to_user(buf+len, ua->lksb.sb_lvbptr,
596 DLM_USER_LVB_LEN)) {
597 error = -EFAULT;
598 goto out;
599 }
600
601 result.lvb_offset = len;
602 len += DLM_USER_LVB_LEN;
603 }
604
605 result.length = len;
606 resultptr = &result;
607#ifdef CONFIG_COMPAT
608 if (compat) {
609 compat_output(&result, &result32);
610 resultptr = &result32;
611 }
612#endif
613
614 if (copy_to_user(buf, resultptr, struct_len))
615 error = -EFAULT;
616 else
617 error = len;
618 out:
619 return error;
620}
621
622/* a read returns a single ast described in a struct dlm_lock_result */
623
624static ssize_t device_read(struct file *file, char __user *buf, size_t count,
625 loff_t *ppos)
626{
627 struct dlm_user_proc *proc = file->private_data;
628 struct dlm_lkb *lkb;
629 struct dlm_user_args *ua;
630 DECLARE_WAITQUEUE(wait, current);
631 int error, type=0, bmode=0, removed = 0;
632
633#ifdef CONFIG_COMPAT
634 if (count < sizeof(struct dlm_lock_result32))
635#else
636 if (count < sizeof(struct dlm_lock_result))
637#endif
638 return -EINVAL;
639
640 /* do we really need this? can a read happen after a close? */
641 if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
642 return -EINVAL;
643
644 spin_lock(&proc->asts_spin);
645 if (list_empty(&proc->asts)) {
646 if (file->f_flags & O_NONBLOCK) {
647 spin_unlock(&proc->asts_spin);
648 return -EAGAIN;
649 }
650
651 add_wait_queue(&proc->wait, &wait);
652
653 repeat:
654 set_current_state(TASK_INTERRUPTIBLE);
655 if (list_empty(&proc->asts) && !signal_pending(current)) {
656 spin_unlock(&proc->asts_spin);
657 schedule();
658 spin_lock(&proc->asts_spin);
659 goto repeat;
660 }
661 set_current_state(TASK_RUNNING);
662 remove_wait_queue(&proc->wait, &wait);
663
664 if (signal_pending(current)) {
665 spin_unlock(&proc->asts_spin);
666 return -ERESTARTSYS;
667 }
668 }
669
670 if (list_empty(&proc->asts)) {
671 spin_unlock(&proc->asts_spin);
672 return -EAGAIN;
673 }
674
675 /* there may be both completion and blocking asts to return for
676 the lkb; don't remove lkb from asts list unless no asts remain */
677
678 lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
679
680 if (lkb->lkb_ast_type & AST_COMP) {
681 lkb->lkb_ast_type &= ~AST_COMP;
682 type = AST_COMP;
683 } else if (lkb->lkb_ast_type & AST_BAST) {
684 lkb->lkb_ast_type &= ~AST_BAST;
685 type = AST_BAST;
686 bmode = lkb->lkb_bastmode;
687 }
688
689 if (!lkb->lkb_ast_type) {
690 list_del(&lkb->lkb_astqueue);
691 removed = 1;
692 }
693 spin_unlock(&proc->asts_spin);
694
695 ua = (struct dlm_user_args *)lkb->lkb_astparam;
696 error = copy_result_to_user(ua,
697 test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
698 type, bmode, buf, count);
699
700 /* removes the reference for the proc->asts list added by
701 dlm_user_add_ast() and may result in the lkb being freed */
702 if (removed)
703 dlm_put_lkb(lkb);
704
705 return error;
706}
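
For the read side, a hedged sketch (not part of the patch) of consuming one struct dlm_lock_result in userspace. The field names match the layout mirrored by dlm_lock_result32 near the top of this file; treating a nonzero bast_mode as a blocking callback, the callback typing, and copying only the scalar lksb fields back are assumptions about the caller (libdlm keeps its own sb_lvbptr), not something the kernel enforces.

#include <string.h>
#include <unistd.h>
#include <linux/dlm.h>
#include <linux/dlm_device.h>

typedef void (*user_ast_fn)(void *astparam);

static int read_one_ast(int ls_fd)
{
        /* sized so an appended lvb (lvb_offset != 0) fits as well */
        struct {
                struct dlm_lock_result res;
                char lvb[DLM_USER_LVB_LEN];
        } buf;
        struct dlm_lock_result *res = &buf.res;
        ssize_t n;

        n = read(ls_fd, &buf, sizeof(buf));     /* blocks unless O_NONBLOCK */
        if (n < (ssize_t)sizeof(struct dlm_lock_result))
                return -1;

        if (!res->bast_mode) {
                /* completion ast: update the caller's lksb fields but keep
                   the caller's own sb_lvbptr */
                struct dlm_lksb *lksb = res->user_lksb;

                lksb->sb_status = res->lksb.sb_status;
                lksb->sb_lkid = res->lksb.sb_lkid;
                lksb->sb_flags = res->lksb.sb_flags;

                if (res->lvb_offset && lksb->sb_lvbptr)
                        memcpy(lksb->sb_lvbptr,
                               (char *)&buf + res->lvb_offset,
                               DLM_USER_LVB_LEN);
        }

        /* castaddr for completions, bastaddr for blocking asts; the kernel
           stored the userspace function pointer verbatim */
        if (res->user_astaddr)
                ((user_ast_fn)res->user_astaddr)(res->user_astparam);
        return 0;
}
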
707
708static unsigned int device_poll(struct file *file, poll_table *wait)
709{
710 struct dlm_user_proc *proc = file->private_data;
711
712 poll_wait(file, &proc->wait, wait);
713
714 spin_lock(&proc->asts_spin);
715 if (!list_empty(&proc->asts)) {
716 spin_unlock(&proc->asts_spin);
717 return POLLIN | POLLRDNORM;
718 }
719 spin_unlock(&proc->asts_spin);
720 return 0;
721}
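
device_poll() reports POLLIN | POLLRDNORM whenever proc->asts is non-empty, so a caller can avoid blocking in read() by polling the lockspace fd first. A brief illustrative sketch (not part of the patch):

#include <poll.h>

static int wait_for_ast(int ls_fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = ls_fd, .events = POLLIN };
        int n = poll(&pfd, 1, timeout_ms);

        if (n > 0 && (pfd.revents & POLLIN))
                return 1;       /* an ast is ready, e.g. for read_one_ast() */
        return n;               /* 0 on timeout, -1 on error */
}
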
722
723static int ctl_device_open(struct inode *inode, struct file *file)
724{
725 file->private_data = NULL;
726 return 0;
727}
728
729static int ctl_device_close(struct inode *inode, struct file *file)
730{
731 return 0;
732}
733
734static struct file_operations device_fops = {
735 .open = device_open,
736 .release = device_close,
737 .read = device_read,
738 .write = device_write,
739 .poll = device_poll,
740 .owner = THIS_MODULE,
741};
742
743static struct file_operations ctl_device_fops = {
744 .open = ctl_device_open,
745 .release = ctl_device_close,
746 .write = device_write,
747 .owner = THIS_MODULE,
748};
749
750int dlm_user_init(void)
751{
752 int error;
753
754 ctl_device.name = "dlm-control";
755 ctl_device.fops = &ctl_device_fops;
756 ctl_device.minor = MISC_DYNAMIC_MINOR;
757
758 error = misc_register(&ctl_device);
759 if (error)
760 log_print("misc_register failed for control device");
761
762 return error;
763}
764
765void dlm_user_exit(void)
766{
767 misc_deregister(&ctl_device);
768}
769
diff --git a/fs/dlm/user.h b/fs/dlm/user.h
new file mode 100644
index 000000000000..d38e9f3e4151
--- /dev/null
+++ b/fs/dlm/user.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
3 *
4 * This copyrighted material is made available to anyone wishing to use,
5 * modify, copy, or redistribute it subject to the terms and conditions
6 * of the GNU General Public License v.2.
7 */
8
9#ifndef __USER_DOT_H__
10#define __USER_DOT_H__
11
12void dlm_user_add_ast(struct dlm_lkb *lkb, int type);
13int dlm_user_init(void);
14void dlm_user_exit(void);
15
16#endif