 -rw-r--r--   fs/Kconfig          |   1
 -rw-r--r--   fs/fcntl.c          |  66
 -rw-r--r--   fs/lockd/svc.c      |  11
 -rw-r--r--   fs/lockd/svclock.c  |   6
 -rw-r--r--   fs/lockd/svcsubs.c  |   9
 -rw-r--r--   fs/locks.c          |  57
 -rw-r--r--   fs/nfs/Kconfig      |   1
 -rw-r--r--   fs/nfsd/Kconfig     |   1
 -rw-r--r--   fs/nfsd/nfs4state.c |  26
 -rw-r--r--   include/linux/fs.h  |   6
 10 files changed, 114 insertions, 70 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index b5e582bd769d..97673c955484 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -53,7 +53,6 @@ config EXPORTFS
 config FILE_LOCKING
         bool "Enable POSIX file locking API" if EMBEDDED
         default y
-        select BKL # while lockd still uses it.
         help
           This option enables standard file locking support, required
           for filesystems like NFS and for the flock() system
diff --git a/fs/fcntl.c b/fs/fcntl.c
index f8cc34f542c3..dcdbc6f5c33b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -640,7 +640,7 @@ static void fasync_free_rcu(struct rcu_head *head)
  * match the state "is the filp on a fasync list".
  *
  */
-static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
+int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
 {
         struct fasync_struct *fa, **fp;
         int result = 0;
@@ -666,21 +666,28 @@ static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
         return result;
 }
 
+struct fasync_struct *fasync_alloc(void)
+{
+        return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+}
+
 /*
- * Add a fasync entry. Return negative on error, positive if
- * added, and zero if did nothing but change an existing one.
- *
- * NOTE! It is very important that the FASYNC flag always
- * match the state "is the filp on a fasync list".
+ * NOTE! This can be used only for unused fasync entries:
+ * entries that actually got inserted on the fasync list
+ * need to be released by rcu - see fasync_remove_entry.
  */
-static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+void fasync_free(struct fasync_struct *new)
 {
-        struct fasync_struct *new, *fa, **fp;
-        int result = 0;
+        kmem_cache_free(fasync_cache, new);
+}
 
-        new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
-        if (!new)
-                return -ENOMEM;
+/*
+ * Insert a new entry into the fasync list. Return the pointer to the
+ * old one if we didn't use the new one.
+ */
+struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
+{
+        struct fasync_struct *fa, **fp;
 
         spin_lock(&filp->f_lock);
         spin_lock(&fasync_lock);
@@ -691,8 +698,6 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
                 spin_lock_irq(&fa->fa_lock);
                 fa->fa_fd = fd;
                 spin_unlock_irq(&fa->fa_lock);
-
-                kmem_cache_free(fasync_cache, new);
                 goto out;
         }
 
@@ -702,13 +707,42 @@ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
         new->fa_fd = fd;
         new->fa_next = *fapp;
         rcu_assign_pointer(*fapp, new);
-        result = 1;
         filp->f_flags |= FASYNC;
 
 out:
         spin_unlock(&fasync_lock);
         spin_unlock(&filp->f_lock);
-        return result;
+        return fa;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+        struct fasync_struct *new;
+
+        new = fasync_alloc();
+        if (!new)
+                return -ENOMEM;
+
+        /*
+         * fasync_insert_entry() returns the old (update) entry if
+         * it existed.
+         *
+         * So free the (unused) new entry and return 0 to let the
+         * caller know that we didn't add any new fasync entries.
+         */
+        if (fasync_insert_entry(fd, filp, fapp, new)) {
+                fasync_free(new);
+                return 0;
+        }
+
+        return 1;
 }
 
 /*
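
The fs/fcntl.c hunks above split the old fasync_add_entry() into three steps (fasync_alloc(), fasync_insert_entry() and fasync_free()) so that the GFP_KERNEL allocation can be done before a caller enters an atomic section. A minimal usage sketch of that split, not part of the patch; example_lock and example_set_fasync() are made-up names standing in for a caller's own locking:

#include <linux/fs.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical caller-side lock */

static int example_set_fasync(int fd, struct file *filp,
                              struct fasync_struct **fapp)
{
        struct fasync_struct *new;

        new = fasync_alloc();           /* may sleep (GFP_KERNEL) */
        if (!new)
                return -ENOMEM;

        spin_lock(&example_lock);       /* atomic from here on */
        /*
         * fasync_insert_entry() links 'new' in only when no entry
         * for this file existed; otherwise it updates and returns
         * the old entry, leaving 'new' unused.
         */
        if (fasync_insert_entry(fd, filp, fapp, new)) {
                spin_unlock(&example_lock);
                fasync_free(new);       /* never inserted: plain free */
                return 0;
        }
        spin_unlock(&example_lock);
        return 1;
}
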
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index b13aabc12298..abfff9d7979d 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -22,7 +22,6 @@
 #include <linux/in.h>
 #include <linux/uio.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
@@ -130,15 +129,6 @@ lockd(void *vrqstp)
 
         dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-        /*
-         * FIXME: it would be nice if lockd didn't spend its entire life
-         * running under the BKL. At the very least, it would be good to
-         * have someone clarify what it's intended to protect here. I've
-         * seen some handwavy posts about posix locking needing to be
-         * done under the BKL, but it's far from clear.
-         */
-        lock_kernel();
-
         if (!nlm_timeout)
                 nlm_timeout = LOCKD_DFLT_TIMEO;
         nlmsvc_timeout = nlm_timeout * HZ;
@@ -195,7 +185,6 @@ lockd(void *vrqstp)
         if (nlmsvc_ops)
                 nlmsvc_invalidate_all();
         nlm_shutdown_hosts();
-        unlock_kernel();
         return 0;
 }
 
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 6f1ef000975a..c462d346acbd 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -700,14 +700,16 @@ nlmsvc_notify_blocked(struct file_lock *fl)
         struct nlm_block *block;
 
         dprintk("lockd: VFS unblock notification for block %p\n", fl);
+        spin_lock(&nlm_blocked_lock);
         list_for_each_entry(block, &nlm_blocked, b_list) {
                 if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) {
-                        nlmsvc_insert_block(block, 0);
+                        nlmsvc_insert_block_locked(block, 0);
+                        spin_unlock(&nlm_blocked_lock);
                         svc_wake_up(block->b_daemon);
                         return;
                 }
         }
-
+        spin_unlock(&nlm_blocked_lock);
         printk(KERN_WARNING "lockd: notification for unknown block!\n");
 }
 
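
In the svclock.c hunk the nlm_blocked walk is now guarded by the nlm_blocked_lock spinlock, and the insert helper becomes nlmsvc_insert_block_locked(), a variant that expects the caller to already hold that lock. A sketch of the convention, assuming the plain helper simply wraps the _locked one, an assumption for illustration rather than something shown in this patch:

/* Assumed lock/_locked split; illustrative only. */
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
{
        spin_lock(&nlm_blocked_lock);
        nlmsvc_insert_block_locked(block, when);
        spin_unlock(&nlm_blocked_lock);
}
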
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index d0ef94cfb3da..1ca0679c80bf 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -170,6 +170,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 
 again:
         file->f_locks = 0;
+        lock_flocks(); /* protects i_flock list */
         for (fl = inode->i_flock; fl; fl = fl->fl_next) {
                 if (fl->fl_lmops != &nlmsvc_lock_operations)
                         continue;
@@ -181,6 +182,7 @@ again:
                 if (match(lockhost, host)) {
                         struct file_lock lock = *fl;
 
+                        unlock_flocks();
                         lock.fl_type = F_UNLCK;
                         lock.fl_start = 0;
                         lock.fl_end = OFFSET_MAX;
@@ -192,6 +194,7 @@ again:
                         goto again;
                 }
         }
+        unlock_flocks();
 
         return 0;
 }
@@ -226,10 +229,14 @@ nlm_file_inuse(struct nlm_file *file)
         if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
                 return 1;
 
+        lock_flocks();
         for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-                if (fl->fl_lmops == &nlmsvc_lock_operations)
+                if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                        unlock_flocks();
                         return 1;
+                }
         }
+        unlock_flocks();
         file->f_locks = 0;
         return 0;
 }
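
Both svcsubs.c helpers now bracket the inode->i_flock walk with lock_flocks()/unlock_flocks(), and nlm_traverse_locks() copies the entry and drops the lock before calling code that may sleep, then restarts the walk from the top. A condensed sketch of that walk/drop/restart pattern (match_fn() and release_fn() are placeholders for the real nlmsvc predicate and the blocking unlock call):

/* Walk/drop/restart sketch; the two helpers below are placeholders. */
static bool match_fn(struct file_lock *fl) { return fl->fl_flags & FL_POSIX; }
static void release_fn(struct file_lock *fl) { /* e.g. a blocking unlock */ }

static void example_traverse(struct inode *inode)
{
        struct file_lock *fl;

again:
        lock_flocks();                  /* protects the i_flock list */
        for (fl = inode->i_flock; fl; fl = fl->fl_next) {
                if (match_fn(fl)) {
                        struct file_lock lock = *fl;    /* copy before unlocking */

                        unlock_flocks();
                        release_fn(&lock);      /* may sleep */
                        goto again;             /* the list may have changed */
                }
        }
        unlock_flocks();
}
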
diff --git a/fs/locks.c b/fs/locks.c
index 4de3a2666810..50ec15927aab 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,6 +142,7 @@ int lease_break_time = 45;
 
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
+static DEFINE_SPINLOCK(file_lock_lock);
 
 /*
  * Protects the two list heads above, plus the inode->i_flock list
@@ -149,23 +150,24 @@ static LIST_HEAD(blocked_list);
  */
 void lock_flocks(void)
 {
-        lock_kernel();
+        spin_lock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(lock_flocks);
 
 void unlock_flocks(void)
 {
-        unlock_kernel();
+        spin_unlock(&file_lock_lock);
 }
 EXPORT_SYMBOL_GPL(unlock_flocks);
 
 static struct kmem_cache *filelock_cache __read_mostly;
 
 /* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+struct file_lock *locks_alloc_lock(void)
 {
         return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
 }
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
 
 void locks_release_private(struct file_lock *fl)
 {
@@ -1365,7 +1367,6 @@ int fcntl_getlease(struct file *filp)
 int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
         struct file_lock *fl, **before, **my_before = NULL, *lease;
-        struct file_lock *new_fl = NULL;
         struct dentry *dentry = filp->f_path.dentry;
         struct inode *inode = dentry->d_inode;
         int error, rdlease_count = 0, wrlease_count = 0;
@@ -1385,11 +1386,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
         lease = *flp;
 
         if (arg != F_UNLCK) {
-                error = -ENOMEM;
-                new_fl = locks_alloc_lock();
-                if (new_fl == NULL)
-                        goto out;
-
                 error = -EAGAIN;
                 if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
                         goto out;
@@ -1434,7 +1430,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
                 goto out;
         }
 
-        error = 0;
         if (arg == F_UNLCK)
                 goto out;
 
@@ -1442,15 +1437,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
         if (!leases_enable)
                 goto out;
 
-        locks_copy_lock(new_fl, lease);
-        locks_insert_lock(before, new_fl);
-
-        *flp = new_fl;
+        locks_insert_lock(before, lease);
         return 0;
 
 out:
-        if (new_fl != NULL)
-                locks_free_lock(new_fl);
+        locks_free_lock(lease);
         return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1514,26 +1505,38 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
  */
 int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
 {
-        struct file_lock fl, *flp = &fl;
+        struct file_lock *fl;
+        struct fasync_struct *new;
         struct inode *inode = filp->f_path.dentry->d_inode;
         int error;
 
-        locks_init_lock(&fl);
-        error = lease_init(filp, arg, &fl);
-        if (error)
-                return error;
+        fl = lease_alloc(filp, arg);
+        if (IS_ERR(fl))
+                return PTR_ERR(fl);
 
+        new = fasync_alloc();
+        if (!new) {
+                locks_free_lock(fl);
+                return -ENOMEM;
+        }
         lock_flocks();
-
-        error = __vfs_setlease(filp, arg, &flp);
+        error = __vfs_setlease(filp, arg, &fl);
         if (error || arg == F_UNLCK)
                 goto out_unlock;
 
-        error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+        /*
+         * fasync_insert_entry() returns the old entry if any.
+         * If there was no old entry, then it used 'new' and
+         * inserted it into the fasync list. Clear new so that
+         * we don't release it here.
+         */
+        if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+                new = NULL;
+
         if (error < 0) {
                 /* remove lease just inserted by setlease */
-                flp->fl_type = F_UNLCK | F_INPROGRESS;
-                flp->fl_break_time = jiffies - 10;
+                fl->fl_type = F_UNLCK | F_INPROGRESS;
+                fl->fl_break_time = jiffies - 10;
                 time_out_leases(inode);
                 goto out_unlock;
         }
@@ -1541,6 +1544,8 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
         error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 out_unlock:
         unlock_flocks();
+        if (new)
+                fasync_free(new);
         return error;
 }
 
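
Because lock_flocks() is now a real spinlock, fcntl_setlease() above pre-allocates everything it might need (the lease via lease_alloc(), the fasync entry via fasync_alloc()) before taking the lock, and frees whatever went unused only after dropping it. The same shape in isolation, as a sketch; commit_fn() is a hypothetical stand-in for the work done while the lock is held, returning true if it consumed the lock structure:

/* Allocate outside, commit inside, free leftovers outside (sketch). */
static bool commit_fn(struct file_lock *fl);    /* placeholder */

static int example_setlease(void)
{
        struct file_lock *fl;

        fl = locks_alloc_lock();        /* GFP_KERNEL, may sleep */
        if (!fl)
                return -ENOMEM;

        lock_flocks();                  /* spinlock: no sleeping from here */
        if (commit_fn(fl))              /* the lock lists now own 'fl' */
                fl = NULL;
        unlock_flocks();

        if (fl)                         /* unused: free after dropping the lock */
                locks_free_lock(fl);
        return 0;
}
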
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index fd667652c502..ba306658a6db 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,7 +1,6 @@
 config NFS_FS
         tristate "NFS client support"
         depends on INET && FILE_LOCKING
-        depends on BKL # fix as soon as lockd is done
         select LOCKD
         select SUNRPC
         select NFS_ACL_SUPPORT if NFS_V3_ACL
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 31a78fce4732..18b3e8975fe0 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -2,7 +2,6 @@ config NFSD
         tristate "NFS server support"
         depends on INET
         depends on FILE_LOCKING
-        depends on BKL # fix as soon as lockd is done
         select LOCKD
         select SUNRPC
         select EXPORTFS
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 9019e8ec9dc8..56347e0ac88d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2614,7 +2614,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
         struct nfs4_delegation *dp;
         struct nfs4_stateowner *sop = stp->st_stateowner;
         int cb_up = atomic_read(&sop->so_client->cl_cb_set);
-        struct file_lock fl, *flp = &fl;
+        struct file_lock *fl;
         int status, flag = 0;
 
         flag = NFS4_OPEN_DELEGATE_NONE;
@@ -2648,20 +2648,24 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
                 flag = NFS4_OPEN_DELEGATE_NONE;
                 goto out;
         }
-        locks_init_lock(&fl);
-        fl.fl_lmops = &nfsd_lease_mng_ops;
-        fl.fl_flags = FL_LEASE;
-        fl.fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-        fl.fl_end = OFFSET_MAX;
-        fl.fl_owner = (fl_owner_t)dp;
-        fl.fl_file = find_readable_file(stp->st_file);
-        BUG_ON(!fl.fl_file);
-        fl.fl_pid = current->tgid;
+        status = -ENOMEM;
+        fl = locks_alloc_lock();
+        if (!fl)
+                goto out;
+        locks_init_lock(fl);
+        fl->fl_lmops = &nfsd_lease_mng_ops;
+        fl->fl_flags = FL_LEASE;
+        fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+        fl->fl_end = OFFSET_MAX;
+        fl->fl_owner = (fl_owner_t)dp;
+        fl->fl_file = find_readable_file(stp->st_file);
+        BUG_ON(!fl->fl_file);
+        fl->fl_pid = current->tgid;
 
         /* vfs_setlease checks to see if delegation should be handed out.
          * the lock_manager callbacks fl_mylease and fl_change are used
          */
-        if ((status = vfs_setlease(fl.fl_file, fl.fl_type, &flp))) {
+        if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
                 dprintk("NFSD: setlease failed [%d], no delegation\n", status);
                 unhash_delegation(dp);
                 flag = NFS4_OPEN_DELEGATE_NONE;
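
The nfs4state.c hunks replace the on-stack struct file_lock with locks_alloc_lock(), which is needed once generic_setlease() (see the fs/locks.c hunks) links the caller's lease straight into the inode's lease list instead of copying it; the error path in generic_setlease() frees the lease itself. A reduced sketch of the resulting caller shape based on these hunks, not an authoritative statement of the lease API; example_delegate() and its arguments are invented:

/* Heap-allocated lease handed to vfs_setlease(); sketch only. */
static int example_delegate(struct file *filp, fl_owner_t owner)
{
        struct file_lock *fl;
        int status;

        fl = locks_alloc_lock();        /* heap, not on-stack */
        if (!fl)
                return -ENOMEM;
        locks_init_lock(fl);
        fl->fl_flags = FL_LEASE;
        fl->fl_type = F_RDLCK;
        fl->fl_end = OFFSET_MAX;
        fl->fl_owner = owner;
        fl->fl_file = filp;

        status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
        /*
         * On failure the setlease code disposes of the lease itself
         * (see the out: path in generic_setlease() above), so the
         * caller does not free 'fl' here.
         */
        return status;
}
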
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 240eb1d4f876..b2a6009cba10 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1122,6 +1122,7 @@ extern int fcntl_getlease(struct file *filp);
 
 /* fs/locks.c */
 extern void locks_init_lock(struct file_lock *);
+extern struct file_lock * locks_alloc_lock(void);
 extern void locks_copy_lock(struct file_lock *, struct file_lock *);
 extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
 extern void locks_remove_posix(struct file *, fl_owner_t);
@@ -1310,6 +1311,11 @@ struct fasync_struct {
 
 /* SMP safe fasync helpers: */
 extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *);
+extern int fasync_remove_entry(struct file *, struct fasync_struct **);
+extern struct fasync_struct *fasync_alloc(void);
+extern void fasync_free(struct fasync_struct *);
+
 /* can be called from interrupts */
 extern void kill_fasync(struct fasync_struct **, int, int);
 