Diffstat (limited to 'fs/locks.c')
 -rw-r--r--   fs/locks.c   593
 1 file changed, 323 insertions(+), 270 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 735b8d3fa78c..4753218f308e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
| @@ -137,7 +137,7 @@ | |||
| 137 | 137 | ||
| 138 | #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) | 138 | #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) |
| 139 | #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) | 139 | #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) |
| 140 | #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG)) | 140 | #define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) |
| 141 | #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) | 141 | #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) |
| 142 | 142 | ||
| 143 | static bool lease_breaking(struct file_lock *fl) | 143 | static bool lease_breaking(struct file_lock *fl) |
| @@ -157,14 +157,11 @@ static int target_leasetype(struct file_lock *fl) | |||
| 157 | int leases_enable = 1; | 157 | int leases_enable = 1; |
| 158 | int lease_break_time = 45; | 158 | int lease_break_time = 45; |
| 159 | 159 | ||
| 160 | #define for_each_lock(inode, lockp) \ | ||
| 161 | for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next) | ||
| 162 | |||
| 163 | /* | 160 | /* |
| 164 | * The global file_lock_list is only used for displaying /proc/locks, so we | 161 | * The global file_lock_list is only used for displaying /proc/locks, so we |
| 165 | * keep a list on each CPU, with each list protected by its own spinlock via | 162 | * keep a list on each CPU, with each list protected by its own spinlock via |
| 166 | * the file_lock_lglock. Note that alterations to the list also require that | 163 | * the file_lock_lglock. Note that alterations to the list also require that |
| 167 | * the relevant i_lock is held. | 164 | * the relevant flc_lock is held. |
| 168 | */ | 165 | */ |
| 169 | DEFINE_STATIC_LGLOCK(file_lock_lglock); | 166 | DEFINE_STATIC_LGLOCK(file_lock_lglock); |
| 170 | static DEFINE_PER_CPU(struct hlist_head, file_lock_list); | 167 | static DEFINE_PER_CPU(struct hlist_head, file_lock_list); |
| @@ -192,21 +189,68 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS); | |||
| 192 | * contrast to those that are acting as records of acquired locks). | 189 | * contrast to those that are acting as records of acquired locks). |
| 193 | * | 190 | * |
| 194 | * Note that when we acquire this lock in order to change the above fields, | 191 | * Note that when we acquire this lock in order to change the above fields, |
| 195 | * we often hold the i_lock as well. In certain cases, when reading the fields | 192 | * we often hold the flc_lock as well. In certain cases, when reading the fields |
| 196 | * protected by this lock, we can skip acquiring it iff we already hold the | 193 | * protected by this lock, we can skip acquiring it iff we already hold the |
| 197 | * i_lock. | 194 | * flc_lock. |
| 198 | * | 195 | * |
| 199 | * In particular, adding an entry to the fl_block list requires that you hold | 196 | * In particular, adding an entry to the fl_block list requires that you hold |
| 200 | * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting | 197 | * both the flc_lock and the blocked_lock_lock (acquired in that order). |
| 201 | * an entry from the list however only requires the file_lock_lock. | 198 | * Deleting an entry from the list however only requires the file_lock_lock. |
| 202 | */ | 199 | */ |
| 203 | static DEFINE_SPINLOCK(blocked_lock_lock); | 200 | static DEFINE_SPINLOCK(blocked_lock_lock); |
| 204 | 201 | ||
| 202 | static struct kmem_cache *flctx_cache __read_mostly; | ||
| 205 | static struct kmem_cache *filelock_cache __read_mostly; | 203 | static struct kmem_cache *filelock_cache __read_mostly; |
| 206 | 204 | ||
| 205 | static struct file_lock_context * | ||
| 206 | locks_get_lock_context(struct inode *inode) | ||
| 207 | { | ||
| 208 | struct file_lock_context *new; | ||
| 209 | |||
| 210 | if (likely(inode->i_flctx)) | ||
| 211 | goto out; | ||
| 212 | |||
| 213 | new = kmem_cache_alloc(flctx_cache, GFP_KERNEL); | ||
| 214 | if (!new) | ||
| 215 | goto out; | ||
| 216 | |||
| 217 | spin_lock_init(&new->flc_lock); | ||
| 218 | INIT_LIST_HEAD(&new->flc_flock); | ||
| 219 | INIT_LIST_HEAD(&new->flc_posix); | ||
| 220 | INIT_LIST_HEAD(&new->flc_lease); | ||
| 221 | |||
| 222 | /* | ||
| 223 | * Assign the pointer if it's not already assigned. If it is, then | ||
| 224 | * free the context we just allocated. | ||
| 225 | */ | ||
| 226 | spin_lock(&inode->i_lock); | ||
| 227 | if (likely(!inode->i_flctx)) { | ||
| 228 | inode->i_flctx = new; | ||
| 229 | new = NULL; | ||
| 230 | } | ||
| 231 | spin_unlock(&inode->i_lock); | ||
| 232 | |||
| 233 | if (new) | ||
| 234 | kmem_cache_free(flctx_cache, new); | ||
| 235 | out: | ||
| 236 | return inode->i_flctx; | ||
| 237 | } | ||
| 238 | |||
| 239 | void | ||
| 240 | locks_free_lock_context(struct file_lock_context *ctx) | ||
| 241 | { | ||
| 242 | if (ctx) { | ||
| 243 | WARN_ON_ONCE(!list_empty(&ctx->flc_flock)); | ||
| 244 | WARN_ON_ONCE(!list_empty(&ctx->flc_posix)); | ||
| 245 | WARN_ON_ONCE(!list_empty(&ctx->flc_lease)); | ||
| 246 | kmem_cache_free(flctx_cache, ctx); | ||
| 247 | } | ||
| 248 | } | ||
| 249 | |||
| 207 | static void locks_init_lock_heads(struct file_lock *fl) | 250 | static void locks_init_lock_heads(struct file_lock *fl) |
| 208 | { | 251 | { |
| 209 | INIT_HLIST_NODE(&fl->fl_link); | 252 | INIT_HLIST_NODE(&fl->fl_link); |
| 253 | INIT_LIST_HEAD(&fl->fl_list); | ||
| 210 | INIT_LIST_HEAD(&fl->fl_block); | 254 | INIT_LIST_HEAD(&fl->fl_block); |
| 211 | init_waitqueue_head(&fl->fl_wait); | 255 | init_waitqueue_head(&fl->fl_wait); |
| 212 | } | 256 | } |
| @@ -243,6 +287,7 @@ EXPORT_SYMBOL_GPL(locks_release_private); | |||
| 243 | void locks_free_lock(struct file_lock *fl) | 287 | void locks_free_lock(struct file_lock *fl) |
| 244 | { | 288 | { |
| 245 | BUG_ON(waitqueue_active(&fl->fl_wait)); | 289 | BUG_ON(waitqueue_active(&fl->fl_wait)); |
| 290 | BUG_ON(!list_empty(&fl->fl_list)); | ||
| 246 | BUG_ON(!list_empty(&fl->fl_block)); | 291 | BUG_ON(!list_empty(&fl->fl_block)); |
| 247 | BUG_ON(!hlist_unhashed(&fl->fl_link)); | 292 | BUG_ON(!hlist_unhashed(&fl->fl_link)); |
| 248 | 293 | ||
| @@ -257,8 +302,8 @@ locks_dispose_list(struct list_head *dispose) | |||
| 257 | struct file_lock *fl; | 302 | struct file_lock *fl; |
| 258 | 303 | ||
| 259 | while (!list_empty(dispose)) { | 304 | while (!list_empty(dispose)) { |
| 260 | fl = list_first_entry(dispose, struct file_lock, fl_block); | 305 | fl = list_first_entry(dispose, struct file_lock, fl_list); |
| 261 | list_del_init(&fl->fl_block); | 306 | list_del_init(&fl->fl_list); |
| 262 | locks_free_lock(fl); | 307 | locks_free_lock(fl); |
| 263 | } | 308 | } |
| 264 | } | 309 | } |
| @@ -513,7 +558,7 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) | |||
| 513 | return fl1->fl_owner == fl2->fl_owner; | 558 | return fl1->fl_owner == fl2->fl_owner; |
| 514 | } | 559 | } |
| 515 | 560 | ||
| 516 | /* Must be called with the i_lock held! */ | 561 | /* Must be called with the flc_lock held! */ |
| 517 | static void locks_insert_global_locks(struct file_lock *fl) | 562 | static void locks_insert_global_locks(struct file_lock *fl) |
| 518 | { | 563 | { |
| 519 | lg_local_lock(&file_lock_lglock); | 564 | lg_local_lock(&file_lock_lglock); |
| @@ -522,12 +567,12 @@ static void locks_insert_global_locks(struct file_lock *fl) | |||
| 522 | lg_local_unlock(&file_lock_lglock); | 567 | lg_local_unlock(&file_lock_lglock); |
| 523 | } | 568 | } |
| 524 | 569 | ||
| 525 | /* Must be called with the i_lock held! */ | 570 | /* Must be called with the flc_lock held! */ |
| 526 | static void locks_delete_global_locks(struct file_lock *fl) | 571 | static void locks_delete_global_locks(struct file_lock *fl) |
| 527 | { | 572 | { |
| 528 | /* | 573 | /* |
| 529 | * Avoid taking lock if already unhashed. This is safe since this check | 574 | * Avoid taking lock if already unhashed. This is safe since this check |
| 530 | * is done while holding the i_lock, and new insertions into the list | 575 | * is done while holding the flc_lock, and new insertions into the list |
| 531 | * also require that it be held. | 576 | * also require that it be held. |
| 532 | */ | 577 | */ |
| 533 | if (hlist_unhashed(&fl->fl_link)) | 578 | if (hlist_unhashed(&fl->fl_link)) |
| @@ -579,10 +624,10 @@ static void locks_delete_block(struct file_lock *waiter) | |||
| 579 | * the order they blocked. The documentation doesn't require this but | 624 | * the order they blocked. The documentation doesn't require this but |
| 580 | * it seems like the reasonable thing to do. | 625 | * it seems like the reasonable thing to do. |
| 581 | * | 626 | * |
| 582 | * Must be called with both the i_lock and blocked_lock_lock held. The fl_block | 627 | * Must be called with both the flc_lock and blocked_lock_lock held. The |
| 583 | * list itself is protected by the blocked_lock_lock, but by ensuring that the | 628 | * fl_block list itself is protected by the blocked_lock_lock, but by ensuring |
| 584 | * i_lock is also held on insertions we can avoid taking the blocked_lock_lock | 629 | * that the flc_lock is also held on insertions we can avoid taking the |
| 585 | * in some cases when we see that the fl_block list is empty. | 630 | * blocked_lock_lock in some cases when we see that the fl_block list is empty. |
| 586 | */ | 631 | */ |
| 587 | static void __locks_insert_block(struct file_lock *blocker, | 632 | static void __locks_insert_block(struct file_lock *blocker, |
| 588 | struct file_lock *waiter) | 633 | struct file_lock *waiter) |
| @@ -594,7 +639,7 @@ static void __locks_insert_block(struct file_lock *blocker, | |||
| 594 | locks_insert_global_blocked(waiter); | 639 | locks_insert_global_blocked(waiter); |
| 595 | } | 640 | } |
| 596 | 641 | ||
| 597 | /* Must be called with i_lock held. */ | 642 | /* Must be called with flc_lock held. */ |
| 598 | static void locks_insert_block(struct file_lock *blocker, | 643 | static void locks_insert_block(struct file_lock *blocker, |
| 599 | struct file_lock *waiter) | 644 | struct file_lock *waiter) |
| 600 | { | 645 | { |
| @@ -606,15 +651,15 @@ static void locks_insert_block(struct file_lock *blocker, | |||
| 606 | /* | 651 | /* |
| 607 | * Wake up processes blocked waiting for blocker. | 652 | * Wake up processes blocked waiting for blocker. |
| 608 | * | 653 | * |
| 609 | * Must be called with the inode->i_lock held! | 654 | * Must be called with the inode->flc_lock held! |
| 610 | */ | 655 | */ |
| 611 | static void locks_wake_up_blocks(struct file_lock *blocker) | 656 | static void locks_wake_up_blocks(struct file_lock *blocker) |
| 612 | { | 657 | { |
| 613 | /* | 658 | /* |
| 614 | * Avoid taking global lock if list is empty. This is safe since new | 659 | * Avoid taking global lock if list is empty. This is safe since new |
| 615 | * blocked requests are only added to the list under the i_lock, and | 660 | * blocked requests are only added to the list under the flc_lock, and |
| 616 | * the i_lock is always held here. Note that removal from the fl_block | 661 | * the flc_lock is always held here. Note that removal from the fl_block |
| 617 | * list does not require the i_lock, so we must recheck list_empty() | 662 | * list does not require the flc_lock, so we must recheck list_empty() |
| 618 | * after acquiring the blocked_lock_lock. | 663 | * after acquiring the blocked_lock_lock. |
| 619 | */ | 664 | */ |
| 620 | if (list_empty(&blocker->fl_block)) | 665 | if (list_empty(&blocker->fl_block)) |
| @@ -635,63 +680,36 @@ static void locks_wake_up_blocks(struct file_lock *blocker) | |||
| 635 | spin_unlock(&blocked_lock_lock); | 680 | spin_unlock(&blocked_lock_lock); |
| 636 | } | 681 | } |
| 637 | 682 | ||
| 638 | /* Insert file lock fl into an inode's lock list at the position indicated | 683 | static void |
| 639 | * by pos. At the same time add the lock to the global file lock list. | 684 | locks_insert_lock_ctx(struct file_lock *fl, int *counter, |
| 640 | * | 685 | struct list_head *before) |
| 641 | * Must be called with the i_lock held! | ||
| 642 | */ | ||
| 643 | static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl) | ||
| 644 | { | 686 | { |
| 645 | fl->fl_nspid = get_pid(task_tgid(current)); | 687 | fl->fl_nspid = get_pid(task_tgid(current)); |
| 646 | 688 | list_add_tail(&fl->fl_list, before); | |
| 647 | /* insert into file's list */ | 689 | ++*counter; |
| 648 | fl->fl_next = *pos; | ||
| 649 | *pos = fl; | ||
| 650 | |||
| 651 | locks_insert_global_locks(fl); | 690 | locks_insert_global_locks(fl); |
| 652 | } | 691 | } |
| 653 | 692 | ||
| 654 | /** | 693 | static void |
| 655 | * locks_delete_lock - Delete a lock and then free it. | 694 | locks_unlink_lock_ctx(struct file_lock *fl, int *counter) |
| 656 | * @thisfl_p: pointer that points to the fl_next field of the previous | ||
| 657 | * inode->i_flock list entry | ||
| 658 | * | ||
| 659 | * Unlink a lock from all lists and free the namespace reference, but don't | ||
| 660 | * free it yet. Wake up processes that are blocked waiting for this lock and | ||
| 661 | * notify the FS that the lock has been cleared. | ||
| 662 | * | ||
| 663 | * Must be called with the i_lock held! | ||
| 664 | */ | ||
| 665 | static void locks_unlink_lock(struct file_lock **thisfl_p) | ||
| 666 | { | 695 | { |
| 667 | struct file_lock *fl = *thisfl_p; | ||
| 668 | |||
| 669 | locks_delete_global_locks(fl); | 696 | locks_delete_global_locks(fl); |
| 670 | 697 | list_del_init(&fl->fl_list); | |
| 671 | *thisfl_p = fl->fl_next; | 698 | --*counter; |
| 672 | fl->fl_next = NULL; | ||
| 673 | |||
| 674 | if (fl->fl_nspid) { | 699 | if (fl->fl_nspid) { |
| 675 | put_pid(fl->fl_nspid); | 700 | put_pid(fl->fl_nspid); |
| 676 | fl->fl_nspid = NULL; | 701 | fl->fl_nspid = NULL; |
| 677 | } | 702 | } |
| 678 | |||
| 679 | locks_wake_up_blocks(fl); | 703 | locks_wake_up_blocks(fl); |
| 680 | } | 704 | } |
| 681 | 705 | ||
| 682 | /* | 706 | static void |
| 683 | * Unlink a lock from all lists and free it. | 707 | locks_delete_lock_ctx(struct file_lock *fl, int *counter, |
| 684 | * | 708 | struct list_head *dispose) |
| 685 | * Must be called with i_lock held! | ||
| 686 | */ | ||
| 687 | static void locks_delete_lock(struct file_lock **thisfl_p, | ||
| 688 | struct list_head *dispose) | ||
| 689 | { | 709 | { |
| 690 | struct file_lock *fl = *thisfl_p; | 710 | locks_unlink_lock_ctx(fl, counter); |
| 691 | |||
| 692 | locks_unlink_lock(thisfl_p); | ||
| 693 | if (dispose) | 711 | if (dispose) |
| 694 | list_add(&fl->fl_block, dispose); | 712 | list_add(&fl->fl_list, dispose); |
| 695 | else | 713 | else |
| 696 | locks_free_lock(fl); | 714 | locks_free_lock(fl); |
| 697 | } | 715 | } |
| @@ -746,22 +764,27 @@ void | |||
| 746 | posix_test_lock(struct file *filp, struct file_lock *fl) | 764 | posix_test_lock(struct file *filp, struct file_lock *fl) |
| 747 | { | 765 | { |
| 748 | struct file_lock *cfl; | 766 | struct file_lock *cfl; |
| 767 | struct file_lock_context *ctx; | ||
| 749 | struct inode *inode = file_inode(filp); | 768 | struct inode *inode = file_inode(filp); |
| 750 | 769 | ||
| 751 | spin_lock(&inode->i_lock); | 770 | ctx = inode->i_flctx; |
| 752 | for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) { | 771 | if (!ctx || list_empty_careful(&ctx->flc_posix)) { |
| 753 | if (!IS_POSIX(cfl)) | ||
| 754 | continue; | ||
| 755 | if (posix_locks_conflict(fl, cfl)) | ||
| 756 | break; | ||
| 757 | } | ||
| 758 | if (cfl) { | ||
| 759 | locks_copy_conflock(fl, cfl); | ||
| 760 | if (cfl->fl_nspid) | ||
| 761 | fl->fl_pid = pid_vnr(cfl->fl_nspid); | ||
| 762 | } else | ||
| 763 | fl->fl_type = F_UNLCK; | 772 | fl->fl_type = F_UNLCK; |
| 764 | spin_unlock(&inode->i_lock); | 773 | return; |
| 774 | } | ||
| 775 | |||
| 776 | spin_lock(&ctx->flc_lock); | ||
| 777 | list_for_each_entry(cfl, &ctx->flc_posix, fl_list) { | ||
| 778 | if (posix_locks_conflict(fl, cfl)) { | ||
| 779 | locks_copy_conflock(fl, cfl); | ||
| 780 | if (cfl->fl_nspid) | ||
| 781 | fl->fl_pid = pid_vnr(cfl->fl_nspid); | ||
| 782 | goto out; | ||
| 783 | } | ||
| 784 | } | ||
| 785 | fl->fl_type = F_UNLCK; | ||
| 786 | out: | ||
| 787 | spin_unlock(&ctx->flc_lock); | ||
| 765 | return; | 788 | return; |
| 766 | } | 789 | } |
| 767 | EXPORT_SYMBOL(posix_test_lock); | 790 | EXPORT_SYMBOL(posix_test_lock); |
| @@ -845,34 +868,34 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, | |||
| 845 | static int flock_lock_file(struct file *filp, struct file_lock *request) | 868 | static int flock_lock_file(struct file *filp, struct file_lock *request) |
| 846 | { | 869 | { |
| 847 | struct file_lock *new_fl = NULL; | 870 | struct file_lock *new_fl = NULL; |
| 848 | struct file_lock **before; | 871 | struct file_lock *fl; |
| 849 | struct inode * inode = file_inode(filp); | 872 | struct file_lock_context *ctx; |
| 873 | struct inode *inode = file_inode(filp); | ||
| 850 | int error = 0; | 874 | int error = 0; |
| 851 | int found = 0; | 875 | bool found = false; |
| 852 | LIST_HEAD(dispose); | 876 | LIST_HEAD(dispose); |
| 853 | 877 | ||
| 878 | ctx = locks_get_lock_context(inode); | ||
| 879 | if (!ctx) | ||
| 880 | return -ENOMEM; | ||
| 881 | |||
| 854 | if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { | 882 | if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { |
| 855 | new_fl = locks_alloc_lock(); | 883 | new_fl = locks_alloc_lock(); |
| 856 | if (!new_fl) | 884 | if (!new_fl) |
| 857 | return -ENOMEM; | 885 | return -ENOMEM; |
| 858 | } | 886 | } |
| 859 | 887 | ||
| 860 | spin_lock(&inode->i_lock); | 888 | spin_lock(&ctx->flc_lock); |
| 861 | if (request->fl_flags & FL_ACCESS) | 889 | if (request->fl_flags & FL_ACCESS) |
| 862 | goto find_conflict; | 890 | goto find_conflict; |
| 863 | 891 | ||
| 864 | for_each_lock(inode, before) { | 892 | list_for_each_entry(fl, &ctx->flc_flock, fl_list) { |
| 865 | struct file_lock *fl = *before; | ||
| 866 | if (IS_POSIX(fl)) | ||
| 867 | break; | ||
| 868 | if (IS_LEASE(fl)) | ||
| 869 | continue; | ||
| 870 | if (filp != fl->fl_file) | 893 | if (filp != fl->fl_file) |
| 871 | continue; | 894 | continue; |
| 872 | if (request->fl_type == fl->fl_type) | 895 | if (request->fl_type == fl->fl_type) |
| 873 | goto out; | 896 | goto out; |
| 874 | found = 1; | 897 | found = true; |
| 875 | locks_delete_lock(before, &dispose); | 898 | locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose); |
| 876 | break; | 899 | break; |
| 877 | } | 900 | } |
| 878 | 901 | ||
| @@ -887,18 +910,13 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) | |||
| 887 | * give it the opportunity to lock the file. | 910 | * give it the opportunity to lock the file. |
| 888 | */ | 911 | */ |
| 889 | if (found) { | 912 | if (found) { |
| 890 | spin_unlock(&inode->i_lock); | 913 | spin_unlock(&ctx->flc_lock); |
| 891 | cond_resched(); | 914 | cond_resched(); |
| 892 | spin_lock(&inode->i_lock); | 915 | spin_lock(&ctx->flc_lock); |
| 893 | } | 916 | } |
| 894 | 917 | ||
| 895 | find_conflict: | 918 | find_conflict: |
| 896 | for_each_lock(inode, before) { | 919 | list_for_each_entry(fl, &ctx->flc_flock, fl_list) { |
| 897 | struct file_lock *fl = *before; | ||
| 898 | if (IS_POSIX(fl)) | ||
| 899 | break; | ||
| 900 | if (IS_LEASE(fl)) | ||
| 901 | continue; | ||
| 902 | if (!flock_locks_conflict(request, fl)) | 920 | if (!flock_locks_conflict(request, fl)) |
| 903 | continue; | 921 | continue; |
| 904 | error = -EAGAIN; | 922 | error = -EAGAIN; |
| @@ -911,12 +929,12 @@ find_conflict: | |||
| 911 | if (request->fl_flags & FL_ACCESS) | 929 | if (request->fl_flags & FL_ACCESS) |
| 912 | goto out; | 930 | goto out; |
| 913 | locks_copy_lock(new_fl, request); | 931 | locks_copy_lock(new_fl, request); |
| 914 | locks_insert_lock(before, new_fl); | 932 | locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock); |
| 915 | new_fl = NULL; | 933 | new_fl = NULL; |
| 916 | error = 0; | 934 | error = 0; |
| 917 | 935 | ||
| 918 | out: | 936 | out: |
| 919 | spin_unlock(&inode->i_lock); | 937 | spin_unlock(&ctx->flc_lock); |
| 920 | if (new_fl) | 938 | if (new_fl) |
| 921 | locks_free_lock(new_fl); | 939 | locks_free_lock(new_fl); |
| 922 | locks_dispose_list(&dispose); | 940 | locks_dispose_list(&dispose); |
| @@ -925,16 +943,20 @@ out: | |||
| 925 | 943 | ||
| 926 | static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) | 944 | static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock) |
| 927 | { | 945 | { |
| 928 | struct file_lock *fl; | 946 | struct file_lock *fl, *tmp; |
| 929 | struct file_lock *new_fl = NULL; | 947 | struct file_lock *new_fl = NULL; |
| 930 | struct file_lock *new_fl2 = NULL; | 948 | struct file_lock *new_fl2 = NULL; |
| 931 | struct file_lock *left = NULL; | 949 | struct file_lock *left = NULL; |
| 932 | struct file_lock *right = NULL; | 950 | struct file_lock *right = NULL; |
| 933 | struct file_lock **before; | 951 | struct file_lock_context *ctx; |
| 934 | int error; | 952 | int error; |
| 935 | bool added = false; | 953 | bool added = false; |
| 936 | LIST_HEAD(dispose); | 954 | LIST_HEAD(dispose); |
| 937 | 955 | ||
| 956 | ctx = locks_get_lock_context(inode); | ||
| 957 | if (!ctx) | ||
| 958 | return -ENOMEM; | ||
| 959 | |||
| 938 | /* | 960 | /* |
| 939 | * We may need two file_lock structures for this operation, | 961 | * We may need two file_lock structures for this operation, |
| 940 | * so we get them in advance to avoid races. | 962 | * so we get them in advance to avoid races. |
| @@ -948,15 +970,14 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 948 | new_fl2 = locks_alloc_lock(); | 970 | new_fl2 = locks_alloc_lock(); |
| 949 | } | 971 | } |
| 950 | 972 | ||
| 951 | spin_lock(&inode->i_lock); | 973 | spin_lock(&ctx->flc_lock); |
| 952 | /* | 974 | /* |
| 953 | * New lock request. Walk all POSIX locks and look for conflicts. If | 975 | * New lock request. Walk all POSIX locks and look for conflicts. If |
| 954 | * there are any, either return error or put the request on the | 976 | * there are any, either return error or put the request on the |
| 955 | * blocker's list of waiters and the global blocked_hash. | 977 | * blocker's list of waiters and the global blocked_hash. |
| 956 | */ | 978 | */ |
| 957 | if (request->fl_type != F_UNLCK) { | 979 | if (request->fl_type != F_UNLCK) { |
| 958 | for_each_lock(inode, before) { | 980 | list_for_each_entry(fl, &ctx->flc_posix, fl_list) { |
| 959 | fl = *before; | ||
| 960 | if (!IS_POSIX(fl)) | 981 | if (!IS_POSIX(fl)) |
| 961 | continue; | 982 | continue; |
| 962 | if (!posix_locks_conflict(request, fl)) | 983 | if (!posix_locks_conflict(request, fl)) |
| @@ -986,29 +1007,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 986 | if (request->fl_flags & FL_ACCESS) | 1007 | if (request->fl_flags & FL_ACCESS) |
| 987 | goto out; | 1008 | goto out; |
| 988 | 1009 | ||
| 989 | /* | 1010 | /* Find the first old lock with the same owner as the new lock */ |
| 990 | * Find the first old lock with the same owner as the new lock. | 1011 | list_for_each_entry(fl, &ctx->flc_posix, fl_list) { |
| 991 | */ | 1012 | if (posix_same_owner(request, fl)) |
| 992 | 1013 | break; | |
| 993 | before = &inode->i_flock; | ||
| 994 | |||
| 995 | /* First skip locks owned by other processes. */ | ||
| 996 | while ((fl = *before) && (!IS_POSIX(fl) || | ||
| 997 | !posix_same_owner(request, fl))) { | ||
| 998 | before = &fl->fl_next; | ||
| 999 | } | 1014 | } |
| 1000 | 1015 | ||
| 1001 | /* Process locks with this owner. */ | 1016 | /* Process locks with this owner. */ |
| 1002 | while ((fl = *before) && posix_same_owner(request, fl)) { | 1017 | list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) { |
| 1003 | /* Detect adjacent or overlapping regions (if same lock type) | 1018 | if (!posix_same_owner(request, fl)) |
| 1004 | */ | 1019 | break; |
| 1020 | |||
| 1021 | /* Detect adjacent or overlapping regions (if same lock type) */ | ||
| 1005 | if (request->fl_type == fl->fl_type) { | 1022 | if (request->fl_type == fl->fl_type) { |
| 1006 | /* In all comparisons of start vs end, use | 1023 | /* In all comparisons of start vs end, use |
| 1007 | * "start - 1" rather than "end + 1". If end | 1024 | * "start - 1" rather than "end + 1". If end |
| 1008 | * is OFFSET_MAX, end + 1 will become negative. | 1025 | * is OFFSET_MAX, end + 1 will become negative. |
| 1009 | */ | 1026 | */ |
| 1010 | if (fl->fl_end < request->fl_start - 1) | 1027 | if (fl->fl_end < request->fl_start - 1) |
| 1011 | goto next_lock; | 1028 | continue; |
| 1012 | /* If the next lock in the list has entirely bigger | 1029 | /* If the next lock in the list has entirely bigger |
| 1013 | * addresses than the new one, insert the lock here. | 1030 | * addresses than the new one, insert the lock here. |
| 1014 | */ | 1031 | */ |
| @@ -1029,18 +1046,18 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 1029 | else | 1046 | else |
| 1030 | request->fl_end = fl->fl_end; | 1047 | request->fl_end = fl->fl_end; |
| 1031 | if (added) { | 1048 | if (added) { |
| 1032 | locks_delete_lock(before, &dispose); | 1049 | locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt, |
| 1050 | &dispose); | ||
| 1033 | continue; | 1051 | continue; |
| 1034 | } | 1052 | } |
| 1035 | request = fl; | 1053 | request = fl; |
| 1036 | added = true; | 1054 | added = true; |
| 1037 | } | 1055 | } else { |
| 1038 | else { | ||
| 1039 | /* Processing for different lock types is a bit | 1056 | /* Processing for different lock types is a bit |
| 1040 | * more complex. | 1057 | * more complex. |
| 1041 | */ | 1058 | */ |
| 1042 | if (fl->fl_end < request->fl_start) | 1059 | if (fl->fl_end < request->fl_start) |
| 1043 | goto next_lock; | 1060 | continue; |
| 1044 | if (fl->fl_start > request->fl_end) | 1061 | if (fl->fl_start > request->fl_end) |
| 1045 | break; | 1062 | break; |
| 1046 | if (request->fl_type == F_UNLCK) | 1063 | if (request->fl_type == F_UNLCK) |
| @@ -1059,7 +1076,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 1059 | * one (This may happen several times). | 1076 | * one (This may happen several times). |
| 1060 | */ | 1077 | */ |
| 1061 | if (added) { | 1078 | if (added) { |
| 1062 | locks_delete_lock(before, &dispose); | 1079 | locks_delete_lock_ctx(fl, |
| 1080 | &ctx->flc_posix_cnt, &dispose); | ||
| 1063 | continue; | 1081 | continue; |
| 1064 | } | 1082 | } |
| 1065 | /* | 1083 | /* |
| @@ -1075,15 +1093,13 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 1075 | locks_copy_lock(new_fl, request); | 1093 | locks_copy_lock(new_fl, request); |
| 1076 | request = new_fl; | 1094 | request = new_fl; |
| 1077 | new_fl = NULL; | 1095 | new_fl = NULL; |
| 1078 | locks_delete_lock(before, &dispose); | 1096 | locks_insert_lock_ctx(request, |
| 1079 | locks_insert_lock(before, request); | 1097 | &ctx->flc_posix_cnt, &fl->fl_list); |
| 1098 | locks_delete_lock_ctx(fl, | ||
| 1099 | &ctx->flc_posix_cnt, &dispose); | ||
| 1080 | added = true; | 1100 | added = true; |
| 1081 | } | 1101 | } |
| 1082 | } | 1102 | } |
| 1083 | /* Go on to next lock. | ||
| 1084 | */ | ||
| 1085 | next_lock: | ||
| 1086 | before = &fl->fl_next; | ||
| 1087 | } | 1103 | } |
| 1088 | 1104 | ||
| 1089 | /* | 1105 | /* |
| @@ -1108,7 +1124,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 1108 | goto out; | 1124 | goto out; |
| 1109 | } | 1125 | } |
| 1110 | locks_copy_lock(new_fl, request); | 1126 | locks_copy_lock(new_fl, request); |
| 1111 | locks_insert_lock(before, new_fl); | 1127 | locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt, |
| 1128 | &fl->fl_list); | ||
| 1112 | new_fl = NULL; | 1129 | new_fl = NULL; |
| 1113 | } | 1130 | } |
| 1114 | if (right) { | 1131 | if (right) { |
| @@ -1119,7 +1136,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 1119 | left = new_fl2; | 1136 | left = new_fl2; |
| 1120 | new_fl2 = NULL; | 1137 | new_fl2 = NULL; |
| 1121 | locks_copy_lock(left, right); | 1138 | locks_copy_lock(left, right); |
| 1122 | locks_insert_lock(before, left); | 1139 | locks_insert_lock_ctx(left, &ctx->flc_posix_cnt, |
| 1140 | &fl->fl_list); | ||
| 1123 | } | 1141 | } |
| 1124 | right->fl_start = request->fl_end + 1; | 1142 | right->fl_start = request->fl_end + 1; |
| 1125 | locks_wake_up_blocks(right); | 1143 | locks_wake_up_blocks(right); |
| @@ -1129,7 +1147,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str | |||
| 1129 | locks_wake_up_blocks(left); | 1147 | locks_wake_up_blocks(left); |
| 1130 | } | 1148 | } |
| 1131 | out: | 1149 | out: |
| 1132 | spin_unlock(&inode->i_lock); | 1150 | spin_unlock(&ctx->flc_lock); |
| 1133 | /* | 1151 | /* |
| 1134 | * Free any unused locks. | 1152 | * Free any unused locks. |
| 1135 | */ | 1153 | */ |
| @@ -1199,22 +1217,29 @@ EXPORT_SYMBOL(posix_lock_file_wait); | |||
| 1199 | */ | 1217 | */ |
| 1200 | int locks_mandatory_locked(struct file *file) | 1218 | int locks_mandatory_locked(struct file *file) |
| 1201 | { | 1219 | { |
| 1220 | int ret; | ||
| 1202 | struct inode *inode = file_inode(file); | 1221 | struct inode *inode = file_inode(file); |
| 1222 | struct file_lock_context *ctx; | ||
| 1203 | struct file_lock *fl; | 1223 | struct file_lock *fl; |
| 1204 | 1224 | ||
| 1225 | ctx = inode->i_flctx; | ||
| 1226 | if (!ctx || list_empty_careful(&ctx->flc_posix)) | ||
| 1227 | return 0; | ||
| 1228 | |||
| 1205 | /* | 1229 | /* |
| 1206 | * Search the lock list for this inode for any POSIX locks. | 1230 | * Search the lock list for this inode for any POSIX locks. |
| 1207 | */ | 1231 | */ |
| 1208 | spin_lock(&inode->i_lock); | 1232 | spin_lock(&ctx->flc_lock); |
| 1209 | for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { | 1233 | ret = 0; |
| 1210 | if (!IS_POSIX(fl)) | 1234 | list_for_each_entry(fl, &ctx->flc_posix, fl_list) { |
| 1211 | continue; | ||
| 1212 | if (fl->fl_owner != current->files && | 1235 | if (fl->fl_owner != current->files && |
| 1213 | fl->fl_owner != file) | 1236 | fl->fl_owner != file) { |
| 1237 | ret = -EAGAIN; | ||
| 1214 | break; | 1238 | break; |
| 1239 | } | ||
| 1215 | } | 1240 | } |
| 1216 | spin_unlock(&inode->i_lock); | 1241 | spin_unlock(&ctx->flc_lock); |
| 1217 | return fl ? -EAGAIN : 0; | 1242 | return ret; |
| 1218 | } | 1243 | } |
| 1219 | 1244 | ||
| 1220 | /** | 1245 | /** |
| @@ -1294,9 +1319,9 @@ static void lease_clear_pending(struct file_lock *fl, int arg) | |||
| 1294 | } | 1319 | } |
| 1295 | 1320 | ||
| 1296 | /* We already had a lease on this file; just change its type */ | 1321 | /* We already had a lease on this file; just change its type */ |
| 1297 | int lease_modify(struct file_lock **before, int arg, struct list_head *dispose) | 1322 | int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) |
| 1298 | { | 1323 | { |
| 1299 | struct file_lock *fl = *before; | 1324 | struct file_lock_context *flctx; |
| 1300 | int error = assign_type(fl, arg); | 1325 | int error = assign_type(fl, arg); |
| 1301 | 1326 | ||
| 1302 | if (error) | 1327 | if (error) |
| @@ -1306,6 +1331,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose) | |||
| 1306 | if (arg == F_UNLCK) { | 1331 | if (arg == F_UNLCK) { |
| 1307 | struct file *filp = fl->fl_file; | 1332 | struct file *filp = fl->fl_file; |
| 1308 | 1333 | ||
| 1334 | flctx = file_inode(filp)->i_flctx; | ||
| 1309 | f_delown(filp); | 1335 | f_delown(filp); |
| 1310 | filp->f_owner.signum = 0; | 1336 | filp->f_owner.signum = 0; |
| 1311 | fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); | 1337 | fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); |
| @@ -1313,7 +1339,7 @@ int lease_modify(struct file_lock **before, int arg, struct list_head *dispose) | |||
| 1313 | printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); | 1339 | printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); |
| 1314 | fl->fl_fasync = NULL; | 1340 | fl->fl_fasync = NULL; |
| 1315 | } | 1341 | } |
| 1316 | locks_delete_lock(before, dispose); | 1342 | locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose); |
| 1317 | } | 1343 | } |
| 1318 | return 0; | 1344 | return 0; |
| 1319 | } | 1345 | } |
| @@ -1329,25 +1355,24 @@ static bool past_time(unsigned long then) | |||
| 1329 | 1355 | ||
| 1330 | static void time_out_leases(struct inode *inode, struct list_head *dispose) | 1356 | static void time_out_leases(struct inode *inode, struct list_head *dispose) |
| 1331 | { | 1357 | { |
| 1332 | struct file_lock **before; | 1358 | struct file_lock_context *ctx = inode->i_flctx; |
| 1333 | struct file_lock *fl; | 1359 | struct file_lock *fl, *tmp; |
| 1334 | 1360 | ||
| 1335 | lockdep_assert_held(&inode->i_lock); | 1361 | lockdep_assert_held(&ctx->flc_lock); |
| 1336 | 1362 | ||
| 1337 | before = &inode->i_flock; | 1363 | list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { |
| 1338 | while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) { | ||
| 1339 | trace_time_out_leases(inode, fl); | 1364 | trace_time_out_leases(inode, fl); |
| 1340 | if (past_time(fl->fl_downgrade_time)) | 1365 | if (past_time(fl->fl_downgrade_time)) |
| 1341 | lease_modify(before, F_RDLCK, dispose); | 1366 | lease_modify(fl, F_RDLCK, dispose); |
| 1342 | if (past_time(fl->fl_break_time)) | 1367 | if (past_time(fl->fl_break_time)) |
| 1343 | lease_modify(before, F_UNLCK, dispose); | 1368 | lease_modify(fl, F_UNLCK, dispose); |
| 1344 | if (fl == *before) /* lease_modify may have freed fl */ | ||
| 1345 | before = &fl->fl_next; | ||
| 1346 | } | 1369 | } |
| 1347 | } | 1370 | } |
| 1348 | 1371 | ||
| 1349 | static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) | 1372 | static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) |
| 1350 | { | 1373 | { |
| 1374 | if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) | ||
| 1375 | return false; | ||
| 1351 | if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) | 1376 | if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) |
| 1352 | return false; | 1377 | return false; |
| 1353 | return locks_conflict(breaker, lease); | 1378 | return locks_conflict(breaker, lease); |
| @@ -1356,11 +1381,12 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) | |||
| 1356 | static bool | 1381 | static bool |
| 1357 | any_leases_conflict(struct inode *inode, struct file_lock *breaker) | 1382 | any_leases_conflict(struct inode *inode, struct file_lock *breaker) |
| 1358 | { | 1383 | { |
| 1384 | struct file_lock_context *ctx = inode->i_flctx; | ||
| 1359 | struct file_lock *fl; | 1385 | struct file_lock *fl; |
| 1360 | 1386 | ||
| 1361 | lockdep_assert_held(&inode->i_lock); | 1387 | lockdep_assert_held(&ctx->flc_lock); |
| 1362 | 1388 | ||
| 1363 | for (fl = inode->i_flock ; fl && IS_LEASE(fl); fl = fl->fl_next) { | 1389 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { |
| 1364 | if (leases_conflict(fl, breaker)) | 1390 | if (leases_conflict(fl, breaker)) |
| 1365 | return true; | 1391 | return true; |
| 1366 | } | 1392 | } |
| @@ -1384,7 +1410,8 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
| 1384 | { | 1410 | { |
| 1385 | int error = 0; | 1411 | int error = 0; |
| 1386 | struct file_lock *new_fl; | 1412 | struct file_lock *new_fl; |
| 1387 | struct file_lock *fl, **before; | 1413 | struct file_lock_context *ctx = inode->i_flctx; |
| 1414 | struct file_lock *fl; | ||
| 1388 | unsigned long break_time; | 1415 | unsigned long break_time; |
| 1389 | int want_write = (mode & O_ACCMODE) != O_RDONLY; | 1416 | int want_write = (mode & O_ACCMODE) != O_RDONLY; |
| 1390 | LIST_HEAD(dispose); | 1417 | LIST_HEAD(dispose); |
| @@ -1394,7 +1421,13 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
| 1394 | return PTR_ERR(new_fl); | 1421 | return PTR_ERR(new_fl); |
| 1395 | new_fl->fl_flags = type; | 1422 | new_fl->fl_flags = type; |
| 1396 | 1423 | ||
| 1397 | spin_lock(&inode->i_lock); | 1424 | /* typically we will check that ctx is non-NULL before calling */ |
| 1425 | if (!ctx) { | ||
| 1426 | WARN_ON_ONCE(1); | ||
| 1427 | return error; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | spin_lock(&ctx->flc_lock); | ||
| 1398 | 1431 | ||
| 1399 | time_out_leases(inode, &dispose); | 1432 | time_out_leases(inode, &dispose); |
| 1400 | 1433 | ||
| @@ -1408,9 +1441,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
| 1408 | break_time++; /* so that 0 means no break time */ | 1441 | break_time++; /* so that 0 means no break time */ |
| 1409 | } | 1442 | } |
| 1410 | 1443 | ||
| 1411 | for (before = &inode->i_flock; | 1444 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { |
| 1412 | ((fl = *before) != NULL) && IS_LEASE(fl); | ||
| 1413 | before = &fl->fl_next) { | ||
| 1414 | if (!leases_conflict(fl, new_fl)) | 1445 | if (!leases_conflict(fl, new_fl)) |
| 1415 | continue; | 1446 | continue; |
| 1416 | if (want_write) { | 1447 | if (want_write) { |
| @@ -1419,17 +1450,17 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
| 1419 | fl->fl_flags |= FL_UNLOCK_PENDING; | 1450 | fl->fl_flags |= FL_UNLOCK_PENDING; |
| 1420 | fl->fl_break_time = break_time; | 1451 | fl->fl_break_time = break_time; |
| 1421 | } else { | 1452 | } else { |
| 1422 | if (lease_breaking(inode->i_flock)) | 1453 | if (lease_breaking(fl)) |
| 1423 | continue; | 1454 | continue; |
| 1424 | fl->fl_flags |= FL_DOWNGRADE_PENDING; | 1455 | fl->fl_flags |= FL_DOWNGRADE_PENDING; |
| 1425 | fl->fl_downgrade_time = break_time; | 1456 | fl->fl_downgrade_time = break_time; |
| 1426 | } | 1457 | } |
| 1427 | if (fl->fl_lmops->lm_break(fl)) | 1458 | if (fl->fl_lmops->lm_break(fl)) |
| 1428 | locks_delete_lock(before, &dispose); | 1459 | locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt, |
| 1460 | &dispose); | ||
| 1429 | } | 1461 | } |
| 1430 | 1462 | ||
| 1431 | fl = inode->i_flock; | 1463 | if (list_empty(&ctx->flc_lease)) |
| 1432 | if (!fl || !IS_LEASE(fl)) | ||
| 1433 | goto out; | 1464 | goto out; |
| 1434 | 1465 | ||
| 1435 | if (mode & O_NONBLOCK) { | 1466 | if (mode & O_NONBLOCK) { |
| @@ -1439,18 +1470,19 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) | |||
| 1439 | } | 1470 | } |
| 1440 | 1471 | ||
| 1441 | restart: | 1472 | restart: |
| 1442 | break_time = inode->i_flock->fl_break_time; | 1473 | fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list); |
| 1474 | break_time = fl->fl_break_time; | ||
| 1443 | if (break_time != 0) | 1475 | if (break_time != 0) |
| 1444 | break_time -= jiffies; | 1476 | break_time -= jiffies; |
| 1445 | if (break_time == 0) | 1477 | if (break_time == 0) |
| 1446 | break_time++; | 1478 | break_time++; |
| 1447 | locks_insert_block(inode->i_flock, new_fl); | 1479 | locks_insert_block(fl, new_fl); |
| 1448 | trace_break_lease_block(inode, new_fl); | 1480 | trace_break_lease_block(inode, new_fl); |
| 1449 | spin_unlock(&inode->i_lock); | 1481 | spin_unlock(&ctx->flc_lock); |
| 1450 | locks_dispose_list(&dispose); | 1482 | locks_dispose_list(&dispose); |
| 1451 | error = wait_event_interruptible_timeout(new_fl->fl_wait, | 1483 | error = wait_event_interruptible_timeout(new_fl->fl_wait, |
| 1452 | !new_fl->fl_next, break_time); | 1484 | !new_fl->fl_next, break_time); |
| 1453 | spin_lock(&inode->i_lock); | 1485 | spin_lock(&ctx->flc_lock); |
| 1454 | trace_break_lease_unblock(inode, new_fl); | 1486 | trace_break_lease_unblock(inode, new_fl); |
| 1455 | locks_delete_block(new_fl); | 1487 | locks_delete_block(new_fl); |
| 1456 | if (error >= 0) { | 1488 | if (error >= 0) { |
| @@ -1462,12 +1494,10 @@ restart: | |||
| 1462 | time_out_leases(inode, &dispose); | 1494 | time_out_leases(inode, &dispose); |
| 1463 | if (any_leases_conflict(inode, new_fl)) | 1495 | if (any_leases_conflict(inode, new_fl)) |
| 1464 | goto restart; | 1496 | goto restart; |
| 1465 | |||
| 1466 | error = 0; | 1497 | error = 0; |
| 1467 | } | 1498 | } |
| 1468 | |||
| 1469 | out: | 1499 | out: |
| 1470 | spin_unlock(&inode->i_lock); | 1500 | spin_unlock(&ctx->flc_lock); |
| 1471 | locks_dispose_list(&dispose); | 1501 | locks_dispose_list(&dispose); |
| 1472 | locks_free_lock(new_fl); | 1502 | locks_free_lock(new_fl); |
| 1473 | return error; | 1503 | return error; |
| @@ -1487,14 +1517,18 @@ EXPORT_SYMBOL(__break_lease); | |||
| 1487 | void lease_get_mtime(struct inode *inode, struct timespec *time) | 1517 | void lease_get_mtime(struct inode *inode, struct timespec *time) |
| 1488 | { | 1518 | { |
| 1489 | bool has_lease = false; | 1519 | bool has_lease = false; |
| 1490 | struct file_lock *flock; | 1520 | struct file_lock_context *ctx = inode->i_flctx; |
| 1521 | struct file_lock *fl; | ||
| 1491 | 1522 | ||
| 1492 | if (inode->i_flock) { | 1523 | if (ctx && !list_empty_careful(&ctx->flc_lease)) { |
| 1493 | spin_lock(&inode->i_lock); | 1524 | spin_lock(&ctx->flc_lock); |
| 1494 | flock = inode->i_flock; | 1525 | if (!list_empty(&ctx->flc_lease)) { |
| 1495 | if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK)) | 1526 | fl = list_first_entry(&ctx->flc_lease, |
| 1496 | has_lease = true; | 1527 | struct file_lock, fl_list); |
| 1497 | spin_unlock(&inode->i_lock); | 1528 | if (fl->fl_type == F_WRLCK) |
| 1529 | has_lease = true; | ||
| 1530 | } | ||
| 1531 | spin_unlock(&ctx->flc_lock); | ||
| 1498 | } | 1532 | } |
| 1499 | 1533 | ||
| 1500 | if (has_lease) | 1534 | if (has_lease) |
| @@ -1532,20 +1566,22 @@ int fcntl_getlease(struct file *filp) | |||
| 1532 | { | 1566 | { |
| 1533 | struct file_lock *fl; | 1567 | struct file_lock *fl; |
| 1534 | struct inode *inode = file_inode(filp); | 1568 | struct inode *inode = file_inode(filp); |
| 1569 | struct file_lock_context *ctx = inode->i_flctx; | ||
| 1535 | int type = F_UNLCK; | 1570 | int type = F_UNLCK; |
| 1536 | LIST_HEAD(dispose); | 1571 | LIST_HEAD(dispose); |
| 1537 | 1572 | ||
| 1538 | spin_lock(&inode->i_lock); | 1573 | if (ctx && !list_empty_careful(&ctx->flc_lease)) { |
| 1539 | time_out_leases(file_inode(filp), &dispose); | 1574 | spin_lock(&ctx->flc_lock); |
| 1540 | for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl); | 1575 | time_out_leases(file_inode(filp), &dispose); |
| 1541 | fl = fl->fl_next) { | 1576 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { |
| 1542 | if (fl->fl_file == filp) { | 1577 | if (fl->fl_file != filp) |
| 1578 | continue; | ||
| 1543 | type = target_leasetype(fl); | 1579 | type = target_leasetype(fl); |
| 1544 | break; | 1580 | break; |
| 1545 | } | 1581 | } |
| 1582 | spin_unlock(&ctx->flc_lock); | ||
| 1583 | locks_dispose_list(&dispose); | ||
| 1546 | } | 1584 | } |
| 1547 | spin_unlock(&inode->i_lock); | ||
| 1548 | locks_dispose_list(&dispose); | ||
| 1549 | return type; | 1585 | return type; |
| 1550 | } | 1586 | } |
| 1551 | 1587 | ||
| @@ -1560,11 +1596,14 @@ int fcntl_getlease(struct file *filp) | |||
| 1560 | * conflict with the lease we're trying to set. | 1596 | * conflict with the lease we're trying to set. |
| 1561 | */ | 1597 | */ |
| 1562 | static int | 1598 | static int |
| 1563 | check_conflicting_open(const struct dentry *dentry, const long arg) | 1599 | check_conflicting_open(const struct dentry *dentry, const long arg, int flags) |
| 1564 | { | 1600 | { |
| 1565 | int ret = 0; | 1601 | int ret = 0; |
| 1566 | struct inode *inode = dentry->d_inode; | 1602 | struct inode *inode = dentry->d_inode; |
| 1567 | 1603 | ||
| 1604 | if (flags & FL_LAYOUT) | ||
| 1605 | return 0; | ||
| 1606 | |||
| 1568 | if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) | 1607 | if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) |
| 1569 | return -EAGAIN; | 1608 | return -EAGAIN; |
| 1570 | 1609 | ||
| @@ -1578,9 +1617,10 @@ check_conflicting_open(const struct dentry *dentry, const long arg) | |||
| 1578 | static int | 1617 | static int |
| 1579 | generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv) | 1618 | generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv) |
| 1580 | { | 1619 | { |
| 1581 | struct file_lock *fl, **before, **my_before = NULL, *lease; | 1620 | struct file_lock *fl, *my_fl = NULL, *lease; |
| 1582 | struct dentry *dentry = filp->f_path.dentry; | 1621 | struct dentry *dentry = filp->f_path.dentry; |
| 1583 | struct inode *inode = dentry->d_inode; | 1622 | struct inode *inode = dentry->d_inode; |
| 1623 | struct file_lock_context *ctx; | ||
| 1584 | bool is_deleg = (*flp)->fl_flags & FL_DELEG; | 1624 | bool is_deleg = (*flp)->fl_flags & FL_DELEG; |
| 1585 | int error; | 1625 | int error; |
| 1586 | LIST_HEAD(dispose); | 1626 | LIST_HEAD(dispose); |
| @@ -1588,6 +1628,10 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
| 1588 | lease = *flp; | 1628 | lease = *flp; |
| 1589 | trace_generic_add_lease(inode, lease); | 1629 | trace_generic_add_lease(inode, lease); |
| 1590 | 1630 | ||
| 1631 | ctx = locks_get_lock_context(inode); | ||
| 1632 | if (!ctx) | ||
| 1633 | return -ENOMEM; | ||
| 1634 | |||
| 1591 | /* | 1635 | /* |
| 1592 | * In the delegation case we need mutual exclusion with | 1636 | * In the delegation case we need mutual exclusion with |
| 1593 | * a number of operations that take the i_mutex. We trylock | 1637 | * a number of operations that take the i_mutex. We trylock |
| @@ -1606,9 +1650,9 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
| 1606 | return -EINVAL; | 1650 | return -EINVAL; |
| 1607 | } | 1651 | } |
| 1608 | 1652 | ||
| 1609 | spin_lock(&inode->i_lock); | 1653 | spin_lock(&ctx->flc_lock); |
| 1610 | time_out_leases(inode, &dispose); | 1654 | time_out_leases(inode, &dispose); |
| 1611 | error = check_conflicting_open(dentry, arg); | 1655 | error = check_conflicting_open(dentry, arg, lease->fl_flags); |
| 1612 | if (error) | 1656 | if (error) |
| 1613 | goto out; | 1657 | goto out; |
| 1614 | 1658 | ||
| @@ -1621,13 +1665,13 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
| 1621 | * except for this filp. | 1665 | * except for this filp. |
| 1622 | */ | 1666 | */ |
| 1623 | error = -EAGAIN; | 1667 | error = -EAGAIN; |
| 1624 | for (before = &inode->i_flock; | 1668 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { |
| 1625 | ((fl = *before) != NULL) && IS_LEASE(fl); | 1669 | if (fl->fl_file == filp && |
| 1626 | before = &fl->fl_next) { | 1670 | fl->fl_owner == lease->fl_owner) { |
| 1627 | if (fl->fl_file == filp) { | 1671 | my_fl = fl; |
| 1628 | my_before = before; | ||
| 1629 | continue; | 1672 | continue; |
| 1630 | } | 1673 | } |
| 1674 | |||
| 1631 | /* | 1675 | /* |
| 1632 | * No exclusive leases if someone else has a lease on | 1676 | * No exclusive leases if someone else has a lease on |
| 1633 | * this file: | 1677 | * this file: |
| @@ -1642,9 +1686,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
| 1642 | goto out; | 1686 | goto out; |
| 1643 | } | 1687 | } |
| 1644 | 1688 | ||
| 1645 | if (my_before != NULL) { | 1689 | if (my_fl != NULL) { |
| 1646 | lease = *my_before; | 1690 | error = lease->fl_lmops->lm_change(my_fl, arg, &dispose); |
| 1647 | error = lease->fl_lmops->lm_change(my_before, arg, &dispose); | ||
| 1648 | if (error) | 1691 | if (error) |
| 1649 | goto out; | 1692 | goto out; |
| 1650 | goto out_setup; | 1693 | goto out_setup; |
| @@ -1654,7 +1697,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
| 1654 | if (!leases_enable) | 1697 | if (!leases_enable) |
| 1655 | goto out; | 1698 | goto out; |
| 1656 | 1699 | ||
| 1657 | locks_insert_lock(before, lease); | 1700 | locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease); |
| 1658 | /* | 1701 | /* |
| 1659 | * The check in break_lease() is lockless. It's possible for another | 1702 | * The check in break_lease() is lockless. It's possible for another |
| 1660 | * open to race in after we did the earlier check for a conflicting | 1703 | * open to race in after we did the earlier check for a conflicting |
| @@ -1665,46 +1708,51 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr | |||
| 1665 | * precedes these checks. | 1708 | * precedes these checks. |
| 1666 | */ | 1709 | */ |
| 1667 | smp_mb(); | 1710 | smp_mb(); |
| 1668 | error = check_conflicting_open(dentry, arg); | 1711 | error = check_conflicting_open(dentry, arg, lease->fl_flags); |
| 1669 | if (error) | 1712 | if (error) { |
| 1670 | goto out_unlink; | 1713 | locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt); |
| 1714 | goto out; | ||
| 1715 | } | ||
| 1671 | 1716 | ||
| 1672 | out_setup: | 1717 | out_setup: |
| 1673 | if (lease->fl_lmops->lm_setup) | 1718 | if (lease->fl_lmops->lm_setup) |
| 1674 | lease->fl_lmops->lm_setup(lease, priv); | 1719 | lease->fl_lmops->lm_setup(lease, priv); |
| 1675 | out: | 1720 | out: |
| 1676 | spin_unlock(&inode->i_lock); | 1721 | spin_unlock(&ctx->flc_lock); |
| 1677 | locks_dispose_list(&dispose); | 1722 | locks_dispose_list(&dispose); |
| 1678 | if (is_deleg) | 1723 | if (is_deleg) |
| 1679 | mutex_unlock(&inode->i_mutex); | 1724 | mutex_unlock(&inode->i_mutex); |
| 1680 | if (!error && !my_before) | 1725 | if (!error && !my_fl) |
| 1681 | *flp = NULL; | 1726 | *flp = NULL; |
| 1682 | return error; | 1727 | return error; |
| 1683 | out_unlink: | ||
| 1684 | locks_unlink_lock(before); | ||
| 1685 | goto out; | ||
| 1686 | } | 1728 | } |
| 1687 | 1729 | ||
| 1688 | static int generic_delete_lease(struct file *filp) | 1730 | static int generic_delete_lease(struct file *filp, void *owner) |
| 1689 | { | 1731 | { |
| 1690 | int error = -EAGAIN; | 1732 | int error = -EAGAIN; |
| 1691 | struct file_lock *fl, **before; | 1733 | struct file_lock *fl, *victim = NULL; |
| 1692 | struct dentry *dentry = filp->f_path.dentry; | 1734 | struct dentry *dentry = filp->f_path.dentry; |
| 1693 | struct inode *inode = dentry->d_inode; | 1735 | struct inode *inode = dentry->d_inode; |
| 1736 | struct file_lock_context *ctx = inode->i_flctx; | ||
| 1694 | LIST_HEAD(dispose); | 1737 | LIST_HEAD(dispose); |
| 1695 | 1738 | ||
| 1696 | spin_lock(&inode->i_lock); | 1739 | if (!ctx) { |
| 1697 | time_out_leases(inode, &dispose); | 1740 | trace_generic_delete_lease(inode, NULL); |
| 1698 | for (before = &inode->i_flock; | 1741 | return error; |
| 1699 | ((fl = *before) != NULL) && IS_LEASE(fl); | 1742 | } |
| 1700 | before = &fl->fl_next) { | 1743 | |
| 1701 | if (fl->fl_file == filp) | 1744 | spin_lock(&ctx->flc_lock); |
| 1745 | list_for_each_entry(fl, &ctx->flc_lease, fl_list) { | ||
| 1746 | if (fl->fl_file == filp && | ||
| 1747 | fl->fl_owner == owner) { | ||
| 1748 | victim = fl; | ||
| 1702 | break; | 1749 | break; |
| 1750 | } | ||
| 1703 | } | 1751 | } |
| 1704 | trace_generic_delete_lease(inode, fl); | 1752 | trace_generic_delete_lease(inode, fl); |
| 1705 | if (fl) | 1753 | if (victim) |
| 1706 | error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); | 1754 | error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); |
| 1707 | spin_unlock(&inode->i_lock); | 1755 | spin_unlock(&ctx->flc_lock); |
| 1708 | locks_dispose_list(&dispose); | 1756 | locks_dispose_list(&dispose); |
| 1709 | return error; | 1757 | return error; |
| 1710 | } | 1758 | } |
| @@ -1737,13 +1785,14 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp, | |||
| 1737 | 1785 | ||
| 1738 | switch (arg) { | 1786 | switch (arg) { |
| 1739 | case F_UNLCK: | 1787 | case F_UNLCK: |
| 1740 | return generic_delete_lease(filp); | 1788 | return generic_delete_lease(filp, *priv); |
| 1741 | case F_RDLCK: | 1789 | case F_RDLCK: |
| 1742 | case F_WRLCK: | 1790 | case F_WRLCK: |
| 1743 | if (!(*flp)->fl_lmops->lm_break) { | 1791 | if (!(*flp)->fl_lmops->lm_break) { |
| 1744 | WARN_ON_ONCE(1); | 1792 | WARN_ON_ONCE(1); |
| 1745 | return -ENOLCK; | 1793 | return -ENOLCK; |
| 1746 | } | 1794 | } |
| 1795 | |||
| 1747 | return generic_add_lease(filp, arg, flp, priv); | 1796 | return generic_add_lease(filp, arg, flp, priv); |
| 1748 | default: | 1797 | default: |
| 1749 | return -EINVAL; | 1798 | return -EINVAL; |
| @@ -1816,7 +1865,7 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) | |||
| 1816 | int fcntl_setlease(unsigned int fd, struct file *filp, long arg) | 1865 | int fcntl_setlease(unsigned int fd, struct file *filp, long arg) |
| 1817 | { | 1866 | { |
| 1818 | if (arg == F_UNLCK) | 1867 | if (arg == F_UNLCK) |
| 1819 | return vfs_setlease(filp, F_UNLCK, NULL, NULL); | 1868 | return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp); |
| 1820 | return do_fcntl_add_lease(fd, filp, arg); | 1869 | return do_fcntl_add_lease(fd, filp, arg); |
| 1821 | } | 1870 | } |
| 1822 | 1871 | ||
| @@ -2171,7 +2220,7 @@ again: | |||
| 2171 | */ | 2220 | */ |
| 2172 | /* | 2221 | /* |
| 2173 | * we need that spin_lock here - it prevents reordering between | 2222 | * we need that spin_lock here - it prevents reordering between |
| 2174 | * update of inode->i_flock and check for it done in close(). | 2223 | * update of i_flctx->flc_posix and check for it done in close(). |
| 2175 | * rcu_read_lock() wouldn't do. | 2224 | * rcu_read_lock() wouldn't do. |
| 2176 | */ | 2225 | */ |
| 2177 | spin_lock(¤t->files->file_lock); | 2226 | spin_lock(¤t->files->file_lock); |
| @@ -2331,13 +2380,14 @@ out: | |||
| 2331 | void locks_remove_posix(struct file *filp, fl_owner_t owner) | 2380 | void locks_remove_posix(struct file *filp, fl_owner_t owner) |
| 2332 | { | 2381 | { |
| 2333 | struct file_lock lock; | 2382 | struct file_lock lock; |
| 2383 | struct file_lock_context *ctx = file_inode(filp)->i_flctx; | ||
| 2334 | 2384 | ||
| 2335 | /* | 2385 | /* |
| 2336 | * If there are no locks held on this file, we don't need to call | 2386 | * If there are no locks held on this file, we don't need to call |
| 2337 | * posix_lock_file(). Another process could be setting a lock on this | 2387 | * posix_lock_file(). Another process could be setting a lock on this |
| 2338 | * file at the same time, but we wouldn't remove that lock anyway. | 2388 | * file at the same time, but we wouldn't remove that lock anyway. |
| 2339 | */ | 2389 | */ |
| 2340 | if (!file_inode(filp)->i_flock) | 2390 | if (!ctx || list_empty(&ctx->flc_posix)) |
| 2341 | return; | 2391 | return; |
| 2342 | 2392 | ||
| 2343 | lock.fl_type = F_UNLCK; | 2393 | lock.fl_type = F_UNLCK; |
| @@ -2358,67 +2408,67 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner) | |||
| 2358 | 2408 | ||
| 2359 | EXPORT_SYMBOL(locks_remove_posix); | 2409 | EXPORT_SYMBOL(locks_remove_posix); |
| 2360 | 2410 | ||
| 2411 | /* The i_flctx must be valid when calling into here */ | ||
| 2412 | static void | ||
| 2413 | locks_remove_flock(struct file *filp) | ||
| 2414 | { | ||
| 2415 | struct file_lock fl = { | ||
| 2416 | .fl_owner = filp, | ||
| 2417 | .fl_pid = current->tgid, | ||
| 2418 | .fl_file = filp, | ||
| 2419 | .fl_flags = FL_FLOCK, | ||
| 2420 | .fl_type = F_UNLCK, | ||
| 2421 | .fl_end = OFFSET_MAX, | ||
| 2422 | }; | ||
| 2423 | struct file_lock_context *flctx = file_inode(filp)->i_flctx; | ||
| 2424 | |||
| 2425 | if (list_empty(&flctx->flc_flock)) | ||
| 2426 | return; | ||
| 2427 | |||
| 2428 | if (filp->f_op->flock) | ||
| 2429 | filp->f_op->flock(filp, F_SETLKW, &fl); | ||
| 2430 | else | ||
| 2431 | flock_lock_file(filp, &fl); | ||
| 2432 | |||
| 2433 | if (fl.fl_ops && fl.fl_ops->fl_release_private) | ||
| 2434 | fl.fl_ops->fl_release_private(&fl); | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | /* The i_flctx must be valid when calling into here */ | ||
| 2438 | static void | ||
| 2439 | locks_remove_lease(struct file *filp) | ||
| 2440 | { | ||
| 2441 | struct inode *inode = file_inode(filp); | ||
| 2442 | struct file_lock_context *ctx = inode->i_flctx; | ||
| 2443 | struct file_lock *fl, *tmp; | ||
| 2444 | LIST_HEAD(dispose); | ||
| 2445 | |||
| 2446 | if (list_empty(&ctx->flc_lease)) | ||
| 2447 | return; | ||
| 2448 | |||
| 2449 | spin_lock(&ctx->flc_lock); | ||
| 2450 | list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) | ||
| 2451 | lease_modify(fl, F_UNLCK, &dispose); | ||
| 2452 | spin_unlock(&ctx->flc_lock); | ||
| 2453 | locks_dispose_list(&dispose); | ||
| 2454 | } | ||
| 2455 | |||
| 2361 | /* | 2456 | /* |
| 2362 | * This function is called on the last close of an open file. | 2457 | * This function is called on the last close of an open file. |
| 2363 | */ | 2458 | */ |
| 2364 | void locks_remove_file(struct file *filp) | 2459 | void locks_remove_file(struct file *filp) |
| 2365 | { | 2460 | { |
| 2366 | struct inode * inode = file_inode(filp); | 2461 | if (!file_inode(filp)->i_flctx) |
| 2367 | struct file_lock *fl; | ||
| 2368 | struct file_lock **before; | ||
| 2369 | LIST_HEAD(dispose); | ||
| 2370 | |||
| 2371 | if (!inode->i_flock) | ||
| 2372 | return; | 2462 | return; |
| 2373 | 2463 | ||
| 2464 | /* remove any OFD locks */ | ||
| 2374 | locks_remove_posix(filp, filp); | 2465 | locks_remove_posix(filp, filp); |
| 2375 | 2466 | ||
| 2376 | if (filp->f_op->flock) { | 2467 | /* remove flock locks */ |
| 2377 | struct file_lock fl = { | 2468 | locks_remove_flock(filp); |
| 2378 | .fl_owner = filp, | ||
| 2379 | .fl_pid = current->tgid, | ||
| 2380 | .fl_file = filp, | ||
| 2381 | .fl_flags = FL_FLOCK, | ||
| 2382 | .fl_type = F_UNLCK, | ||
| 2383 | .fl_end = OFFSET_MAX, | ||
| 2384 | }; | ||
| 2385 | filp->f_op->flock(filp, F_SETLKW, &fl); | ||
| 2386 | if (fl.fl_ops && fl.fl_ops->fl_release_private) | ||
| 2387 | fl.fl_ops->fl_release_private(&fl); | ||
| 2388 | } | ||
| 2389 | |||
| 2390 | spin_lock(&inode->i_lock); | ||
| 2391 | before = &inode->i_flock; | ||
| 2392 | 2469 | ||
| 2393 | while ((fl = *before) != NULL) { | 2470 | /* remove any leases */ |
| 2394 | if (fl->fl_file == filp) { | 2471 | locks_remove_lease(filp); |
| 2395 | if (IS_LEASE(fl)) { | ||
| 2396 | lease_modify(before, F_UNLCK, &dispose); | ||
| 2397 | continue; | ||
| 2398 | } | ||
| 2399 | |||
| 2400 | /* | ||
| 2401 | * There's a leftover lock on the list of a type that | ||
| 2402 | * we didn't expect to see. Most likely a classic | ||
| 2403 | * POSIX lock that ended up not getting released | ||
| 2404 | * properly, or that raced onto the list somehow. Log | ||
| 2405 | * some info about it and then just remove it from | ||
| 2406 | * the list. | ||
| 2407 | */ | ||
| 2408 | WARN(!IS_FLOCK(fl), | ||
| 2409 | "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n", | ||
| 2410 | MAJOR(inode->i_sb->s_dev), | ||
| 2411 | MINOR(inode->i_sb->s_dev), inode->i_ino, | ||
| 2412 | fl->fl_type, fl->fl_flags, | ||
| 2413 | fl->fl_start, fl->fl_end); | ||
| 2414 | |||
| 2415 | locks_delete_lock(before, &dispose); | ||
| 2416 | continue; | ||
| 2417 | } | ||
| 2418 | before = &fl->fl_next; | ||
| 2419 | } | ||
| 2420 | spin_unlock(&inode->i_lock); | ||
| 2421 | locks_dispose_list(&dispose); | ||
| 2422 | } | 2472 | } |
| 2423 | 2473 | ||
| 2424 | /** | 2474 | /** |
| @@ -2621,6 +2671,9 @@ static int __init filelock_init(void) | |||
| 2621 | { | 2671 | { |
| 2622 | int i; | 2672 | int i; |
| 2623 | 2673 | ||
| 2674 | flctx_cache = kmem_cache_create("file_lock_ctx", | ||
| 2675 | sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL); | ||
| 2676 | |||
| 2624 | filelock_cache = kmem_cache_create("file_lock_cache", | 2677 | filelock_cache = kmem_cache_create("file_lock_cache", |
| 2625 | sizeof(struct file_lock), 0, SLAB_PANIC, NULL); | 2678 | sizeof(struct file_lock), 0, SLAB_PANIC, NULL); |
| 2626 | 2679 | ||
