author     Pavel Shilovsky <piastry@etersoft.ru>      2012-03-05 01:39:20 -0500
committer  Steve French <sfrench@us.ibm.com>          2012-03-06 22:50:15 -0500
commit     d5751469f210d2149cc2159ffff66cbeef6da3f2 (patch)
tree       7d5e9af79050cfe56e4412d6ebdd90dfce55c8d9 /fs/cifs/file.c
parent     b0f8ef202ec7f07ba9bd93150d54ef4327851422 (diff)
CIFS: Do not kmalloc under the flocks spinlock
Reorganize the code so that the memory is allocated before the spinlocked loop,
rather than calling kmalloc() while the lock is held.

Cc: stable@vger.kernel.org
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru>
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--  fs/cifs/file.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 56 insertions(+), 13 deletions(-)
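The patch applies the standard pattern for avoiding allocations under a spinlock: take the lock once to count the items, drop it and allocate that many entries with GFP_KERNEL, then retake the lock and fill the pre-allocated entries on a second pass. The following is a minimal userspace sketch of that pattern, not CIFS code: the item/snapshot types and snapshot_items() are made-up names, a pthread spinlock stands in for lock_flocks(), and plain malloc() stands in for kmalloc(..., GFP_KERNEL). As in the patch, the caller must ensure the item set cannot grow between the counting and copying passes (cifs_push_posix_locks() relies on cinode->lock_mutex for that).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {				/* stands in for struct file_lock */
	int value;
	struct item *next;
};

struct snapshot {			/* stands in for struct lock_to_push */
	int value;
	struct snapshot *next;
};

static pthread_spinlock_t list_lock;	/* stands in for lock_flocks() */

static struct snapshot *snapshot_items(struct item *head)
{
	struct snapshot *out = NULL, *s, *cur;
	struct item *it;
	unsigned int count = 0, i;

	/* Pass 1: count the items; nothing is allocated under the lock. */
	pthread_spin_lock(&list_lock);
	for (it = head; it; it = it->next)
		count++;
	pthread_spin_unlock(&list_lock);

	/*
	 * Allocate every entry with the lock dropped (the patch uses
	 * kmalloc(..., GFP_KERNEL) here, which may sleep).
	 */
	for (i = 0; i < count; i++) {
		s = malloc(sizeof(*s));
		if (!s)
			goto err_out;
		s->next = out;
		out = s;
	}

	/*
	 * Pass 2: retake the lock and copy data into entries that already
	 * exist; no allocation, so nothing can sleep under the lock.
	 */
	pthread_spin_lock(&list_lock);
	for (it = head, cur = out; it && cur; it = it->next, cur = cur->next)
		cur->value = it->value;
	pthread_spin_unlock(&list_lock);
	return out;

err_out:
	while (out) {			/* undo the partial allocation */
		s = out->next;
		free(out);
		out = s;
	}
	return NULL;
}

int main(void)
{
	struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct snapshot *s, *next;

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	for (s = snapshot_items(&a); s; s = next) {
		next = s->next;
		printf("%d\n", s->value);
		free(s);
	}
	pthread_spin_destroy(&list_lock);
	return 0;
}

Build with cc -pthread. The design point is simply that the sleeping allocation happens in the window where the spinlock is not held, at the cost of walking the list twice, which is the same trade-off the patch below makes in cifs_push_posix_locks().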
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 4dd9283885e7..5e64748a2917 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -920,16 +920,26 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
 	for (lockp = &inode->i_flock; *lockp != NULL; \
 	     lockp = &(*lockp)->fl_next)
 
+struct lock_to_push {
+	struct list_head llist;
+	__u64 offset;
+	__u64 length;
+	__u32 pid;
+	__u16 netfid;
+	__u8 type;
+};
+
 static int
 cifs_push_posix_locks(struct cifsFileInfo *cfile)
 {
 	struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	struct file_lock *flock, **before;
-	struct cifsLockInfo *lck, *tmp;
+	unsigned int count = 0, i = 0;
 	int rc = 0, xid, type;
+	struct list_head locks_to_send, *el;
+	struct lock_to_push *lck, *tmp;
 	__u64 length;
-	struct list_head locks_to_send;
 
 	xid = GetXid();
 
@@ -940,29 +950,55 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 		return rc;
 	}
 
+	lock_flocks();
+	cifs_for_each_lock(cfile->dentry->d_inode, before) {
+		if ((*before)->fl_flags & FL_POSIX)
+			count++;
+	}
+	unlock_flocks();
+
 	INIT_LIST_HEAD(&locks_to_send);
 
+	/*
+	 * Allocating count locks is enough because no locks can be added to
+	 * the list while we are holding cinode->lock_mutex that protects
+	 * locking operations of this inode.
+	 */
+	for (; i < count; i++) {
+		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
+		if (!lck) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		list_add_tail(&lck->llist, &locks_to_send);
+	}
+
+	i = 0;
+	el = locks_to_send.next;
 	lock_flocks();
 	cifs_for_each_lock(cfile->dentry->d_inode, before) {
+		if (el == &locks_to_send) {
+			/* something is really wrong */
+			cERROR(1, "Can't push all brlocks!");
+			break;
+		}
 		flock = *before;
+		if ((flock->fl_flags & FL_POSIX) == 0)
+			continue;
 		length = 1 + flock->fl_end - flock->fl_start;
 		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
 			type = CIFS_RDLCK;
 		else
 			type = CIFS_WRLCK;
-
-		lck = cifs_lock_init(flock->fl_start, length, type,
-				     cfile->netfid);
-		if (!lck) {
-			rc = -ENOMEM;
-			goto send_locks;
-		}
+		lck = list_entry(el, struct lock_to_push, llist);
 		lck->pid = flock->fl_pid;
-
-		list_add_tail(&lck->llist, &locks_to_send);
+		lck->netfid = cfile->netfid;
+		lck->length = length;
+		lck->type = type;
+		lck->offset = flock->fl_start;
+		i++;
+		el = el->next;
 	}
-
-send_locks:
 	unlock_flocks();
 
 	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
@@ -979,11 +1015,18 @@ send_locks:
 		kfree(lck);
 	}
 
+out:
 	cinode->can_cache_brlcks = false;
 	mutex_unlock(&cinode->lock_mutex);
 
 	FreeXid(xid);
 	return rc;
+err_out:
+	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
+		list_del(&lck->llist);
+		kfree(lck);
+	}
+	goto out;
 }
 
 static int