aboutsummaryrefslogtreecommitdiffstats
path: root/fs/cifs/file.c
diff options
context:
space:
mode:
authorPavel Shilovsky <piastry@etersoft.ru>2011-10-22 07:33:31 -0400
committerSteve French <smfrench@gmail.com>2011-10-24 14:11:52 -0400
commit9ee305b70e09f5132c9723780ce10e69710b8bca (patch)
treed739e9ba99cc523235404b7f5e38828ea6ed9536 /fs/cifs/file.c
parent4f6bcec910d45e4f46b1514977caa529bc69e645 (diff)
CIFS: Send as many mandatory unlock ranges at once as possible
This reduces network traffic and improves performance. Signed-off-by: Pavel Shilovsky <piastry@etersoft.ru> Acked-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Steve French <smfrench@gmail.com>
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--fs/cifs/file.c160
1 file changed, 124 insertions, 36 deletions
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 805e2bd1dfd5..569184e6ee01 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1057,6 +1057,128 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1057 return rc; 1057 return rc;
1058} 1058}
1059 1059
1060static void
1061cifs_move_llist(struct list_head *source, struct list_head *dest)
1062{
1063 struct list_head *li, *tmp;
1064 list_for_each_safe(li, tmp, source)
1065 list_move(li, dest);
1066}
1067
1068static void
1069cifs_free_llist(struct list_head *llist)
1070{
1071 struct cifsLockInfo *li, *tmp;
1072 list_for_each_entry_safe(li, tmp, llist, llist) {
1073 cifs_del_lock_waiters(li);
1074 list_del(&li->llist);
1075 kfree(li);
1076 }
1077}
1078
1079static int
1080cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1081{
1082 int rc = 0, stored_rc;
1083 int types[] = {LOCKING_ANDX_LARGE_FILES,
1084 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1085 unsigned int i;
1086 unsigned int max_num, num;
1087 LOCKING_ANDX_RANGE *buf, *cur;
1088 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1089 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
1090 struct cifsLockInfo *li, *tmp;
1091 __u64 length = 1 + flock->fl_end - flock->fl_start;
1092 struct list_head tmp_llist;
1093
1094 INIT_LIST_HEAD(&tmp_llist);
1095
1096 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
1097 sizeof(LOCKING_ANDX_RANGE);
1098 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1099 if (!buf)
1100 return -ENOMEM;
1101
1102 mutex_lock(&cinode->lock_mutex);
1103 for (i = 0; i < 2; i++) {
1104 cur = buf;
1105 num = 0;
1106 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1107 if (flock->fl_start > li->offset ||
1108 (flock->fl_start + length) <
1109 (li->offset + li->length))
1110 continue;
1111 if (current->tgid != li->pid)
1112 continue;
1113 if (cfile->netfid != li->netfid)
1114 continue;
1115 if (types[i] != li->type)
1116 continue;
1117 if (!cinode->can_cache_brlcks) {
1118 cur->Pid = cpu_to_le16(li->pid);
1119 cur->LengthLow = cpu_to_le32((u32)li->length);
1120 cur->LengthHigh =
1121 cpu_to_le32((u32)(li->length>>32));
1122 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1123 cur->OffsetHigh =
1124 cpu_to_le32((u32)(li->offset>>32));
1125 /*
1126 * We need to save a lock here to let us add
1127 * it again to the inode list if the unlock
1128 * range request fails on the server.
1129 */
1130 list_move(&li->llist, &tmp_llist);
1131 if (++num == max_num) {
1132 stored_rc = cifs_lockv(xid, tcon,
1133 cfile->netfid,
1134 li->type, num,
1135 0, buf);
1136 if (stored_rc) {
1137 /*
1138 * We failed on the unlock range
1139 * request - add all locks from
1140 * the tmp list to the head of
1141 * the inode list.
1142 */
1143 cifs_move_llist(&tmp_llist,
1144 &cinode->llist);
1145 rc = stored_rc;
1146 } else
1147 /*
1148 * The unlock range request
1149 * succeed - free the tmp list.
1150 */
1151 cifs_free_llist(&tmp_llist);
1152 cur = buf;
1153 num = 0;
1154 } else
1155 cur++;
1156 } else {
1157 /*
1158 * We can cache brlock requests - simply remove
1159 * a lock from the inode list.
1160 */
1161 list_del(&li->llist);
1162 cifs_del_lock_waiters(li);
1163 kfree(li);
1164 }
1165 }
1166 if (num) {
1167 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1168 types[i], num, 0, buf);
1169 if (stored_rc) {
1170 cifs_move_llist(&tmp_llist, &cinode->llist);
1171 rc = stored_rc;
1172 } else
1173 cifs_free_llist(&tmp_llist);
1174 }
1175 }
1176
1177 mutex_unlock(&cinode->lock_mutex);
1178 kfree(buf);
1179 return rc;
1180}
1181
1060static int 1182static int
1061cifs_setlk(struct file *file, struct file_lock *flock, __u8 type, 1183cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1062 bool wait_flag, bool posix_lck, int lock, int unlock, int xid) 1184 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
@@ -1104,43 +1226,9 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1104 rc = cifs_lock_add(cinode, length, flock->fl_start, 1226 rc = cifs_lock_add(cinode, length, flock->fl_start,
1105 type, netfid); 1227 type, netfid);
1106 } 1228 }
1107 } else if (unlock) { 1229 } else if (unlock)
1108 /* 1230 rc = cifs_unlock_range(cfile, flock, xid);
1109 * For each stored lock that this unlock overlaps completely,
1110 * unlock it.
1111 */
1112 int stored_rc = 0;
1113 struct cifsLockInfo *li, *tmp;
1114
1115 mutex_lock(&cinode->lock_mutex);
1116 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
1117 if (flock->fl_start > li->offset ||
1118 (flock->fl_start + length) <
1119 (li->offset + li->length))
1120 continue;
1121 if (current->tgid != li->pid)
1122 continue;
1123 if (cfile->netfid != li->netfid)
1124 continue;
1125
1126 if (!cinode->can_cache_brlcks)
1127 stored_rc = CIFSSMBLock(xid, tcon, netfid,
1128 current->tgid,
1129 li->length, li->offset,
1130 1, 0, li->type, 0, 0);
1131 else
1132 stored_rc = 0;
1133 1231
1134 if (stored_rc)
1135 rc = stored_rc;
1136 else {
1137 list_del(&li->llist);
1138 cifs_del_lock_waiters(li);
1139 kfree(li);
1140 }
1141 }
1142 mutex_unlock(&cinode->lock_mutex);
1143 }
1144out: 1232out:
1145 if (flock->fl_flags & FL_POSIX) 1233 if (flock->fl_flags & FL_POSIX)
1146 posix_lock_file_wait(file, flock); 1234 posix_lock_file_wait(file, flock);