author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 18:34:42 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-10 18:34:42 -0500
commit     4b4f8580a4b77126733db8072862793d4deae66a (patch)
tree       0d6ab49f4fe61ca96fd513b6dfae8be541796320 /fs/nfs
parent     872912352c5be930e9568e5f3b6d73107d9f278d (diff)
parent     8116bf4cb62d337c953cfa5369ef4cf83e73140c (diff)
Merge tag 'locks-v3.20-1' of git://git.samba.org/jlayton/linux
Pull file locking related changes #1 from Jeff Layton:
"This patchset contains a fairly major overhaul of how file locks are
tracked within the inode. Rather than a single list, we now create a
per-inode "lock context" that contains individual lists for the file
locks, and a new dedicated spinlock for them.
There are changes in other trees that are based on top of this set so
it may be easiest to pull this in early"
* tag 'locks-v3.20-1' of git://git.samba.org/jlayton/linux:
locks: update comments that refer to inode->i_flock
locks: consolidate NULL i_flctx checks in locks_remove_file
locks: keep a count of locks on the flctx lists
locks: clean up the lm_change prototype
locks: add a dedicated spinlock to protect i_flctx lists
locks: remove i_flock field from struct inode
locks: convert lease handling to file_lock_context
locks: convert posix locks to file_lock_context
locks: move flock locks to file_lock_context
ceph: move spinlocking into ceph_encode_locks_to_buffer and ceph_count_locks
locks: add a new struct file_locking_context pointer to struct inode
locks: have locks_release_file use flock_lock_file to release generic flock locks
locks: add new struct list_head to struct file_lock
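
All of the fs/nfs hunks below follow the layout the pull message describes: per-inode lock state moves from inode->i_flock to a dedicated context reached via inode->i_flctx. As a rough orientation aid only, here is a sketch of that context inferred from the field names used in the diffs (flc_lock, flc_posix, flc_flock, fl_list); the authoritative definition lives in include/linux/fs.h and carries additional fields (for example the per-list counts and the lease list mentioned in the shortlog). The helper at the end is hypothetical and not part of this series; it just shows the "does this inode have any POSIX or flock locks?" test the NFS code now performs inline.

/* Sketch only -- see include/linux/fs.h for the real definition. */
struct file_lock_context {
        spinlock_t              flc_lock;       /* protects the lists below */
        struct list_head        flc_flock;      /* flock(2) locks, linked via fl->fl_list */
        struct list_head        flc_posix;      /* POSIX byte-range locks */
        struct list_head        flc_lease;      /* leases */
};

/*
 * Hypothetical helper (not in this series): the check that replaces the old
 * "inode->i_flock != NULL" test in the NFS hunks below.
 */
static inline bool nfs_inode_has_locks(struct inode *inode)
{
        struct file_lock_context *flctx = inode->i_flctx;

        return flctx != NULL &&
               !(list_empty_careful(&flctx->flc_posix) &&
                 list_empty_careful(&flctx->flc_flock));
}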
Diffstat (limited to 'fs/nfs')
-rw-r--r--   fs/nfs/delegation.c   23
-rw-r--r--   fs/nfs/nfs4state.c    70
-rw-r--r--   fs/nfs/pagelist.c      6
-rw-r--r--   fs/nfs/write.c        41
4 files changed, 92 insertions(+), 48 deletions(-)
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 7f3f60641344..8cdb2b28a104 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -85,25 +85,30 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 {
         struct inode *inode = state->inode;
         struct file_lock *fl;
+        struct file_lock_context *flctx = inode->i_flctx;
+        struct list_head *list;
         int status = 0;
 
-        if (inode->i_flock == NULL)
+        if (flctx == NULL)
                 goto out;
 
-        /* Protect inode->i_flock using the i_lock */
-        spin_lock(&inode->i_lock);
-        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
-                        continue;
+        list = &flctx->flc_posix;
+        spin_lock(&flctx->flc_lock);
+restart:
+        list_for_each_entry(fl, list, fl_list) {
                 if (nfs_file_open_context(fl->fl_file) != ctx)
                         continue;
-                spin_unlock(&inode->i_lock);
+                spin_unlock(&flctx->flc_lock);
                 status = nfs4_lock_delegation_recall(fl, state, stateid);
                 if (status < 0)
                         goto out;
-                spin_lock(&inode->i_lock);
+                spin_lock(&flctx->flc_lock);
         }
-        spin_unlock(&inode->i_lock);
+        if (list == &flctx->flc_posix) {
+                list = &flctx->flc_flock;
+                goto restart;
+        }
+        spin_unlock(&flctx->flc_lock);
 out:
         return status;
 }
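
The hunk above (and the nfs4state.c hunk that follows) uses the same two-pass walk: hold flc_lock, iterate the POSIX list, drop the lock around the blocking per-lock call, then restart the iteration over the flock list. Abstracted into a stand-alone sketch (walk_byte_range_locks() and its callback are illustrative names, not functions from this series), the pattern looks like this:

/* Illustrative only: the shape of the flc_posix-then-flc_flock walk above. */
static int walk_byte_range_locks(struct file_lock_context *flctx,
                                 int (*one_lock)(struct file_lock *fl))
{
        struct list_head *list = &flctx->flc_posix;
        struct file_lock *fl;
        int status = 0;

        spin_lock(&flctx->flc_lock);
restart:
        list_for_each_entry(fl, list, fl_list) {
                spin_unlock(&flctx->flc_lock);  /* one_lock() may sleep */
                status = one_lock(fl);
                if (status < 0)
                        goto out;
                spin_lock(&flctx->flc_lock);
        }
        if (list == &flctx->flc_posix) {
                list = &flctx->flc_flock;
                goto restart;
        }
        spin_unlock(&flctx->flc_lock);
out:
        return status;
}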
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 5194933ed419..a3bb22ab68c5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1366,49 +1366,55 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
         struct nfs_inode *nfsi = NFS_I(inode);
         struct file_lock *fl;
         int status = 0;
+        struct file_lock_context *flctx = inode->i_flctx;
+        struct list_head *list;
 
-        if (inode->i_flock == NULL)
+        if (flctx == NULL)
                 return 0;
 
+        list = &flctx->flc_posix;
+
         /* Guard against delegation returns and new lock/unlock calls */
         down_write(&nfsi->rwsem);
-        /* Protect inode->i_flock using the BKL */
-        spin_lock(&inode->i_lock);
-        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
-                        continue;
+        spin_lock(&flctx->flc_lock);
+restart:
+        list_for_each_entry(fl, list, fl_list) {
                 if (nfs_file_open_context(fl->fl_file)->state != state)
                         continue;
-                spin_unlock(&inode->i_lock);
+                spin_unlock(&flctx->flc_lock);
                 status = ops->recover_lock(state, fl);
                 switch (status) {
                 case 0:
                         break;
                 case -ESTALE:
                 case -NFS4ERR_ADMIN_REVOKED:
                 case -NFS4ERR_STALE_STATEID:
                 case -NFS4ERR_BAD_STATEID:
                 case -NFS4ERR_EXPIRED:
                 case -NFS4ERR_NO_GRACE:
                 case -NFS4ERR_STALE_CLIENTID:
                 case -NFS4ERR_BADSESSION:
                 case -NFS4ERR_BADSLOT:
                 case -NFS4ERR_BAD_HIGH_SLOT:
                 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
                         goto out;
                 default:
-                        printk(KERN_ERR "NFS: %s: unhandled error %d\n",
+                        pr_err("NFS: %s: unhandled error %d\n",
                                         __func__, status);
                 case -ENOMEM:
                 case -NFS4ERR_DENIED:
                 case -NFS4ERR_RECLAIM_BAD:
                 case -NFS4ERR_RECLAIM_CONFLICT:
                         /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                         status = 0;
                 }
-                spin_lock(&inode->i_lock);
+                spin_lock(&flctx->flc_lock);
         }
-        spin_unlock(&inode->i_lock);
+        if (list == &flctx->flc_posix) {
+                list = &flctx->flc_flock;
+                goto restart;
+        }
+        spin_unlock(&flctx->flc_lock);
 out:
         up_write(&nfsi->rwsem);
         return status;
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 2b5e769beb16..29c7f33c9cf1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -826,11 +826,15 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
                                       struct nfs_pageio_descriptor *pgio)
 {
         size_t size;
+        struct file_lock_context *flctx;
 
         if (prev) {
                 if (!nfs_match_open_context(req->wb_context, prev->wb_context))
                         return false;
-                if (req->wb_context->dentry->d_inode->i_flock != NULL &&
+                flctx = req->wb_context->dentry->d_inode->i_flctx;
+                if (flctx != NULL &&
+                    !(list_empty_careful(&flctx->flc_posix) &&
+                      list_empty_careful(&flctx->flc_flock)) &&
                     !nfs_match_lock_context(req->wb_lock_context,
                                             prev->wb_lock_context))
                         return false;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..4ae66f416eb9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
         struct nfs_open_context *ctx = nfs_file_open_context(file);
         struct nfs_lock_context *l_ctx;
+        struct file_lock_context *flctx = file_inode(file)->i_flctx;
         struct nfs_page *req;
         int do_flush, status;
         /*
@@ -1109,7 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
                 do_flush = req->wb_page != page || req->wb_context != ctx;
                 /* for now, flush if more than 1 request in page_group */
                 do_flush |= req->wb_this_page != req;
-                if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+                if (l_ctx && flctx &&
+                    !(list_empty_careful(&flctx->flc_posix) &&
+                      list_empty_careful(&flctx->flc_flock))) {
                         do_flush |= l_ctx->lockowner.l_owner != current->files
                                 || l_ctx->lockowner.l_pid != current->tgid;
                 }
@@ -1170,6 +1173,13 @@ out:
         return PageUptodate(page) != 0;
 }
 
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+        return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+                        fl->fl_type == F_WRLCK;
+}
+
 /* If we know the page is up to date, and we're not using byte range locks (or
  * if we have the whole file locked for writing), it may be more efficient to
  * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1190,36 @@ out:
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
+        int ret;
+        struct file_lock_context *flctx = inode->i_flctx;
+        struct file_lock *fl;
+
         if (file->f_flags & O_DSYNC)
                 return 0;
         if (!nfs_write_pageuptodate(page, inode))
                 return 0;
         if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                 return 1;
-        if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
-                        inode->i_flock->fl_end == OFFSET_MAX &&
-                        inode->i_flock->fl_type != F_RDLCK))
-                return 1;
-        return 0;
+        if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+                       list_empty_careful(&flctx->flc_posix)))
+                return 0;
+
+        /* Check to see if there are whole file write locks */
+        ret = 0;
+        spin_lock(&flctx->flc_lock);
+        if (!list_empty(&flctx->flc_posix)) {
+                fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+                                        fl_list);
+                if (is_whole_file_wrlock(fl))
+                        ret = 1;
+        } else if (!list_empty(&flctx->flc_flock)) {
+                fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+                                        fl_list);
+                if (fl->fl_type == F_WRLCK)
+                        ret = 1;
+        }
+        spin_unlock(&flctx->flc_lock);
+        return ret;
 }
 
 /*