Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	99
1 file changed, 71 insertions(+), 28 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..88a6d2196ece 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -473,13 +473,18 @@ try_again:
 	do {
 		/*
 		 * Subrequests are always contiguous, non overlapping
-		 * and in order. If not, it's a programming error.
+		 * and in order - but may be repeated (mirrored writes).
 		 */
-		WARN_ON_ONCE(subreq->wb_offset !=
-			(head->wb_offset + total_bytes));
-
-		/* keep track of how many bytes this group covers */
-		total_bytes += subreq->wb_bytes;
+		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
+			/* keep track of how many bytes this group covers */
+			total_bytes += subreq->wb_bytes;
+		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
+			    ((subreq->wb_offset + subreq->wb_bytes) >
+			     (head->wb_offset + total_bytes)))) {
+			nfs_page_group_unlock(head);
+			spin_unlock(&inode->i_lock);
+			return ERR_PTR(-EIO);
+		}
 
 		if (!nfs_lock_request(subreq)) {
 			/* releases page group bit lock and
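
Note on the reworked group-coverage check above: a subrequest is accepted either when it starts exactly where the group's covered range ends (extending total_bytes) or when it falls entirely inside the range already covered, as a mirrored repeat does; anything starting before the head or reaching past the covered range is treated as an error. A minimal userspace sketch of that acceptance test (the struct and field names below are illustrative stand-ins, not the kernel's struct nfs_page):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the struct nfs_page fields used above. */
struct subreq {
	uint64_t offset;	/* wb_offset */
	uint64_t bytes;		/* wb_bytes */
};

/*
 * Accept a subrequest if it extends the covered range by starting exactly
 * at its end, or if it lies wholly inside the range already covered (a
 * mirrored repeat).  Reject anything that starts before the head or runs
 * past the covered range, mirroring the WARN_ON_ONCE() condition.
 */
static bool subreq_ok(uint64_t head_off, uint64_t *total_bytes,
		      const struct subreq *s)
{
	if (s->offset == head_off + *total_bytes) {
		*total_bytes += s->bytes;
		return true;
	}
	return s->offset >= head_off &&
	       s->offset + s->bytes <= head_off + *total_bytes;
}

int main(void)
{
	uint64_t total = 0;
	struct subreq a = { 0, 4096 }, b = { 0, 4096 }, c = { 8192, 4096 };

	printf("%d %d %d\n",
	       subreq_ok(0, &total, &a),	/* extends the group: 1 */
	       subreq_ok(0, &total, &b),	/* mirrored repeat:   1 */
	       subreq_ok(0, &total, &c));	/* gap past the end:  0 */
	return 0;
}
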
@@ -786,7 +791,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
@@ -842,9 +847,9 @@ EXPORT_SYMBOL_GPL(nfs_init_cinfo);
  */
 void
 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
-			struct nfs_commit_info *cinfo)
+			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
 {
-	if (pnfs_mark_request_commit(req, lseg, cinfo))
+	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
 		return;
 	nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
 }
@@ -853,7 +858,7 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
-	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+	dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
 }
 
 /* Called holding inode (/cinfo) lock */
@@ -900,7 +905,8 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 		}
 		if (nfs_write_need_commit(hdr)) {
 			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
-			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+				hdr->pgio_mirror_idx);
 			goto next;
 		}
 remove_req:
@@ -1091,6 +1097,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct nfs_lock_context *l_ctx;
+	struct file_lock_context *flctx = file_inode(file)->i_flctx;
 	struct nfs_page *req;
 	int do_flush, status;
 	/*
@@ -1109,7 +1116,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		do_flush = req->wb_page != page || req->wb_context != ctx;
 		/* for now, flush if more than 1 request in page_group */
 		do_flush |= req->wb_this_page != req;
-		if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+		if (l_ctx && flctx &&
+		    !(list_empty_careful(&flctx->flc_posix) &&
+		      list_empty_careful(&flctx->flc_flock))) {
 			do_flush |= l_ctx->lockowner.l_owner != current->files
 				|| l_ctx->lockowner.l_pid != current->tgid;
 		}
@@ -1170,6 +1179,13 @@ out:
 	return PageUptodate(page) != 0;
 }
 
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+			fl->fl_type == F_WRLCK;
+}
+
 /* If we know the page is up to date, and we're not using byte range locks (or
  * if we have the whole file locked for writing), it may be more efficient to
  * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1196,36 @@ out:
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
+	int ret;
+	struct file_lock_context *flctx = inode->i_flctx;
+	struct file_lock *fl;
+
 	if (file->f_flags & O_DSYNC)
 		return 0;
 	if (!nfs_write_pageuptodate(page, inode))
 		return 0;
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		return 1;
-	if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
-			inode->i_flock->fl_end == OFFSET_MAX &&
-			inode->i_flock->fl_type != F_RDLCK))
-		return 1;
-	return 0;
+	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+		       list_empty_careful(&flctx->flc_posix)))
+		return 0;
+
+	/* Check to see if there are whole file write locks */
+	ret = 0;
+	spin_lock(&flctx->flc_lock);
+	if (!list_empty(&flctx->flc_posix)) {
+		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+					fl_list);
+		if (is_whole_file_wrlock(fl))
+			ret = 1;
+	} else if (!list_empty(&flctx->flc_flock)) {
+		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+					fl_list);
+		if (fl->fl_type == F_WRLCK)
+			ret = 1;
+	}
+	spin_unlock(&flctx->flc_lock);
+	return ret;
 }
 
 /*
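
A side note on the rewritten nfs_can_extend_write() above: with the lock lists split per type, only the first lock on each list is inspected. A POSIX lock must be a whole-file write lock for the write to be extended, while a flock lock (which always covers the whole file) only needs to be a write lock; with no locks present the hunk above returns 0. A rough userspace model of that decision, using simplified stand-in types rather than struct file_lock:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX INT64_MAX

/* Simplified stand-ins for struct file_lock and its type field. */
enum lk_type { RDLCK, WRLCK };
struct lk {
	int64_t start, end;
	enum lk_type type;
};

/* Same test as is_whole_file_wrlock(): a write lock spanning the whole file. */
static bool whole_file_wrlock(const struct lk *fl)
{
	return fl->start == 0 && fl->end == OFFSET_MAX && fl->type == WRLCK;
}

/*
 * Decision modelled on the hunk above; "posix" and "flock" stand for the
 * first entry of flc_posix and flc_flock respectively (NULL when the list
 * is empty).  No locks means no extension, a POSIX lock must be a whole-file
 * write lock, and a flock lock only needs to be a write lock.
 */
static bool can_extend(const struct lk *posix, const struct lk *flock)
{
	if (!posix && !flock)
		return false;
	if (posix)
		return whole_file_wrlock(posix);
	return flock->type == WRLCK;
}

int main(void)
{
	struct lk whole = { 0, OFFSET_MAX, WRLCK };
	struct lk range = { 0, 4095, WRLCK };

	printf("%d %d %d\n", can_extend(&whole, NULL),
	       can_extend(&range, NULL), can_extend(NULL, NULL));
	return 0;
}
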
@@ -1240,15 +1275,15 @@ static int flush_task_priority(int how)
 
 static void nfs_initiate_write(struct nfs_pgio_header *hdr,
 			       struct rpc_message *msg,
+			       const struct nfs_rpc_ops *rpc_ops,
 			       struct rpc_task_setup *task_setup_data, int how)
 {
-	struct inode *inode = hdr->inode;
 	int priority = flush_task_priority(how);
 
 	task_setup_data->priority = priority;
-	NFS_PROTO(inode)->write_setup(hdr, msg);
+	rpc_ops->write_setup(hdr, msg);
 
-	nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
+	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
 				&task_setup_data->rpc_client, msg, hdr);
 }
 
@@ -1298,8 +1333,14 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
 
 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
 {
+	struct nfs_pgio_mirror *mirror;
+
 	pgio->pg_ops = &nfs_pgio_rw_ops;
-	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
+
+	nfs_pageio_stop_mirroring(pgio);
+
+	mirror = &pgio->pg_mirrors[0];
+	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
 
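
The nfs_pageio_reset_write_mds() change above reflects the mirrored-I/O plumbing: per-descriptor state such as the block size now lives in the pg_mirrors[] array, so falling back to the MDS first collapses mirroring and then sets the write size on mirror 0. A toy model of that shape (hypothetical struct names, not the kernel's nfs_pageio_descriptor):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, pared-down model of the descriptor and its mirror array. */
struct mirror {
	size_t pg_bsize;
};

struct pgio_desc {
	unsigned int mirror_count;
	struct mirror pg_mirrors[4];
};

/*
 * Falling back to write through the MDS: collapse to a single mirror
 * (the analogue of nfs_pageio_stop_mirroring()) and give mirror 0 the
 * server's wsize.
 */
static void reset_write_mds(struct pgio_desc *pgio, size_t wsize)
{
	pgio->mirror_count = 1;
	pgio->pg_mirrors[0].pg_bsize = wsize;
}

int main(void)
{
	struct pgio_desc d = { .mirror_count = 3 };

	reset_write_mds(&d, 1048576);
	printf("%u %zu\n", d.mirror_count, d.pg_mirrors[0].pg_bsize);
	return 0;
}
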
@@ -1465,6 +1506,7 @@ void nfs_commitdata_release(struct nfs_commit_data *data)
 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
 
 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
+			const struct nfs_rpc_ops *nfs_ops,
 			const struct rpc_call_ops *call_ops,
 			int how, int flags)
 {
@@ -1486,7 +1528,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
 		.priority = priority,
 	};
 	/* Set up the initial task struct. */
-	NFS_PROTO(data->inode)->commit_setup(data, &msg);
+	nfs_ops->commit_setup(data, &msg);
 
 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 
@@ -1554,17 +1596,18 @@ EXPORT_SYMBOL_GPL(nfs_init_commit);
 
 void nfs_retry_commit(struct list_head *page_list,
 		      struct pnfs_layout_segment *lseg,
-		      struct nfs_commit_info *cinfo)
+		      struct nfs_commit_info *cinfo,
+		      u32 ds_commit_idx)
 {
 	struct nfs_page *req;
 
 	while (!list_empty(page_list)) {
 		req = nfs_list_entry(page_list->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_commit(req, lseg, cinfo);
+		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
 		if (!cinfo->dreq) {
 			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+			dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 				     BDI_RECLAIMABLE);
 		}
 		nfs_unlock_and_release_request(req);
@@ -1589,10 +1632,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
 	/* Set up the argument struct */
 	nfs_init_commit(data, head, NULL, cinfo);
 	atomic_inc(&cinfo->mds->rpcs_out);
-	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
-				   how, 0);
+	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
+				   data->mds_ops, how, 0);
  out_bad:
-	nfs_retry_commit(head, NULL, cinfo);
+	nfs_retry_commit(head, NULL, cinfo, 0);
 	cinfo->completion_ops->error_cleanup(NFS_I(inode));
 	return -ENOMEM;
 }