Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--   fs/nfs/write.c   141
1 files changed, 103 insertions, 38 deletions
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index af3af685a9e3..849ed784d6ac 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -473,13 +473,18 @@ try_again:
 	do {
 		/*
 		 * Subrequests are always contiguous, non overlapping
-		 * and in order. If not, it's a programming error.
+		 * and in order - but may be repeated (mirrored writes).
 		 */
-		WARN_ON_ONCE(subreq->wb_offset !=
-		     (head->wb_offset + total_bytes));
-
-		/* keep track of how many bytes this group covers */
-		total_bytes += subreq->wb_bytes;
+		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
+			/* keep track of how many bytes this group covers */
+			total_bytes += subreq->wb_bytes;
+		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
+			    ((subreq->wb_offset + subreq->wb_bytes) >
+			     (head->wb_offset + total_bytes)))) {
+			nfs_page_group_unlock(head);
+			spin_unlock(&inode->i_lock);
+			return ERR_PTR(-EIO);
+		}
 
 		if (!nfs_lock_request(subreq)) {
 			/* releases page group bit lock and
@@ -784,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 	nfs_list_add_request(req, dst);
 	cinfo->mds->ncommit++;
 	spin_unlock(cinfo->lock);
-	if (!cinfo->dreq) {
-		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
-			     BDI_RECLAIMABLE);
-		__mark_inode_dirty(req->wb_context->dentry->d_inode,
-				   I_DIRTY_DATASYNC);
-	}
+	if (!cinfo->dreq)
+		nfs_mark_page_unstable(req->wb_page);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -842,9 +842,9 @@ EXPORT_SYMBOL_GPL(nfs_init_cinfo);
  */
 void
 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
-			struct nfs_commit_info *cinfo)
+			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
 {
-	if (pnfs_mark_request_commit(req, lseg, cinfo))
+	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
 		return;
 	nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
 }
@@ -853,7 +853,7 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
-	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+	dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
 }
 
 /* Called holding inode (/cinfo) lock */
@@ -900,7 +900,8 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 		}
 		if (nfs_write_need_commit(hdr)) {
 			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
-			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
+			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
+				hdr->pgio_mirror_idx);
 			goto next;
 		}
 remove_req:
@@ -1091,6 +1092,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct nfs_lock_context *l_ctx;
+	struct file_lock_context *flctx = file_inode(file)->i_flctx;
 	struct nfs_page *req;
 	int do_flush, status;
 	/*
@@ -1109,7 +1111,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		do_flush = req->wb_page != page || req->wb_context != ctx;
 		/* for now, flush if more than 1 request in page_group */
 		do_flush |= req->wb_this_page != req;
-		if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
+		if (l_ctx && flctx &&
+		    !(list_empty_careful(&flctx->flc_posix) &&
+		      list_empty_careful(&flctx->flc_flock))) {
 			do_flush |= l_ctx->lockowner.l_owner != current->files
 				|| l_ctx->lockowner.l_pid != current->tgid;
 		}
@@ -1170,6 +1174,13 @@ out:
 	return PageUptodate(page) != 0;
 }
 
+static bool
+is_whole_file_wrlock(struct file_lock *fl)
+{
+	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
+			fl->fl_type == F_WRLCK;
+}
+
 /* If we know the page is up to date, and we're not using byte range locks (or
  * if we have the whole file locked for writing), it may be more efficient to
  * extend the write to cover the entire page in order to avoid fragmentation
@@ -1180,17 +1191,36 @@ out:
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
+	int ret;
+	struct file_lock_context *flctx = inode->i_flctx;
+	struct file_lock *fl;
+
 	if (file->f_flags & O_DSYNC)
 		return 0;
 	if (!nfs_write_pageuptodate(page, inode))
 		return 0;
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		return 1;
-	if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
-			inode->i_flock->fl_end == OFFSET_MAX &&
-			inode->i_flock->fl_type != F_RDLCK))
-		return 1;
-	return 0;
+	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+		       list_empty_careful(&flctx->flc_posix)))
+		return 0;
+
+	/* Check to see if there are whole file write locks */
+	ret = 0;
+	spin_lock(&flctx->flc_lock);
+	if (!list_empty(&flctx->flc_posix)) {
+		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+					fl_list);
+		if (is_whole_file_wrlock(fl))
+			ret = 1;
+	} else if (!list_empty(&flctx->flc_flock)) {
+		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
+					fl_list);
+		if (fl->fl_type == F_WRLCK)
+			ret = 1;
+	}
+	spin_unlock(&flctx->flc_lock);
+	return ret;
 }
 
 /*
@@ -1240,15 +1270,15 @@ static int flush_task_priority(int how)
 
 static void nfs_initiate_write(struct nfs_pgio_header *hdr,
 			       struct rpc_message *msg,
+			       const struct nfs_rpc_ops *rpc_ops,
 			       struct rpc_task_setup *task_setup_data, int how)
 {
-	struct inode *inode = hdr->inode;
 	int priority = flush_task_priority(how);
 
 	task_setup_data->priority = priority;
-	NFS_PROTO(inode)->write_setup(hdr, msg);
+	rpc_ops->write_setup(hdr, msg);
 
-	nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client,
+	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
 				 &task_setup_data->rpc_client, msg, hdr);
 }
 
@@ -1298,8 +1328,14 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_write);
 
 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
 {
+	struct nfs_pgio_mirror *mirror;
+
 	pgio->pg_ops = &nfs_pgio_rw_ops;
-	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
+
+	nfs_pageio_stop_mirroring(pgio);
+
+	mirror = &pgio->pg_mirrors[0];
+	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
 
@@ -1341,6 +1377,36 @@ static int nfs_should_remove_suid(const struct inode *inode)
 	return 0;
 }
 
+static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
+		struct nfs_fattr *fattr)
+{
+	struct nfs_pgio_args *argp = &hdr->args;
+	struct nfs_pgio_res *resp = &hdr->res;
+
+	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+		return;
+	if (argp->offset + resp->count != fattr->size)
+		return;
+	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
+		return;
+	/* Set attribute barrier */
+	nfs_fattr_set_barrier(fattr);
+}
+
+void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
+{
+	struct nfs_fattr *fattr = hdr->res.fattr;
+	struct inode *inode = hdr->inode;
+
+	if (fattr == NULL)
+		return;
+	spin_lock(&inode->i_lock);
+	nfs_writeback_check_extend(hdr, fattr);
+	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
+	spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
+
 /*
  * This function is called when the WRITE call is complete.
  */
@@ -1465,6 +1531,7 @@ void nfs_commitdata_release(struct nfs_commit_data *data)
 EXPORT_SYMBOL_GPL(nfs_commitdata_release);
 
 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
+			const struct nfs_rpc_ops *nfs_ops,
 			const struct rpc_call_ops *call_ops,
 			int how, int flags)
 {
@@ -1486,7 +1553,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
 		.priority = priority,
 	};
 	/* Set up the initial task struct. */
-	NFS_PROTO(data->inode)->commit_setup(data, &msg);
+	nfs_ops->commit_setup(data, &msg);
 
 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 
@@ -1554,19 +1621,17 @@ EXPORT_SYMBOL_GPL(nfs_init_commit);
 
 void nfs_retry_commit(struct list_head *page_list,
 		      struct pnfs_layout_segment *lseg,
-		      struct nfs_commit_info *cinfo)
+		      struct nfs_commit_info *cinfo,
+		      u32 ds_commit_idx)
 {
 	struct nfs_page *req;
 
 	while (!list_empty(page_list)) {
 		req = nfs_list_entry(page_list->next);
 		nfs_list_remove_request(req);
-		nfs_mark_request_commit(req, lseg, cinfo);
-		if (!cinfo->dreq) {
-			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
-				     BDI_RECLAIMABLE);
-		}
+		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
+		if (!cinfo->dreq)
+			nfs_clear_page_commit(req->wb_page);
 		nfs_unlock_and_release_request(req);
 	}
 }
@@ -1589,10 +1654,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
 	/* Set up the argument struct */
 	nfs_init_commit(data, head, NULL, cinfo);
 	atomic_inc(&cinfo->mds->rpcs_out);
-	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
-				   how, 0);
+	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
+				   data->mds_ops, how, 0);
  out_bad:
-	nfs_retry_commit(head, NULL, cinfo);
+	nfs_retry_commit(head, NULL, cinfo, 0);
 	cinfo->completion_ops->error_cleanup(NFS_I(inode));
 	return -ENOMEM;
 }
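
For reference, below is a minimal user-space sketch of just the lock-context check that this diff adds to nfs_can_extend_write(). The names simple_lock, simple_lock_ctx and can_extend_write are simplified stand-ins, not the kernel's file_lock/file_lock_context API, and the O_DSYNC, page-uptodate and delegation checks are omitted; it only illustrates the decision order visible in the hunk above: no locks means no page extension, a POSIX lock must be a whole-file write lock, and a flock lock must be a write lock.

/* Hypothetical stand-alone sketch; structs below are NOT the kernel types. */
#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

#define OFFSET_MAX LLONG_MAX

enum lock_type { RDLCK, WRLCK };

struct simple_lock {                 /* stand-in for struct file_lock */
	long long start, end;
	enum lock_type type;
};

struct simple_lock_ctx {             /* stand-in for struct file_lock_context */
	const struct simple_lock *posix; /* first POSIX lock, or NULL */
	const struct simple_lock *flock; /* first flock lock, or NULL */
};

static bool is_whole_file_wrlock(const struct simple_lock *fl)
{
	return fl->start == 0 && fl->end == OFFSET_MAX && fl->type == WRLCK;
}

/* Mirrors the patched decision: no locks -> 0; POSIX locks are checked
 * before flock locks; only a whole-file write lock (or a write flock)
 * allows extending the write to the whole page. */
static int can_extend_write(const struct simple_lock_ctx *flctx)
{
	if (!flctx || (!flctx->posix && !flctx->flock))
		return 0;
	if (flctx->posix)
		return is_whole_file_wrlock(flctx->posix);
	return flctx->flock->type == WRLCK;
}

int main(void)
{
	struct simple_lock whole = { 0, OFFSET_MAX, WRLCK };
	struct simple_lock range = { 0, 4095, WRLCK };
	struct simple_lock_ctx a = { &whole, NULL };
	struct simple_lock_ctx b = { &range, NULL };

	printf("whole-file write lock: %d\n", can_extend_write(&a)); /* 1 */
	printf("byte-range write lock: %d\n", can_extend_write(&b)); /* 0 */
	return 0;
}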