Diffstat (limited to 'fs/nfs/write.c')

 -rw-r--r--   fs/nfs/write.c   118
 1 file changed, 72 insertions(+), 46 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4cfada2cc09f..b674462793d3 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,7 +46,6 @@
  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
  */
 
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
@@ -91,23 +90,13 @@ static mempool_t *nfs_commit_mempool;
 
 static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
 
-struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_commit_alloc(void)
 {
 	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->pages);
-		if (pagecount < NFS_PAGEVEC_SIZE)
-			p->pagevec = &p->page_array[0];
-		else {
-			size_t size = ++pagecount * sizeof(struct page *);
-			p->pagevec = kzalloc(size, GFP_NOFS);
-			if (!p->pagevec) {
-				mempool_free(p, nfs_commit_mempool);
-				p = NULL;
-			}
-		}
 	}
 	return p;
 }
@@ -119,21 +108,20 @@ void nfs_commit_free(struct nfs_write_data *p)
 	mempool_free(p, nfs_commit_mempool);
 }
 
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
+	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
 
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->pages);
-		if (pagecount < NFS_PAGEVEC_SIZE)
-			p->pagevec = &p->page_array[0];
+		p->npages = pagecount;
+		if (pagecount <= ARRAY_SIZE(p->page_array))
+			p->pagevec = p->page_array;
 		else {
-			size_t size = ++pagecount * sizeof(struct page *);
-			p->pagevec = kmalloc(size, GFP_NOFS);
-			if (p->pagevec) {
-				memset(p->pagevec, 0, size);
-			} else {
+			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
+			if (!p->pagevec) {
 				mempool_free(p, nfs_wdata_mempool);
 				p = NULL;
 			}
@@ -142,7 +130,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
 	return p;
 }
 
-void nfs_writedata_free(struct nfs_write_data *p)
+static void nfs_writedata_free(struct nfs_write_data *p)
 {
 	if (p && (p->pagevec != &p->page_array[0]))
 		kfree(p->pagevec);
@@ -213,7 +201,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
 	int result, written = 0;
 	struct nfs_write_data *wdata;
 
-	wdata = nfs_writedata_alloc(1);
+	wdata = nfs_writedata_alloc(wsize);
 	if (!wdata)
 		return -ENOMEM;
 
@@ -408,6 +396,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 out:
 	clear_bit(BDI_write_congested, &bdi->state);
 	wake_up_all(&nfs_write_congestion);
+	writeback_congestion_end();
 	return err;
 }
 
@@ -501,7 +490,7 @@ nfs_mark_request_dirty(struct nfs_page *req)
 	nfs_list_add_request(req, &nfsi->dirty);
 	nfsi->ndirty++;
 	spin_unlock(&nfsi->req_lock);
-	inc_page_state(nr_dirty);
+	inc_zone_page_state(req->wb_page, NR_FILE_DIRTY);
 	mark_inode_dirty(inode);
 }
 
@@ -529,7 +518,7 @@ nfs_mark_request_commit(struct nfs_page *req)
 	nfs_list_add_request(req, &nfsi->commit);
 	nfsi->ncommit++;
 	spin_unlock(&nfsi->req_lock);
-	inc_page_state(nr_unstable);
+	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	mark_inode_dirty(inode);
 }
 #endif
@@ -583,6 +572,30 @@ static int nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, un
 	return ret;
 }
 
+static void nfs_cancel_dirty_list(struct list_head *head)
+{
+	struct nfs_page *req;
+	while(!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_inode_remove_request(req);
+		nfs_clear_page_writeback(req);
+	}
+}
+
+static void nfs_cancel_commit_list(struct list_head *head)
+{
+	struct nfs_page *req;
+
+	while(!list_empty(head)) {
+		req = nfs_list_entry(head->next);
+		nfs_list_remove_request(req);
+		nfs_inode_remove_request(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+		nfs_clear_page_writeback(req);
+	}
+}
+
 /*
  * nfs_scan_dirty - Scan an inode for dirty requests
  * @inode: NFS inode to scan
@@ -602,7 +615,6 @@ nfs_scan_dirty(struct inode *inode, struct list_head *dst, unsigned long idx_sta
 	if (nfsi->ndirty != 0) {
 		res = nfs_scan_lock_dirty(nfsi, dst, idx_start, npages);
 		nfsi->ndirty -= res;
-		sub_page_state(nr_dirty,res);
 		if ((nfsi->ndirty == 0) != list_empty(&nfsi->dirty))
 			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ndirty.\n");
 	}
@@ -627,7 +639,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st
 	int res = 0;
 
 	if (nfsi->ncommit != 0) {
-		res = nfs_scan_list(&nfsi->commit, dst, idx_start, npages);
+		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
 		nfsi->ncommit -= res;
 		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
 			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
@@ -981,24 +993,24 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
 	struct nfs_page *req = nfs_list_entry(head->next);
 	struct page *page = req->wb_page;
 	struct nfs_write_data *data;
-	unsigned int wsize = NFS_SERVER(inode)->wsize;
-	unsigned int nbytes, offset;
+	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
+	unsigned int offset;
 	int requests = 0;
 	LIST_HEAD(list);
 
 	nfs_list_remove_request(req);
 
 	nbytes = req->wb_bytes;
-	for (;;) {
-		data = nfs_writedata_alloc(1);
+	do {
+		size_t len = min(nbytes, wsize);
+
+		data = nfs_writedata_alloc(len);
 		if (!data)
 			goto out_bad;
 		list_add(&data->pages, &list);
 		requests++;
-		if (nbytes <= wsize)
-			break;
-		nbytes -= wsize;
-	}
+		nbytes -= len;
+	} while (nbytes != 0);
 	atomic_set(&req->wb_complete, requests);
 
 	ClearPageError(page);
@@ -1052,7 +1064,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
 	struct nfs_write_data *data;
 	unsigned int count;
 
-	data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
+	data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
 	if (!data)
 		goto out_bad;
 
@@ -1241,7 +1253,13 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
 	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
 		task->tk_pid, task->tk_status);
 
-	/* Call the NFS version-specific code */
+	/*
+	 * ->write_done will attempt to use post-op attributes to detect
+	 * conflicting writes by other clients. A strict interpretation
+	 * of close-to-open would allow us to continue caching even if
+	 * another writer had changed the file, but some applications
+	 * depend on tighter cache coherency when writing.
+	 */
 	status = NFS_PROTO(data->inode)->write_done(task, data);
 	if (status != 0)
 		return status;
@@ -1262,7 +1280,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
 		if (time_before(complain, jiffies)) {
 			dprintk("NFS: faulty NFS server %s:"
 				" (committed = %d) != (stable = %d)\n",
-				NFS_SERVER(data->inode)->hostname,
+				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
 				resp->verf->committed, argp->stable);
 			complain = jiffies + 300 * HZ;
 		}
@@ -1360,7 +1378,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 	struct nfs_write_data *data;
 	struct nfs_page *req;
 
-	data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
+	data = nfs_commit_alloc();
 
 	if (!data)
 		goto out_bad;
@@ -1375,6 +1393,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
@@ -1387,7 +1406,6 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
 	struct nfs_page *req;
-	int res = 0;
 
 	dprintk("NFS: %4d nfs_commit_done (status %d)\n",
 		task->tk_pid, task->tk_status);
@@ -1399,6 +1417,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
 		nfs_list_remove_request(req);
+		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 
 		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
 			req->wb_context->dentry->d_inode->i_sb->s_id,
@@ -1425,9 +1444,7 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
 		nfs_mark_request_dirty(req);
 	next:
 		nfs_clear_page_writeback(req);
-		res++;
 	}
-	sub_page_state(nr_unstable,res);
 }
 
 static const struct rpc_call_ops nfs_commit_ops = {
@@ -1495,15 +1512,25 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 		pages = nfs_scan_dirty(inode, &head, idx_start, npages);
 		if (pages != 0) {
 			spin_unlock(&nfsi->req_lock);
-			ret = nfs_flush_list(inode, &head, pages, how);
+			if (how & FLUSH_INVALIDATE)
+				nfs_cancel_dirty_list(&head);
+			else
+				ret = nfs_flush_list(inode, &head, pages, how);
 			spin_lock(&nfsi->req_lock);
 			continue;
 		}
 		if (nocommit)
			break;
-		pages = nfs_scan_commit(inode, &head, 0, 0);
+		pages = nfs_scan_commit(inode, &head, idx_start, npages);
 		if (pages == 0)
 			break;
+		if (how & FLUSH_INVALIDATE) {
+			spin_unlock(&nfsi->req_lock);
+			nfs_cancel_commit_list(&head);
+			spin_lock(&nfsi->req_lock);
+			continue;
+		}
+		pages += nfs_scan_commit(inode, &head, 0, 0);
 		spin_unlock(&nfsi->req_lock);
 		ret = nfs_commit_list(inode, &head, how);
 		spin_lock(&nfsi->req_lock);
@@ -1512,7 +1539,7 @@ int nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
 	return ret;
 }
 
-int nfs_init_writepagecache(void)
+int __init nfs_init_writepagecache(void)
 {
 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
 					     sizeof(struct nfs_write_data),
@@ -1538,7 +1565,6 @@ void nfs_destroy_writepagecache(void)
 {
 	mempool_destroy(nfs_commit_mempool);
 	mempool_destroy(nfs_wdata_mempool);
-	if (kmem_cache_destroy(nfs_wdata_cachep))
-		printk(KERN_INFO "nfs_write_data: not all structures were freed\n");
+	kmem_cache_destroy(nfs_wdata_cachep);
 }
