author     Steven Whitehouse <swhiteho@redhat.com>  2006-09-13 09:55:09 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>  2006-09-13 09:55:09 -0400
commit     0bc0748dfbefacce9c6b67ab23f2c80133b598f7 (patch)
tree       76946adc08a26973fcdb39fe0eb3ef5e1f8f121d /fs
parent     b6397893a5ed81970e803d61ee2f1a0e79f87438 (diff)
parent     95064a75ebf8744e1ff595e8cd7ff9b6c851523e (diff)
Merge branch 'master' into gfs2
Diffstat (limited to 'fs')
-rw-r--r--  fs/cifs/readdir.c            | 11
-rw-r--r--  fs/ext3/inode.c              | 11
-rw-r--r--  fs/nfs/direct.c              | 50
-rw-r--r--  fs/nfs/read.c                | 24
-rw-r--r--  fs/nfs/write.c               | 37
-rw-r--r--  fs/super.c                   |  1
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  | 18
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c   | 27
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c    |  2
-rw-r--r--  fs/xfs/xfs_alloc.h           | 20
-rw-r--r--  fs/xfs/xfs_fsops.c           | 16
-rw-r--r--  fs/xfs/xfs_mount.c           | 32
-rw-r--r--  fs/xfs/xfs_vfsops.c          |  3
13 files changed, 133 insertions(+), 119 deletions(-)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 105761e3ba0e..9aeb58a7d369 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -82,7 +82,6 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
                         if(*ptmp_inode == NULL)
                                 return rc;
                         rc = 1;
-                        d_instantiate(tmp_dentry, *ptmp_inode);
                 }
         } else {
                 tmp_dentry = d_alloc(file->f_dentry, qstring);
@@ -99,9 +98,7 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
                         tmp_dentry->d_op = &cifs_dentry_ops;
                 if(*ptmp_inode == NULL)
                         return rc;
-                rc = 1;
-                d_instantiate(tmp_dentry, *ptmp_inode);
-                d_rehash(tmp_dentry);
+                rc = 2;
         }
 
         tmp_dentry->d_time = jiffies;
@@ -870,6 +867,12 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
                                    pfindEntry, &obj_type, rc);
         else
                 fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
+
+        if(rc) /* new inode - needs to be tied to dentry */ {
+                d_instantiate(tmp_dentry, tmp_inode);
+                if(rc == 2)
+                        d_rehash(tmp_dentry);
+        }
 
 
         rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,
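
The hunks above make construct_dentry() stop instantiating the dentry itself; it now only reports what is needed via its return code (0 = inode already attached, 1 = new inode to instantiate, 2 = new dentry to instantiate and rehash), and cifs_filldir() performs the dcache calls only after fill_in_inode() has filled the inode. The following userspace sketch models just that return-code contract; the stub functions are placeholders for illustration, not the real dcache API.

/*
 * Illustrative model of the return-code contract: 0 = nothing to do,
 * 1 = instantiate only, 2 = instantiate and rehash.  Stubs stand in
 * for d_instantiate()/d_rehash().
 */
#include <stdio.h>

static void stub_d_instantiate(const char *name) { printf("d_instantiate(%s)\n", name); }
static void stub_d_rehash(const char *name)      { printf("d_rehash(%s)\n", name); }

static void filldir_step(const char *name, int rc)
{
        /* mirrors the new cifs_filldir() logic: tie the inode to the
         * dentry only after its contents have been filled in */
        if (rc) {
                stub_d_instantiate(name);
                if (rc == 2)
                        stub_d_rehash(name);
        }
}

int main(void)
{
        filldir_step("existing-entry", 0);      /* nothing to do */
        filldir_step("reused-dentry", 1);       /* instantiate only */
        filldir_step("fresh-dentry", 2);        /* instantiate and rehash */
        return 0;
}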
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index c5ee9f0691e3..0f0b1eadb98d 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1009,11 +1009,14 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
         buffer_trace_init(&dummy.b_history);
         err = ext3_get_blocks_handle(handle, inode, block, 1,
                                         &dummy, create, 1);
-        if (err == 1) {
+        /*
+         * ext3_get_blocks_handle() returns number of blocks
+         * mapped. 0 in case of a HOLE.
+         */
+        if (err > 0) {
+                if (err > 1)
+                        WARN_ON(1);
                 err = 0;
-        } else if (err >= 0) {
-                WARN_ON(1);
-                err = -EIO;
         }
         *errp = err;
         if (!err && buffer_mapped(&dummy)) {
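
The rewritten check relies on ext3_get_blocks_handle() returning the number of blocks it mapped (0 for a hole, negative for an error), so any positive value means success and a value above the single block requested only deserves a warning. A hedged standalone sketch of that convention follows; normalize_getblk_err() is a made-up helper for illustration, not the real kernel code.

/*
 * Sketch of the return-value convention: positive = number of blocks
 * mapped, 0 = hole, negative = error.  Positive results collapse to 0
 * (success), with a warning if more blocks came back than requested.
 */
#include <stdio.h>

static int normalize_getblk_err(int ret, int requested)
{
        if (ret > 0) {                  /* mapped at least one block */
                if (ret > requested)    /* asked for 1, got more: unexpected */
                        fprintf(stderr, "warning: mapped %d blocks, expected %d\n",
                                ret, requested);
                return 0;               /* success */
        }
        return ret;                     /* 0 (hole) or negative error passes through */
}

int main(void)
{
        printf("%d\n", normalize_getblk_err(1, 1));     /* 0: one block mapped */
        printf("%d\n", normalize_getblk_err(0, 1));     /* 0: hole, not an error */
        printf("%d\n", normalize_getblk_err(-5, 1));    /* -5: propagated error */
        return 0;
}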
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index fecd3b095deb..76ca1cbc38f9 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -100,25 +100,6 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
         return atomic_dec_and_test(&dreq->io_count);
 }
 
-/*
- * "size" is never larger than rsize or wsize.
- */
-static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
-{
-        int page_count;
-
-        page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        page_count -= user_addr >> PAGE_SHIFT;
-        BUG_ON(page_count < 0);
-
-        return page_count;
-}
-
-static inline unsigned int nfs_max_pages(unsigned int size)
-{
-        return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
-
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
@@ -276,28 +257,24 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
         struct nfs_open_context *ctx = dreq->ctx;
         struct inode *inode = ctx->dentry->d_inode;
         size_t rsize = NFS_SERVER(inode)->rsize;
-        unsigned int rpages = nfs_max_pages(rsize);
         unsigned int pgbase;
         int result;
         ssize_t started = 0;
 
         get_dreq(dreq);
 
-        pgbase = user_addr & ~PAGE_MASK;
         do {
                 struct nfs_read_data *data;
                 size_t bytes;
 
+                pgbase = user_addr & ~PAGE_MASK;
+                bytes = min(rsize,count);
+
                 result = -ENOMEM;
-                data = nfs_readdata_alloc(rpages);
+                data = nfs_readdata_alloc(pgbase + bytes);
                 if (unlikely(!data))
                         break;
 
-                bytes = rsize;
-                if (count < rsize)
-                        bytes = count;
-
-                data->npages = nfs_direct_count_pages(user_addr, bytes);
                 down_read(&current->mm->mmap_sem);
                 result = get_user_pages(current, current->mm, user_addr,
                                         data->npages, 1, 0, data->pagevec, NULL);
@@ -344,8 +321,10 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
                 started += bytes;
                 user_addr += bytes;
                 pos += bytes;
+                /* FIXME: Remove this unnecessary math from final patch */
                 pgbase += bytes;
                 pgbase &= ~PAGE_MASK;
+                BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
                 count -= bytes;
         } while (count != 0);
@@ -524,7 +503,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 
 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 {
-        dreq->commit_data = nfs_commit_alloc(0);
+        dreq->commit_data = nfs_commit_alloc();
         if (dreq->commit_data != NULL)
                 dreq->commit_data->req = (struct nfs_page *) dreq;
 }
@@ -605,28 +584,24 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
         struct nfs_open_context *ctx = dreq->ctx;
         struct inode *inode = ctx->dentry->d_inode;
         size_t wsize = NFS_SERVER(inode)->wsize;
-        unsigned int wpages = nfs_max_pages(wsize);
         unsigned int pgbase;
         int result;
         ssize_t started = 0;
 
         get_dreq(dreq);
 
-        pgbase = user_addr & ~PAGE_MASK;
         do {
                 struct nfs_write_data *data;
                 size_t bytes;
 
+                pgbase = user_addr & ~PAGE_MASK;
+                bytes = min(wsize,count);
+
                 result = -ENOMEM;
-                data = nfs_writedata_alloc(wpages);
+                data = nfs_writedata_alloc(pgbase + bytes);
                 if (unlikely(!data))
                         break;
 
-                bytes = wsize;
-                if (count < wsize)
-                        bytes = count;
-
-                data->npages = nfs_direct_count_pages(user_addr, bytes);
                 down_read(&current->mm->mmap_sem);
                 result = get_user_pages(current, current->mm, user_addr,
                                         data->npages, 0, 0, data->pagevec, NULL);
@@ -676,8 +651,11 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
                 started += bytes;
                 user_addr += bytes;
                 pos += bytes;
+
+                /* FIXME: Remove this useless math from the final patch */
                 pgbase += bytes;
                 pgbase &= ~PAGE_MASK;
+                BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
                 count -= bytes;
         } while (count != 0);
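
The rewritten loops size each allocation as pgbase + bytes, i.e. the byte span measured from the start of the first page the user buffer touches, and let nfs_readdata_alloc()/nfs_writedata_alloc() round that up to pages. Rounding pgbase + bytes up to whole pages gives the same count the removed nfs_direct_count_pages() helper computed from the raw user address; the standalone check below verifies that equivalence outside the kernel, assuming 4 KiB pages.

/*
 * Standalone check (4 KiB pages assumed) that rounding pgbase + bytes
 * up to whole pages matches the removed nfs_direct_count_pages() math.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long old_count(unsigned long user_addr, unsigned long size)
{
        /* old helper: pages spanned by [user_addr, user_addr + size) */
        return ((user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT)
             - (user_addr >> PAGE_SHIFT);
}

static unsigned long new_count(unsigned long user_addr, unsigned long bytes)
{
        unsigned long pgbase = user_addr & ~PAGE_MASK;  /* offset within first page */
        return (pgbase + bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
        unsigned long addr, len;

        for (addr = 0x1000; addr < 0x3000; addr += 517)
                for (len = 1; len < 3 * PAGE_SIZE; len += 911)
                        assert(old_count(addr, len) == new_count(addr, len));

        printf("page counts agree\n");
        return 0;
}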
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index da9cf11c326f..7a9ee00e0c61 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -43,13 +43,15 @@ static mempool_t *nfs_rdata_mempool;
 
 #define MIN_POOL_READ (32)
 
-struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+struct nfs_read_data *nfs_readdata_alloc(size_t len)
 {
+        unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
 
         if (p) {
                 memset(p, 0, sizeof(*p));
                 INIT_LIST_HEAD(&p->pages);
+                p->npages = pagecount;
                 if (pagecount <= ARRAY_SIZE(p->page_array))
                         p->pagevec = p->page_array;
                 else {
@@ -140,7 +142,7 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
         int result;
         struct nfs_read_data *rdata;
 
-        rdata = nfs_readdata_alloc(1);
+        rdata = nfs_readdata_alloc(count);
         if (!rdata)
                 return -ENOMEM;
 
@@ -336,25 +338,25 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
         struct nfs_page *req = nfs_list_entry(head->next);
         struct page *page = req->wb_page;
         struct nfs_read_data *data;
-        unsigned int rsize = NFS_SERVER(inode)->rsize;
-        unsigned int nbytes, offset;
+        size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
+        unsigned int offset;
         int requests = 0;
         LIST_HEAD(list);
 
         nfs_list_remove_request(req);
 
         nbytes = req->wb_bytes;
-        for(;;) {
-                data = nfs_readdata_alloc(1);
+        do {
+                size_t len = min(nbytes,rsize);
+
+                data = nfs_readdata_alloc(len);
                 if (!data)
                         goto out_bad;
                 INIT_LIST_HEAD(&data->pages);
                 list_add(&data->pages, &list);
                 requests++;
-                if (nbytes <= rsize)
-                        break;
-                nbytes -= rsize;
-        }
+                nbytes -= len;
+        } while(nbytes != 0);
         atomic_set(&req->wb_complete, requests);
 
         ClearPageError(page);
@@ -402,7 +404,7 @@ static int nfs_pagein_one(struct list_head *head, struct inode *inode)
         if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
                 return nfs_pagein_multi(head, inode);
 
-        data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
+        data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
         if (!data)
                 goto out_bad;
 
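
The do/while form above carves req->wb_bytes into chunks of at most rsize, so the number of sub-requests is the ceiling of nbytes / rsize and each allocation is sized for its own chunk. A minimal userspace sketch of that splitting loop follows; the allocation is reduced to a comment and the names are illustrative only.

/*
 * Sketch of the new splitting loop in nfs_pagein_multi()/nfs_flush_multi():
 * a request of nbytes becomes ceil(nbytes / rsize) sub-requests.
 */
#include <stdio.h>

static unsigned int split_requests(size_t nbytes, size_t rsize)
{
        unsigned int requests = 0;

        do {
                size_t len = nbytes < rsize ? nbytes : rsize;   /* min(nbytes, rsize) */

                /* in the kernel: allocate one nfs_read_data sized for 'len' here */
                requests++;
                nbytes -= len;
        } while (nbytes != 0);

        return requests;
}

int main(void)
{
        printf("%u\n", split_requests(4096, 1024));     /* 4 */
        printf("%u\n", split_requests(5000, 1024));     /* 5 */
        printf("%u\n", split_requests(100, 1024));      /* 1 */
        return 0;
}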
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 50774991f8d5..8ab3cf10d792 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -90,22 +90,13 @@ static mempool_t *nfs_commit_mempool;
 
 static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
 
-struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_commit_alloc(void)
 {
         struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
 
         if (p) {
                 memset(p, 0, sizeof(*p));
                 INIT_LIST_HEAD(&p->pages);
-                if (pagecount <= ARRAY_SIZE(p->page_array))
-                        p->pagevec = p->page_array;
-                else {
-                        p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
-                        if (!p->pagevec) {
-                                mempool_free(p, nfs_commit_mempool);
-                                p = NULL;
-                        }
-                }
         }
         return p;
 }
@@ -117,13 +108,15 @@ void nfs_commit_free(struct nfs_write_data *p)
         mempool_free(p, nfs_commit_mempool);
 }
 
-struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+struct nfs_write_data *nfs_writedata_alloc(size_t len)
 {
+        unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
 
         if (p) {
                 memset(p, 0, sizeof(*p));
                 INIT_LIST_HEAD(&p->pages);
+                p->npages = pagecount;
                 if (pagecount <= ARRAY_SIZE(p->page_array))
                         p->pagevec = p->page_array;
                 else {
@@ -208,7 +201,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
         int result, written = 0;
         struct nfs_write_data *wdata;
 
-        wdata = nfs_writedata_alloc(1);
+        wdata = nfs_writedata_alloc(wsize);
         if (!wdata)
                 return -ENOMEM;
 
@@ -999,24 +992,24 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, int how)
         struct nfs_page *req = nfs_list_entry(head->next);
         struct page *page = req->wb_page;
         struct nfs_write_data *data;
-        unsigned int wsize = NFS_SERVER(inode)->wsize;
-        unsigned int nbytes, offset;
+        size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
+        unsigned int offset;
         int requests = 0;
         LIST_HEAD(list);
 
         nfs_list_remove_request(req);
 
         nbytes = req->wb_bytes;
-        for (;;) {
-                data = nfs_writedata_alloc(1);
+        do {
+                size_t len = min(nbytes, wsize);
+
+                data = nfs_writedata_alloc(len);
                 if (!data)
                         goto out_bad;
                 list_add(&data->pages, &list);
                 requests++;
-                if (nbytes <= wsize)
-                        break;
-                nbytes -= wsize;
-        }
+                nbytes -= len;
+        } while (nbytes != 0);
         atomic_set(&req->wb_complete, requests);
 
         ClearPageError(page);
@@ -1070,7 +1063,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, int how)
         struct nfs_write_data *data;
         unsigned int count;
 
-        data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
+        data = nfs_writedata_alloc(NFS_SERVER(inode)->wsize);
         if (!data)
                 goto out_bad;
 
@@ -1378,7 +1371,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
         struct nfs_write_data *data;
         struct nfs_page *req;
 
-        data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
+        data = nfs_commit_alloc();
 
         if (!data)
                 goto out_bad;
diff --git a/fs/super.c b/fs/super.c
index 6d4e8174b6db..5c4c94d5495e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -49,6 +49,7 @@ DEFINE_SPINLOCK(sb_lock);
 
 /**
  * alloc_super - create new superblock
+ * @type: filesystem type superblock should belong to
  *
  * Allocates and initializes a new &struct super_block. alloc_super()
 * returns a pointer new superblock or %NULL if allocation had failed.
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c40f81ba9b13..34dcb43a7837 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1390,11 +1390,19 @@ xfs_vm_direct_IO(
 
         iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 
-        ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-                iomap.iomap_target->bt_bdev,
-                iov, offset, nr_segs,
-                xfs_get_blocks_direct,
-                xfs_end_io_direct);
+        if (rw == WRITE) {
+                ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
+                        iomap.iomap_target->bt_bdev,
+                        iov, offset, nr_segs,
+                        xfs_get_blocks_direct,
+                        xfs_end_io_direct);
+        } else {
+                ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
+                        iomap.iomap_target->bt_bdev,
+                        iov, offset, nr_segs,
+                        xfs_get_blocks_direct,
+                        xfs_end_io_direct);
+        }
 
         if (unlikely(ret <= 0 && iocb->private))
                 xfs_destroy_ioend(iocb->private);
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 5d9cfd91ad08..ee788b1cb364 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -264,7 +264,9 @@ xfs_read(
                                         dmflags, &locktype);
                 if (ret) {
                         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-                        goto unlock_mutex;
+                        if (unlikely(ioflags & IO_ISDIRECT))
+                                mutex_unlock(&inode->i_mutex);
+                        return ret;
                 }
         }
 
@@ -272,6 +274,9 @@ xfs_read(
                 bhv_vop_flushinval_pages(vp, ctooff(offtoct(*offset)),
                                          -1, FI_REMAPF_LOCKED);
 
+        if (unlikely(ioflags & IO_ISDIRECT))
+                mutex_unlock(&inode->i_mutex);
+
         xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
                                 (void *)iovp, segs, *offset, ioflags);
         ret = __generic_file_aio_read(iocb, iovp, segs, offset);
@@ -281,10 +286,6 @@ xfs_read(
                 XFS_STATS_ADD(xs_read_bytes, ret);
 
         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-
-unlock_mutex:
-        if (unlikely(ioflags & IO_ISDIRECT))
-                mutex_unlock(&inode->i_mutex);
         return ret;
 }
 
@@ -390,6 +391,8 @@ xfs_splice_write(
         xfs_inode_t             *ip = XFS_BHVTOI(bdp);
         xfs_mount_t             *mp = ip->i_mount;
         ssize_t                 ret;
+        struct inode            *inode = outfilp->f_mapping->host;
+        xfs_fsize_t             isize;
 
         XFS_STATS_INC(xs_write_calls);
         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -416,6 +419,20 @@ xfs_splice_write(
         if (ret > 0)
                 XFS_STATS_ADD(xs_write_bytes, ret);
 
+        isize = i_size_read(inode);
+        if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
+                *ppos = isize;
+
+        if (*ppos > ip->i_d.di_size) {
+                xfs_ilock(ip, XFS_ILOCK_EXCL);
+                if (*ppos > ip->i_d.di_size) {
+                        ip->i_d.di_size = *ppos;
+                        i_size_write(inode, *ppos);
+                        ip->i_update_core = 1;
+                        ip->i_update_size = 1;
+                }
+                xfs_iunlock(ip, XFS_ILOCK_EXCL);
+        }
         xfs_iunlock(ip, XFS_IOLOCK_EXCL);
         return ret;
 }
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index f137856c3261..db8872be8c87 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -203,7 +203,7 @@ xfs_qm_statvfs(
         if (error || !vnode)
                 return error;
 
-        mp = XFS_BHVTOM(bhv);
+        mp = xfs_vfstom(bhvtovfs(bhv));
         ip = xfs_vtoi(vnode);
 
         if (!(ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT))
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 650591f999ae..5a4256120ccc 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -44,6 +44,26 @@ typedef enum xfs_alloctype
 #define XFS_ALLOC_FLAG_FREEING  0x00000002  /* indicate caller is freeing extents*/
 
 /*
+ * In order to avoid ENOSPC-related deadlock caused by
+ * out-of-order locking of AGF buffer (PV 947395), we place
+ * constraints on the relationship among actual allocations for
+ * data blocks, freelist blocks, and potential file data bmap
+ * btree blocks. However, these restrictions may result in no
+ * actual space allocated for a delayed extent, for example, a data
+ * block in a certain AG is allocated but there is no additional
+ * block for the additional bmap btree block due to a split of the
+ * bmap btree of the file. The result of this may lead to an
+ * infinite loop in xfssyncd when the file gets flushed to disk and
+ * all delayed extents need to be actually allocated. To get around
+ * this, we explicitly set aside a few blocks which will not be
+ * reserved in delayed allocation. Considering the minimum number of
+ * needed freelist blocks is 4 fsbs _per AG_, a potential split of file's bmap
+ * btree requires 1 fsb, so we set the number of set-aside blocks
+ * to 4 + 4*agcount.
+ */
+#define XFS_ALLOC_SET_ASIDE(mp)  (4 + ((mp)->m_sb.sb_agcount * 4))
+
+/*
 * Argument structure for xfs_alloc routines.
 * This is turned into a structure to avoid having 20 arguments passed
 * down several levels of the stack.
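
The new macro sets aside 4 blocks for a potential bmap btree split plus 4 freelist blocks per allocation group. The standalone snippet below simply evaluates the same formula against a fake mount structure that mimics the one field the macro reads; it is not the real xfs_mount_t.

/*
 * Illustration of XFS_ALLOC_SET_ASIDE(): 4 + 4 blocks per AG.
 */
#include <stdio.h>

struct fake_sb    { unsigned int sb_agcount; };
struct fake_mount { struct fake_sb m_sb; };

#define XFS_ALLOC_SET_ASIDE(mp)  (4 + ((mp)->m_sb.sb_agcount * 4))

int main(void)
{
        unsigned int agcounts[] = { 1, 4, 16, 64 };

        for (unsigned int i = 0; i < sizeof(agcounts) / sizeof(agcounts[0]); i++) {
                struct fake_mount m = { { agcounts[i] } };
                printf("agcount=%2u -> %u blocks set aside\n",
                       agcounts[i], XFS_ALLOC_SET_ASIDE(&m));
        }
        return 0;
}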
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 077629bab532..c064e72ada9e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -462,7 +462,7 @@ xfs_fs_counts(
 
         xfs_icsb_sync_counters_lazy(mp);
         s = XFS_SB_LOCK(mp);
-        cnt->freedata = mp->m_sb.sb_fdblocks;
+        cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
         cnt->freertx = mp->m_sb.sb_frextents;
         cnt->freeino = mp->m_sb.sb_ifree;
         cnt->allocino = mp->m_sb.sb_icount;
@@ -519,15 +519,19 @@ xfs_reserve_blocks(
                 }
                 mp->m_resblks = request;
         } else {
+                __int64_t       free;
+
+                free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
                 delta = request - mp->m_resblks;
-                lcounter = mp->m_sb.sb_fdblocks - delta;
+                lcounter = free - delta;
                 if (lcounter < 0) {
                         /* We can't satisfy the request, just get what we can */
-                        mp->m_resblks += mp->m_sb.sb_fdblocks;
-                        mp->m_resblks_avail += mp->m_sb.sb_fdblocks;
-                        mp->m_sb.sb_fdblocks = 0;
+                        mp->m_resblks += free;
+                        mp->m_resblks_avail += free;
+                        mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
                 } else {
-                        mp->m_sb.sb_fdblocks = lcounter;
+                        mp->m_sb.sb_fdblocks =
+                                lcounter + XFS_ALLOC_SET_ASIDE(mp);
                         mp->m_resblks = request;
                         mp->m_resblks_avail += delta;
                 }
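
With the set-aside in place, xfs_reserve_blocks() treats sb_fdblocks minus XFS_ALLOC_SET_ASIDE(mp) as the usable free space and never lets sb_fdblocks drop below that floor. The userspace sketch below models only the growth branch shown in the hunk above, with plain longs standing in for the mount and superblock fields; it is an assumption-laden illustration, not XFS code.

/*
 * Sketch of the adjusted reserve-pool arithmetic: only blocks above
 * the set-aside floor can move into the reserve, and fdblocks is
 * clamped at the floor rather than at zero.
 */
#include <stdio.h>

struct fake_fs {
        long fdblocks;          /* free blocks recorded in the superblock */
        long set_aside;         /* XFS_ALLOC_SET_ASIDE() for this filesystem */
        long resblks;           /* current reserve pool size */
        long resblks_avail;     /* unused portion of the reserve pool */
};

static void grow_reserve(struct fake_fs *fs, long request)
{
        long free = fs->fdblocks - fs->set_aside;
        long delta = request - fs->resblks;
        long lcounter = free - delta;

        if (lcounter < 0) {
                /* can't satisfy the request, grab whatever is above the floor */
                fs->resblks += free;
                fs->resblks_avail += free;
                fs->fdblocks = fs->set_aside;
        } else {
                fs->fdblocks = lcounter + fs->set_aside;
                fs->resblks = request;
                fs->resblks_avail += delta;
        }
}

int main(void)
{
        struct fake_fs fs = { .fdblocks = 100, .set_aside = 20,
                              .resblks = 0, .resblks_avail = 0 };

        grow_reserve(&fs, 50);          /* plenty of headroom */
        printf("fdblocks=%ld resblks=%ld\n", fs.fdblocks, fs.resblks);
        grow_reserve(&fs, 500);         /* over-large request: clamp at the floor */
        printf("fdblocks=%ld resblks=%ld\n", fs.fdblocks, fs.resblks);
        return 0;
}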
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 4be5c0b2d296..9dfae18d995f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1243,24 +1243,6 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
         xfs_trans_log_buf(tp, bp, first, last);
 }
 
-/*
- * In order to avoid ENOSPC-related deadlock caused by
- * out-of-order locking of AGF buffer (PV 947395), we place
- * constraints on the relationship among actual allocations for
- * data blocks, freelist blocks, and potential file data bmap
- * btree blocks. However, these restrictions may result in no
- * actual space allocated for a delayed extent, for example, a data
- * block in a certain AG is allocated but there is no additional
- * block for the additional bmap btree block due to a split of the
- * bmap btree of the file. The result of this may lead to an
- * infinite loop in xfssyncd when the file gets flushed to disk and
- * all delayed extents need to be actually allocated. To get around
- * this, we explicitly set aside a few blocks which will not be
- * reserved in delayed allocation. Considering the minimum number of
- * needed freelist blocks is 4 fsbs, a potential split of file's bmap
- * btree requires 1 fsb, so we set the number of set-aside blocks to 8.
- */
-#define SET_ASIDE_BLOCKS 8
 
 /*
  * xfs_mod_incore_sb_unlocked() is a utility routine common used to apply
@@ -1306,7 +1288,8 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
                 return 0;
         case XFS_SBS_FDBLOCKS:
 
-                lcounter = (long long)mp->m_sb.sb_fdblocks - SET_ASIDE_BLOCKS;
+                lcounter = (long long)
+                        mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
                 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
 
                 if (delta > 0) {                /* Putting blocks back */
@@ -1340,7 +1323,7 @@ xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
                         }
                 }
 
-                mp->m_sb.sb_fdblocks = lcounter + SET_ASIDE_BLOCKS;
+                mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
                 return 0;
         case XFS_SBS_FREXTENTS:
                 lcounter = (long long)mp->m_sb.sb_frextents;
@@ -2021,7 +2004,8 @@ xfs_icsb_sync_counters_lazy(
  * when we get near ENOSPC.
  */
 #define XFS_ICSB_INO_CNTR_REENABLE      64
-#define XFS_ICSB_FDBLK_CNTR_REENABLE    512
+#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
+                (512 + XFS_ALLOC_SET_ASIDE(mp))
 STATIC void
 xfs_icsb_balance_counter(
         xfs_mount_t     *mp,
@@ -2055,7 +2039,7 @@ xfs_icsb_balance_counter(
         case XFS_SBS_FDBLOCKS:
                 count = mp->m_sb.sb_fdblocks;
                 resid = do_div(count, weight);
-                if (count < XFS_ICSB_FDBLK_CNTR_REENABLE)
+                if (count < XFS_ICSB_FDBLK_CNTR_REENABLE(mp))
                         goto out;
                 break;
         default:
@@ -2110,11 +2094,11 @@ again:
         case XFS_SBS_FDBLOCKS:
                 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
 
-                lcounter = icsbp->icsb_fdblocks;
+                lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
                 lcounter += delta;
                 if (unlikely(lcounter < 0))
                         goto slow_path;
-                icsbp->icsb_fdblocks = lcounter;
+                icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
                 break;
         default:
                 BUG();
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index b427d220a169..a34796e57afb 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -811,7 +811,8 @@ xfs_statvfs(
         statp->f_bsize = sbp->sb_blocksize;
         lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
         statp->f_blocks = sbp->sb_dblocks - lsize;
-        statp->f_bfree = statp->f_bavail = sbp->sb_fdblocks;
+        statp->f_bfree = statp->f_bavail =
+                        sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
         fakeinos = statp->f_bfree << sbp->sb_inopblog;
 #if XFS_BIG_INUMS
         fakeinos += mp->m_inoadd;