author      Nick Piggin <npiggin@suse.de>                    2008-10-16 01:04:32 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>   2008-10-16 14:21:48 -0400
commit      15b4650e55e06d2cc05115767551cd3ace875431
tree        5542e01b8651140b707b6b9ebe81acb6e6ca41b3
parent      8360e81b5dd23c153301f08937a68fd67d9b46c0
afs: convert to new aops
With write_begin/write_end we cannot assume that a write will fully
complete, so this conversion goes the easy way and always brings the page
uptodate before the write.
[dhowells@redhat.com: style tweaks]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   fs/afs/file.c       |   4
-rw-r--r--   fs/afs/internal.h   |   8
-rw-r--r--   fs/afs/write.c      | 131
3 files changed, 51 insertions(+), 92 deletions(-)
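For orientation, here is a minimal sketch (not part of the commit) of how a caller is expected to drive the pair of hooks this patch wires up. The afs_write_begin()/afs_write_end() prototypes are taken from the fs/afs/internal.h hunk below; example_buffered_write() and its data/len parameters are hypothetical names used for illustration only, and the copy of user data between the two calls is elided.

/*
 * Illustrative sketch only, not from the patch.  ->write_begin locks and
 * returns a page covering [pos, pos + len) and, after this conversion,
 * guarantees it is uptodate; the caller copies the new data into it;
 * ->write_end reports how many bytes were actually copied, dirties the
 * page and releases it.  Assumes the declarations from fs/afs/internal.h.
 */
static int example_buffered_write(struct file *file, loff_t pos,
				  const void *data, unsigned len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata;
	int ret;

	ret = afs_write_begin(file, mapping, pos, len, 0, &page, &fsdata);
	if (ret < 0)
		return ret;

	/* ... copy "len" bytes from "data" into the locked page here ... */

	/* returns the number of bytes accepted (at most "len") */
	return afs_write_end(file, mapping, pos, len, len, page, fsdata);
}

In the real kernel this loop lives in the generic buffered-write path, which can pass copied < len when the copy from userspace faults part-way through; that possibility is exactly why write_begin can no longer rely on the whole requested range being overwritten.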
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 525f7c56e068..a3901769a96c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -50,8 +50,8 @@ const struct address_space_operations afs_fs_aops = {
 	.launder_page	= afs_launder_page,
 	.releasepage	= afs_releasepage,
 	.invalidatepage	= afs_invalidatepage,
-	.prepare_write	= afs_prepare_write,
-	.commit_write	= afs_commit_write,
+	.write_begin	= afs_write_begin,
+	.write_end	= afs_write_end,
 	.writepage	= afs_writepage,
 	.writepages	= afs_writepages,
 };
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 3cb6920ff30b..67f259d99cd6 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -728,8 +728,12 @@ extern int afs_volume_release_fileserver(struct afs_vnode *,
  */
 extern int afs_set_page_dirty(struct page *);
 extern void afs_put_writeback(struct afs_writeback *);
-extern int afs_prepare_write(struct file *, struct page *, unsigned, unsigned);
-extern int afs_commit_write(struct file *, struct page *, unsigned, unsigned);
+extern int afs_write_begin(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata);
+extern int afs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata);
 extern int afs_writepage(struct page *, struct writeback_control *);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern int afs_write_inode(struct inode *, int);
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 065b4e10681a..d6b85dab35fc 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,15 +84,23 @@ void afs_put_writeback(struct afs_writeback *wb)
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-			 unsigned start, unsigned len, struct page *page)
+			 loff_t pos, unsigned len, struct page *page)
 {
+	loff_t i_size;
+	unsigned eof;
 	int ret;
 
-	_enter(",,%u,%u", start, len);
+	_enter(",,%llu,%u", (unsigned long long)pos, len);
 
-	ASSERTCMP(start + len, <=, PAGE_SIZE);
+	ASSERTCMP(len, <=, PAGE_CACHE_SIZE);
 
-	ret = afs_vnode_fetch_data(vnode, key, start, len, page);
+	i_size = i_size_read(&vnode->vfs_inode);
+	if (pos + len > i_size)
+		eof = i_size;
+	else
+		eof = PAGE_CACHE_SIZE;
+
+	ret = afs_vnode_fetch_data(vnode, key, 0, eof, page);
 	if (ret < 0) {
 		if (ret == -ENOENT) {
 			_debug("got NOENT from server"
@@ -107,109 +115,55 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
 }
 
 /*
- * prepare a page for being written to
- */
-static int afs_prepare_page(struct afs_vnode *vnode, struct page *page,
-			    struct key *key, unsigned offset, unsigned to)
-{
-	unsigned eof, tail, start, stop, len;
-	loff_t i_size, pos;
-	void *p;
-	int ret;
-
-	_enter("");
-
-	if (offset == 0 && to == PAGE_SIZE)
-		return 0;
-
-	p = kmap_atomic(page, KM_USER0);
-
-	i_size = i_size_read(&vnode->vfs_inode);
-	pos = (loff_t) page->index << PAGE_SHIFT;
-	if (pos >= i_size) {
-		/* partial write, page beyond EOF */
-		_debug("beyond");
-		if (offset > 0)
-			memset(p, 0, offset);
-		if (to < PAGE_SIZE)
-			memset(p + to, 0, PAGE_SIZE - to);
-		kunmap_atomic(p, KM_USER0);
-		return 0;
-	}
-
-	if (i_size - pos >= PAGE_SIZE) {
-		/* partial write, page entirely before EOF */
-		_debug("before");
-		tail = eof = PAGE_SIZE;
-	} else {
-		/* partial write, page overlaps EOF */
-		eof = i_size - pos;
-		_debug("overlap %u", eof);
-		tail = max(eof, to);
-		if (tail < PAGE_SIZE)
-			memset(p + tail, 0, PAGE_SIZE - tail);
-		if (offset > eof)
-			memset(p + eof, 0, PAGE_SIZE - eof);
-	}
-
-	kunmap_atomic(p, KM_USER0);
-
-	ret = 0;
-	if (offset > 0 || eof > to) {
-		/* need to fill one or two bits that aren't going to be written
-		 * (cover both fillers in one read if there are two) */
-		start = (offset > 0) ? 0 : to;
-		stop = (eof > to) ? eof : offset;
-		len = stop - start;
-		_debug("wr=%u-%u av=0-%u rd=%u@%u",
-		       offset, to, eof, start, len);
-		ret = afs_fill_page(vnode, key, start, len, page);
-	}
-
-	_leave(" = %d", ret);
-	return ret;
-}
-
-/*
  * prepare to perform part of a write to a page
- * - the caller holds the page locked, preventing it from being written out or
- *   modified by anyone else
  */
-int afs_prepare_write(struct file *file, struct page *page,
-		      unsigned offset, unsigned to)
+int afs_write_begin(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned flags,
+		    struct page **pagep, void **fsdata)
 {
 	struct afs_writeback *candidate, *wb;
 	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
+	struct page *page;
 	struct key *key = file->private_data;
-	pgoff_t index;
+	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+	unsigned to = from + len;
+	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
 	int ret;
 
 	_enter("{%x:%u},{%lx},%u,%u",
-	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
+	       vnode->fid.vid, vnode->fid.vnode, index, from, to);
 
 	candidate = kzalloc(sizeof(*candidate), GFP_KERNEL);
 	if (!candidate)
 		return -ENOMEM;
 	candidate->vnode = vnode;
-	candidate->first = candidate->last = page->index;
-	candidate->offset_first = offset;
+	candidate->first = candidate->last = index;
+	candidate->offset_first = from;
 	candidate->to_last = to;
 	candidate->usage = 1;
 	candidate->state = AFS_WBACK_PENDING;
 	init_waitqueue_head(&candidate->waitq);
 
+	page = __grab_cache_page(mapping, index);
+	if (!page) {
+		kfree(candidate);
+		return -ENOMEM;
+	}
+	*pagep = page;
+	/* page won't leak in error case: it eventually gets cleaned off LRU */
+
 	if (!PageUptodate(page)) {
 		_debug("not up to date");
-		ret = afs_prepare_page(vnode, page, key, offset, to);
+		ret = afs_fill_page(vnode, key, pos, len, page);
 		if (ret < 0) {
 			kfree(candidate);
 			_leave(" = %d [prep]", ret);
 			return ret;
 		}
+		SetPageUptodate(page);
 	}
 
 try_again:
-	index = page->index;
 	spin_lock(&vnode->writeback_lock);
 
 	/* see if this page is already pending a writeback under a suitable key
@@ -242,8 +196,8 @@ try_again:
 subsume_in_current_wb:
 	_debug("subsume");
 	ASSERTRANGE(wb->first, <=, index, <=, wb->last);
-	if (index == wb->first && offset < wb->offset_first)
-		wb->offset_first = offset;
+	if (index == wb->first && from < wb->offset_first)
+		wb->offset_first = from;
 	if (index == wb->last && to > wb->to_last)
 		wb->to_last = to;
 	spin_unlock(&vnode->writeback_lock);
@@ -289,17 +243,17 @@ flush_conflicting_wb:
 /*
  * finalise part of a write to a page
  */
-int afs_commit_write(struct file *file, struct page *page,
-		     unsigned offset, unsigned to)
+int afs_write_end(struct file *file, struct address_space *mapping,
+		  loff_t pos, unsigned len, unsigned copied,
+		  struct page *page, void *fsdata)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);
 	loff_t i_size, maybe_i_size;
 
-	_enter("{%x:%u},{%lx},%u,%u",
-	       vnode->fid.vid, vnode->fid.vnode, page->index, offset, to);
+	_enter("{%x:%u},{%lx}",
+	       vnode->fid.vid, vnode->fid.vnode, page->index);
 
-	maybe_i_size = (loff_t) page->index << PAGE_SHIFT;
-	maybe_i_size += to;
+	maybe_i_size = pos + copied;
 
 	i_size = i_size_read(&vnode->vfs_inode);
 	if (maybe_i_size > i_size) {
@@ -310,12 +264,13 @@ int afs_commit_write(struct file *file, struct page *page,
 		spin_unlock(&vnode->writeback_lock);
 	}
 
-	SetPageUptodate(page);
 	set_page_dirty(page);
 	if (PageDirty(page))
 		_debug("dirtied");
+	unlock_page(page);
+	page_cache_release(page);
 
-	return 0;
+	return copied;
 }
 
 /*