author    Steven Whitehouse <swhiteho@redhat.com>  2006-05-05 16:59:11 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-05-05 16:59:11 -0400
commit    fd88de569b802c4a04aaa6ee74667775f4aed8c6 (patch)
tree      1766c45303798bf289059afc8f117cf8bc784086 /fs/gfs2/ops_address.c
parent    5bb76af1e089ac186c15c6aa792340d22b63d4b4 (diff)
[GFS2] Readpages support
This adds readpages support (and also corrects a small bug in the
readpage error path at the same time). Hopefully this will improve
performance by allowing GFS2 to submit larger lumps of I/O at a time.

In order to simplify the setting of BH_Boundary, it currently gets set
when we hit the end of an indirect pointer block. There is always a
boundary at this point with the current allocation code, but it doesn't
get all the boundaries right yet, so there is still room for
improvement here. See the comments in fs/gfs2/ops_address.c for further
information about readpages with GFS2.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
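Background on BH_Boundary: mpage_readpages() merges physically
contiguous blocks into one large BIO, and the boundary flag tells it to
submit what it has accumulated, because the block after a boundary
(here, the end of an indirect pointer block) is unlikely to be
contiguous. The following standalone program (illustrative only, not
kernel code; names like submit_io are made up) shows the coalescing
idea:

/* Illustrative only: coalesce contiguous blocks into one submission;
 * a boundary flag flushes early, mirroring what BH_Boundary buys the
 * mpage code. */
#include <stdio.h>

struct mapped_block {
	long dblock;   /* physical block, as gfs2_block_map() would return */
	int boundary;  /* analogue of BH_Boundary */
};

static void submit_io(long start, int count)
{
	printf("submit: %d block(s) starting at %ld\n", count, start);
}

int main(void)
{
	struct mapped_block map[] = {
		{ 100, 0 }, { 101, 0 }, { 102, 1 },  /* boundary at 102 */
		{ 250, 0 }, { 251, 0 },
	};
	unsigned n = sizeof(map) / sizeof(map[0]);
	long start = 0, prev = 0;
	int count = 0;
	unsigned i;

	for (i = 0; i < n; i++) {
		if (count && map[i].dblock != prev + 1) {
			submit_io(start, count);  /* discontiguous: flush */
			count = 0;
		}
		if (count == 0)
			start = map[i].dblock;
		prev = map[i].dblock;
		count++;
		if (map[i].boundary) {
			submit_io(start, count);  /* boundary: flush early */
			count = 0;
		}
	}
	if (count)
		submit_io(start, count);
	return 0;
}

Running it submits blocks 100..102 as one lump and 250..251 as another,
rather than five single-block reads.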
Diffstat (limited to 'fs/gfs2/ops_address.c')
-rw-r--r--  fs/gfs2/ops_address.c | 145
1 file changed, 117 insertions(+), 28 deletions(-)
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index afcc12a00a3c..d89179b316fe 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/buffer_head.h>
 #include <linux/pagemap.h>
+#include <linux/pagevec.h>
 #include <linux/mpage.h>
 #include <linux/fs.h>
 #include <linux/gfs2_ondisk.h>
@@ -47,12 +48,12 @@
 int gfs2_get_block(struct inode *inode, sector_t lblock,
 		   struct buffer_head *bh_result, int create)
 {
-	struct gfs2_inode *ip = inode->u.generic_ip;
 	int new = create;
 	uint64_t dblock;
 	int error;
+	int boundary;
 
-	error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
+	error = gfs2_block_map(inode, lblock, &new, &dblock, &boundary);
 	if (error)
 		return error;
 
@@ -62,6 +63,8 @@ int gfs2_get_block(struct inode *inode, sector_t lblock,
 	map_bh(bh_result, inode->i_sb, dblock);
 	if (new)
 		set_buffer_new(bh_result);
+	if (boundary)
+		set_buffer_boundary(bh_result);
 
 	return 0;
 }
@@ -83,8 +86,9 @@ static int get_block_noalloc(struct inode *inode, sector_t lblock,
 	int new = 0;
 	uint64_t dblock;
 	int error;
+	int boundary;
 
-	error = gfs2_block_map(ip, lblock, &new, &dblock, NULL);
+	error = gfs2_block_map(inode, lblock, &new, &dblock, &boundary);
 	if (error)
 		return error;
 
@@ -92,6 +96,8 @@ static int get_block_noalloc(struct inode *inode, sector_t lblock,
 		map_bh(bh_result, inode->i_sb, dblock);
 	else if (gfs2_assert_withdraw(ip->i_sbd, !create))
 		error = -EIO;
+	if (boundary)
+		set_buffer_boundary(bh_result);
 
 	return error;
 }
@@ -151,6 +157,19 @@ out_ignore:
 	return 0;
 }
 
+static int zero_readpage(struct page *page)
+{
+	void *kaddr;
+
+	kaddr = kmap_atomic(page, KM_USER0);
+	memset(kaddr, 0, PAGE_CACHE_SIZE);
+	kunmap_atomic(page, KM_USER0);
+
+	SetPageUptodate(page);
+
+	return 0;
+}
+
 /**
  * stuffed_readpage - Fill in a Linux page with stuffed file data
  * @ip: the inode
@@ -165,17 +184,18 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 	void *kaddr;
 	int error;
 
+	/* Only the first page of a stuffed file might contain data */
+	if (unlikely(page->index))
+		return zero_readpage(page);
+
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (error)
 		return error;
 
 	kaddr = kmap_atomic(page, KM_USER0);
-	memcpy((char *)kaddr,
-	       dibh->b_data + sizeof(struct gfs2_dinode),
-	       ip->i_di.di_size);
-	memset((char *)kaddr + ip->i_di.di_size,
-	       0,
-	       PAGE_CACHE_SIZE - ip->i_di.di_size);
+	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
+	       ip->i_di.di_size);
+	memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
 	kunmap_atomic(page, KM_USER0);
 
 	brelse(dibh);
@@ -185,19 +205,6 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 	return 0;
 }
 
-static int zero_readpage(struct page *page)
-{
-	void *kaddr;
-
-	kaddr = kmap_atomic(page, KM_USER0);
-	memset(kaddr, 0, PAGE_CACHE_SIZE);
-	kunmap_atomic(page, KM_USER0);
-
-	SetPageUptodate(page);
-	unlock_page(page);
-
-	return 0;
-}
 
 /**
  * gfs2_readpage - readpage with locking
@@ -215,19 +222,16 @@ static int gfs2_readpage(struct file *file, struct page *page)
 	struct gfs2_holder gh;
 	int error;
 
-	if (file != &gfs2_internal_file_sentinal) {
+	if (likely(file != &gfs2_internal_file_sentinal)) {
 		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|GL_AOP, &gh);
 		error = gfs2_glock_nq_m_atime(1, &gh);
-		if (error)
+		if (unlikely(error))
 			goto out_unlock;
 	}
 
 	if (gfs2_is_stuffed(ip)) {
-		if (!page->index) {
-			error = stuffed_readpage(ip, page);
-			unlock_page(page);
-		} else
-			error = zero_readpage(page);
+		error = stuffed_readpage(ip, page);
+		unlock_page(page);
 	} else
 		error = mpage_readpage(page, gfs2_get_block);
 
@@ -242,6 +246,90 @@ out:
 	return error;
 out_unlock:
 	unlock_page(page);
+	if (file != &gfs2_internal_file_sentinal)
+		gfs2_holder_uninit(&gh);
+	goto out;
+}
+
+#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
+
+/**
+ * gfs2_readpages - Read a bunch of pages at once
+ *
+ * Some notes:
+ * 1. This is only for readahead, so we can simply ignore any things
+ *    which are slightly inconvenient (such as locking conflicts between
+ *    the page lock and the glock) and return having done no I/O. Its
+ *    obviously not something we'd want to do on too regular a basis.
+ *    Any I/O we ignore at this time will be done via readpage later.
+ * 2. We have to handle stuffed files here too.
+ * 3. mpage_readpages() does most of the heavy lifting in the common case.
+ * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
+ * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
+ *    well as read-ahead.
+ */
+static int gfs2_readpages(struct file *file, struct address_space *mapping,
+			  struct list_head *pages, unsigned nr_pages)
+{
+	struct inode *inode = mapping->host;
+	struct gfs2_inode *ip = inode->u.generic_ip;
+	struct gfs2_sbd *sdp = ip->i_sbd;
+	struct gfs2_holder gh;
+	unsigned page_idx;
+	int ret;
+
+	if (likely(file != &gfs2_internal_file_sentinal)) {
+		gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
+				 LM_FLAG_TRY_1CB|GL_ATIME|GL_AOP, &gh);
+		ret = gfs2_glock_nq_m_atime(1, &gh);
+		if (ret == GLR_TRYFAILED)
+			goto out_noerror;
+		if (unlikely(ret))
+			goto out_unlock;
+	}
+
+	if (gfs2_is_stuffed(ip)) {
+		struct pagevec lru_pvec;
+		pagevec_init(&lru_pvec, 0);
+		for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+			struct page *page = list_to_page(pages);
+			list_del(&page->lru);
+			if (!add_to_page_cache(page, mapping,
+					       page->index, GFP_KERNEL)) {
+				ret = stuffed_readpage(ip, page);
+				unlock_page(page);
+				if (!pagevec_add(&lru_pvec, page))
+					__pagevec_lru_add(&lru_pvec);
+			}
+			page_cache_release(page);
+		}
+		pagevec_lru_add(&lru_pvec);
+		ret = 0;
+	} else {
+		/* What we really want to do .... */
+		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
+	}
+
+	if (likely(file != &gfs2_internal_file_sentinal)) {
+		gfs2_glock_dq_m(1, &gh);
+		gfs2_holder_uninit(&gh);
+	}
+out:
+	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+		ret = -EIO;
+	return ret;
+out_noerror:
+	ret = 0;
+out_unlock:
+	/* unlock all pages, we can't do any I/O right now */
+	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+		struct page *page = list_to_page(pages);
+		list_del(&page->lru);
+		unlock_page(page);
+		page_cache_release(page);
+	}
+	if (likely(file != &gfs2_internal_file_sentinal))
+		gfs2_holder_uninit(&gh);
 	goto out;
 }
 
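The contract behind ->readpages comes from the VM read-ahead code: the
caller hands over a list of pages that are not yet in the page cache,
and the method must consume the list itself, adding to the page cache
what it reads and unlocking/releasing anything it abandons, which is
exactly what the out_unlock path above does. For reference, a close
paraphrase (from memory, details may differ) of the dispatching
function in mm/readahead.c of this era; note how the stuffed-file loop
above mirrors the fallback path:

/* Sketch of mm/readahead.c::read_pages(), circa 2.6.16; paraphrased,
 * not verbatim. */
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

static int read_pages(struct address_space *mapping, struct file *filp,
		      struct list_head *pages, unsigned nr_pages)
{
	struct pagevec lru_pvec;
	unsigned page_idx;
	int ret;

	/* Preferred: let the filesystem take the whole list at once. */
	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping,
						pages, nr_pages);
		goto out;
	}

	/* Fallback: insert each page and read it with ->readpage(). */
	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}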
@@ -572,6 +660,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 struct address_space_operations gfs2_file_aops = {
 	.writepage = gfs2_writepage,
 	.readpage = gfs2_readpage,
+	.readpages = gfs2_readpages,
 	.sync_page = block_sync_page,
 	.prepare_write = gfs2_prepare_write,
 	.commit_write = gfs2_commit_write,
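Note 5 in the gfs2_readpages() comment (lock-ahead via LM_FLAG_TRY_1CB)
boils down to an opportunistic try-lock: if the glock cannot be had
without waiting, readahead bails out with no error and a later blocking
readpage does the work instead. A standalone illustration of the
pattern (not kernel code; a pthread mutex stands in for the glock):

/* Illustrative only: try-lock readahead that skips quietly on
 * contention, like the GLR_TRYFAILED -> out_noerror path above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;

static int readahead(void)
{
	/* Analogue of taking the glock with LM_FLAG_TRY_1CB: no waiting. */
	if (pthread_mutex_trylock(&glock) != 0) {
		puts("readahead: lock contended, skipping (no error)");
		return 0;
	}
	puts("readahead: lock acquired, would submit read-ahead I/O");
	pthread_mutex_unlock(&glock);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&glock);   /* simulate another holder */
	readahead();                  /* skips quietly */
	pthread_mutex_unlock(&glock);
	readahead();                  /* proceeds */
	return 0;
}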