author     Steven Whitehouse <swhiteho@redhat.com>    2007-10-15 10:40:33 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2008-01-25 03:07:13 -0500
commit     3cc3f710ce0effe397b830826a1a081fa81f11c7 (patch)
tree       53f69f1b8d1cbc2849c6bac08ce7786f3ecd7447 /fs/gfs2/ops_address.c
parent     51ff87bdd9f21a5d3672517b75d25ab5842d94a8 (diff)
[GFS2] Use ->page_mkwrite() for mmap()
This cleans up the mmap() code path for GFS2 by implementing the
page_mkwrite function for GFS2. We are thus able to use the
generic filemap_fault function for our ->fault() implementation.
This now means that shared writable mappings will be much more
efficiently shared across the cluster if there is a reasonable
proportion of read activity (the greater proportion, the better).
As a side effect, it also reduces the size of the code, removes
special cases from readpage and readpages, and makes the code
path easier to follow.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
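
For readers new to the mechanism the message relies on: ->page_mkwrite() is invoked on the first write fault to a shared writable mapping, while plain read faults can be served by the generic filemap_fault(). The fragment below is only a rough sketch of that pairing against the 2.6.24-era VM API, not the GFS2 code added by this commit (the GFS2 vm_operations changes live in another file, outside this diffstat-limited view); every "example_" name is invented for illustration.

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Sketch only: first write fault to a shared writable mapping.  A
 * cluster filesystem would take its exclusive lock on the inode here
 * (a glock in GFS2 terms), allocate any backing blocks the page needs,
 * then drop the lock.  Returning 0 lets the fault path make the page
 * writable.  Signature matches the 2.6.24-era ->page_mkwrite().
 */
static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
        struct inode *inode = vma->vm_file->f_mapping->host;

        (void)inode;            /* placeholder: no real locking in this sketch */
        return 0;
}

static struct vm_operations_struct example_vm_ops = {
        .fault          = filemap_fault,        /* generic read-fault path */
        .page_mkwrite   = example_page_mkwrite, /* write faults only */
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        file_accessed(file);
        vma->vm_ops = &example_vm_ops;
        return 0;
}

Because only write faults take the exclusive cluster lock in such a scheme, mappings that see mostly read activity can stay in the cheaper shared state across nodes, which is the efficiency gain the message describes.
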
Diffstat (limited to 'fs/gfs2/ops_address.c')
-rw-r--r--   fs/gfs2/ops_address.c   45
1 file changed, 8 insertions(+), 37 deletions(-)
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 9bb24b1d9c05..1696e5d9d112 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -265,9 +265,7 @@ static int __gfs2_readpage(void *file, struct page *page)
  * @file: The file to read
  * @page: The page of the file
  *
- * This deals with the locking required. If the GFF_EXLOCK flags is set
- * then we already hold the glock (due to page fault) and thus we call
- * __gfs2_readpage() directly. Otherwise we use a trylock in order to
+ * This deals with the locking required. We use a trylock in order to
  * avoid the page lock / glock ordering problems returning AOP_TRUNCATED_PAGE
  * in the event that we are unable to get the lock.
  */
@@ -278,12 +276,6 @@ static int gfs2_readpage(struct file *file, struct page *page)
         struct gfs2_holder gh;
         int error;
 
-        if (file) {
-                struct gfs2_file *gf = file->private_data;
-                if (test_bit(GFF_EXLOCK, &gf->f_flags))
-                        return __gfs2_readpage(file, page);
-        }
-
         gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
         error = gfs2_glock_nq_atime(&gh);
         if (unlikely(error)) {
@@ -354,9 +346,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
  * 2. We don't handle stuffed files here we let readpage do the honours.
  * 3. mpage_readpages() does most of the heavy lifting in the common case.
  * 4. gfs2_get_block() is relied upon to set BH_Boundary in the right places.
- * 5. We use LM_FLAG_TRY_1CB here, effectively we then have lock-ahead as
- *    well as read-ahead.
  */
+
 static int gfs2_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
 {
@@ -364,40 +355,20 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
         struct gfs2_inode *ip = GFS2_I(inode);
         struct gfs2_sbd *sdp = GFS2_SB(inode);
         struct gfs2_holder gh;
-        int ret = 0;
-        int do_unlock = 0;
+        int ret;
 
-        if (file) {
-                struct gfs2_file *gf = file->private_data;
-                if (test_bit(GFF_EXLOCK, &gf->f_flags))
-                        goto skip_lock;
-        }
-        gfs2_holder_init(ip->i_gl, LM_ST_SHARED,
-                         LM_FLAG_TRY_1CB|GL_ATIME, &gh);
-        do_unlock = 1;
+        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
         ret = gfs2_glock_nq_atime(&gh);
-        if (ret == GLR_TRYFAILED)
-                goto out_noerror;
         if (unlikely(ret))
-                goto out_unlock;
-skip_lock:
+                goto out_uninit;
         if (!gfs2_is_stuffed(ip))
                 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_get_block);
-
-        if (do_unlock) {
-                gfs2_glock_dq_m(1, &gh);
-                gfs2_holder_uninit(&gh);
-        }
-out:
+        gfs2_glock_dq(&gh);
+out_uninit:
+        gfs2_holder_uninit(&gh);
         if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                 ret = -EIO;
         return ret;
-out_noerror:
-        ret = 0;
-out_unlock:
-        if (do_unlock)
-                gfs2_holder_uninit(&gh);
-        goto out;
 }
 
 /**
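
The comment retained in gfs2_readpage() above relies on a standard trylock pattern: ->readpage() is entered with the page already locked, and since the filesystem's own lock (the glock here) is normally taken before page locks, acquiring it unconditionally at this point would invert the lock order. Instead the lock is try-acquired, and on failure the page is unlocked and AOP_TRUNCATED_PAGE is returned so the caller retries. The sketch below shows only that general shape; example_trylock_shared(), example_unlock_shared() and example_get_block() are invented stand-ins (roughly a glock holder and gfs2_get_block() in GFS2 terms), not real kernel or GFS2 functions.

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>

/* Invented stand-ins for the filesystem's cluster lock and block mapper. */
static int example_trylock_shared(struct inode *inode);
static void example_unlock_shared(struct inode *inode);
static int example_get_block(struct inode *inode, sector_t lblock,
                             struct buffer_head *bh_result, int create);

static int example_readpage(struct file *file, struct page *page)
{
        struct inode *inode = page->mapping->host;
        int ret;

        /*
         * ->readpage() is called with the page locked.  The filesystem
         * lock normally ranks above page locks, so only a trylock is
         * safe here.
         */
        if (!example_trylock_shared(inode)) {
                /* Back out: unlock the page and ask the caller to retry. */
                unlock_page(page);
                return AOP_TRUNCATED_PAGE;
        }

        /* mpage_readpage() issues the I/O and unlocks the page when done. */
        ret = mpage_readpage(page, example_get_block);

        example_unlock_shared(inode);
        return ret;
}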