Diffstat (limited to 'fs/gfs2/ops_file.c')
 fs/gfs2/ops_file.c | 229 +++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 160 insertions(+), 69 deletions(-)
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index bb11fd6752d3..f4842f2548cd 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -33,57 +33,12 @@
 #include "lm.h"
 #include "log.h"
 #include "meta_io.h"
-#include "ops_file.h"
-#include "ops_vm.h"
 #include "quota.h"
 #include "rgrp.h"
 #include "trans.h"
 #include "util.h"
 #include "eaops.h"
-
-/*
- * Most fields left uninitialised to catch anybody who tries to
- * use them. f_flags set to prevent file_accessed() from touching
- * any other part of this. Its use is purely as a flag so that we
- * know (in readpage()) whether or not do to locking.
- */
-struct file gfs2_internal_file_sentinel = {
-	.f_flags = O_NOATIME|O_RDONLY,
-};
-
-static int gfs2_read_actor(read_descriptor_t *desc, struct page *page,
-			   unsigned long offset, unsigned long size)
-{
-	char *kaddr;
-	unsigned long count = desc->count;
-
-	if (size > count)
-		size = count;
-
-	kaddr = kmap(page);
-	memcpy(desc->arg.data, kaddr + offset, size);
-	kunmap(page);
-
-	desc->count = count - size;
-	desc->written += size;
-	desc->arg.buf += size;
-	return size;
-}
-
-int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
-		       char *buf, loff_t *pos, unsigned size)
-{
-	struct inode *inode = &ip->i_inode;
-	read_descriptor_t desc;
-	desc.written = 0;
-	desc.arg.data = buf;
-	desc.count = size;
-	desc.error = 0;
-	do_generic_mapping_read(inode->i_mapping, ra_state,
-				&gfs2_internal_file_sentinel, pos, &desc,
-				gfs2_read_actor);
-	return desc.written ? desc.written : desc.error;
-}
+#include "ops_address.h"
 
 /**
  * gfs2_llseek - seek to a location in a file
@@ -214,7 +169,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 	if (put_user(fsflags, ptr))
 		error = -EFAULT;
 
-	gfs2_glock_dq_m(1, &gh);
+	gfs2_glock_dq(&gh);
 	gfs2_holder_uninit(&gh);
 	return error;
 }
@@ -291,7 +246,16 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 		if (error)
 			goto out;
 	}
-
+	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
+		if (flags & GFS2_DIF_JDATA)
+			gfs2_log_flush(sdp, ip->i_gl);
+		error = filemap_fdatawrite(inode->i_mapping);
+		if (error)
+			goto out;
+		error = filemap_fdatawait(inode->i_mapping);
+		if (error)
+			goto out;
+	}
 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 	if (error)
 		goto out;
@@ -303,6 +267,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 	gfs2_dinode_out(ip, bh->b_data);
 	brelse(bh);
 	gfs2_set_inode_flags(inode);
+	gfs2_set_aops(inode);
 out_trans_end:
 	gfs2_trans_end(sdp);
 out:
@@ -338,6 +303,128 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	return -ENOTTY;
 }
 
+/**
+ * gfs2_allocate_page_backing - Use bmap to allocate blocks
+ * @page: The (locked) page to allocate backing for
+ *
+ * We try to allocate all the blocks required for the page in
+ * one go. This might fail for various reasons, so we keep
+ * trying until all the blocks to back this page are allocated.
+ * If some of the blocks are already allocated, thats ok too.
+ */
+
+static int gfs2_allocate_page_backing(struct page *page)
+{
+	struct inode *inode = page->mapping->host;
+	struct buffer_head bh;
+	unsigned long size = PAGE_CACHE_SIZE;
+	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+	do {
+		bh.b_state = 0;
+		bh.b_size = size;
+		gfs2_block_map(inode, lblock, &bh, 1);
+		if (!buffer_mapped(&bh))
+			return -EIO;
+		size -= bh.b_size;
+		lblock += (bh.b_size >> inode->i_blkbits);
+	} while(size > 0);
+	return 0;
+}
+
+/**
+ * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
+ * @vma: The virtual memory area
+ * @page: The page which is about to become writable
+ *
+ * When the page becomes writable, we need to ensure that we have
+ * blocks allocated on disk to back that page.
+ */
+
+static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
+	unsigned long last_index;
+	u64 pos = page->index << (PAGE_CACHE_SIZE - inode->i_blkbits);
+	unsigned int data_blocks, ind_blocks, rblocks;
+	int alloc_required = 0;
+	struct gfs2_holder gh;
+	struct gfs2_alloc *al;
+	int ret;
+
+	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &gh);
+	ret = gfs2_glock_nq_atime(&gh);
+	if (ret)
+		goto out;
+
+	set_bit(GIF_SW_PAGED, &ip->i_flags);
+	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
+	ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required);
+	if (ret || !alloc_required)
+		goto out_unlock;
+	ret = -ENOMEM;
+	al = gfs2_alloc_get(ip);
+	if (al == NULL)
+		goto out_unlock;
+
+	ret = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+	if (ret)
+		goto out_alloc_put;
+	ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
+	if (ret)
+		goto out_quota_unlock;
+	al->al_requested = data_blocks + ind_blocks;
+	ret = gfs2_inplace_reserve(ip);
+	if (ret)
+		goto out_quota_unlock;
+
+	rblocks = RES_DINODE + ind_blocks;
+	if (gfs2_is_jdata(ip))
+		rblocks += data_blocks ? data_blocks : 1;
+	if (ind_blocks || data_blocks)
+		rblocks += RES_STATFS + RES_QUOTA;
+	ret = gfs2_trans_begin(sdp, rblocks, 0);
+	if (ret)
+		goto out_trans_fail;
+
+	lock_page(page);
+	ret = -EINVAL;
+	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
+	if (page->index > last_index)
+		goto out_unlock_page;
+	ret = 0;
+	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
+		goto out_unlock_page;
+	if (gfs2_is_stuffed(ip)) {
+		ret = gfs2_unstuff_dinode(ip, page);
+		if (ret)
+			goto out_unlock_page;
+	}
+	ret = gfs2_allocate_page_backing(page);
+
+out_unlock_page:
+	unlock_page(page);
+	gfs2_trans_end(sdp);
+out_trans_fail:
+	gfs2_inplace_release(ip);
+out_quota_unlock:
+	gfs2_quota_unlock(ip);
+out_alloc_put:
+	gfs2_alloc_put(ip);
+out_unlock:
+	gfs2_glock_dq(&gh);
+out:
+	gfs2_holder_uninit(&gh);
+	return ret;
+}
+
+static struct vm_operations_struct gfs2_vm_ops = {
+	.fault = filemap_fault,
+	.page_mkwrite = gfs2_page_mkwrite,
+};
+
 
 /**
  * gfs2_mmap -
@@ -360,14 +447,7 @@ static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
 		return error;
 	}
 
-	/* This is VM_MAYWRITE instead of VM_WRITE because a call
-	   to mprotect() can turn on VM_WRITE later. */
-
-	if ((vma->vm_flags & (VM_MAYSHARE | VM_MAYWRITE)) ==
-	    (VM_MAYSHARE | VM_MAYWRITE))
-		vma->vm_ops = &gfs2_vm_ops_sharewrite;
-	else
-		vma->vm_ops = &gfs2_vm_ops_private;
+	vma->vm_ops = &gfs2_vm_ops;
 
 	gfs2_glock_dq_uninit(&i_gh);
 
@@ -538,15 +618,6 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 	if (__mandatory_lock(&ip->i_inode))
 		return -ENOLCK;
 
-	if (sdp->sd_args.ar_localflocks) {
-		if (IS_GETLK(cmd)) {
-			posix_test_lock(file, fl);
-			return 0;
-		} else {
-			return posix_lock_file_wait(file, fl);
-		}
-	}
-
 	if (cmd == F_CANCELLK) {
 		/* Hack: */
 		cmd = F_SETLK;
@@ -632,16 +703,12 @@ static void do_unflock(struct file *file, struct file_lock *fl)
 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 {
 	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
-	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
 
 	if (!(fl->fl_flags & FL_FLOCK))
 		return -ENOLCK;
 	if (__mandatory_lock(&ip->i_inode))
 		return -ENOLCK;
 
-	if (sdp->sd_args.ar_localflocks)
-		return flock_lock_file_wait(file, fl);
-
 	if (fl->fl_type == F_UNLCK) {
 		do_unflock(file, fl);
 		return 0;
@@ -678,3 +745,27 @@ const struct file_operations gfs2_dir_fops = {
 	.flock = gfs2_flock,
 };
 
+const struct file_operations gfs2_file_fops_nolock = {
+	.llseek = gfs2_llseek,
+	.read = do_sync_read,
+	.aio_read = generic_file_aio_read,
+	.write = do_sync_write,
+	.aio_write = generic_file_aio_write,
+	.unlocked_ioctl = gfs2_ioctl,
+	.mmap = gfs2_mmap,
+	.open = gfs2_open,
+	.release = gfs2_close,
+	.fsync = gfs2_fsync,
+	.splice_read = generic_file_splice_read,
+	.splice_write = generic_file_splice_write,
+	.setlease = gfs2_setlease,
+};
+
+const struct file_operations gfs2_dir_fops_nolock = {
+	.readdir = gfs2_readdir,
+	.unlocked_ioctl = gfs2_ioctl,
+	.open = gfs2_open,
+	.release = gfs2_close,
+	.fsync = gfs2_fsync,
+};
+
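
The ->page_mkwrite() path added above is driven entirely by the VM: it runs the first time a process dirties a page of a MAP_SHARED mapping, which is when GFS2 must take the glock, check quota and allocate backing blocks. A minimal userspace sketch of the access pattern that exercises it is below; it is not part of the patch, the mount point and file name are hypothetical, and the same program works on any filesystem (on GFS2 the write fault lands in gfs2_page_mkwrite()).

/* Sketch: dirty a shared mapping so the filesystem's ->page_mkwrite()
 * handler runs for the faulting page. Hypothetical path, any GFS2
 * (or other) mount will do. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/gfs2/demo";	/* example only */
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Extend the file without writing data: the page has no backing
	 * blocks yet, so the first write fault has to allocate them. */
	if (ftruncate(fd, pagesize) < 0) {
		perror("ftruncate");
		return 1;
	}

	char *map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* First store into the shared page -> write fault -> ->page_mkwrite(). */
	memset(map, 'x', 16);

	if (msync(map, pagesize, MS_SYNC) < 0)	/* push the dirty page to disk */
		perror("msync");
	munmap(map, pagesize);
	close(fd);
	return 0;
}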