about summary refs log tree commit diff stats
path: root/fs/buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  87
1 files changed, 44 insertions, 43 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 0d6ca7bac6c8..3b3ab5281920 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -160,12 +160,7 @@ int sync_blockdev(struct block_device *bdev)
160} 160}
161EXPORT_SYMBOL(sync_blockdev); 161EXPORT_SYMBOL(sync_blockdev);
162 162
163/* 163static void __fsync_super(struct super_block *sb)
164 * Write out and wait upon all dirty data associated with this
165 * superblock. Filesystem data as well as the underlying block
166 * device. Takes the superblock lock.
167 */
168int fsync_super(struct super_block *sb)
169{ 164{
170 sync_inodes_sb(sb, 0); 165 sync_inodes_sb(sb, 0);
171 DQUOT_SYNC(sb); 166 DQUOT_SYNC(sb);
@@ -177,7 +172,16 @@ int fsync_super(struct super_block *sb)
177 sb->s_op->sync_fs(sb, 1); 172 sb->s_op->sync_fs(sb, 1);
178 sync_blockdev(sb->s_bdev); 173 sync_blockdev(sb->s_bdev);
179 sync_inodes_sb(sb, 1); 174 sync_inodes_sb(sb, 1);
175}
180 176
177/*
178 * Write out and wait upon all dirty data associated with this
179 * superblock. Filesystem data as well as the underlying block
180 * device. Takes the superblock lock.
181 */
182int fsync_super(struct super_block *sb)
183{
184 __fsync_super(sb);
181 return sync_blockdev(sb->s_bdev); 185 return sync_blockdev(sb->s_bdev);
182} 186}
183 187
@@ -216,19 +220,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
216 sb->s_frozen = SB_FREEZE_WRITE; 220 sb->s_frozen = SB_FREEZE_WRITE;
217 smp_wmb(); 221 smp_wmb();
218 222
219 sync_inodes_sb(sb, 0); 223 __fsync_super(sb);
220 DQUOT_SYNC(sb);
221
222 lock_super(sb);
223 if (sb->s_dirt && sb->s_op->write_super)
224 sb->s_op->write_super(sb);
225 unlock_super(sb);
226
227 if (sb->s_op->sync_fs)
228 sb->s_op->sync_fs(sb, 1);
229
230 sync_blockdev(sb->s_bdev);
231 sync_inodes_sb(sb, 1);
232 224
233 sb->s_frozen = SB_FREEZE_TRANS; 225 sb->s_frozen = SB_FREEZE_TRANS;
234 smp_wmb(); 226 smp_wmb();
@@ -327,31 +319,24 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
327 return ret; 319 return ret;
328} 320}
329 321
330static long do_fsync(unsigned int fd, int datasync) 322long do_fsync(struct file *file, int datasync)
331{ 323{
332 struct file * file; 324 int ret;
333 struct address_space *mapping; 325 int err;
334 int ret, err; 326 struct address_space *mapping = file->f_mapping;
335
336 ret = -EBADF;
337 file = fget(fd);
338 if (!file)
339 goto out;
340 327
341 ret = -EINVAL;
342 if (!file->f_op || !file->f_op->fsync) { 328 if (!file->f_op || !file->f_op->fsync) {
343 /* Why? We can still call filemap_fdatawrite */ 329 /* Why? We can still call filemap_fdatawrite */
344 goto out_putf; 330 ret = -EINVAL;
331 goto out;
345 } 332 }
346 333
347 mapping = file->f_mapping;
348
349 current->flags |= PF_SYNCWRITE; 334 current->flags |= PF_SYNCWRITE;
350 ret = filemap_fdatawrite(mapping); 335 ret = filemap_fdatawrite(mapping);
351 336
352 /* 337 /*
353 * We need to protect against concurrent writers, 338 * We need to protect against concurrent writers, which could cause
354 * which could cause livelocks in fsync_buffers_list 339 * livelocks in fsync_buffers_list().
355 */ 340 */
356 mutex_lock(&mapping->host->i_mutex); 341 mutex_lock(&mapping->host->i_mutex);
357 err = file->f_op->fsync(file, file->f_dentry, datasync); 342 err = file->f_op->fsync(file, file->f_dentry, datasync);
@@ -362,21 +347,31 @@ static long do_fsync(unsigned int fd, int datasync)
362 if (!ret) 347 if (!ret)
363 ret = err; 348 ret = err;
364 current->flags &= ~PF_SYNCWRITE; 349 current->flags &= ~PF_SYNCWRITE;
365
366out_putf:
367 fput(file);
368out: 350out:
369 return ret; 351 return ret;
370} 352}
371 353
354static long __do_fsync(unsigned int fd, int datasync)
355{
356 struct file *file;
357 int ret = -EBADF;
358
359 file = fget(fd);
360 if (file) {
361 ret = do_fsync(file, datasync);
362 fput(file);
363 }
364 return ret;
365}
366
372asmlinkage long sys_fsync(unsigned int fd) 367asmlinkage long sys_fsync(unsigned int fd)
373{ 368{
374 return do_fsync(fd, 0); 369 return __do_fsync(fd, 0);
375} 370}
376 371
377asmlinkage long sys_fdatasync(unsigned int fd) 372asmlinkage long sys_fdatasync(unsigned int fd)
378{ 373{
379 return do_fsync(fd, 1); 374 return __do_fsync(fd, 1);
380} 375}
381 376
382/* 377/*
@@ -865,8 +860,8 @@ int __set_page_dirty_buffers(struct page *page)
865 } 860 }
866 write_unlock_irq(&mapping->tree_lock); 861 write_unlock_irq(&mapping->tree_lock);
867 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); 862 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
863 return 1;
868 } 864 }
869
870 return 0; 865 return 0;
871} 866}
872EXPORT_SYMBOL(__set_page_dirty_buffers); 867EXPORT_SYMBOL(__set_page_dirty_buffers);
@@ -3078,7 +3073,7 @@ static void recalc_bh_state(void)
3078 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096) 3073 if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
3079 return; 3074 return;
3080 __get_cpu_var(bh_accounting).ratelimit = 0; 3075 __get_cpu_var(bh_accounting).ratelimit = 0;
3081 for_each_cpu(i) 3076 for_each_online_cpu(i)
3082 tot += per_cpu(bh_accounting, i).nr; 3077 tot += per_cpu(bh_accounting, i).nr;
3083 buffer_heads_over_limit = (tot > max_buffer_heads); 3078 buffer_heads_over_limit = (tot > max_buffer_heads);
3084} 3079}
@@ -3127,6 +3122,9 @@ static void buffer_exit_cpu(int cpu)
3127 brelse(b->bhs[i]); 3122 brelse(b->bhs[i]);
3128 b->bhs[i] = NULL; 3123 b->bhs[i] = NULL;
3129 } 3124 }
3125 get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
3126 per_cpu(bh_accounting, cpu).nr = 0;
3127 put_cpu_var(bh_accounting);
3130} 3128}
3131 3129
3132static int buffer_cpu_notify(struct notifier_block *self, 3130static int buffer_cpu_notify(struct notifier_block *self,
@@ -3143,8 +3141,11 @@ void __init buffer_init(void)
3143 int nrpages; 3141 int nrpages;
3144 3142
3145 bh_cachep = kmem_cache_create("buffer_head", 3143 bh_cachep = kmem_cache_create("buffer_head",
3146 sizeof(struct buffer_head), 0, 3144 sizeof(struct buffer_head), 0,
3147 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL); 3145 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3146 SLAB_MEM_SPREAD),
3147 init_buffer_head,
3148 NULL);
3148 3149
3149 /* 3150 /*
3150 * Limit the bh occupancy to 10% of ZONE_NORMAL 3151 * Limit the bh occupancy to 10% of ZONE_NORMAL