Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  174
1 file changed, 0 insertions(+), 174 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b6d701073e7..16cfbcd254f1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -159,31 +159,6 @@ int sync_blockdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-static void __fsync_super(struct super_block *sb)
-{
-	sync_inodes_sb(sb, 0);
-	DQUOT_SYNC(sb);
-	lock_super(sb);
-	if (sb->s_dirt && sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-	if (sb->s_op->sync_fs)
-		sb->s_op->sync_fs(sb, 1);
-	sync_blockdev(sb->s_bdev);
-	sync_inodes_sb(sb, 1);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-	__fsync_super(sb);
-	return sync_blockdev(sb->s_bdev);
-}
-
 /*
  * Write out and wait upon all dirty data associated with this
  * device. Filesystem data as well as the underlying block
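
The hunk above drops the superblock flush pair. Note the ordering in __fsync_super(): sync_inodes_sb(sb, 0) starts asynchronous inode writeback, quota and the superblock are pushed next, and only the final sync_inodes_sb(sb, 1) waits, so the wait overlaps I/O already in flight; fsync_super() then waits on the block device once more. As a sketch of the caller side (illustration only, not part of this patch; myfs_remount_ro() is an invented name, the calls are the era's real APIs):

static int myfs_remount_ro(struct super_block *sb)
{
	int err;

	/* write out and wait upon inodes, quota, superblock and blockdev */
	err = fsync_super(sb);
	if (err)
		return err;
	sb->s_flags |= MS_RDONLY;
	return 0;
}
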
@@ -260,118 +235,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 EXPORT_SYMBOL(thaw_bdev);
 
 /*
- * sync everything. Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
-	wakeup_pdflush(0);
-	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
-	DQUOT_SYNC(NULL);
-	sync_supers();		/* Write the superblocks */
-	sync_filesystems(0);	/* Start syncing the filesystems */
-	sync_filesystems(wait);	/* Waitingly sync the filesystems */
-	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
-	if (!wait)
-		printk("Emergency Sync complete\n");
-	if (unlikely(laptop_mode))
-		laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
-	do_sync(1);
-	return 0;
-}
-
-void emergency_sync(void)
-{
-	pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
-	struct inode * inode = dentry->d_inode;
-	struct super_block * sb;
-	int ret, err;
-
-	/* sync the inode to buffers */
-	ret = write_inode_now(inode, 0);
-
-	/* sync the superblock to buffers */
-	sb = inode->i_sb;
-	lock_super(sb);
-	if (sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-
-	/* .. finally sync the buffers to disk */
-	err = sync_blockdev(sb->s_bdev);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-long do_fsync(struct file *file, int datasync)
-{
-	int ret;
-	int err;
-	struct address_space *mapping = file->f_mapping;
-
-	if (!file->f_op || !file->f_op->fsync) {
-		/* Why? We can still call filemap_fdatawrite */
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = filemap_fdatawrite(mapping);
-
-	/*
-	 * We need to protect against concurrent writers, which could cause
-	 * livelocks in fsync_buffers_list().
-	 */
-	mutex_lock(&mapping->host->i_mutex);
-	err = file->f_op->fsync(file, file->f_dentry, datasync);
-	if (!ret)
-		ret = err;
-	mutex_unlock(&mapping->host->i_mutex);
-	err = filemap_fdatawait(mapping);
-	if (!ret)
-		ret = err;
-out:
-	return ret;
-}
-
-static long __do_fsync(unsigned int fd, int datasync)
-{
-	struct file *file;
-	int ret = -EBADF;
-
-	file = fget(fd);
-	if (file) {
-		ret = do_fsync(file, datasync);
-		fput(file);
-	}
-	return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
-	return __do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
-	return __do_fsync(fd, 1);
-}
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers. To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
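
The hunk above removes the sync(2)/fsync(2) entry points wholesale. sys_sync() runs do_sync(1) synchronously, while emergency_sync() queues do_sync(0) through pdflush_operation(); the fsync chain is sys_fsync(fd) -> __do_fsync(fd, 0) -> fget() -> do_fsync(), with sys_fdatasync() passing datasync=1 so the method may skip non-essential metadata. do_fsync() brackets the ->fsync() call with filemap_fdatawrite() and filemap_fdatawait(), and holds i_mutex across the method to avoid livelocks in fsync_buffers_list(). For illustration only (not part of this patch; "myfs" is an invented name), a simple filesystem of this era plugs into that path by pointing its file_operations at the generic helper deleted in this same hunk:

static const struct file_operations myfs_file_ops = {
	/* ... read/write methods elided ... */
	.fsync	= file_fsync,	/* flush inode, then superblock, then blockdev */
};
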
@@ -1551,35 +1414,6 @@ static void discard_buffer(struct buffer_head * bh)
 }
 
 /**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, gfp_t gfp_mask)
-{
-	struct address_space * const mapping = page->mapping;
-
-	BUG_ON(!PageLocked(page));
-	if (PageWriteback(page))
-		return 0;
-
-	if (mapping && mapping->a_ops->releasepage)
-		return mapping->a_ops->releasepage(page, gfp_mask);
-	return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
-/**
  * block_invalidatepage - invalidate part of all of a buffer-backed page
  *
  * @page: the page which is affected
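
try_to_release_page(), removed in the hunk above, gives the owning filesystem first refusal: a locked, non-writeback page goes to ->releasepage() when the mapping provides one, and to try_to_free_buffers() otherwise; returning 1 means the private data was released, 0 means it was not, and @gfp_mask tells the method whether it may do I/O (__GFP_IO) or block (__GFP_WAIT). A sketch of the kind of method it dispatches to (illustration only, not part of this patch; myfs_releasepage() and its refusal policy are invented):

static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	/* invented policy: refuse when the caller cannot block on I/O */
	if (!(gfp_mask & __GFP_WAIT))
		return 0;
	return try_to_free_buffers(page);	/* drop this page's buffer_heads */
}
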
@@ -1630,14 +1464,6 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-void do_invalidatepage(struct page *page, unsigned long offset)
-{
-	void (*invalidatepage)(struct page *, unsigned long);
-	invalidatepage = page->mapping->a_ops->invalidatepage ? :
-		block_invalidatepage;
-	(*invalidatepage)(page, offset);
-}
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
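
do_invalidatepage(), removed in the last hunk, uses the GNU `?:' shorthand to dispatch to the mapping's ->invalidatepage() when one exists and to block_invalidatepage() otherwise; @offset is the byte within the page from which invalidation starts, so 0 means the whole page goes. A buffer-backed filesystem's method typically does its own teardown and then delegates, roughly like this (illustration only, not part of this patch; myfs_invalidatepage() is an invented name):

static void myfs_invalidatepage(struct page *page, unsigned long offset)
{
	/* fs-private bookkeeping (e.g. journal hooks) would go here */
	block_invalidatepage(page, offset);
}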