Diffstat (limited to 'fs')
-rw-r--r--   fs/buffer.c   174
-rw-r--r--   fs/super.c     31
-rw-r--r--   fs/sync.c     113
3 files changed, 144 insertions, 174 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b6d701073e7..16cfbcd254f1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -159,31 +159,6 @@ int sync_blockdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-static void __fsync_super(struct super_block *sb)
-{
-	sync_inodes_sb(sb, 0);
-	DQUOT_SYNC(sb);
-	lock_super(sb);
-	if (sb->s_dirt && sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-	if (sb->s_op->sync_fs)
-		sb->s_op->sync_fs(sb, 1);
-	sync_blockdev(sb->s_bdev);
-	sync_inodes_sb(sb, 1);
-}
-
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
-{
-	__fsync_super(sb);
-	return sync_blockdev(sb->s_bdev);
-}
-
 /*
  * Write out and wait upon all dirty data associated with this
  * device. Filesystem data as well as the underlying block
@@ -260,118 +235,6 @@ void thaw_bdev(struct block_device *bdev, struct super_block *sb)
 EXPORT_SYMBOL(thaw_bdev);
 
 /*
- * sync everything. Start out by waking pdflush, because that writes back
- * all queues in parallel.
- */
-static void do_sync(unsigned long wait)
-{
-	wakeup_pdflush(0);
-	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
-	DQUOT_SYNC(NULL);
-	sync_supers();		/* Write the superblocks */
-	sync_filesystems(0);	/* Start syncing the filesystems */
-	sync_filesystems(wait);	/* Waitingly sync the filesystems */
-	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
-	if (!wait)
-		printk("Emergency Sync complete\n");
-	if (unlikely(laptop_mode))
-		laptop_sync_completion();
-}
-
-asmlinkage long sys_sync(void)
-{
-	do_sync(1);
-	return 0;
-}
-
-void emergency_sync(void)
-{
-	pdflush_operation(do_sync, 0);
-}
-
-/*
- * Generic function to fsync a file.
- *
- * filp may be NULL if called via the msync of a vma.
- */
-
-int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
-{
-	struct inode * inode = dentry->d_inode;
-	struct super_block * sb;
-	int ret, err;
-
-	/* sync the inode to buffers */
-	ret = write_inode_now(inode, 0);
-
-	/* sync the superblock to buffers */
-	sb = inode->i_sb;
-	lock_super(sb);
-	if (sb->s_op->write_super)
-		sb->s_op->write_super(sb);
-	unlock_super(sb);
-
-	/* .. finally sync the buffers to disk */
-	err = sync_blockdev(sb->s_bdev);
-	if (!ret)
-		ret = err;
-	return ret;
-}
-
-long do_fsync(struct file *file, int datasync)
-{
-	int ret;
-	int err;
-	struct address_space *mapping = file->f_mapping;
-
-	if (!file->f_op || !file->f_op->fsync) {
-		/* Why? We can still call filemap_fdatawrite */
-		ret = -EINVAL;
-		goto out;
-	}
-
-	ret = filemap_fdatawrite(mapping);
-
-	/*
-	 * We need to protect against concurrent writers, which could cause
-	 * livelocks in fsync_buffers_list().
-	 */
-	mutex_lock(&mapping->host->i_mutex);
-	err = file->f_op->fsync(file, file->f_dentry, datasync);
-	if (!ret)
-		ret = err;
-	mutex_unlock(&mapping->host->i_mutex);
-	err = filemap_fdatawait(mapping);
-	if (!ret)
-		ret = err;
-out:
-	return ret;
-}
-
-static long __do_fsync(unsigned int fd, int datasync)
-{
-	struct file *file;
-	int ret = -EBADF;
-
-	file = fget(fd);
-	if (file) {
-		ret = do_fsync(file, datasync);
-		fput(file);
-	}
-	return ret;
-}
-
-asmlinkage long sys_fsync(unsigned int fd)
-{
-	return __do_fsync(fd, 0);
-}
-
-asmlinkage long sys_fdatasync(unsigned int fd)
-{
-	return __do_fsync(fd, 1);
-}
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers. To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
@@ -1551,35 +1414,6 @@ static void discard_buffer(struct buffer_head * bh)
 }
 
 /**
- * try_to_release_page() - release old fs-specific metadata on a page
- *
- * @page: the page which the kernel is trying to free
- * @gfp_mask: memory allocation flags (and I/O mode)
- *
- * The address_space is to try to release any data against the page
- * (presumably at page->private). If the release was successful, return `1'.
- * Otherwise return zero.
- *
- * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
- *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
- */
-int try_to_release_page(struct page *page, gfp_t gfp_mask)
-{
-	struct address_space * const mapping = page->mapping;
-
-	BUG_ON(!PageLocked(page));
-	if (PageWriteback(page))
-		return 0;
-
-	if (mapping && mapping->a_ops->releasepage)
-		return mapping->a_ops->releasepage(page, gfp_mask);
-	return try_to_free_buffers(page);
-}
-EXPORT_SYMBOL(try_to_release_page);
-
-/**
  * block_invalidatepage - invalidate part of all of a buffer-backed page
  *
  * @page: the page which is affected
@@ -1630,14 +1464,6 @@ out:
 }
 EXPORT_SYMBOL(block_invalidatepage);
 
-void do_invalidatepage(struct page *page, unsigned long offset)
-{
-	void (*invalidatepage)(struct page *, unsigned long);
-	invalidatepage = page->mapping->a_ops->invalidatepage ? :
-		block_invalidatepage;
-	(*invalidatepage)(page, offset);
-}
-
 /*
  * We attach and possibly dirty the buffers atomically wrt
  * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
diff --git a/fs/super.c b/fs/super.c
index 6987824d0dce..15671cd048b1 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -220,6 +220,37 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 	return 0;
 }
 
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock. Requires a second blkdev
+ * flush by the caller to complete the operation.
+ */
+void __fsync_super(struct super_block *sb)
+{
+	sync_inodes_sb(sb, 0);
+	DQUOT_SYNC(sb);
+	lock_super(sb);
+	if (sb->s_dirt && sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+	unlock_super(sb);
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, 1);
+	sync_blockdev(sb->s_bdev);
+	sync_inodes_sb(sb, 1);
+}
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+	__fsync_super(sb);
+	return sync_blockdev(sb->s_bdev);
+}
+
 /**
  * generic_shutdown_super - common helper for ->kill_sb()
  * @sb: superblock to kill
diff --git a/fs/sync.c b/fs/sync.c
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -10,11 +10,124 @@
 #include <linux/syscalls.h>
 #include <linux/linkage.h>
 #include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
 
 #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
 			SYNC_FILE_RANGE_WAIT_AFTER)
 
 /*
+ * sync everything. Start out by waking pdflush, because that writes back
+ * all queues in parallel.
+ */
+static void do_sync(unsigned long wait)
+{
+	wakeup_pdflush(0);
+	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
+	DQUOT_SYNC(NULL);
+	sync_supers();		/* Write the superblocks */
+	sync_filesystems(0);	/* Start syncing the filesystems */
+	sync_filesystems(wait);	/* Waitingly sync the filesystems */
+	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
+	if (!wait)
+		printk("Emergency Sync complete\n");
+	if (unlikely(laptop_mode))
+		laptop_sync_completion();
+}
+
+asmlinkage long sys_sync(void)
+{
+	do_sync(1);
+	return 0;
+}
+
+void emergency_sync(void)
+{
+	pdflush_operation(do_sync, 0);
+}
+
+/*
+ * Generic function to fsync a file.
+ *
+ * filp may be NULL if called via the msync of a vma.
+ */
+int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
+{
+	struct inode * inode = dentry->d_inode;
+	struct super_block * sb;
+	int ret, err;
+
+	/* sync the inode to buffers */
+	ret = write_inode_now(inode, 0);
+
+	/* sync the superblock to buffers */
+	sb = inode->i_sb;
+	lock_super(sb);
+	if (sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+	unlock_super(sb);
+
+	/* .. finally sync the buffers to disk */
+	err = sync_blockdev(sb->s_bdev);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+
+long do_fsync(struct file *file, int datasync)
+{
+	int ret;
+	int err;
+	struct address_space *mapping = file->f_mapping;
+
+	if (!file->f_op || !file->f_op->fsync) {
+		/* Why? We can still call filemap_fdatawrite */
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = filemap_fdatawrite(mapping);
+
+	/*
+	 * We need to protect against concurrent writers, which could cause
+	 * livelocks in fsync_buffers_list().
+	 */
+	mutex_lock(&mapping->host->i_mutex);
+	err = file->f_op->fsync(file, file->f_dentry, datasync);
+	if (!ret)
+		ret = err;
+	mutex_unlock(&mapping->host->i_mutex);
+	err = filemap_fdatawait(mapping);
+	if (!ret)
+		ret = err;
+out:
+	return ret;
+}
+
+static long __do_fsync(unsigned int fd, int datasync)
+{
+	struct file *file;
+	int ret = -EBADF;
+
+	file = fget(fd);
+	if (file) {
+		ret = do_fsync(file, datasync);
+		fput(file);
+	}
+	return ret;
+}
+
+asmlinkage long sys_fsync(unsigned int fd)
+{
+	return __do_fsync(fd, 0);
+}
+
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+	return __do_fsync(fd, 1);
+}
+
+/*
  * sys_sync_file_range() permits finely controlled syncing over a segment of
  * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
  * zero then sys_sync_file_range() will operate from offset out to EOF.