author		Nick Piggin <npiggin@suse.de>		2009-02-25 04:44:19 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2009-03-27 14:44:03 -0400
commit		585d3bc06f4ca57f975a5a1f698f65a45ea66225 (patch)
tree		393cbd213e714f00c0cae85851e7837ed88b9a99
parent		3ba13d179e8c24c68eac32b93593a6b10fcd1572 (diff)
fs: move bdev code out of buffer.c
Move some block device related code out of buffer.c and put it in
block_dev.c. I'm trying to move non-buffer_head code out of buffer.c.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--	fs/block_dev.c			| 146
-rw-r--r--	fs/buffer.c			| 145
-rw-r--r--	include/linux/buffer_head.h	|   7
-rw-r--r--	include/linux/fs.h		|   7

4 files changed, 153 insertions(+), 152 deletions(-)
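For readers following the moved code: the freeze_bdev()/thaw_bdev() pair is reference-counted (see the kernel-doc in the fs/block_dev.c hunk below), so a nested freeze only bumps bd_fsfreeze_count and only the final thaw actually unfreezes the filesystem. The following is a minimal sketch, not part of the patch, of how a caller such as a snapshot driver might drive this API against a tree of this vintage; the helper names are illustrative.

	#include <linux/fs.h>
	#include <linux/err.h>

	/* Placeholder; a real driver would do its snapshot work here. */
	static int take_snapshot(struct block_device *bdev)
	{
		return 0;
	}

	/* Hypothetical caller: quiesce the filesystem on bdev, snapshot it, resume. */
	static int snapshot_with_freeze(struct block_device *bdev)
	{
		struct super_block *sb;
		int err;

		sb = freeze_bdev(bdev);		/* NULL if nothing is mounted on bdev */
		if (IS_ERR(sb))
			return PTR_ERR(sb);	/* ->freeze_fs() failed */

		err = take_snapshot(bdev);	/* placeholder for the real work */

		/* Every freeze_bdev() must be paired with a thaw_bdev(). */
		thaw_bdev(bdev, sb);
		return err;
	}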
diff --git a/fs/block_dev.c b/fs/block_dev.c
index b3c1efff5e1d..8c3c6899ccf3 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/buffer_head.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/mount.h>
@@ -174,6 +175,151 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 				iov, offset, nr_segs, blkdev_get_blocks, NULL);
 }
 
+/*
+ * Write out and wait upon all the dirty data associated with a block
+ * device via its mapping. Does not take the superblock lock.
+ */
+int sync_blockdev(struct block_device *bdev)
+{
+	int ret = 0;
+
+	if (bdev)
+		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
+	return ret;
+}
+EXPORT_SYMBOL(sync_blockdev);
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * device. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_bdev(struct block_device *bdev)
+{
+	struct super_block *sb = get_super(bdev);
+	if (sb) {
+		int res = fsync_super(sb);
+		drop_super(sb);
+		return res;
+	}
+	return sync_blockdev(bdev);
+}
+
+/**
+ * freeze_bdev -- lock a filesystem and force it into a consistent state
+ * @bdev:	blockdevice to lock
+ *
+ * This takes the block device bd_mount_sem to make sure no new mounts
+ * happen on bdev until thaw_bdev() is called.
+ * If a superblock is found on this device, we take the s_umount semaphore
+ * on it to make sure nobody unmounts until the snapshot creation is done.
+ * The reference counter (bd_fsfreeze_count) guarantees that only the last
+ * unfreeze process can unfreeze the frozen filesystem actually when multiple
+ * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
+ * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
+ * actually.
+ */
+struct super_block *freeze_bdev(struct block_device *bdev)
+{
+	struct super_block *sb;
+	int error = 0;
+
+	mutex_lock(&bdev->bd_fsfreeze_mutex);
+	if (bdev->bd_fsfreeze_count > 0) {
+		bdev->bd_fsfreeze_count++;
+		sb = get_super(bdev);
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return sb;
+	}
+	bdev->bd_fsfreeze_count++;
+
+	down(&bdev->bd_mount_sem);
+	sb = get_super(bdev);
+	if (sb && !(sb->s_flags & MS_RDONLY)) {
+		sb->s_frozen = SB_FREEZE_WRITE;
+		smp_wmb();
+
+		__fsync_super(sb);
+
+		sb->s_frozen = SB_FREEZE_TRANS;
+		smp_wmb();
+
+		sync_blockdev(sb->s_bdev);
+
+		if (sb->s_op->freeze_fs) {
+			error = sb->s_op->freeze_fs(sb);
+			if (error) {
+				printk(KERN_ERR
+					"VFS:Filesystem freeze failed\n");
+				sb->s_frozen = SB_UNFROZEN;
+				drop_super(sb);
+				up(&bdev->bd_mount_sem);
+				bdev->bd_fsfreeze_count--;
+				mutex_unlock(&bdev->bd_fsfreeze_mutex);
+				return ERR_PTR(error);
+			}
+		}
+	}
+
+	sync_blockdev(bdev);
+	mutex_unlock(&bdev->bd_fsfreeze_mutex);
+
+	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
+}
+EXPORT_SYMBOL(freeze_bdev);
+
+/**
+ * thaw_bdev -- unlock filesystem
+ * @bdev:	blockdevice to unlock
+ * @sb:		associated superblock
+ *
+ * Unlocks the filesystem and marks it writeable again after freeze_bdev().
+ */
+int thaw_bdev(struct block_device *bdev, struct super_block *sb)
+{
+	int error = 0;
+
+	mutex_lock(&bdev->bd_fsfreeze_mutex);
+	if (!bdev->bd_fsfreeze_count) {
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return -EINVAL;
+	}
+
+	bdev->bd_fsfreeze_count--;
+	if (bdev->bd_fsfreeze_count > 0) {
+		if (sb)
+			drop_super(sb);
+		mutex_unlock(&bdev->bd_fsfreeze_mutex);
+		return 0;
+	}
+
+	if (sb) {
+		BUG_ON(sb->s_bdev != bdev);
+		if (!(sb->s_flags & MS_RDONLY)) {
+			if (sb->s_op->unfreeze_fs) {
+				error = sb->s_op->unfreeze_fs(sb);
+				if (error) {
+					printk(KERN_ERR
+						"VFS:Filesystem thaw failed\n");
+					sb->s_frozen = SB_FREEZE_TRANS;
+					bdev->bd_fsfreeze_count++;
+					mutex_unlock(&bdev->bd_fsfreeze_mutex);
+					return error;
+				}
+			}
+			sb->s_frozen = SB_UNFROZEN;
+			smp_wmb();
+			wake_up(&sb->s_wait_unfrozen);
+		}
+		drop_super(sb);
+	}
+
+	up(&bdev->bd_mount_sem);
+	mutex_unlock(&bdev->bd_fsfreeze_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(thaw_bdev);
+
 static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
 {
 	return block_write_full_page(page, blkdev_get_block, wbc);
diff --git a/fs/buffer.c b/fs/buffer.c
index 891e1c78e4f1..a2fd743d97cb 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -166,151 +166,6 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 }
 
 /*
- * Write out and wait upon all the dirty data associated with a block
- * device via its mapping. Does not take the superblock lock.
- */
-int sync_blockdev(struct block_device *bdev)
-{
-	int ret = 0;
-
-	if (bdev)
-		ret = filemap_write_and_wait(bdev->bd_inode->i_mapping);
-	return ret;
-}
-EXPORT_SYMBOL(sync_blockdev);
-
-/*
- * Write out and wait upon all dirty data associated with this
- * device. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_bdev(struct block_device *bdev)
-{
-	struct super_block *sb = get_super(bdev);
-	if (sb) {
-		int res = fsync_super(sb);
-		drop_super(sb);
-		return res;
-	}
-	return sync_blockdev(bdev);
-}
-
-/**
- * freeze_bdev -- lock a filesystem and force it into a consistent state
- * @bdev:	blockdevice to lock
- *
- * This takes the block device bd_mount_sem to make sure no new mounts
- * happen on bdev until thaw_bdev() is called.
- * If a superblock is found on this device, we take the s_umount semaphore
- * on it to make sure nobody unmounts until the snapshot creation is done.
- * The reference counter (bd_fsfreeze_count) guarantees that only the last
- * unfreeze process can unfreeze the frozen filesystem actually when multiple
- * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
- * count down in thaw_bdev(). When it becomes 0, thaw_bdev() will unfreeze
- * actually.
- */
-struct super_block *freeze_bdev(struct block_device *bdev)
-{
-	struct super_block *sb;
-	int error = 0;
-
-	mutex_lock(&bdev->bd_fsfreeze_mutex);
-	if (bdev->bd_fsfreeze_count > 0) {
-		bdev->bd_fsfreeze_count++;
-		sb = get_super(bdev);
-		mutex_unlock(&bdev->bd_fsfreeze_mutex);
-		return sb;
-	}
-	bdev->bd_fsfreeze_count++;
-
-	down(&bdev->bd_mount_sem);
-	sb = get_super(bdev);
-	if (sb && !(sb->s_flags & MS_RDONLY)) {
-		sb->s_frozen = SB_FREEZE_WRITE;
-		smp_wmb();
-
-		__fsync_super(sb);
-
-		sb->s_frozen = SB_FREEZE_TRANS;
-		smp_wmb();
-
-		sync_blockdev(sb->s_bdev);
-
-		if (sb->s_op->freeze_fs) {
-			error = sb->s_op->freeze_fs(sb);
-			if (error) {
-				printk(KERN_ERR
-					"VFS:Filesystem freeze failed\n");
-				sb->s_frozen = SB_UNFROZEN;
-				drop_super(sb);
-				up(&bdev->bd_mount_sem);
-				bdev->bd_fsfreeze_count--;
-				mutex_unlock(&bdev->bd_fsfreeze_mutex);
-				return ERR_PTR(error);
-			}
-		}
-	}
-
-	sync_blockdev(bdev);
-	mutex_unlock(&bdev->bd_fsfreeze_mutex);
-
-	return sb;	/* thaw_bdev releases s->s_umount and bd_mount_sem */
-}
-EXPORT_SYMBOL(freeze_bdev);
-
-/**
- * thaw_bdev -- unlock filesystem
- * @bdev:	blockdevice to unlock
- * @sb:		associated superblock
- *
- * Unlocks the filesystem and marks it writeable again after freeze_bdev().
- */
-int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-	int error = 0;
-
-	mutex_lock(&bdev->bd_fsfreeze_mutex);
-	if (!bdev->bd_fsfreeze_count) {
-		mutex_unlock(&bdev->bd_fsfreeze_mutex);
-		return -EINVAL;
-	}
-
-	bdev->bd_fsfreeze_count--;
-	if (bdev->bd_fsfreeze_count > 0) {
-		if (sb)
-			drop_super(sb);
-		mutex_unlock(&bdev->bd_fsfreeze_mutex);
-		return 0;
-	}
-
-	if (sb) {
-		BUG_ON(sb->s_bdev != bdev);
-		if (!(sb->s_flags & MS_RDONLY)) {
-			if (sb->s_op->unfreeze_fs) {
-				error = sb->s_op->unfreeze_fs(sb);
-				if (error) {
-					printk(KERN_ERR
-						"VFS:Filesystem thaw failed\n");
-					sb->s_frozen = SB_FREEZE_TRANS;
-					bdev->bd_fsfreeze_count++;
-					mutex_unlock(&bdev->bd_fsfreeze_mutex);
-					return error;
-				}
-			}
-			sb->s_frozen = SB_UNFROZEN;
-			smp_wmb();
-			wake_up(&sb->s_wait_unfrozen);
-		}
-		drop_super(sb);
-	}
-
-	up(&bdev->bd_mount_sem);
-	mutex_unlock(&bdev->bd_fsfreeze_mutex);
-	return 0;
-}
-EXPORT_SYMBOL(thaw_bdev);
-
-/*
  * Various filesystems appear to want __find_get_block to be non-blocking.
  * But it's the page lock which protects the buffers. To get around this,
  * we get exclusion from try_to_free_buffers with the blockdev mapping's
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index bd7ac793be19..f19fd9045ea0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -165,15 +165,8 @@ int sync_mapping_buffers(struct address_space *mapping);
 void unmap_underlying_metadata(struct block_device *bdev, sector_t block);
 
 void mark_buffer_async_write(struct buffer_head *bh);
-void invalidate_bdev(struct block_device *);
-int sync_blockdev(struct block_device *bdev);
 void __wait_on_buffer(struct buffer_head *);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
-int fsync_bdev(struct block_device *);
-struct super_block *freeze_bdev(struct block_device *);
-int thaw_bdev(struct block_device *, struct super_block *);
-int fsync_super(struct super_block *);
-int fsync_no_super(struct block_device *);
 struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
 			unsigned size);
 struct buffer_head *__getblk(struct block_device *bdev, sector_t block,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5f74d616cd7d..c2c4454a268a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1874,6 +1874,13 @@ extern void bd_set_size(struct block_device *, loff_t size);
 extern void bd_forget(struct inode *inode);
 extern void bdput(struct block_device *);
 extern struct block_device *open_by_devnum(dev_t, fmode_t);
+extern void invalidate_bdev(struct block_device *);
+extern int sync_blockdev(struct block_device *bdev);
+extern struct super_block *freeze_bdev(struct block_device *);
+extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+extern int fsync_bdev(struct block_device *);
+extern int fsync_super(struct super_block *);
+extern int fsync_no_super(struct block_device *);
 #else
 static inline void bd_forget(struct inode *inode) {}
 #endif
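With this change the prototypes for invalidate_bdev(), sync_blockdev(), fsync_bdev(), freeze_bdev(), thaw_bdev(), fsync_super() and fsync_no_super() are declared in <linux/fs.h> rather than <linux/buffer_head.h>. A minimal sketch of a caller after the move; the helper name is illustrative and the bdev pointer is assumed to come from elsewhere (e.g. open_by_devnum()):

	#include <linux/fs.h>	/* declarations now live here, not in buffer_head.h */

	/* Illustrative helper: write out and wait on all dirty pagecache of a block device. */
	static int flush_bdev_pages(struct block_device *bdev)
	{
		return sync_blockdev(bdev);
	}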