-rw-r--r--   fs/buffer.c   57
-rw-r--r--   fs/sync.c      1
2 files changed, 28 insertions, 30 deletions
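The patch carries no description on this page, so a brief note: it moves each EXPORT_SYMBOL() in fs/buffer.c (and file_fsync's export, which lands in fs/sync.c) so that the export sits immediately after the function it exports, drops the block of exports that previously sat at the end of fs/buffer.c, and marks mark_buffer_async_write_endio() and do_thaw_all() static (presumably because they have no users outside the file). The fragment below is an illustrative sketch of that convention, not code from the patch; demo_release_bh is a made-up name.

#include <linux/module.h>       /* EXPORT_SYMBOL() */
#include <linux/buffer_head.h>  /* struct buffer_head, put_bh() */

void demo_release_bh(struct buffer_head *bh)
{
        put_bh(bh);             /* drop one reference on the buffer_head */
}
EXPORT_SYMBOL(demo_release_bh); /* export placed right next to its definition,
                                   not collected in a block at end of file */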
diff --git a/fs/buffer.c b/fs/buffer.c
index 90a98865b0cc..209f7f15f5f8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
 	bh->b_end_io = handler;
 	bh->b_private = private;
 }
+EXPORT_SYMBOL(init_buffer);
 
 static int sync_buffer(void *word)
 {
@@ -80,6 +81,7 @@ void unlock_buffer(struct buffer_head *bh)
 	smp_mb__after_clear_bit();
 	wake_up_bit(&bh->b_state, BH_Lock);
 }
+EXPORT_SYMBOL(unlock_buffer);
 
 /*
  * Block until a buffer comes unlocked.  This doesn't stop it
@@ -90,6 +92,7 @@ void __wait_on_buffer(struct buffer_head * bh)
 {
 	wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL(__wait_on_buffer);
 
 static void
 __clear_page_buffers(struct page *page)
@@ -144,6 +147,7 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
 	__end_buffer_read_notouch(bh, uptodate);
 	put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_read_sync);
 
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
@@ -164,6 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 	unlock_buffer(bh);
 	put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_write_sync);
 
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
@@ -272,6 +277,7 @@ void invalidate_bdev(struct block_device *bdev)
 	invalidate_bh_lrus();
 	invalidate_mapping_pages(mapping, 0, -1);
 }
+EXPORT_SYMBOL(invalidate_bdev);
 
 /*
  * Kick pdflush then try to free up some ZONE_NORMAL memory.
@@ -410,6 +416,7 @@ still_busy:
 	local_irq_restore(flags);
 	return;
 }
+EXPORT_SYMBOL(end_buffer_async_write);
 
 /*
  * If a page's buffers are under async readin (end_buffer_async_read
@@ -438,8 +445,8 @@ static void mark_buffer_async_read(struct buffer_head *bh)
 	set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write_endio(struct buffer_head *bh,
-				   bh_end_io_t *handler)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+					  bh_end_io_t *handler)
 {
 	bh->b_end_io = handler;
 	set_buffer_async_write(bh);
@@ -553,7 +560,7 @@ repeat:
 	return err;
 }
 
-void do_thaw_all(struct work_struct *work)
+static void do_thaw_all(struct work_struct *work)
 {
 	struct super_block *sb;
 	char b[BDEVNAME_SIZE];
@@ -1172,6 +1179,7 @@ void mark_buffer_dirty(struct buffer_head *bh)
 		}
 	}
 }
+EXPORT_SYMBOL(mark_buffer_dirty);
 
 /*
  * Decrement a buffer_head's reference count. If all buffers against a page
@@ -1188,6 +1196,7 @@ void __brelse(struct buffer_head * buf)
 	}
 	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
 }
+EXPORT_SYMBOL(__brelse);
 
 /*
  * bforget() is like brelse(), except it discards any
@@ -1206,6 +1215,7 @@ void __bforget(struct buffer_head *bh)
 	}
 	__brelse(bh);
 }
+EXPORT_SYMBOL(__bforget);
 
 static struct buffer_head *__bread_slow(struct buffer_head *bh)
 {
@@ -2218,6 +2228,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(block_read_full_page);
 
 /* utility function for filesystems that need to do work on expanding
  * truncates. Uses filesystem pagecache writes to allow the filesystem to
@@ -2252,6 +2263,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
 out:
 	return err;
 }
+EXPORT_SYMBOL(generic_cont_expand_simple);
 
 static int cont_expand_zero(struct file *file, struct address_space *mapping,
 			loff_t pos, loff_t *bytes)
@@ -2352,6 +2364,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 out:
 	return err;
 }
+EXPORT_SYMBOL(cont_write_begin);
 
 int block_prepare_write(struct page *page, unsigned from, unsigned to,
 			get_block_t *get_block)
@@ -2362,6 +2375,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
 		ClearPageUptodate(page);
 	return err;
 }
+EXPORT_SYMBOL(block_prepare_write);
 
 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
@@ -2369,6 +2383,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
 	__block_commit_write(inode,page,from,to);
 	return 0;
 }
+EXPORT_SYMBOL(block_commit_write);
 
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
@@ -2426,6 +2441,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 out:
 	return ret;
 }
+EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
@@ -2849,6 +2865,7 @@ unlock:
 out:
 	return err;
 }
+EXPORT_SYMBOL(block_truncate_page);
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
@@ -2890,6 +2907,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 	return __block_write_full_page(inode, page, get_block, wbc, handler);
 }
+EXPORT_SYMBOL(block_write_full_page_endio);
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
@@ -2900,7 +2918,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 	return block_write_full_page_endio(page, get_block, wbc,
 					end_buffer_async_write);
 }
-
+EXPORT_SYMBOL(block_write_full_page);
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 			    get_block_t *get_block)
@@ -2913,6 +2931,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 	get_block(inode, block, &tmp, 0);
 	return tmp.b_blocknr;
 }
+EXPORT_SYMBOL(generic_block_bmap);
 
 static void end_bio_bh_io_sync(struct bio *bio, int err)
 {
@@ -2982,6 +3001,7 @@ int submit_bh(int rw, struct buffer_head * bh)
 	bio_put(bio);
 	return ret;
 }
+EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
@@ -3043,6 +3063,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 			unlock_buffer(bh);
 	}
 }
+EXPORT_SYMBOL(ll_rw_block);
 
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
@@ -3071,6 +3092,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	}
 	return ret;
 }
+EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
@@ -3185,6 +3207,7 @@ void block_sync_page(struct page *page)
 	if (mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, page);
 }
+EXPORT_SYMBOL(block_sync_page);
 
 /*
  * There are no bdflush tunables left. But distributions are
@@ -3361,29 +3384,3 @@ void __init buffer_init(void)
 	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
 	hotcpu_notifier(buffer_cpu_notify, 0);
 }
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_page_mkwrite);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(block_write_full_page_endio);
-EXPORT_SYMBOL(cont_write_begin);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(end_buffer_async_write);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_cont_expand_simple);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);
diff --git a/fs/sync.c b/fs/sync.c
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -183,6 +183,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
 		ret = err;
 	return ret;
 }
+EXPORT_SYMBOL(file_fsync);
 
 /**
  * vfs_fsync_range - helper to sync a range of data & metadata to disk