about summary refs log tree commit diff stats
path: root/fs/buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c  86
1 files changed, 40 insertions, 46 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index a3ef091a45bd..6fa530256bfd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
52 bh->b_end_io = handler; 52 bh->b_end_io = handler;
53 bh->b_private = private; 53 bh->b_private = private;
54} 54}
55EXPORT_SYMBOL(init_buffer);
55 56
56static int sync_buffer(void *word) 57static int sync_buffer(void *word)
57{ 58{
@@ -80,6 +81,7 @@ void unlock_buffer(struct buffer_head *bh)
80 smp_mb__after_clear_bit(); 81 smp_mb__after_clear_bit();
81 wake_up_bit(&bh->b_state, BH_Lock); 82 wake_up_bit(&bh->b_state, BH_Lock);
82} 83}
84EXPORT_SYMBOL(unlock_buffer);
83 85
84/* 86/*
85 * Block until a buffer comes unlocked. This doesn't stop it 87 * Block until a buffer comes unlocked. This doesn't stop it
@@ -90,6 +92,7 @@ void __wait_on_buffer(struct buffer_head * bh)
90{ 92{
91 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE); 93 wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
92} 94}
95EXPORT_SYMBOL(__wait_on_buffer);
93 96
94static void 97static void
95__clear_page_buffers(struct page *page) 98__clear_page_buffers(struct page *page)
@@ -144,6 +147,7 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
144 __end_buffer_read_notouch(bh, uptodate); 147 __end_buffer_read_notouch(bh, uptodate);
145 put_bh(bh); 148 put_bh(bh);
146} 149}
150EXPORT_SYMBOL(end_buffer_read_sync);
147 151
148void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 152void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
149{ 153{
@@ -164,6 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
164 unlock_buffer(bh); 168 unlock_buffer(bh);
165 put_bh(bh); 169 put_bh(bh);
166} 170}
171EXPORT_SYMBOL(end_buffer_write_sync);
167 172
168/* 173/*
169 * Various filesystems appear to want __find_get_block to be non-blocking. 174 * Various filesystems appear to want __find_get_block to be non-blocking.
@@ -272,16 +277,17 @@ void invalidate_bdev(struct block_device *bdev)
272 invalidate_bh_lrus(); 277 invalidate_bh_lrus();
273 invalidate_mapping_pages(mapping, 0, -1); 278 invalidate_mapping_pages(mapping, 0, -1);
274} 279}
280EXPORT_SYMBOL(invalidate_bdev);
275 281
276/* 282/*
277 * Kick pdflush then try to free up some ZONE_NORMAL memory. 283 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
278 */ 284 */
279static void free_more_memory(void) 285static void free_more_memory(void)
280{ 286{
281 struct zone *zone; 287 struct zone *zone;
282 int nid; 288 int nid;
283 289
284 wakeup_pdflush(1024); 290 wakeup_flusher_threads(1024);
285 yield(); 291 yield();
286 292
287 for_each_online_node(nid) { 293 for_each_online_node(nid) {
@@ -410,6 +416,7 @@ still_busy:
410 local_irq_restore(flags); 416 local_irq_restore(flags);
411 return; 417 return;
412} 418}
419EXPORT_SYMBOL(end_buffer_async_write);
413 420
414/* 421/*
415 * If a page's buffers are under async readin (end_buffer_async_read 422 * If a page's buffers are under async readin (end_buffer_async_read
@@ -438,8 +445,8 @@ static void mark_buffer_async_read(struct buffer_head *bh)
438 set_buffer_async_read(bh); 445 set_buffer_async_read(bh);
439} 446}
440 447
441void mark_buffer_async_write_endio(struct buffer_head *bh, 448static void mark_buffer_async_write_endio(struct buffer_head *bh,
442 bh_end_io_t *handler) 449 bh_end_io_t *handler)
443{ 450{
444 bh->b_end_io = handler; 451 bh->b_end_io = handler;
445 set_buffer_async_write(bh); 452 set_buffer_async_write(bh);
@@ -553,7 +560,7 @@ repeat:
553 return err; 560 return err;
554} 561}
555 562
556void do_thaw_all(struct work_struct *work) 563static void do_thaw_all(struct work_struct *work)
557{ 564{
558 struct super_block *sb; 565 struct super_block *sb;
559 char b[BDEVNAME_SIZE]; 566 char b[BDEVNAME_SIZE];
@@ -1165,10 +1172,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
1165 1172
1166 if (!test_set_buffer_dirty(bh)) { 1173 if (!test_set_buffer_dirty(bh)) {
1167 struct page *page = bh->b_page; 1174 struct page *page = bh->b_page;
1168 if (!TestSetPageDirty(page)) 1175 if (!TestSetPageDirty(page)) {
1169 __set_page_dirty(page, page_mapping(page), 0); 1176 struct address_space *mapping = page_mapping(page);
1177 if (mapping)
1178 __set_page_dirty(page, mapping, 0);
1179 }
1170 } 1180 }
1171} 1181}
1182EXPORT_SYMBOL(mark_buffer_dirty);
1172 1183
1173/* 1184/*
1174 * Decrement a buffer_head's reference count. If all buffers against a page 1185 * Decrement a buffer_head's reference count. If all buffers against a page
@@ -1185,6 +1196,7 @@ void __brelse(struct buffer_head * buf)
1185 } 1196 }
1186 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n"); 1197 WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1187} 1198}
1199EXPORT_SYMBOL(__brelse);
1188 1200
1189/* 1201/*
1190 * bforget() is like brelse(), except it discards any 1202 * bforget() is like brelse(), except it discards any
@@ -1203,6 +1215,7 @@ void __bforget(struct buffer_head *bh)
1203 } 1215 }
1204 __brelse(bh); 1216 __brelse(bh);
1205} 1217}
1218EXPORT_SYMBOL(__bforget);
1206 1219
1207static struct buffer_head *__bread_slow(struct buffer_head *bh) 1220static struct buffer_head *__bread_slow(struct buffer_head *bh)
1208{ 1221{
@@ -1696,9 +1709,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
1696 /* 1709 /*
1697 * If it's a fully non-blocking write attempt and we cannot 1710 * If it's a fully non-blocking write attempt and we cannot
1698 * lock the buffer then redirty the page. Note that this can 1711 * lock the buffer then redirty the page. Note that this can
1699 * potentially cause a busy-wait loop from pdflush and kswapd 1712 * potentially cause a busy-wait loop from writeback threads
1700 * activity, but those code paths have their own higher-level 1713 * and kswapd activity, but those code paths have their own
1701 * throttling. 1714 * higher-level throttling.
1702 */ 1715 */
1703 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) { 1716 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1704 lock_buffer(bh); 1717 lock_buffer(bh);
@@ -2215,6 +2228,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
2215 } 2228 }
2216 return 0; 2229 return 0;
2217} 2230}
2231EXPORT_SYMBOL(block_read_full_page);
2218 2232
2219/* utility function for filesystems that need to do work on expanding 2233/* utility function for filesystems that need to do work on expanding
2220 * truncates. Uses filesystem pagecache writes to allow the filesystem to 2234 * truncates. Uses filesystem pagecache writes to allow the filesystem to
@@ -2225,16 +2239,10 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
2225 struct address_space *mapping = inode->i_mapping; 2239 struct address_space *mapping = inode->i_mapping;
2226 struct page *page; 2240 struct page *page;
2227 void *fsdata; 2241 void *fsdata;
2228 unsigned long limit;
2229 int err; 2242 int err;
2230 2243
2231 err = -EFBIG; 2244 err = inode_newsize_ok(inode, size);
2232 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 2245 if (err)
2233 if (limit != RLIM_INFINITY && size > (loff_t)limit) {
2234 send_sig(SIGXFSZ, current, 0);
2235 goto out;
2236 }
2237 if (size > inode->i_sb->s_maxbytes)
2238 goto out; 2246 goto out;
2239 2247
2240 err = pagecache_write_begin(NULL, mapping, size, 0, 2248 err = pagecache_write_begin(NULL, mapping, size, 0,
@@ -2249,6 +2257,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
2249out: 2257out:
2250 return err; 2258 return err;
2251} 2259}
2260EXPORT_SYMBOL(generic_cont_expand_simple);
2252 2261
2253static int cont_expand_zero(struct file *file, struct address_space *mapping, 2262static int cont_expand_zero(struct file *file, struct address_space *mapping,
2254 loff_t pos, loff_t *bytes) 2263 loff_t pos, loff_t *bytes)
@@ -2349,6 +2358,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
2349out: 2358out:
2350 return err; 2359 return err;
2351} 2360}
2361EXPORT_SYMBOL(cont_write_begin);
2352 2362
2353int block_prepare_write(struct page *page, unsigned from, unsigned to, 2363int block_prepare_write(struct page *page, unsigned from, unsigned to,
2354 get_block_t *get_block) 2364 get_block_t *get_block)
@@ -2359,6 +2369,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
2359 ClearPageUptodate(page); 2369 ClearPageUptodate(page);
2360 return err; 2370 return err;
2361} 2371}
2372EXPORT_SYMBOL(block_prepare_write);
2362 2373
2363int block_commit_write(struct page *page, unsigned from, unsigned to) 2374int block_commit_write(struct page *page, unsigned from, unsigned to)
2364{ 2375{
@@ -2366,6 +2377,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
2366 __block_commit_write(inode,page,from,to); 2377 __block_commit_write(inode,page,from,to);
2367 return 0; 2378 return 0;
2368} 2379}
2380EXPORT_SYMBOL(block_commit_write);
2369 2381
2370/* 2382/*
2371 * block_page_mkwrite() is not allowed to change the file size as it gets 2383 * block_page_mkwrite() is not allowed to change the file size as it gets
@@ -2423,6 +2435,7 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2423out: 2435out:
2424 return ret; 2436 return ret;
2425} 2437}
2438EXPORT_SYMBOL(block_page_mkwrite);
2426 2439
2427/* 2440/*
2428 * nobh_write_begin()'s prereads are special: the buffer_heads are freed 2441 * nobh_write_begin()'s prereads are special: the buffer_heads are freed
@@ -2846,6 +2859,7 @@ unlock:
2846out: 2859out:
2847 return err; 2860 return err;
2848} 2861}
2862EXPORT_SYMBOL(block_truncate_page);
2849 2863
2850/* 2864/*
2851 * The generic ->writepage function for buffer-backed address_spaces 2865 * The generic ->writepage function for buffer-backed address_spaces
@@ -2887,6 +2901,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
2887 zero_user_segment(page, offset, PAGE_CACHE_SIZE); 2901 zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2888 return __block_write_full_page(inode, page, get_block, wbc, handler); 2902 return __block_write_full_page(inode, page, get_block, wbc, handler);
2889} 2903}
2904EXPORT_SYMBOL(block_write_full_page_endio);
2890 2905
2891/* 2906/*
2892 * The generic ->writepage function for buffer-backed address_spaces 2907 * The generic ->writepage function for buffer-backed address_spaces
@@ -2897,7 +2912,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
2897 return block_write_full_page_endio(page, get_block, wbc, 2912 return block_write_full_page_endio(page, get_block, wbc,
2898 end_buffer_async_write); 2913 end_buffer_async_write);
2899} 2914}
2900 2915EXPORT_SYMBOL(block_write_full_page);
2901 2916
2902sector_t generic_block_bmap(struct address_space *mapping, sector_t block, 2917sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2903 get_block_t *get_block) 2918 get_block_t *get_block)
@@ -2910,6 +2925,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2910 get_block(inode, block, &tmp, 0); 2925 get_block(inode, block, &tmp, 0);
2911 return tmp.b_blocknr; 2926 return tmp.b_blocknr;
2912} 2927}
2928EXPORT_SYMBOL(generic_block_bmap);
2913 2929
2914static void end_bio_bh_io_sync(struct bio *bio, int err) 2930static void end_bio_bh_io_sync(struct bio *bio, int err)
2915{ 2931{
@@ -2979,6 +2995,7 @@ int submit_bh(int rw, struct buffer_head * bh)
2979 bio_put(bio); 2995 bio_put(bio);
2980 return ret; 2996 return ret;
2981} 2997}
2998EXPORT_SYMBOL(submit_bh);
2982 2999
2983/** 3000/**
2984 * ll_rw_block: low-level access to block devices (DEPRECATED) 3001 * ll_rw_block: low-level access to block devices (DEPRECATED)
@@ -3040,6 +3057,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3040 unlock_buffer(bh); 3057 unlock_buffer(bh);
3041 } 3058 }
3042} 3059}
3060EXPORT_SYMBOL(ll_rw_block);
3043 3061
3044/* 3062/*
3045 * For a data-integrity writeout, we need to wait upon any in-progress I/O 3063 * For a data-integrity writeout, we need to wait upon any in-progress I/O
@@ -3068,6 +3086,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
3068 } 3086 }
3069 return ret; 3087 return ret;
3070} 3088}
3089EXPORT_SYMBOL(sync_dirty_buffer);
3071 3090
3072/* 3091/*
3073 * try_to_free_buffers() checks if all the buffers on this particular page 3092 * try_to_free_buffers() checks if all the buffers on this particular page
@@ -3182,13 +3201,14 @@ void block_sync_page(struct page *page)
3182 if (mapping) 3201 if (mapping)
3183 blk_run_backing_dev(mapping->backing_dev_info, page); 3202 blk_run_backing_dev(mapping->backing_dev_info, page);
3184} 3203}
3204EXPORT_SYMBOL(block_sync_page);
3185 3205
3186/* 3206/*
3187 * There are no bdflush tunables left. But distributions are 3207 * There are no bdflush tunables left. But distributions are
3188 * still running obsolete flush daemons, so we terminate them here. 3208 * still running obsolete flush daemons, so we terminate them here.
3189 * 3209 *
3190 * Use of bdflush() is deprecated and will be removed in a future kernel. 3210 * Use of bdflush() is deprecated and will be removed in a future kernel.
3191 * The `pdflush' kernel threads fully replace bdflush daemons and this call. 3211 * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3192 */ 3212 */
3193SYSCALL_DEFINE2(bdflush, int, func, long, data) 3213SYSCALL_DEFINE2(bdflush, int, func, long, data)
3194{ 3214{
@@ -3358,29 +3378,3 @@ void __init buffer_init(void)
3358 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head)); 3378 max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3359 hotcpu_notifier(buffer_cpu_notify, 0); 3379 hotcpu_notifier(buffer_cpu_notify, 0);
3360} 3380}
3361
3362EXPORT_SYMBOL(__bforget);
3363EXPORT_SYMBOL(__brelse);
3364EXPORT_SYMBOL(__wait_on_buffer);
3365EXPORT_SYMBOL(block_commit_write);
3366EXPORT_SYMBOL(block_prepare_write);
3367EXPORT_SYMBOL(block_page_mkwrite);
3368EXPORT_SYMBOL(block_read_full_page);
3369EXPORT_SYMBOL(block_sync_page);
3370EXPORT_SYMBOL(block_truncate_page);
3371EXPORT_SYMBOL(block_write_full_page);
3372EXPORT_SYMBOL(block_write_full_page_endio);
3373EXPORT_SYMBOL(cont_write_begin);
3374EXPORT_SYMBOL(end_buffer_read_sync);
3375EXPORT_SYMBOL(end_buffer_write_sync);
3376EXPORT_SYMBOL(end_buffer_async_write);
3377EXPORT_SYMBOL(file_fsync);
3378EXPORT_SYMBOL(generic_block_bmap);
3379EXPORT_SYMBOL(generic_cont_expand_simple);
3380EXPORT_SYMBOL(init_buffer);
3381EXPORT_SYMBOL(invalidate_bdev);
3382EXPORT_SYMBOL(ll_rw_block);
3383EXPORT_SYMBOL(mark_buffer_dirty);
3384EXPORT_SYMBOL(submit_bh);
3385EXPORT_SYMBOL(sync_dirty_buffer);
3386EXPORT_SYMBOL(unlock_buffer);