author		Tejun Heo <tj@kernel.org>	2015-05-22 17:13:31 -0400
committer	Jens Axboe <axboe@fb.com>	2015-06-02 10:33:34 -0400
commit		4610007142823307d930ac890d822633a05ce08c (patch)
tree		8352cf6e74dfea553680cd98f9039db5d1ebfc18
parent		f0054bb1e1f3be03cc33369df640db97f10f6172 (diff)
writeback: reorganize mm/backing-dev.c
Move wb_shutdown(), bdi_register(), bdi_register_dev(),
bdi_remove_from_list() and bdi_unregister() so that init / exit
functions are grouped together.  This will make updating init / exit
paths for cgroup writeback support easier.

This is pure source file reorganization.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--	mm/backing-dev.c	174
1 file changed, 87 insertions(+), 87 deletions(-)
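
For orientation, the functions being shuffled here implement the bdi init / exit lifecycle. A driver would typically drive it roughly as follows (a minimal sketch: struct my_device and the attach / detach wrappers are hypothetical, only the bdi_* calls are the API this file provides):

/* Hypothetical caller of the bdi init / exit paths touched below. */
struct my_device {
	struct backing_dev_info bdi;
	/* ... driver-specific state ... */
};

static int my_device_attach(struct my_device *md, dev_t devt)
{
	int err;

	err = bdi_init(&md->bdi);		/* init path */
	if (err)
		return err;

	/* registers the bdi device as "MAJOR:MINOR" via bdi_register() */
	err = bdi_register_dev(&md->bdi, devt);
	if (err) {
		bdi_destroy(&md->bdi);
		return err;
	}
	return 0;
}

static void my_device_detach(struct my_device *md)
{
	bdi_unregister(&md->bdi);		/* exit path */
	bdi_destroy(&md->bdi);
}

After this patch the functions sit in mm/backing-dev.c in roughly this calling order: wb_init() / wb_shutdown() / wb_exit() together, then bdi_init(), bdi_register(), bdi_register_dev(), bdi_remove_from_list(), bdi_unregister() and bdi_destroy().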
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 597f0cec8405..ff85ecb7d6a1 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -286,93 +286,6 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
 }
 
 /*
- * Remove bdi from bdi_list, and ensure that it is no longer visible
- */
-static void bdi_remove_from_list(struct backing_dev_info *bdi)
-{
-	spin_lock_bh(&bdi_lock);
-	list_del_rcu(&bdi->bdi_list);
-	spin_unlock_bh(&bdi_lock);
-
-	synchronize_rcu_expedited();
-}
-
-int bdi_register(struct backing_dev_info *bdi, struct device *parent,
-		const char *fmt, ...)
-{
-	va_list args;
-	struct device *dev;
-
-	if (bdi->dev)	/* The driver needs to use separate queues per device */
-		return 0;
-
-	va_start(args, fmt);
-	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
-	va_end(args);
-	if (IS_ERR(dev))
-		return PTR_ERR(dev);
-
-	bdi->dev = dev;
-
-	bdi_debug_register(bdi, dev_name(dev));
-	set_bit(WB_registered, &bdi->wb.state);
-
-	spin_lock_bh(&bdi_lock);
-	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
-	spin_unlock_bh(&bdi_lock);
-
-	trace_writeback_bdi_register(bdi);
-	return 0;
-}
-EXPORT_SYMBOL(bdi_register);
-
-int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
-{
-	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
-}
-EXPORT_SYMBOL(bdi_register_dev);
-
-/*
- * Remove bdi from the global list and shutdown any threads we have running
- */
-static void wb_shutdown(struct bdi_writeback *wb)
-{
-	/* Make sure nobody queues further work */
-	spin_lock_bh(&wb->work_lock);
-	if (!test_and_clear_bit(WB_registered, &wb->state)) {
-		spin_unlock_bh(&wb->work_lock);
-		return;
-	}
-	spin_unlock_bh(&wb->work_lock);
-
-	/*
-	 * Drain work list and shutdown the delayed_work.  !WB_registered
-	 * tells wb_workfn() that @wb is dying and its work_list needs to
-	 * be drained no matter what.
-	 */
-	mod_delayed_work(bdi_wq, &wb->dwork, 0);
-	flush_delayed_work(&wb->dwork);
-	WARN_ON(!list_empty(&wb->work_list));
-}
-
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment.  In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-	if (WARN_ON_ONCE(!bdi->dev))
-		return;
-
-	bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
-/*
  * Initial write bandwidth: 100 MB/s
  */
 #define INIT_BW		(100 << (20 - PAGE_SHIFT))
@@ -418,6 +331,29 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	return 0;
 }
 
+/*
+ * Remove bdi from the global list and shutdown any threads we have running
+ */
+static void wb_shutdown(struct bdi_writeback *wb)
+{
+	/* Make sure nobody queues further work */
+	spin_lock_bh(&wb->work_lock);
+	if (!test_and_clear_bit(WB_registered, &wb->state)) {
+		spin_unlock_bh(&wb->work_lock);
+		return;
+	}
+	spin_unlock_bh(&wb->work_lock);
+
+	/*
+	 * Drain work list and shutdown the delayed_work.  !WB_registered
+	 * tells wb_workfn() that @wb is dying and its work_list needs to
+	 * be drained no matter what.
+	 */
+	mod_delayed_work(bdi_wq, &wb->dwork, 0);
+	flush_delayed_work(&wb->dwork);
+	WARN_ON(!list_empty(&wb->work_list));
+}
+
 static void wb_exit(struct bdi_writeback *wb)
 {
 	int i;
@@ -449,6 +385,70 @@ int bdi_init(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_init);
 
+int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+		const char *fmt, ...)
+{
+	va_list args;
+	struct device *dev;
+
+	if (bdi->dev)	/* The driver needs to use separate queues per device */
+		return 0;
+
+	va_start(args, fmt);
+	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
+	va_end(args);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	bdi->dev = dev;
+
+	bdi_debug_register(bdi, dev_name(dev));
+	set_bit(WB_registered, &bdi->wb.state);
+
+	spin_lock_bh(&bdi_lock);
+	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	trace_writeback_bdi_register(bdi);
+	return 0;
+}
+EXPORT_SYMBOL(bdi_register);
+
+int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+EXPORT_SYMBOL(bdi_register_dev);
+
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+	spin_lock_bh(&bdi_lock);
+	list_del_rcu(&bdi->bdi_list);
+	spin_unlock_bh(&bdi_lock);
+
+	synchronize_rcu_expedited();
+}
+
+/*
+ * Called when the device behind @bdi has been removed or ejected.
+ *
+ * We can't really do much here except for reducing the dirty ratio at
+ * the moment.  In the future we should be able to set a flag so that
+ * the filesystem can handle errors at mark_inode_dirty time instead
+ * of only at writeback time.
+ */
+void bdi_unregister(struct backing_dev_info *bdi)
+{
+	if (WARN_ON_ONCE(!bdi->dev))
+		return;
+
+	bdi_set_min_ratio(bdi, 0);
+}
+EXPORT_SYMBOL(bdi_unregister);
+
 void bdi_destroy(struct backing_dev_info *bdi)
 {
 	/* make sure nobody finds us on the bdi_list anymore */
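
The only non-trivial logic moved is wb_shutdown()'s handshake with wb_workfn(): WB_registered is tested and cleared under work_lock, so a racing shutdown either sees the bit already clear and returns, or is the one that clears it; the forced mod_delayed_work(..., 0) plus flush_delayed_work() then guarantees the work_list is drained before return. The same pattern, sketched as a self-contained userspace program (illustrative names only, not kernel API; a plain thread and a condition variable stand in for the bdi_wq worker):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace analog of the wb_shutdown() / wb_workfn() handshake.
 * A bool plays WB_registered, a counter plays the work_list, and a
 * dedicated thread plays the bdi_wq worker.
 */
struct worker {
	pthread_mutex_t lock;		/* role of wb->work_lock */
	pthread_cond_t kick;
	bool registered;		/* role of WB_registered */
	int pending;			/* role of wb->work_list */
};

static void *worker_fn(void *arg)
{
	struct worker *w = arg;

	pthread_mutex_lock(&w->lock);
	/*
	 * Like wb_workfn(): service work while registered; once
	 * unregistered, drain whatever is left no matter what, then exit.
	 */
	while (w->registered || w->pending) {
		if (w->pending) {
			w->pending--;
			printf("handled one work item\n");
		} else {
			pthread_cond_wait(&w->kick, &w->lock);
		}
	}
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

static void worker_shutdown(struct worker *w, pthread_t thread)
{
	/* Make sure nobody queues further work. */
	pthread_mutex_lock(&w->lock);
	if (!w->registered) {
		pthread_mutex_unlock(&w->lock);
		return;			/* already shut down */
	}
	w->registered = false;
	pthread_cond_signal(&w->kick);	/* cf. mod_delayed_work(..., 0) */
	pthread_mutex_unlock(&w->lock);

	pthread_join(thread, NULL);	/* cf. flush_delayed_work() */
}

int main(void)
{
	static struct worker w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.kick = PTHREAD_COND_INITIALIZER,
		.registered = true,
		.pending = 3,
	};
	pthread_t t;

	pthread_create(&t, NULL, worker_fn, &w);
	worker_shutdown(&w, t);		/* drains the 3 items, then joins */
	return 0;
}

The design point preserved from the kernel version is that the flag is tested and cleared under the same lock that guards queueing, so shutdown can never race with a late enqueue: after the flag falls, nothing new is admitted and the drain is final.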