Diffstat (limited to 'include/linux/backing-dev.h')
-rw-r--r--  include/linux/backing-dev.h | 80
1 file changed, 14 insertions(+), 66 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 5a5d79ee256f..c85f74946a8b 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,18 +13,23 @@
 #include <linux/sched.h>
 #include <linux/blkdev.h>
 #include <linux/writeback.h>
+#include <linux/memcontrol.h>
 #include <linux/blk-cgroup.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/slab.h>
 
 int __must_check bdi_init(struct backing_dev_info *bdi);
-void bdi_destroy(struct backing_dev_info *bdi);
+void bdi_exit(struct backing_dev_info *bdi);
 
 __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+void bdi_unregister(struct backing_dev_info *bdi);
+
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
+void bdi_destroy(struct backing_dev_info *bdi);
+
 void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
 			bool range_cyclic, enum wb_reason reason);
 void wb_start_background_writeback(struct bdi_writeback *wb);
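
[Review note] The hunk above appears to split bdi teardown: bdi_exit() pairs with bdi_init(), bdi_unregister() pairs with bdi_register(), while bdi_destroy() stays declared next to bdi_setup_and_register(). Below is a minimal sketch of how a hypothetical driver embedding a backing_dev_info might pair these calls; the example_* names are invented, and treating bdi_destroy() as the combined unregister-plus-exit helper is an assumption, not something this hunk states.

/*
 * Illustrative sketch only (not from this patch): pairing the calls
 * declared above for a driver with an embedded backing_dev_info.
 * example_* names are hypothetical; error handling is reduced.
 */
static struct backing_dev_info example_bdi;

static int example_attach(void)
{
	int err;

	err = bdi_init(&example_bdi);		/* set up internal state */
	if (err)
		return err;

	err = bdi_register(&example_bdi, NULL, "example-%d", 0);
	if (err)
		bdi_exit(&example_bdi);		/* undo bdi_init() only */
	return err;
}

static void example_detach(void)
{
	bdi_unregister(&example_bdi);		/* undo bdi_register() */
	bdi_exit(&example_bdi);			/* undo bdi_init() */
}

Callers wanting a single-call setup/teardown presumably keep using bdi_setup_and_register() with bdi_destroy(), both of which this hunk leaves in place.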
@@ -252,13 +257,19 @@ int inode_congested(struct inode *inode, int cong_bits);
  * @inode: inode of interest
  *
  * cgroup writeback requires support from both the bdi and filesystem.
- * Test whether @inode has both.
+ * Also, both memcg and iocg have to be on the default hierarchy.  Test
+ * whether all conditions are met.
+ *
+ * Note that the test result may change dynamically on the same inode
+ * depending on how memcg and iocg are configured.
  */
 static inline bool inode_cgwb_enabled(struct inode *inode)
 {
 	struct backing_dev_info *bdi = inode_to_bdi(inode);
 
-	return bdi_cap_account_dirty(bdi) &&
+	return cgroup_on_dfl(mem_cgroup_root_css->cgroup) &&
+		cgroup_on_dfl(blkcg_root_css->cgroup) &&
+		bdi_cap_account_dirty(bdi) &&
 		(bdi->capabilities & BDI_CAP_CGROUP_WRITEBACK) &&
 		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
 }
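
[Review note] inode_cgwb_enabled() now additionally requires both the memory and block cgroup controllers to sit on the default hierarchy, and the updated comment warns the result may change at runtime. A hedged sketch of the caller pattern this check gates; example_pick_wb() is a made-up helper, and the locking that inode_to_wb() expects is elided here.

/*
 * Illustrative only: select a writeback target for an inode.  The
 * helper is hypothetical; inode_to_wb() must be called under the
 * locks its lockdep annotation expects.
 */
static struct bdi_writeback *example_pick_wb(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (!inode_cgwb_enabled(inode))
		return &bdi->wb;	/* cgroup writeback unavailable: root wb */

	/* All conditions met: use the per-cgroup wb associated with the inode. */
	return inode_to_wb(inode);
}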
@@ -401,61 +412,6 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
 	rcu_read_unlock();
 }
 
-struct wb_iter {
-	int			start_memcg_id;
-	struct radix_tree_iter	tree_iter;
-	void			**slot;
-};
-
-static inline struct bdi_writeback *__wb_iter_next(struct wb_iter *iter,
-						   struct backing_dev_info *bdi)
-{
-	struct radix_tree_iter *titer = &iter->tree_iter;
-
-	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	if (iter->start_memcg_id >= 0) {
-		iter->slot = radix_tree_iter_init(titer, iter->start_memcg_id);
-		iter->start_memcg_id = -1;
-	} else {
-		iter->slot = radix_tree_next_slot(iter->slot, titer, 0);
-	}
-
-	if (!iter->slot)
-		iter->slot = radix_tree_next_chunk(&bdi->cgwb_tree, titer, 0);
-	if (iter->slot)
-		return *iter->slot;
-	return NULL;
-}
-
-static inline struct bdi_writeback *__wb_iter_init(struct wb_iter *iter,
-						   struct backing_dev_info *bdi,
-						   int start_memcg_id)
-{
-	iter->start_memcg_id = start_memcg_id;
-
-	if (start_memcg_id)
-		return __wb_iter_next(iter, bdi);
-	else
-		return &bdi->wb;
-}
-
-/**
- * bdi_for_each_wb - walk all wb's of a bdi in ascending memcg ID order
- * @wb_cur: cursor struct bdi_writeback pointer
- * @bdi: bdi to walk wb's of
- * @iter: pointer to struct wb_iter to be used as iteration buffer
- * @start_memcg_id: memcg ID to start iteration from
- *
- * Iterate @wb_cur through the wb's (bdi_writeback's) of @bdi in ascending
- * memcg ID order starting from @start_memcg_id.  @iter is struct wb_iter
- * to be used as temp storage during iteration.  rcu_read_lock() must be
- * held throughout iteration.
- */
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_memcg_id)		\
-	for ((wb_cur) = __wb_iter_init(iter, bdi, start_memcg_id);	\
-	     (wb_cur); (wb_cur) = __wb_iter_next(iter, bdi))
-
 #else	/* CONFIG_CGROUP_WRITEBACK */
 
 static inline bool inode_cgwb_enabled(struct inode *inode)
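
[Review note] The hunk above deletes the radix-tree based bdi_for_each_wb() iterator along with its wb_iter cursor. For reviewers unfamiliar with the removed API, a caller looked roughly like the sketch below: the loop body is invented, the rcu_read_lock() rule comes from the removed kernel-doc, and starting at memcg ID 0 made the walk begin with the bdi-embedded root wb.

/*
 * Illustrative pre-removal usage of bdi_for_each_wb(); the loop body
 * is hypothetical.
 */
static void example_kick_all_wbs(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	struct wb_iter iter;

	rcu_read_lock();
	bdi_for_each_wb(wb, bdi, &iter, 0)
		wb_start_background_writeback(wb);
	rcu_read_unlock();
}

The hunk itself does not name a replacement walk, so none is shown here.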
@@ -515,14 +471,6 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg)
 {
 }
 
-struct wb_iter {
-	int		next_id;
-};
-
-#define bdi_for_each_wb(wb_cur, bdi, iter, start_blkcg_id)		\
-	for ((iter)->next_id = (start_blkcg_id);			\
-	     ({	(wb_cur) = !(iter)->next_id++ ? &(bdi)->wb : NULL; }); )
-
 static inline int inode_congested(struct inode *inode, int cong_bits)
 {
 	return wb_congested(&inode_to_bdi(inode)->wb, cong_bits);
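
[Review note] The removed !CONFIG_CGROUP_WRITEBACK stub of bdi_for_each_wb() used a GNU statement expression as the loop condition: with start_blkcg_id == 0, !(iter)->next_id++ is true exactly once, so wb_cur is set to &bdi->wb for a single pass and then to NULL, which ends the loop. A stand-alone sketch of the same one-pass idiom, with invented names:

/*
 * Illustrative reduction of the removed UP stub: without cgroup
 * writeback there is only the bdi-embedded wb, so "iterate all wbs"
 * degenerates to a single pass.
 */
static void example_single_pass(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;
	int next_id = 0;

	for (; ({ wb = !next_id++ ? &bdi->wb : NULL; }); )
		wb_start_background_writeback(wb);
}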