diff options
author | Christoph Hellwig <hch@lst.de> | 2019-08-28 10:19:54 -0400 |
---|---|---|
committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-09-07 03:28:04 -0400 |
commit | 7b86ac3371b70c3fd8fd95501719beb1faab719f (patch) | |
tree | b7f61e4615d249563f09567a22ee399634c898dd /mm/memcontrol.c | |
parent | a520110e4a15ceb385304d9cab22bb51438f6080 (diff) |
pagewalk: separate function pointers from iterator data
The mm_walk structure currently mixes data and code. Split out the
operations vectors into a new mm_walk_ops structure, and while we are
changing the API also declare the mm_walk structure inside the
walk_page_range and walk_page_vma functions.
Based on a patch from Linus Torvalds.
Link: https://lore.kernel.org/r/20190828141955.22210-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 23 |
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4c3af5d71ab1..9b2516a76be2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5283,17 +5283,16 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, | |||
5283 | return 0; | 5283 | return 0; |
5284 | } | 5284 | } |
5285 | 5285 | ||
5286 | static const struct mm_walk_ops precharge_walk_ops = { | ||
5287 | .pmd_entry = mem_cgroup_count_precharge_pte_range, | ||
5288 | }; | ||
5289 | |||
5286 | static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) | 5290 | static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) |
5287 | { | 5291 | { |
5288 | unsigned long precharge; | 5292 | unsigned long precharge; |
5289 | 5293 | ||
5290 | struct mm_walk mem_cgroup_count_precharge_walk = { | ||
5291 | .pmd_entry = mem_cgroup_count_precharge_pte_range, | ||
5292 | .mm = mm, | ||
5293 | }; | ||
5294 | down_read(&mm->mmap_sem); | 5294 | down_read(&mm->mmap_sem); |
5295 | walk_page_range(0, mm->highest_vm_end, | 5295 | walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL); |
5296 | &mem_cgroup_count_precharge_walk); | ||
5297 | up_read(&mm->mmap_sem); | 5296 | up_read(&mm->mmap_sem); |
5298 | 5297 | ||
5299 | precharge = mc.precharge; | 5298 | precharge = mc.precharge; |
@@ -5562,13 +5561,12 @@ put: /* get_mctgt_type() gets the page */ | |||
5562 | return ret; | 5561 | return ret; |
5563 | } | 5562 | } |
5564 | 5563 | ||
5564 | static const struct mm_walk_ops charge_walk_ops = { | ||
5565 | .pmd_entry = mem_cgroup_move_charge_pte_range, | ||
5566 | }; | ||
5567 | |||
5565 | static void mem_cgroup_move_charge(void) | 5568 | static void mem_cgroup_move_charge(void) |
5566 | { | 5569 | { |
5567 | struct mm_walk mem_cgroup_move_charge_walk = { | ||
5568 | .pmd_entry = mem_cgroup_move_charge_pte_range, | ||
5569 | .mm = mc.mm, | ||
5570 | }; | ||
5571 | |||
5572 | lru_add_drain_all(); | 5570 | lru_add_drain_all(); |
5573 | /* | 5571 | /* |
5574 | * Signal lock_page_memcg() to take the memcg's move_lock | 5572 | * Signal lock_page_memcg() to take the memcg's move_lock |
@@ -5594,7 +5592,8 @@ retry: | |||
5594 | * When we have consumed all precharges and failed in doing | 5592 | * When we have consumed all precharges and failed in doing |
5595 | * additional charge, the page walk just aborts. | 5593 | * additional charge, the page walk just aborts. |
5596 | */ | 5594 | */ |
5597 | walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk); | 5595 | walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops, |
5596 | NULL); | ||
5598 | 5597 | ||
5599 | up_read(&mc.mm->mmap_sem); | 5598 | up_read(&mc.mm->mmap_sem); |
5600 | atomic_dec(&mc.from->moving_account); | 5599 | atomic_dec(&mc.from->moving_account); |