path: root/mm
author     Sasha Levin <sasha.levin@oracle.com>    2013-02-27 20:06:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-02-27 22:10:24 -0500
commit     b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree       3d465aea12b97683f26ffa38eba8744469de9997 /mm
parent     1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but while the list for-each-entry iterator was conceived as

        list_for_each_entry(pos, head, member)

the hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only do
they not really need it, it also prevents the iterator from looking exactly
like the list iterator, which is unfortunate.

Besides the semantic patch, some manual work was required:

 - Fix up the actual hlist iterators in linux/list.h.
 - Fix up the declarations of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; these
   were modified to use 'obj->member' instead.
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.

The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:

@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
hlist_for_each_entry_from, hlist_for_each_entry_rcu,
hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound,
hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu,
nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each,
nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp,
for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@

-T b;
    <+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
    ...+>

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
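To make the new shape concrete, here is a minimal, self-contained userspace
sketch of the post-change iterator. The hlist types and the macro body below
are simplified stand-ins for the real linux/list.h definitions (the in-tree
macro is built on hlist_entry_safe); only the call signature is meant to
match the kernel's:

    /* Simplified stand-ins; not the real linux/list.h definitions. */
    #include <stddef.h>
    #include <stdio.h>

    struct hlist_node { struct hlist_node *next; };
    struct hlist_head { struct hlist_node *first; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* New form: the entry pointer itself is the loop cursor. */
    #define hlist_for_each_entry(pos, head, member)                          \
            for (pos = (head)->first ? container_of((head)->first,           \
                            typeof(*(pos)), member) : NULL;                  \
                 pos;                                                        \
                 pos = (pos)->member.next ? container_of((pos)->member.next, \
                            typeof(*(pos)), member) : NULL)

    struct item {
            int value;
            struct hlist_node node;
    };

    int main(void)
    {
            struct item a = { .value = 1 }, b = { .value = 2 };
            struct hlist_head head = { .first = &a.node };
            struct item *pos;

            a.node.next = &b.node;  /* b.node.next is zero-initialized */

            /*
             * The old form needed a spare struct hlist_node *n cursor:
             *     hlist_for_each_entry(pos, n, &head, node)
             * The new form reads exactly like list_for_each_entry:
             */
            hlist_for_each_entry(pos, &head, node)
                    printf("%d\n", pos->value);
            return 0;
    }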
Diffstat (limited to 'mm')
-rw-r--r--    mm/huge_memory.c      3
-rw-r--r--    mm/kmemleak.c         9
-rw-r--r--    mm/ksm.c             15
-rw-r--r--    mm/mmu_notifier.c    18
4 files changed, 16 insertions(+), 29 deletions(-)
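The kmemleak hunks below convert hlist_for_each_entry_safe, the iterator the
commit message says Coccinelle couldn't handle automatically. As a rough
sketch under the same simplified definitions as the example above, the
post-change safe variant might look like this; its spare cursor n survives
because the next pointer has to be saved before the loop body can free pos,
which is also why kmemleak's hlist_del(elem) becomes hlist_del(&area->node):

    /* Simplified stand-in for the post-change safe iterator. */
    #define hlist_for_each_entry_safe(pos, n, head, member)                  \
            for (pos = (head)->first ? container_of((head)->first,           \
                            typeof(*(pos)), member) : NULL;                  \
                 pos && ((n) = (pos)->member.next, 1);                       \
                 pos = (n) ? container_of(n, typeof(*(pos)), member) : NULL)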
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bfa142e67b1c..e2f7f5aaaafb 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1906,9 +1906,8 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
         struct mm_slot *mm_slot;
-        struct hlist_node *node;
 
-        hash_for_each_possible(mm_slots_hash, mm_slot, node, hash, (unsigned long)mm)
+        hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
                 if (mm == mm_slot->mm)
                         return mm_slot;
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 83dd5fbf5e60..c8d7f3110fd0 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -436,7 +436,7 @@ static int get_object(struct kmemleak_object *object)
  */
 static void free_object_rcu(struct rcu_head *rcu)
 {
-        struct hlist_node *elem, *tmp;
+        struct hlist_node *tmp;
         struct kmemleak_scan_area *area;
         struct kmemleak_object *object =
                 container_of(rcu, struct kmemleak_object, rcu);
@@ -445,8 +445,8 @@ static void free_object_rcu(struct rcu_head *rcu)
          * Once use_count is 0 (guaranteed by put_object), there is no other
          * code accessing this object, hence no need for locking.
          */
-        hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
-                hlist_del(elem);
+        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
+                hlist_del(&area->node);
                 kmem_cache_free(scan_area_cache, area);
         }
         kmem_cache_free(object_cache, object);
@@ -1177,7 +1177,6 @@ static void scan_block(void *_start, void *_end,
 static void scan_object(struct kmemleak_object *object)
 {
         struct kmemleak_scan_area *area;
-        struct hlist_node *elem;
         unsigned long flags;
 
         /*
@@ -1205,7 +1204,7 @@ static void scan_object(struct kmemleak_object *object)
                         spin_lock_irqsave(&object->lock, flags);
                 }
         } else
-                hlist_for_each_entry(area, elem, &object->area_list, node)
+                hlist_for_each_entry(area, &object->area_list, node)
                         scan_block((void *)area->start,
                                    (void *)(area->start + area->size),
                                    object, 0);
diff --git a/mm/ksm.c b/mm/ksm.c
index ab2ba9ad3c59..85bfd4c16346 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -320,10 +320,9 @@ static inline void free_mm_slot(struct mm_slot *mm_slot)
 
 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
 {
-        struct hlist_node *node;
         struct mm_slot *slot;
 
-        hash_for_each_possible(mm_slots_hash, slot, node, link, (unsigned long)mm)
+        hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
                 if (slot->mm == mm)
                         return slot;
 
@@ -496,9 +495,8 @@ static inline int get_kpfn_nid(unsigned long kpfn)
 static void remove_node_from_stable_tree(struct stable_node *stable_node)
 {
         struct rmap_item *rmap_item;
-        struct hlist_node *hlist;
 
-        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                 if (rmap_item->hlist.next)
                         ksm_pages_sharing--;
                 else
@@ -1898,7 +1896,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 {
         struct stable_node *stable_node;
         struct rmap_item *rmap_item;
-        struct hlist_node *hlist;
         unsigned int mapcount = page_mapcount(page);
         int referenced = 0;
         int search_new_forks = 0;
@@ -1910,7 +1907,7 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
         if (!stable_node)
                 return 0;
 again:
-        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                 struct anon_vma *anon_vma = rmap_item->anon_vma;
                 struct anon_vma_chain *vmac;
                 struct vm_area_struct *vma;
@@ -1952,7 +1949,6 @@ out:
 int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 {
         struct stable_node *stable_node;
-        struct hlist_node *hlist;
         struct rmap_item *rmap_item;
         int ret = SWAP_AGAIN;
         int search_new_forks = 0;
@@ -1964,7 +1960,7 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
         if (!stable_node)
                 return SWAP_FAIL;
 again:
-        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                 struct anon_vma *anon_vma = rmap_item->anon_vma;
                 struct anon_vma_chain *vmac;
                 struct vm_area_struct *vma;
@@ -2005,7 +2001,6 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
                   struct vm_area_struct *, unsigned long, void *), void *arg)
 {
         struct stable_node *stable_node;
-        struct hlist_node *hlist;
         struct rmap_item *rmap_item;
         int ret = SWAP_AGAIN;
         int search_new_forks = 0;
@@ -2017,7 +2012,7 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
         if (!stable_node)
                 return ret;
 again:
-        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
+        hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
                 struct anon_vma *anon_vma = rmap_item->anon_vma;
                 struct anon_vma_chain *vmac;
                 struct vm_area_struct *vma;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2175fb0d501c..be04122fb277 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -95,11 +95,10 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
                                      unsigned long address)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
         int young = 0, id;
 
         id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->clear_flush_young)
                         young |= mn->ops->clear_flush_young(mn, mm, address);
         }
@@ -112,11 +111,10 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
                               unsigned long address)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
         int young = 0, id;
 
         id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->test_young) {
                         young = mn->ops->test_young(mn, mm, address);
                         if (young)
@@ -132,11 +130,10 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                                pte_t pte)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
         int id;
 
         id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->change_pte)
                         mn->ops->change_pte(mn, mm, address, pte);
         }
@@ -147,11 +144,10 @@ void __mmu_notifier_invalidate_page(struct mm_struct *mm,
                                     unsigned long address)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
         int id;
 
         id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->invalidate_page)
                         mn->ops->invalidate_page(mn, mm, address);
         }
@@ -162,11 +158,10 @@ void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
                                            unsigned long start, unsigned long end)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
         int id;
 
         id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->invalidate_range_start)
                         mn->ops->invalidate_range_start(mn, mm, start, end);
         }
@@ -178,11 +173,10 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
                                          unsigned long start, unsigned long end)
 {
         struct mmu_notifier *mn;
-        struct hlist_node *n;
         int id;
 
         id = srcu_read_lock(&srcu);
-        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
+        hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
                 if (mn->ops->invalidate_range_end)
                         mn->ops->invalidate_range_end(mn, mm, start, end);
         }