author		Sasha Levin <sasha.levin@oracle.com>		2013-02-27 20:06:00 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 22:10:24 -0500
commit		b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree		3d465aea12b97683f26ffa38eba8744469de9997 /arch
parent		1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for-each-entry iterators were conceived
differently from their list counterparts. The list iterator is

	list_for_each_entry(pos, head, member)

The hlist ones were greedy and wanted an extra parameter:

	hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only
do they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.

Besides the semantic patch, there was some manual work required:

 - Fix up the actual hlist iterators in linux/list.h
 - Fix up the declaration of other iterators based on the hlist ones.
 - A very small number of places were using the 'node' parameter; these
   were modified to use 'obj->member' instead.
 - Coccinelle didn't handle the hlist_for_each_entry_safe iterator
   properly, so those had to be fixed up manually.

The semantic patch, which is mostly the work of Peter Senna Tschudin,
is here:

	@@
	iterator name hlist_for_each_entry, hlist_for_each_entry_continue,
	hlist_for_each_entry_from, hlist_for_each_entry_rcu,
	hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh,
	for_each_busy_worker, ax25_uid_for_each, ax25_for_each,
	inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each,
	sk_for_each_rcu, sk_for_each_from, sk_for_each_safe,
	sk_for_each_bound, hlist_for_each_entry_safe,
	hlist_for_each_entry_continue_rcu, nr_neigh_for_each,
	nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe,
	for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
	type T;
	expression a,c,d,e;
	identifier b;
	statement S;
	@@

	-T b;
	    <+... when != b
	(
	hlist_for_each_entry(a,
	- b,
	c, d) S
	|
	hlist_for_each_entry_continue(a,
	- b,
	c) S
	|
	hlist_for_each_entry_from(a,
	- b,
	c) S
	|
	hlist_for_each_entry_rcu(a,
	- b,
	c, d) S
	|
	hlist_for_each_entry_rcu_bh(a,
	- b,
	c, d) S
	|
	hlist_for_each_entry_continue_rcu_bh(a,
	- b,
	c) S
	|
	for_each_busy_worker(a, c,
	- b,
	d) S
	|
	ax25_uid_for_each(a,
	- b,
	c) S
	|
	ax25_for_each(a,
	- b,
	c) S
	|
	inet_bind_bucket_for_each(a,
	- b,
	c) S
	|
	sctp_for_each_hentry(a,
	- b,
	c) S
	|
	sk_for_each(a,
	- b,
	c) S
	|
	sk_for_each_rcu(a,
	- b,
	c) S
	|
	sk_for_each_from
	-(a, b)
	+(a)
	S
	+ sk_for_each_from(a) S
	|
	sk_for_each_safe(a,
	- b,
	c, d) S
	|
	sk_for_each_bound(a,
	- b,
	c) S
	|
	hlist_for_each_entry_safe(a,
	- b,
	c, d, e) S
	|
	hlist_for_each_entry_continue_rcu(a,
	- b,
	c) S
	|
	nr_neigh_for_each(a,
	- b,
	c) S
	|
	nr_neigh_for_each_safe(a,
	- b,
	c, d) S
	|
	nr_node_for_each(a,
	- b,
	c) S
	|
	nr_node_for_each_safe(a,
	- b,
	c, d) S
	|
	- for_each_gfn_sp(a, c, d, b) S
	+ for_each_gfn_sp(a, c, d) S
	|
	- for_each_gfn_indirect_valid_sp(a, c, d, b) S
	+ for_each_gfn_indirect_valid_sp(a, c, d) S
	|
	for_each_host(a,
	- b,
	c) S
	|
	for_each_host_safe(a,
	- b,
	c, d) S
	|
	for_each_mesh_entry(a,
	- b,
	c, d) S
	)
	    ...+>

[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
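For reference, a minimal before/after sketch of what the conversion looks
like at a call site; struct foo, its hash member, bucket, and find_foo()
are hypothetical names for illustration, not code taken from this patch:

	#include <linux/list.h>

	/* Hypothetical object kept in an hlist hash bucket. */
	struct foo {
		int key;
		struct hlist_node hash;	/* links the object into a bucket */
	};

	static struct foo *find_foo(struct hlist_head *bucket, int key)
	{
		struct foo *pos;

		/*
		 * Before this patch the caller also had to declare a spare
		 * cursor and pass it as the second argument:
		 *
		 *	struct hlist_node *node;
		 *	hlist_for_each_entry(pos, node, bucket, hash)
		 *
		 * After it, the call mirrors list_for_each_entry():
		 */
		hlist_for_each_entry(pos, bucket, hash)
			if (pos->key == key)
				return pos;
		return NULL;
	}

A semantic patch like the one above is applied tree-wide with Coccinelle's
spatch tool; the bulk of this commit is the mechanical output of that run,
plus the manual fixups listed above.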
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/kprobes.c		 6
-rw-r--r--	arch/ia64/kernel/kprobes.c		 8
-rw-r--r--	arch/mips/kernel/kprobes.c		 6
-rw-r--r--	arch/powerpc/kernel/kprobes.c		 6
-rw-r--r--	arch/powerpc/kvm/book3s_mmu_hpte.c	18
-rw-r--r--	arch/s390/kernel/kprobes.c		 8
-rw-r--r--	arch/s390/pci/pci_msi.c			 3
-rw-r--r--	arch/sh/kernel/kprobes.c		 6
-rw-r--r--	arch/sparc/kernel/kprobes.c		 6
-rw-r--r--	arch/sparc/kernel/ldc.c			 3
-rw-r--r--	arch/x86/kernel/kprobes/core.c		 8
-rw-r--r--	arch/x86/kvm/mmu.c			26
12 files changed, 45 insertions(+), 59 deletions(-)
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 4dd41fc9e235..170e9f34003f 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -395,7 +395,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -415,7 +415,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -442,7 +442,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 7026b29e277a..f8280a766a78 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =
 		((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	regs->cr_iip = orig_ret_address;
 
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 158467da9bc1..ce3f0807ad1e 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -598,7 +598,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
 
@@ -618,7 +618,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -645,7 +645,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index e88c64331819..11f5b03a0b06 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -310,7 +310,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -357,7 +357,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 2c86b0d63714..da8b13c4b776 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hpte_cache *pte;
-	struct hlist_node *node;
 	int i;
 
 	rcu_read_lock();
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
 	}
 
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte)
+	hlist_for_each_entry_rcu(pte, list, list_pte)
 		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
 			invalidate_pte(vcpu, pte);
 
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+	hlist_for_each_entry_rcu(pte, list, list_pte_long)
 		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
 			invalidate_pte(vcpu, pte);
 
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
 
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
+	hlist_for_each_entry_rcu(pte, list, list_vpte)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
 
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	struct hlist_head *list;
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
 
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 	rcu_read_lock();
 
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
 
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
-	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
 
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
 			    (pte->pte.raddr < pa_end))
 				invalidate_pte(vcpu, pte);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d1c7214e157c..3388b2b2a07d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -354,7 +354,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address;
 	unsigned long trampoline_address;
 	kprobe_opcode_t *correct_ret_addr;
@@ -379,7 +379,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	orig_ret_address = 0;
 	correct_ret_addr = NULL;
 	trampoline_address = (unsigned long) &kretprobe_trampoline;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -398,7 +398,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -427,7 +427,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
index 90fd3482b9e2..0297931335e1 100644
--- a/arch/s390/pci/pci_msi.c
+++ b/arch/s390/pci/pci_msi.c
@@ -25,10 +25,9 @@ static DEFINE_SPINLOCK(msi_map_lock);
 
 struct msi_desc *__irq_get_msi_desc(unsigned int irq)
 {
-	struct hlist_node *entry;
 	struct msi_map *map;
 
-	hlist_for_each_entry_rcu(map, entry,
+	hlist_for_each_entry_rcu(map,
 			&msi_hash[msi_hashfn(irq)], msi_chain)
 		if (map->irq == irq)
 			return map->msi;
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 1208b09e95c3..42b46e61a2d5 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -310,7 +310,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 
@@ -330,7 +330,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -360,7 +360,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index a39d1ba5a119..e72212148d2a 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -511,7 +511,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
@@ -531,7 +531,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	 * real return address, and all the rest will point to
 	 * kretprobe_trampoline
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -559,7 +559,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	kretprobe_hash_unlock(current, &flags);
 	preempt_enable_no_resched();
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 9fcc6b4e93b3..54df554b82d9 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -953,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list);
 static int __ldc_channel_exists(unsigned long id)
 {
 	struct ldc_channel *lp;
-	struct hlist_node *n;
 
-	hlist_for_each_entry(lp, n, &ldc_channel_list, list) {
+	hlist_for_each_entry(lp, &ldc_channel_list, list) {
 		if (lp->id == id)
 			return 1;
 	}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e124554598ee..3f06e6149981 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * will be the real return address, and all the rest will
 	 * point to kretprobe_trampoline.
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ed3edbe06bd..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_sp(kvm, sp, gfn)					\
+	hlist_for_each_entry(sp,					\
 	 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)			\
+	hlist_for_each_entry(sp,					\
 	 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
 			(sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,