Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                | 12
-rw-r--r--  kernel/events/core.c           |  6
-rw-r--r--  kernel/kprobes.c               | 35
-rw-r--r--  kernel/pid.c                   |  3
-rw-r--r--  kernel/sched/core.c            |  6
-rw-r--r--  kernel/smpboot.c               |  2
-rw-r--r--  kernel/trace/ftrace.c          | 24
-rw-r--r--  kernel/trace/trace_output.c    |  3
-rw-r--r--  kernel/tracepoint.c            |  6
-rw-r--r--  kernel/user-return-notifier.c  |  4
-rw-r--r--  kernel/user.c                  |  3
-rw-r--r--  kernel/workqueue.c             | 13
12 files changed, 46 insertions(+), 71 deletions(-)
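
Every hunk below applies the same mechanical conversion: the hlist and hashtable iterators no longer take a separate struct hlist_node * cursor, so each function drops the now-unused local variable and each hlist_for_each_entry*()/hash_for_each*() call loses one argument. As a minimal sketch of the old versus new calling convention (struct foo, foo_head and foo_lookup() are made-up names for illustration, not code from any file in this diff):

/*
 * Illustrative only: a tiny user of the hlist iterators, showing the
 * signature change that every hunk below applies.
 */
#include <linux/list.h>

struct foo {
	int key;
	struct hlist_node link;		/* linkage embedded in the object */
};

static HLIST_HEAD(foo_head);		/* entries added with hlist_add_head(&f->link, &foo_head) */

static struct foo *foo_lookup(int key)
{
	struct foo *f;

	/*
	 * Before this change the caller also had to declare and pass a
	 * struct hlist_node *pos cursor:
	 *
	 *	hlist_for_each_entry(f, pos, &foo_head, link)
	 *
	 * After it, the iterator works directly on the entry type:
	 */
	hlist_for_each_entry(f, &foo_head, link)
		if (f->key == key)
			return f;
	return NULL;
}
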
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 40e0df6c2a2f..a32f9432666c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
 {
 	int i;
 	struct cgroupfs_root *root = cgrp->root;
-	struct hlist_node *node;
 	struct css_set *cg;
 	unsigned long key;

@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
 	}

 	key = css_set_hash(template);
-	hash_for_each_possible(css_set_table, cg, node, hlist, key) {
+	hash_for_each_possible(css_set_table, cg, hlist, key) {
 		if (!compare_css_sets(cg, oldcg, cgrp, template))
 			continue;

@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 	struct cgroupfs_root *existing_root;
 	const struct cred *cred;
 	int i;
-	struct hlist_node *node;
 	struct css_set *cg;

 	BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		/* Link the top cgroup in this hierarchy into all
 		 * the css_set objects */
 		write_lock(&css_set_lock);
-		hash_for_each(css_set_table, i, node, cg, hlist)
+		hash_for_each(css_set_table, i, cg, hlist)
 			link_css_set(&tmp_cg_links, cg, root_cgrp);
 		write_unlock(&css_set_lock);

@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 {
 	struct cgroup_subsys_state *css;
 	int i, ret;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct css_set *cg;
 	unsigned long key;

@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	 * this is all done under the css_set_lock.
 	 */
 	write_lock(&css_set_lock);
-	hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
+	hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
 		/* skip entries that we already rehashed */
 		if (cg->subsys[ss->subsys_id])
 			continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		cg->subsys[ss->subsys_id] = css;
 		/* recompute hash and restore entry */
 		key = css_set_hash(cg->subsys);
-		hash_add(css_set_table, node, key);
+		hash_add(css_set_table, &cg->hlist, key);
 	}
 	write_unlock(&css_set_lock);

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5a92cf6beff0..b0cd86501c30 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 	struct perf_event *event;
-	struct hlist_node *node;
 	struct hlist_head *head;

 	rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	if (!head)
 		goto end;

-	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_swevent_match(event, type, event_id, data, regs))
 			perf_swevent_event(event, nr, data, regs);
 	}
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 {
 	struct perf_sample_data data;
 	struct perf_event *event;
-	struct hlist_node *node;

 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 	perf_sample_data_init(&data, addr, 0);
 	data.raw = &raw;

-	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_tp_event_match(event, &data, regs))
 			perf_swevent_event(event, count, &data, regs);
 	}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 550294d58a02..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;

 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		if (p->addr == addr)
 			return p;
 	}
@@ -799,7 +798,6 @@ out:
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
 	kprobes_allow_optimization = true;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
 	kprobes_allow_optimization = false;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!kprobe_disabled(p))
 				unoptimize_kprobe(p, false);
 		}
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long hash, flags = 0;

 	if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;

-	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
+	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 {
 	unsigned long flags, hash;
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct hlist_head *head;

 	/* No race here */
 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
 		kretprobe_table_lock(hash, &flags);
 		head = &kretprobe_inst_table[hash];
-		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
+		hlist_for_each_entry_safe(ri, next, head, hlist) {
 			if (ri->rp == rp)
 				ri->rp = NULL;
 		}
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 {
 	struct module *mod = data;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (within_module_init((unsigned long)p->addr, mod) ||
 			    (checkcore &&
 			     within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p, *kp;
 	const char *sym = NULL;
 	unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)

 	head = &kprobe_table[i];
 	preempt_disable();
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
 		if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
 static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				arm_kprobe(p);
 	}
@@ -2265,7 +2259,6 @@ already_enabled:
 static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;

@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)

 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
 				disarm_kprobe(p, false);
 		}
diff --git a/kernel/pid.c b/kernel/pid.c
index f2c6a6825098..047dc6264638 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)

 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-	struct hlist_node *elem;
 	struct upid *pnr;

-	hlist_for_each_entry_rcu(pnr, elem,
+	hlist_for_each_entry_rcu(pnr,
 			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
 		if (pnr->nr == nr && pnr->ns == ns)
 			return container_of(pnr, struct pid,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b5243176aba..12af4270c9c1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
 	struct preempt_notifier *notifier;
-	struct hlist_node *node;

-	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }

@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 				 struct task_struct *next)
 {
 	struct preempt_notifier *notifier;
-	struct hlist_node *node;

-	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
 		notifier->ops->sched_out(notifier, next);
 }

diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d4abac261779..b9bde5727829 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
 			continue;
 		}

-		BUG_ON(td->cpu != smp_processor_id());
+		//BUG_ON(td->cpu != smp_processor_id());

 		/* Check for state change setup */
 		switch (td->status) {
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 98ca94a41819..ab25b88aae56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
 	struct ftrace_profile *rec;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 	unsigned long key;

 	key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;

-	hlist_for_each_entry_rcu(rec, n, hhd, node) {
+	hlist_for_each_entry_rcu(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 	unsigned long key;
 	struct ftrace_func_entry *entry;
 	struct hlist_head *hhd;
-	struct hlist_node *n;

 	if (ftrace_hash_empty(hash))
 		return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)

 	hhd = &hash->buckets[key];

-	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+	hlist_for_each_entry_rcu(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
 static void ftrace_hash_clear(struct ftrace_hash *hash)
 {
 	struct hlist_head *hhd;
-	struct hlist_node *tp, *tn;
+	struct hlist_node *tn;
 	struct ftrace_func_entry *entry;
 	int size = 1 << hash->size_bits;
 	int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)

 	for (i = 0; i < size; i++) {
 		hhd = &hash->buckets[i];
-		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
 			free_hash_entry(hash, entry);
 	}
 	FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
 	struct ftrace_func_entry *entry;
 	struct ftrace_hash *new_hash;
-	struct hlist_node *tp;
 	int size;
 	int ret;
 	int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)

 	size = 1 << hash->size_bits;
 	for (i = 0; i < size; i++) {
-		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
 			ret = add_hash_entry(new_hash, entry->ip);
 			if (ret < 0)
 				goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
-	struct hlist_node *tp, *tn;
+	struct hlist_node *tn;
 	struct hlist_head *hhd;
 	struct ftrace_hash *old_hash;
 	struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
 		hhd = &src->buckets[i];
-		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
 			if (bits > 0)
 				key = hash_long(entry->ip, bits);
 			else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 	unsigned long key;

 	key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, n, hhd, node) {
+	hlist_for_each_entry_rcu(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				   void *data, int flags)
 {
 	struct ftrace_func_probe *entry;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int type = MATCH_FULL;
 	int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];

-		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+		hlist_for_each_entry_safe(entry, tmp, hhd, node) {

 			/* break up if statements for readability */
 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 194d79602dc7..697e88d13907 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
 struct trace_event *ftrace_find_event(int type)
 {
 	struct trace_event *event;
-	struct hlist_node *n;
 	unsigned key;

 	key = type & (EVENT_HASHSIZE - 1);

-	hlist_for_each_entry(event, n, &event_hash[key], node) {
+	hlist_for_each_entry(event, &event_hash[key], node) {
 		if (event->type == type)
 			return event;
 	}
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22dabfa..0c05a4592047 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
 static struct tracepoint_entry *get_tracepoint(const char *name)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct tracepoint_entry *e;
 	u32 hash = jhash(name, strlen(name), 0);

 	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
+	hlist_for_each_entry(e, head, hlist) {
 		if (!strcmp(name, e->name))
 			return e;
 	}
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
 static struct tracepoint_entry *add_tracepoint(const char *name)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct tracepoint_entry *e;
 	size_t name_len = strlen(name) + 1;
 	u32 hash = jhash(name, name_len-1, 0);

 	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
+	hlist_for_each_entry(e, head, hlist) {
 		if (!strcmp(name, e->name)) {
 			printk(KERN_NOTICE
 				"tracepoint %s busy\n", name);
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 1744bb80f1fb..394f70b17162 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
 void fire_user_return_notifiers(void)
 {
 	struct user_return_notifier *urn;
-	struct hlist_node *tmp1, *tmp2;
+	struct hlist_node *tmp2;
 	struct hlist_head *head;

 	head = &get_cpu_var(return_notifier_list);
-	hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
+	hlist_for_each_entry_safe(urn, tmp2, head, link)
 		urn->on_user_return(urn);
 	put_cpu_var(return_notifier_list);
 }
diff --git a/kernel/user.c b/kernel/user.c
index 57ebfd42023c..e81978e8c03b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
 static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
-	struct hlist_node *h;

-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+	hlist_for_each_entry(user, hashent, uidhash_node) {
 		if (uid_eq(user->uid, uid)) {
 			atomic_inc(&user->__count);
 			return user;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f4feacad3812..81f2457811eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 	for ((pool) = &std_worker_pools(cpu)[0];			\
 	     (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)

-#define for_each_busy_worker(worker, i, pos, pool)			\
-	hash_for_each(pool->busy_hash, i, pos, worker, hentry)
+#define for_each_busy_worker(worker, i, pool)				\
+	hash_for_each(pool->busy_hash, i, worker, hentry)

 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 				unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
 						 struct work_struct *work)
 {
 	struct worker *worker;
-	struct hlist_node *tmp;

-	hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+	hash_for_each_possible(pool->busy_hash, worker, hentry,
 			       (unsigned long)work)
 		if (worker->current_work == work &&
 		    worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 static void rebind_workers(struct worker_pool *pool)
 {
 	struct worker *worker, *n;
-	struct hlist_node *pos;
 	int i;

 	lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
 	}

 	/* rebind busy workers */
-	for_each_busy_worker(worker, i, pos, pool) {
+	for_each_busy_worker(worker, i, pool) {
 		struct work_struct *rebind_work = &worker->rebind_work;
 		struct workqueue_struct *wq;

@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
 	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
-	struct hlist_node *pos;
 	int i;

 	for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
 		list_for_each_entry(worker, &pool->idle_list, entry)
 			worker->flags |= WORKER_UNBOUND;

-		for_each_busy_worker(worker, i, pos, pool)
+		for_each_busy_worker(worker, i, pool)
 			worker->flags |= WORKER_UNBOUND;

 		pool->flags |= POOL_DISASSOCIATED;
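
The workqueue.c hunks also show the knock-on effect on a local convenience macro: for_each_busy_worker() is a thin wrapper around hash_for_each(), so when the underlying iterator loses its cursor parameter the wrapper and all of its call sites shed the same argument. A sketch of the same wrapper pattern against the new API (struct my_pool, struct my_worker and for_each_my_busy_worker() are illustrative stand-ins, not the real workqueue internals):

#include <linux/hashtable.h>

#define BUSY_HASH_BITS	6

struct my_pool {
	DECLARE_HASHTABLE(busy_hash, BUSY_HASH_BITS);	/* workers keyed by current work item */
};

struct my_worker {
	struct hlist_node hentry;	/* linkage into busy_hash */
	unsigned int flags;
};

/* Wrapper macro: only the entry, the bucket index and the pool; no cursor. */
#define for_each_my_busy_worker(worker, i, pool) \
	hash_for_each((pool)->busy_hash, i, worker, hentry)

static void mark_all_busy(struct my_pool *pool, unsigned int flag)
{
	struct my_worker *worker;
	int i;

	for_each_my_busy_worker(worker, i, pool)
		worker->flags |= flag;	/* e.g. an "unbound" flag bit */
}
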