author     Linus Torvalds <torvalds@linux-foundation.org>   2013-02-27 23:58:09 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-02-27 23:58:09 -0500
commit     2a7d2b96d5cba7568139d9ab157a0e97ab32440f (patch)
tree       ad029d8cc7b7068b7250e914360ec6315fdfa114 /kernel
parent     e3c4877de8b9d93bd47b6ee88eb594b1c1e10da5 (diff)
parent     b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (diff)
Merge branch 'akpm' (final batch from Andrew)
Merge third patch-bomb from Andrew Morton:
 "This wraps me up for -rc1.
  - Lots of misc stuff and things which were deferred/missed from
    patchbombings 1 & 2.
  - ocfs2 things
  - lib/scatterlist
  - hfsplus
  - fatfs
  - documentation
  - signals
  - procfs
  - lockdep
  - coredump
  - seqfile core
  - kexec
  - Tejun's large IDR tree reworkings
  - ipmi
  - partitions
  - nbd
  - random() things
  - kfifo
  - tools/testing/selftests updates
  - Sasha's large and pointless hlist cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (163 commits)
  hlist: drop the node parameter from iterators
  kcmp: make it depend on CHECKPOINT_RESTORE
  selftests: add a simple doc
  tools/testing/selftests/Makefile: rearrange targets
  selftests/efivarfs: add create-read test
  selftests/efivarfs: add empty file creation test
  selftests: add tests for efivarfs
  kfifo: fix kfifo_alloc() and kfifo_init()
  kfifo: move kfifo.c from kernel/ to lib/
  arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS
  w1: add support for DS2413 Dual Channel Addressable Switch
  memstick: move the dereference below the NULL test
  drivers/pps/clients/pps-gpio.c: use devm_kzalloc
  Documentation/DMA-API-HOWTO.txt: fix typo
  include/linux/eventfd.h: fix incorrect filename is a comment
  mtd: mtd_stresstest: use prandom_bytes()
  mtd: mtd_subpagetest: convert to use prandom library
  mtd: mtd_speedtest: use prandom_bytes
  mtd: mtd_pagetest: convert to use prandom library
  mtd: mtd_oobtest: convert to use prandom library
  ...
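The hlist change is the one that fans out across the whole tree in the hunks below: the iterators no longer take a separate struct hlist_node cursor, because the position can be derived from the entry pointer itself. A minimal before/after sketch of a lookup helper, using a hypothetical struct item rather than any code from this merge:

    #include <linux/list.h>

    /* Hypothetical entry type, for illustration only. */
    struct item {
    	int key;
    	struct hlist_node hlist;
    };

    /* New-style (post-b67bfe0d42ca) lookup: the old form took an extra
     * 'struct hlist_node *node' cursor argument between 'pos' and 'head'. */
    static struct item *find_item(struct hlist_head *head, int key)
    {
    	struct item *pos;

    	hlist_for_each_entry(pos, head, hlist)
    		if (pos->key == key)
    			return pos;
    	return NULL;
    }

The same transformation applies mechanically to the _rcu variants; the _safe forms keep one lookahead cursor, as the kprobes hunks below show.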
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                   6
-rw-r--r--  kernel/cgroup.c                  43
-rw-r--r--  kernel/events/core.c             16
-rw-r--r--  kernel/exit.c                     6
-rw-r--r--  kernel/fork.c                     7
-rw-r--r--  kernel/kexec.c                   44
-rw-r--r--  kernel/kfifo.c                  609
-rw-r--r--  kernel/kprobes.c                 35
-rw-r--r--  kernel/lockdep.c                 17
-rw-r--r--  kernel/pid.c                      3
-rw-r--r--  kernel/posix-timers.c            18
-rw-r--r--  kernel/sched/core.c               6
-rw-r--r--  kernel/signal.c                  14
-rw-r--r--  kernel/smpboot.c                  2
-rw-r--r--  kernel/sys.c                     10
-rw-r--r--  kernel/sysctl.c                   2
-rw-r--r--  kernel/sysctl_binary.c            3
-rw-r--r--  kernel/trace/ftrace.c            24
-rw-r--r--  kernel/trace/trace_output.c       3
-rw-r--r--  kernel/tracepoint.c               6
-rw-r--r--  kernel/user-return-notifier.c     4
-rw-r--r--  kernel/user.c                     3
-rw-r--r--  kernel/utsname.c                  2
-rw-r--r--  kernel/utsname_sysctl.c           3
-rw-r--r--  kernel/workqueue.c               13
25 files changed, 120 insertions(+), 779 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 05949c0510c5..bbde5f1a4486 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -7,7 +7,7 @@ obj-y     = fork.o exec_domain.o panic.o printk.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
-	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+	    kthread.o wait.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
 	    notifier.o ksysfs.o cred.o \
 	    async.o range.o groups.o lglock.o smpboot.o
@@ -25,9 +25,7 @@ endif
 obj-y += sched/
 obj-y += power/
 
-ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
-obj-$(CONFIG_X86) += kcmp.o
-endif
+obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index fb2fb11fbb25..a32f9432666c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -554,7 +554,6 @@ static struct css_set *find_existing_css_set(
 {
 	int i;
 	struct cgroupfs_root *root = cgrp->root;
-	struct hlist_node *node;
 	struct css_set *cg;
 	unsigned long key;
 
@@ -577,7 +576,7 @@ static struct css_set *find_existing_css_set(
 	}
 
 	key = css_set_hash(template);
-	hash_for_each_possible(css_set_table, cg, node, hlist, key) {
+	hash_for_each_possible(css_set_table, cg, hlist, key) {
 		if (!compare_css_sets(cg, oldcg, cgrp, template))
 			continue;
 
@@ -1611,7 +1610,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 	struct cgroupfs_root *existing_root;
 	const struct cred *cred;
 	int i;
-	struct hlist_node *node;
 	struct css_set *cg;
 
 	BUG_ON(sb->s_root != NULL);
@@ -1666,7 +1664,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
 		/* Link the top cgroup in this hierarchy into all
 		 * the css_set objects */
 		write_lock(&css_set_lock);
-		hash_for_each(css_set_table, i, node, cg, hlist)
+		hash_for_each(css_set_table, i, cg, hlist)
 			link_css_set(&tmp_cg_links, cg, root_cgrp);
 		write_unlock(&css_set_lock);
 
@@ -4493,7 +4491,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 {
 	struct cgroup_subsys_state *css;
 	int i, ret;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	struct css_set *cg;
 	unsigned long key;
 
@@ -4561,7 +4559,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 	 * this is all done under the css_set_lock.
 	 */
 	write_lock(&css_set_lock);
-	hash_for_each_safe(css_set_table, i, node, tmp, cg, hlist) {
+	hash_for_each_safe(css_set_table, i, tmp, cg, hlist) {
 		/* skip entries that we already rehashed */
 		if (cg->subsys[ss->subsys_id])
 			continue;
@@ -4571,7 +4569,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
 		cg->subsys[ss->subsys_id] = css;
 		/* recompute hash and restore entry */
 		key = css_set_hash(cg->subsys);
-		hash_add(css_set_table, node, key);
+		hash_add(css_set_table, &cg->hlist, key);
 	}
 	write_unlock(&css_set_lock);
 
@@ -4618,10 +4616,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
 	offline_css(ss, dummytop);
 	ss->active = 0;
 
-	if (ss->use_id) {
-		idr_remove_all(&ss->idr);
+	if (ss->use_id)
 		idr_destroy(&ss->idr);
-	}
 
 	/* deassign the subsys_id */
 	subsys[ss->subsys_id] = NULL;
@@ -5322,7 +5318,7 @@ EXPORT_SYMBOL_GPL(free_css_id);
 static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 {
 	struct css_id *newid;
-	int myid, error, size;
+	int ret, size;
 
 	BUG_ON(!ss->use_id);
 
@@ -5330,35 +5326,24 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 	newid = kzalloc(size, GFP_KERNEL);
 	if (!newid)
 		return ERR_PTR(-ENOMEM);
-	/* get id */
-	if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
-		error = -ENOMEM;
-		goto err_out;
-	}
+
+	idr_preload(GFP_KERNEL);
 	spin_lock(&ss->id_lock);
 	/* Don't use 0. allocates an ID of 1-65535 */
-	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
+	ret = idr_alloc(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
 	spin_unlock(&ss->id_lock);
+	idr_preload_end();
 
 	/* Returns error when there are no free spaces for new ID.*/
-	if (error) {
-		error = -ENOSPC;
+	if (ret < 0)
 		goto err_out;
-	}
-	if (myid > CSS_ID_MAX)
-		goto remove_idr;
 
-	newid->id = myid;
+	newid->id = ret;
 	newid->depth = depth;
 	return newid;
-remove_idr:
-	error = -ENOSPC;
-	spin_lock(&ss->id_lock);
-	idr_remove(&ss->idr, myid);
-	spin_unlock(&ss->id_lock);
 err_out:
 	kfree(newid);
-	return ERR_PTR(error);
+	return ERR_PTR(ret);
 
 }
 
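The get_new_cssid() hunk above is one instance of the idr_pre_get()/idr_get_new_above() to idr_preload()/idr_alloc() conversion from the IDR rework in this batch; the same shape recurs in events/core.c and posix-timers.c below. A minimal sketch of the new pattern, with hypothetical names (my_idr, my_lock, my_alloc_id, none of them from this diff):

    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_lock);

    /* Allocate an ID in [1, 65535] pointing at ptr; returns the ID or -errno. */
    static int my_alloc_id(void *ptr)
    {
    	int id;

    	idr_preload(GFP_KERNEL);	/* may sleep: preallocate outside the lock */
    	spin_lock(&my_lock);
    	id = idr_alloc(&my_idr, ptr, 1, 65536, GFP_NOWAIT);
    	spin_unlock(&my_lock);
    	idr_preload_end();

    	return id;	/* negative on failure, e.g. -ENOSPC when the range is full */
    }

idr_alloc() returns the allocated ID directly, which is what lets the callers above drop the separate output-parameter plumbing and the retry loops.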
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ccc457e36354..b0cd86501c30 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5126,7 +5126,6 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 	struct perf_event *event;
-	struct hlist_node *node;
 	struct hlist_head *head;
 
 	rcu_read_lock();
@@ -5134,7 +5133,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	if (!head)
 		goto end;
 
-	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_swevent_match(event, type, event_id, data, regs))
 			perf_swevent_event(event, nr, data, regs);
 	}
@@ -5419,7 +5418,6 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 {
 	struct perf_sample_data data;
 	struct perf_event *event;
-	struct hlist_node *node;
 
 	struct perf_raw_record raw = {
 		.size = entry_size,
@@ -5429,7 +5427,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 	perf_sample_data_init(&data, addr, 0);
 	data.raw = &raw;
 
-	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_tp_event_match(event, &data, regs))
 			perf_swevent_event(event, count, &data, regs);
 	}
@@ -5965,13 +5963,9 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
 	pmu->name = name;
 
 	if (type < 0) {
-		int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
-		if (!err)
-			goto free_pdc;
-
-		err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
-		if (err) {
-			ret = err;
+		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
+		if (type < 0) {
+			ret = type;
 			goto free_pdc;
 		}
 	}
diff --git a/kernel/exit.c b/kernel/exit.c
index 7dd20408707c..51e485ca9935 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -20,6 +20,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/file.h>
 #include <linux/fdtable.h>
+#include <linux/freezer.h>
 #include <linux/binfmts.h>
 #include <linux/nsproxy.h>
 #include <linux/pid_namespace.h>
@@ -31,7 +32,6 @@
 #include <linux/mempolicy.h>
 #include <linux/taskstats_kern.h>
 #include <linux/delayacct.h>
-#include <linux/freezer.h>
 #include <linux/cgroup.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
@@ -485,7 +485,7 @@ static void exit_mm(struct task_struct * tsk)
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (!self.task) /* see coredump_finish() */
 			break;
-		schedule();
+		freezable_schedule();
 	}
 	__set_task_state(tsk, TASK_RUNNING);
 	down_read(&mm->mmap_sem);
@@ -835,7 +835,7 @@ void do_exit(long code)
 	/*
 	 * Make sure we are holding no locks:
 	 */
-	debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held();
 	/*
 	 * We can do this unlocked here. The futex code uses this flag
 	 * just to verify whether the pi state cleanup has been done
diff --git a/kernel/fork.c b/kernel/fork.c
index 8f62b2a0f120..8d932b1c9056 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1861,10 +1861,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 		exit_sem(current);
 	}
 
-	if (new_nsproxy) {
+	if (new_nsproxy)
 		switch_task_namespaces(current, new_nsproxy);
-		new_nsproxy = NULL;
-	}
 
 	task_lock(current);
 
@@ -1894,9 +1892,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 		}
 	}
 
-	if (new_nsproxy)
-		put_nsproxy(new_nsproxy);
-
 bad_unshare_cleanup_cred:
 	if (new_cred)
 		put_cred(new_cred);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2436ffcec91f..bddd3d7a74b6 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -229,6 +229,8 @@ out:
 
 }
 
+static void kimage_free_page_list(struct list_head *list);
+
 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 				unsigned long nr_segments,
 				struct kexec_segment __user *segments)
@@ -242,8 +244,6 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 	if (result)
 		goto out;
 
-	*rimage = image;
-
 	/*
 	 * Find a location for the control code buffer, and add it
 	 * the vector of segments so that it's pages will also be
@@ -254,22 +254,22 @@ static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
-		goto out;
+		goto out_free;
 	}
 
 	image->swap_page = kimage_alloc_control_pages(image, 0);
 	if (!image->swap_page) {
 		printk(KERN_ERR "Could not allocate swap buffer\n");
-		goto out;
+		goto out_free;
 	}
 
-	result = 0;
-out:
-	if (result == 0)
-		*rimage = image;
-	else
-		kfree(image);
+	*rimage = image;
+	return 0;
 
+out_free:
+	kimage_free_page_list(&image->control_pages);
+	kfree(image);
+out:
 	return result;
 }
 
@@ -316,7 +316,7 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 		mend = mstart + image->segment[i].memsz - 1;
 		/* Ensure we are within the crash kernel limits */
 		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
-			goto out;
+			goto out_free;
 	}
 
 	/*
@@ -329,16 +329,15 @@ static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 	if (!image->control_code_page) {
 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
-		goto out;
+		goto out_free;
 	}
 
-	result = 0;
-out:
-	if (result == 0)
-		*rimage = image;
-	else
-		kfree(image);
+	*rimage = image;
+	return 0;
 
+out_free:
+	kfree(image);
+out:
 	return result;
 }
 
@@ -503,8 +502,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 
 		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 			break;
-		if (hole_end > crashk_res.end)
-			break;
 		/* See if I overlap any of the segments */
 		for (i = 0; i < image->nr_segments; i++) {
 			unsigned long mstart, mend;
@@ -1514,6 +1511,8 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(page, _count);
 	VMCOREINFO_OFFSET(page, mapping);
 	VMCOREINFO_OFFSET(page, lru);
+	VMCOREINFO_OFFSET(page, _mapcount);
+	VMCOREINFO_OFFSET(page, private);
 	VMCOREINFO_OFFSET(pglist_data, node_zones);
 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1536,6 +1535,11 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_NUMBER(PG_lru);
 	VMCOREINFO_NUMBER(PG_private);
 	VMCOREINFO_NUMBER(PG_swapcache);
+	VMCOREINFO_NUMBER(PG_slab);
+#ifdef CONFIG_MEMORY_FAILURE
+	VMCOREINFO_NUMBER(PG_hwpoison);
+#endif
+	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
 
 	arch_crash_save_vmcoreinfo();
 	update_vmcoreinfo_note();
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
deleted file mode 100644
index 59dcf5b81d24..000000000000
--- a/kernel/kfifo.c
+++ /dev/null
@@ -1,609 +0,0 @@
-/*
- * A generic kernel FIFO implementation
- *
- * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/log2.h>
-#include <linux/uaccess.h>
-#include <linux/kfifo.h>
-
-/*
- * internal helper to calculate the unused elements in a fifo
- */
-static inline unsigned int kfifo_unused(struct __kfifo *fifo)
-{
-	return (fifo->mask + 1) - (fifo->in - fifo->out);
-}
-
-int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
-		size_t esize, gfp_t gfp_mask)
-{
-	/*
-	 * round down to the next power of 2, since our 'let the indices
-	 * wrap' technique works only in this case.
-	 */
-	if (!is_power_of_2(size))
-		size = rounddown_pow_of_two(size);
-
-	fifo->in = 0;
-	fifo->out = 0;
-	fifo->esize = esize;
-
-	if (size < 2) {
-		fifo->data = NULL;
-		fifo->mask = 0;
-		return -EINVAL;
-	}
-
-	fifo->data = kmalloc(size * esize, gfp_mask);
-
-	if (!fifo->data) {
-		fifo->mask = 0;
-		return -ENOMEM;
-	}
-	fifo->mask = size - 1;
-
-	return 0;
-}
-EXPORT_SYMBOL(__kfifo_alloc);
-
-void __kfifo_free(struct __kfifo *fifo)
-{
-	kfree(fifo->data);
-	fifo->in = 0;
-	fifo->out = 0;
-	fifo->esize = 0;
-	fifo->data = NULL;
-	fifo->mask = 0;
-}
-EXPORT_SYMBOL(__kfifo_free);
-
-int __kfifo_init(struct __kfifo *fifo, void *buffer,
-		unsigned int size, size_t esize)
-{
-	size /= esize;
-
-	if (!is_power_of_2(size))
-		size = rounddown_pow_of_two(size);
-
-	fifo->in = 0;
-	fifo->out = 0;
-	fifo->esize = esize;
-	fifo->data = buffer;
-
-	if (size < 2) {
-		fifo->mask = 0;
-		return -EINVAL;
-	}
-	fifo->mask = size - 1;
-
-	return 0;
-}
-EXPORT_SYMBOL(__kfifo_init);
-
-static void kfifo_copy_in(struct __kfifo *fifo, const void *src,
-		unsigned int len, unsigned int off)
-{
-	unsigned int size = fifo->mask + 1;
-	unsigned int esize = fifo->esize;
-	unsigned int l;
-
-	off &= fifo->mask;
-	if (esize != 1) {
-		off *= esize;
-		size *= esize;
-		len *= esize;
-	}
-	l = min(len, size - off);
-
-	memcpy(fifo->data + off, src, l);
-	memcpy(fifo->data, src + l, len - l);
-	/*
-	 * make sure that the data in the fifo is up to date before
-	 * incrementing the fifo->in index counter
-	 */
-	smp_wmb();
-}
-
-unsigned int __kfifo_in(struct __kfifo *fifo,
-		const void *buf, unsigned int len)
-{
-	unsigned int l;
-
-	l = kfifo_unused(fifo);
-	if (len > l)
-		len = l;
-
-	kfifo_copy_in(fifo, buf, len, fifo->in);
-	fifo->in += len;
-	return len;
-}
-EXPORT_SYMBOL(__kfifo_in);
-
-static void kfifo_copy_out(struct __kfifo *fifo, void *dst,
-		unsigned int len, unsigned int off)
-{
-	unsigned int size = fifo->mask + 1;
-	unsigned int esize = fifo->esize;
-	unsigned int l;
-
-	off &= fifo->mask;
-	if (esize != 1) {
-		off *= esize;
-		size *= esize;
-		len *= esize;
-	}
-	l = min(len, size - off);
-
-	memcpy(dst, fifo->data + off, l);
-	memcpy(dst + l, fifo->data, len - l);
-	/*
-	 * make sure that the data is copied before
-	 * incrementing the fifo->out index counter
-	 */
-	smp_wmb();
-}
-
-unsigned int __kfifo_out_peek(struct __kfifo *fifo,
-		void *buf, unsigned int len)
-{
-	unsigned int l;
-
-	l = fifo->in - fifo->out;
-	if (len > l)
-		len = l;
-
-	kfifo_copy_out(fifo, buf, len, fifo->out);
-	return len;
-}
-EXPORT_SYMBOL(__kfifo_out_peek);
-
-unsigned int __kfifo_out(struct __kfifo *fifo,
-		void *buf, unsigned int len)
-{
-	len = __kfifo_out_peek(fifo, buf, len);
-	fifo->out += len;
-	return len;
-}
-EXPORT_SYMBOL(__kfifo_out);
-
-static unsigned long kfifo_copy_from_user(struct __kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int off,
-	unsigned int *copied)
-{
-	unsigned int size = fifo->mask + 1;
-	unsigned int esize = fifo->esize;
-	unsigned int l;
-	unsigned long ret;
-
-	off &= fifo->mask;
-	if (esize != 1) {
-		off *= esize;
-		size *= esize;
-		len *= esize;
-	}
-	l = min(len, size - off);
-
-	ret = copy_from_user(fifo->data + off, from, l);
-	if (unlikely(ret))
-		ret = DIV_ROUND_UP(ret + len - l, esize);
-	else {
-		ret = copy_from_user(fifo->data, from + l, len - l);
-		if (unlikely(ret))
-			ret = DIV_ROUND_UP(ret, esize);
-	}
-	/*
-	 * make sure that the data in the fifo is up to date before
-	 * incrementing the fifo->in index counter
-	 */
-	smp_wmb();
-	*copied = len - ret;
-	/* return the number of elements which are not copied */
-	return ret;
-}
-
-int __kfifo_from_user(struct __kfifo *fifo, const void __user *from,
-		unsigned long len, unsigned int *copied)
-{
-	unsigned int l;
-	unsigned long ret;
-	unsigned int esize = fifo->esize;
-	int err;
-
-	if (esize != 1)
-		len /= esize;
-
-	l = kfifo_unused(fifo);
-	if (len > l)
-		len = l;
-
-	ret = kfifo_copy_from_user(fifo, from, len, fifo->in, copied);
-	if (unlikely(ret)) {
-		len -= ret;
-		err = -EFAULT;
-	} else
-		err = 0;
-	fifo->in += len;
-	return err;
-}
-EXPORT_SYMBOL(__kfifo_from_user);
-
-static unsigned long kfifo_copy_to_user(struct __kfifo *fifo, void __user *to,
-		unsigned int len, unsigned int off, unsigned int *copied)
-{
-	unsigned int l;
-	unsigned long ret;
-	unsigned int size = fifo->mask + 1;
-	unsigned int esize = fifo->esize;
-
-	off &= fifo->mask;
-	if (esize != 1) {
-		off *= esize;
-		size *= esize;
-		len *= esize;
-	}
-	l = min(len, size - off);
-
-	ret = copy_to_user(to, fifo->data + off, l);
-	if (unlikely(ret))
-		ret = DIV_ROUND_UP(ret + len - l, esize);
-	else {
-		ret = copy_to_user(to + l, fifo->data, len - l);
-		if (unlikely(ret))
-			ret = DIV_ROUND_UP(ret, esize);
-	}
-	/*
-	 * make sure that the data is copied before
-	 * incrementing the fifo->out index counter
-	 */
-	smp_wmb();
-	*copied = len - ret;
-	/* return the number of elements which are not copied */
-	return ret;
-}
-
-int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
-		unsigned long len, unsigned int *copied)
-{
-	unsigned int l;
-	unsigned long ret;
-	unsigned int esize = fifo->esize;
-	int err;
-
-	if (esize != 1)
-		len /= esize;
-
-	l = fifo->in - fifo->out;
-	if (len > l)
-		len = l;
-	ret = kfifo_copy_to_user(fifo, to, len, fifo->out, copied);
-	if (unlikely(ret)) {
-		len -= ret;
-		err = -EFAULT;
-	} else
-		err = 0;
-	fifo->out += len;
-	return err;
-}
-EXPORT_SYMBOL(__kfifo_to_user);
-
-static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
-		int nents, unsigned int len)
-{
-	int n;
-	unsigned int l;
-	unsigned int off;
-	struct page *page;
-
-	if (!nents)
-		return 0;
-
-	if (!len)
-		return 0;
-
-	n = 0;
-	page = virt_to_page(buf);
-	off = offset_in_page(buf);
-	l = 0;
-
-	while (len >= l + PAGE_SIZE - off) {
-		struct page *npage;
-
-		l += PAGE_SIZE;
-		buf += PAGE_SIZE;
-		npage = virt_to_page(buf);
-		if (page_to_phys(page) != page_to_phys(npage) - l) {
-			sg_set_page(sgl, page, l - off, off);
-			sgl = sg_next(sgl);
-			if (++n == nents || sgl == NULL)
-				return n;
-			page = npage;
-			len -= l - off;
-			l = off = 0;
-		}
-	}
-	sg_set_page(sgl, page, len, off);
-	return n + 1;
-}
-
-static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
-		int nents, unsigned int len, unsigned int off)
-{
-	unsigned int size = fifo->mask + 1;
-	unsigned int esize = fifo->esize;
-	unsigned int l;
-	unsigned int n;
-
-	off &= fifo->mask;
-	if (esize != 1) {
-		off *= esize;
-		size *= esize;
-		len *= esize;
-	}
-	l = min(len, size - off);
-
-	n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
-	n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
-
-	return n;
-}
-
-unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
-		struct scatterlist *sgl, int nents, unsigned int len)
-{
-	unsigned int l;
-
-	l = kfifo_unused(fifo);
-	if (len > l)
-		len = l;
-
-	return setup_sgl(fifo, sgl, nents, len, fifo->in);
-}
-EXPORT_SYMBOL(__kfifo_dma_in_prepare);
-
-unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
-		struct scatterlist *sgl, int nents, unsigned int len)
-{
-	unsigned int l;
-
-	l = fifo->in - fifo->out;
-	if (len > l)
-		len = l;
-
-	return setup_sgl(fifo, sgl, nents, len, fifo->out);
-}
-EXPORT_SYMBOL(__kfifo_dma_out_prepare);
-
-unsigned int __kfifo_max_r(unsigned int len, size_t recsize)
-{
-	unsigned int max = (1 << (recsize << 3)) - 1;
-
-	if (len > max)
-		return max;
-	return len;
-}
-EXPORT_SYMBOL(__kfifo_max_r);
-
-#define __KFIFO_PEEK(data, out, mask) \
-	((data)[(out) & (mask)])
-/*
- * __kfifo_peek_n internal helper function for determinate the length of
- * the next record in the fifo
- */
-static unsigned int __kfifo_peek_n(struct __kfifo *fifo, size_t recsize)
-{
-	unsigned int l;
-	unsigned int mask = fifo->mask;
-	unsigned char *data = fifo->data;
-
-	l = __KFIFO_PEEK(data, fifo->out, mask);
-
-	if (--recsize)
-		l |= __KFIFO_PEEK(data, fifo->out + 1, mask) << 8;
-
-	return l;
-}
-
-#define __KFIFO_POKE(data, in, mask, val) \
-	( \
-	(data)[(in) & (mask)] = (unsigned char)(val) \
-	)
-
-/*
- * __kfifo_poke_n internal helper function for storeing the length of
- * the record into the fifo
- */
-static void __kfifo_poke_n(struct __kfifo *fifo, unsigned int n, size_t recsize)
-{
-	unsigned int mask = fifo->mask;
-	unsigned char *data = fifo->data;
-
-	__KFIFO_POKE(data, fifo->in, mask, n);
-
-	if (recsize > 1)
-		__KFIFO_POKE(data, fifo->in + 1, mask, n >> 8);
-}
-
-unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize)
-{
-	return __kfifo_peek_n(fifo, recsize);
-}
-EXPORT_SYMBOL(__kfifo_len_r);
-
-unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf,
-		unsigned int len, size_t recsize)
-{
-	if (len + recsize > kfifo_unused(fifo))
-		return 0;
-
-	__kfifo_poke_n(fifo, len, recsize);
-
-	kfifo_copy_in(fifo, buf, len, fifo->in + recsize);
-	fifo->in += len + recsize;
-	return len;
-}
-EXPORT_SYMBOL(__kfifo_in_r);
-
-static unsigned int kfifo_out_copy_r(struct __kfifo *fifo,
-	void *buf, unsigned int len, size_t recsize, unsigned int *n)
-{
-	*n = __kfifo_peek_n(fifo, recsize);
-
-	if (len > *n)
-		len = *n;
-
-	kfifo_copy_out(fifo, buf, len, fifo->out + recsize);
-	return len;
-}
-
-unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
-		unsigned int len, size_t recsize)
-{
-	unsigned int n;
-
-	if (fifo->in == fifo->out)
-		return 0;
-
-	return kfifo_out_copy_r(fifo, buf, len, recsize, &n);
-}
-EXPORT_SYMBOL(__kfifo_out_peek_r);
-
-unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
-		unsigned int len, size_t recsize)
-{
-	unsigned int n;
-
-	if (fifo->in == fifo->out)
-		return 0;
-
-	len = kfifo_out_copy_r(fifo, buf, len, recsize, &n);
-	fifo->out += n + recsize;
-	return len;
-}
-EXPORT_SYMBOL(__kfifo_out_r);
-
-void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
-{
-	unsigned int n;
-
-	n = __kfifo_peek_n(fifo, recsize);
-	fifo->out += n + recsize;
-}
-EXPORT_SYMBOL(__kfifo_skip_r);
-
-int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
-	unsigned long len, unsigned int *copied, size_t recsize)
-{
-	unsigned long ret;
-
-	len = __kfifo_max_r(len, recsize);
-
-	if (len + recsize > kfifo_unused(fifo)) {
-		*copied = 0;
-		return 0;
-	}
-
-	__kfifo_poke_n(fifo, len, recsize);
-
-	ret = kfifo_copy_from_user(fifo, from, len, fifo->in + recsize, copied);
-	if (unlikely(ret)) {
-		*copied = 0;
-		return -EFAULT;
-	}
-	fifo->in += len + recsize;
-	return 0;
-}
-EXPORT_SYMBOL(__kfifo_from_user_r);
-
-int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
-	unsigned long len, unsigned int *copied, size_t recsize)
-{
-	unsigned long ret;
-	unsigned int n;
-
-	if (fifo->in == fifo->out) {
-		*copied = 0;
-		return 0;
-	}
-
-	n = __kfifo_peek_n(fifo, recsize);
-	if (len > n)
-		len = n;
-
-	ret = kfifo_copy_to_user(fifo, to, len, fifo->out + recsize, copied);
-	if (unlikely(ret)) {
-		*copied = 0;
-		return -EFAULT;
-	}
-	fifo->out += n + recsize;
-	return 0;
-}
-EXPORT_SYMBOL(__kfifo_to_user_r);
-
-unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
-	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
-{
-	if (!nents)
-		BUG();
-
-	len = __kfifo_max_r(len, recsize);
-
-	if (len + recsize > kfifo_unused(fifo))
-		return 0;
-
-	return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
-}
-EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
-
-void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
-	unsigned int len, size_t recsize)
-{
-	len = __kfifo_max_r(len, recsize);
-	__kfifo_poke_n(fifo, len, recsize);
-	fifo->in += len + recsize;
-}
-EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
-
-unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
-	struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
-{
-	if (!nents)
-		BUG();
-
-	len = __kfifo_max_r(len, recsize);
-
-	if (len + recsize > fifo->in - fifo->out)
-		return 0;
-
-	return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
-}
-EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
-
-void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
-{
-	unsigned int len;
-
-	len = __kfifo_peek_n(fifo, recsize);
-	fifo->out += len + recsize;
-}
-EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
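kernel/kfifo.c goes away because the implementation moves verbatim to lib/kfifo.c ("kfifo: move kfifo.c from kernel/ to lib/"); the exported API is unchanged. For orientation, a minimal usage sketch of that API from a hypothetical caller (fifo_demo() is illustrative, not part of this merge), using a power-of-two size as the allocation helpers expect:

    #include <linux/kernel.h>
    #include <linux/kfifo.h>

    /* A statically allocated FIFO of 128 bytes; the size must be a power of two. */
    static DEFINE_KFIFO(my_fifo, unsigned char, 128);

    static void fifo_demo(void)
    {
    	unsigned char in[4] = "abcd", out[4];
    	unsigned int n;

    	n = kfifo_in(&my_fifo, in, sizeof(in));	/* returns bytes actually stored */
    	n = kfifo_out(&my_fifo, out, n);	/* returns bytes actually read */
    	pr_info("kfifo round-trip: %u bytes\n", n);
    }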
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 550294d58a02..e35be53f6613 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -334,11 +334,10 @@ static inline void reset_kprobe_instance(void)
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 
 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		if (p->addr == addr)
 			return p;
 	}
@@ -799,7 +798,6 @@ out:
 static void __kprobes optimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -810,7 +808,7 @@ static void __kprobes optimize_all_kprobes(void)
 	kprobes_allow_optimization = true;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				optimize_kprobe(p);
 	}
@@ -821,7 +819,6 @@ static void __kprobes optimize_all_kprobes(void)
 static void __kprobes unoptimize_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -832,7 +829,7 @@ static void __kprobes unoptimize_all_kprobes(void)
 	kprobes_allow_optimization = false;
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!kprobe_disabled(p))
 				unoptimize_kprobe(p, false);
 		}
@@ -1148,7 +1145,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long hash, flags = 0;
 
 	if (unlikely(!kprobes_initialized))
@@ -1159,12 +1156,12 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
 	head = &kretprobe_inst_table[hash];
 	kretprobe_table_lock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri, &empty_rp);
 	}
 	kretprobe_table_unlock(hash, &flags);
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1173,9 +1170,9 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 static inline void free_rp_inst(struct kretprobe *rp)
 {
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 
-	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
+	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
@@ -1185,14 +1182,14 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 {
 	unsigned long flags, hash;
 	struct kretprobe_instance *ri;
-	struct hlist_node *pos, *next;
+	struct hlist_node *next;
 	struct hlist_head *head;
 
 	/* No race here */
 	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
 		kretprobe_table_lock(hash, &flags);
 		head = &kretprobe_inst_table[hash];
-		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
+		hlist_for_each_entry_safe(ri, next, head, hlist) {
 			if (ri->rp == rp)
 				ri->rp = NULL;
 		}
@@ -2028,7 +2025,6 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 {
 	struct module *mod = data;
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 	int checkcore = (val == MODULE_STATE_GOING);
@@ -2045,7 +2041,7 @@ static int __kprobes kprobes_module_callback(struct notifier_block *nb,
 	mutex_lock(&kprobe_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (within_module_init((unsigned long)p->addr, mod) ||
 			    (checkcore &&
 			     within_module_core((unsigned long)p->addr, mod))) {
@@ -2192,7 +2188,6 @@ static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p, *kp;
 	const char *sym = NULL;
 	unsigned int i = *(loff_t *) v;
@@ -2201,7 +2196,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 
 	head = &kprobe_table[i];
 	preempt_disable();
-	hlist_for_each_entry_rcu(p, node, head, hlist) {
+	hlist_for_each_entry_rcu(p, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
 		if (kprobe_aggrprobe(p)) {
@@ -2236,7 +2231,6 @@ static const struct file_operations debugfs_kprobes_operations = {
 static void __kprobes arm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -2249,7 +2243,7 @@ static void __kprobes arm_all_kprobes(void)
 	/* Arming kprobes doesn't optimize kprobe itself */
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist)
+		hlist_for_each_entry_rcu(p, head, hlist)
 			if (!kprobe_disabled(p))
 				arm_kprobe(p);
 	}
@@ -2265,7 +2259,6 @@ already_enabled:
 static void __kprobes disarm_all_kprobes(void)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct kprobe *p;
 	unsigned int i;
 
@@ -2282,7 +2275,7 @@ static void __kprobes disarm_all_kprobes(void)
 
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
-		hlist_for_each_entry_rcu(p, node, head, hlist) {
+		hlist_for_each_entry_rcu(p, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
 				disarm_kprobe(p, false);
 		}
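Note the asymmetry in the kprobes hunks above: the plain and _rcu iterators lose their cursor argument entirely, while hlist_for_each_entry_safe() keeps a single struct hlist_node lookahead so the current entry can be unlinked mid-walk. A minimal deletion-safe walk in the new style, again using a hypothetical struct item:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
    	struct hlist_node hlist;
    };

    /* Empty a bucket; 'tmp' caches the next node before 'pos' is freed. */
    static void flush_bucket(struct hlist_head *head)
    {
    	struct item *pos;
    	struct hlist_node *tmp;

    	hlist_for_each_entry_safe(pos, tmp, head, hlist) {
    		hlist_del(&pos->hlist);
    		kfree(pos);
    	}
    }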
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8a0efac4f99d..259db207b5d9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
 	if (!debug_locks_off())
 		return;
@@ -4097,22 +4097,21 @@ static void print_held_locks_bug(struct task_struct *curr)
 
 	printk("\n");
 	printk("=====================================\n");
-	printk("[ BUG: lock held at task exit time! ]\n");
+	printk("[ BUG: %s/%d still has locks held! ]\n",
+	       current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	printk("-------------------------------------\n");
-	printk("%s/%d is exiting with locks still held!\n",
-		curr->comm, task_pid_nr(curr));
-	lockdep_print_held_locks(curr);
-
+	lockdep_print_held_locks(current);
 	printk("\nstack backtrace:\n");
 	dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-	if (unlikely(task->lockdep_depth > 0))
-		print_held_locks_bug(task);
+	if (unlikely(current->lockdep_depth > 0))
+		print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
diff --git a/kernel/pid.c b/kernel/pid.c
index f2c6a6825098..047dc6264638 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -350,10 +350,9 @@ void disable_pid_allocation(struct pid_namespace *ns)
 
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-	struct hlist_node *elem;
 	struct upid *pnr;
 
-	hlist_for_each_entry_rcu(pnr, elem,
+	hlist_for_each_entry_rcu(pnr,
 			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
 		if (pnr->nr == nr && pnr->ns == ns)
 			return container_of(pnr, struct pid,
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 7edfe4b901e7..6edbb2c55c22 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -552,24 +552,22 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
 		return -EAGAIN;
 
 	spin_lock_init(&new_timer->it_lock);
- retry:
-	if (unlikely(!idr_pre_get(&posix_timers_id, GFP_KERNEL))) {
-		error = -EAGAIN;
-		goto out;
-	}
+
+	idr_preload(GFP_KERNEL);
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
+	error = idr_alloc(&posix_timers_id, new_timer, 0, 0, GFP_NOWAIT);
 	spin_unlock_irq(&idr_lock);
-	if (error) {
-		if (error == -EAGAIN)
-			goto retry;
+	idr_preload_end();
+	if (error < 0) {
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
 		 */
-		error = -EAGAIN;
+		if (error == -ENOSPC)
+			error = -EAGAIN;
 		goto out;
 	}
+	new_timer_id = error;
 
 	it_id_set = IT_ID_SET;
 	new_timer->it_id = (timer_t) new_timer_id;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2b5243176aba..12af4270c9c1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1752,9 +1752,8 @@ EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
 	struct preempt_notifier *notifier;
-	struct hlist_node *node;
 
-	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
 		notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }
 
@@ -1763,9 +1762,8 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 				 struct task_struct *next)
 {
 	struct preempt_notifier *notifier;
-	struct hlist_node *node;
 
-	hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
+	hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
 		notifier->ops->sched_out(notifier, next);
 }
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a7ae2963185..2676aac4103d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1157,11 +1157,11 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 static void print_fatal_signal(int signr)
 {
 	struct pt_regs *regs = signal_pt_regs();
-	printk("%s/%d: potentially unexpected fatal signal %d.\n",
+	printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n",
 		current->comm, task_pid_nr(current), signr);
 
 #if defined(__i386__) && !defined(__arch_um__)
-	printk("code at %08lx: ", regs->ip);
+	printk(KERN_INFO "code at %08lx: ", regs->ip);
 	{
 		int i;
 		for (i = 0; i < 16; i++) {
@@ -1169,11 +1169,11 @@ static void print_fatal_signal(int signr)
 
 			if (get_user(insn, (unsigned char *)(regs->ip + i)))
 				break;
-			printk("%02x ", insn);
+			printk(KERN_CONT "%02x ", insn);
 		}
 	}
+	printk(KERN_CONT "\n");
 #endif
-	printk("\n");
 	preempt_disable();
 	show_regs(regs);
 	preempt_enable();
@@ -2996,7 +2996,8 @@ static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
 	/* Not even root can pretend to send signals from the kernel.
 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
 	 */
-	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
+	    (task_pid_vnr(current) != pid)) {
 		/* We used to allow any < 0 si_code */
 		WARN_ON_ONCE(info->si_code < 0);
 		return -EPERM;
@@ -3045,7 +3046,8 @@ static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
 	/* Not even root can pretend to send signals from the kernel.
 	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
 	 */
-	if (info->si_code >= 0 || info->si_code == SI_TKILL) {
+	if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
+	    (task_pid_vnr(current) != pid)) {
 		/* We used to allow any < 0 si_code */
 		WARN_ON_ONCE(info->si_code < 0);
 		return -EPERM;
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index d4abac261779..b9bde5727829 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
 			continue;
 		}
 
-		BUG_ON(td->cpu != smp_processor_id());
+		//BUG_ON(td->cpu != smp_processor_id());
 
 		/* Check for state change setup */
 		switch (td->status) {
diff --git a/kernel/sys.c b/kernel/sys.c
index e10566bee399..81f56445fba9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2185,11 +2185,6 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
 
-static void argv_cleanup(struct subprocess_info *info)
-{
-	argv_free(info->argv);
-}
-
 static int __orderly_poweroff(void)
 {
 	int argc;
@@ -2209,9 +2204,8 @@ static int __orderly_poweroff(void)
 	}
 
 	ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC,
-				      NULL, argv_cleanup, NULL);
-	if (ret == -ENOMEM)
-		argv_free(argv);
+				      NULL, NULL, NULL);
+	argv_free(argv);
 
 	return ret;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d8df00e69c14..d1b4ee67d2df 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2095,7 +2095,7 @@ int proc_dointvec_minmax(struct ctl_table *table, int write,
 static void validate_coredump_safety(void)
 {
 #ifdef CONFIG_COREDUMP
-	if (suid_dumpable == SUID_DUMPABLE_SAFE &&
+	if (suid_dumpable == SUID_DUMP_ROOT &&
 	    core_pattern[0] != '/' && core_pattern[0] != '|') {
 		printk(KERN_WARNING "Unsafe core_pattern used with "\
 			"suid_dumpable=2. Pipe handler or fully qualified "\
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index b25115e8c7f3..ebf72358e86a 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1171,9 +1171,10 @@ static ssize_t bin_dn_node_address(struct file *file,
 
 	/* Convert the decnet address to binary */
 	result = -EIO;
-	nodep = strchr(buf, '.') + 1;
+	nodep = strchr(buf, '.');
 	if (!nodep)
 		goto out;
+	++nodep;
 
 	area = simple_strtoul(buf, NULL, 10);
 	node = simple_strtoul(nodep, NULL, 10);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 98ca94a41819..ab25b88aae56 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -762,7 +762,6 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 {
 	struct ftrace_profile *rec;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 	unsigned long key;
 
 	key = hash_long(ip, ftrace_profile_bits);
@@ -771,7 +770,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
 	if (hlist_empty(hhd))
 		return NULL;
 
-	hlist_for_each_entry_rcu(rec, n, hhd, node) {
+	hlist_for_each_entry_rcu(rec, hhd, node) {
 		if (rec->ip == ip)
 			return rec;
 	}
@@ -1133,7 +1132,6 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 	unsigned long key;
 	struct ftrace_func_entry *entry;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 
 	if (ftrace_hash_empty(hash))
 		return NULL;
@@ -1145,7 +1143,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 
 	hhd = &hash->buckets[key];
 
-	hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
+	hlist_for_each_entry_rcu(entry, hhd, hlist) {
 		if (entry->ip == ip)
 			return entry;
 	}
@@ -1202,7 +1200,7 @@ remove_hash_entry(struct ftrace_hash *hash,
 static void ftrace_hash_clear(struct ftrace_hash *hash)
 {
 	struct hlist_head *hhd;
-	struct hlist_node *tp, *tn;
+	struct hlist_node *tn;
 	struct ftrace_func_entry *entry;
 	int size = 1 << hash->size_bits;
 	int i;
@@ -1212,7 +1210,7 @@ static void ftrace_hash_clear(struct ftrace_hash *hash)
 
 	for (i = 0; i < size; i++) {
 		hhd = &hash->buckets[i];
-		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
+		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
 			free_hash_entry(hash, entry);
 	}
 	FTRACE_WARN_ON(hash->count);
@@ -1275,7 +1273,6 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 {
 	struct ftrace_func_entry *entry;
 	struct ftrace_hash *new_hash;
-	struct hlist_node *tp;
 	int size;
 	int ret;
 	int i;
@@ -1290,7 +1287,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
 
 	size = 1 << hash->size_bits;
 	for (i = 0; i < size; i++) {
-		hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
+		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
 			ret = add_hash_entry(new_hash, entry->ip);
 			if (ret < 0)
 				goto free_hash;
@@ -1316,7 +1313,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 		 struct ftrace_hash **dst, struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
-	struct hlist_node *tp, *tn;
+	struct hlist_node *tn;
 	struct hlist_head *hhd;
 	struct ftrace_hash *old_hash;
 	struct ftrace_hash *new_hash;
@@ -1362,7 +1359,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
 		hhd = &src->buckets[i];
-		hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
+		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
 			if (bits > 0)
 				key = hash_long(entry->ip, bits);
 			else
@@ -2901,7 +2898,6 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
-	struct hlist_node *n;
 	unsigned long key;
 
 	key = hash_long(ip, FTRACE_HASH_BITS);
@@ -2917,7 +2913,7 @@ static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
 	 * on the hash. rcu_read_lock is too dangerous here.
 	 */
 	preempt_disable_notrace();
-	hlist_for_each_entry_rcu(entry, n, hhd, node) {
+	hlist_for_each_entry_rcu(entry, hhd, node) {
 		if (entry->ip == ip)
 			entry->ops->func(ip, parent_ip, &entry->data);
 	}
@@ -3068,7 +3064,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				   void *data, int flags)
 {
 	struct ftrace_func_probe *entry;
-	struct hlist_node *n, *tmp;
+	struct hlist_node *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int type = MATCH_FULL;
 	int i, len = 0;
@@ -3091,7 +3087,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
 
-		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
+		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
 
 			/* break up if statements for readability */
 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
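All of the ftrace hunks above are mechanical fallout from the hlist iterator change in this batch: the iterators now derive the cursor from the typed entry itself (via container_of()), so the separate struct hlist_node * argument disappears. A minimal before/after sketch of the new shape (struct foo and foo_find are hypothetical, not ftrace code):

    #include <linux/list.h>

    struct foo {
            unsigned long ip;
            struct hlist_node node;
    };

    static struct foo *foo_find(struct hlist_head *hhd, unsigned long ip)
    {
            struct foo *rec;

            /* old: struct hlist_node *n; hlist_for_each_entry(rec, n, hhd, node) */
            hlist_for_each_entry(rec, hhd, node) {
                    if (rec->ip == ip)
                            return rec;
            }
            return NULL;
    }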
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 194d79602dc7..697e88d13907 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -739,12 +739,11 @@ static int task_state_char(unsigned long state)
 struct trace_event *ftrace_find_event(int type)
 {
 	struct trace_event *event;
-	struct hlist_node *n;
 	unsigned key;
 
 	key = type & (EVENT_HASHSIZE - 1);
 
-	hlist_for_each_entry(event, n, &event_hash[key], node) {
+	hlist_for_each_entry(event, &event_hash[key], node) {
 		if (event->type == type)
 			return event;
 	}
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index d96ba22dabfa..0c05a4592047 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -192,12 +192,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
 static struct tracepoint_entry *get_tracepoint(const char *name)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct tracepoint_entry *e;
 	u32 hash = jhash(name, strlen(name), 0);
 
 	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
+	hlist_for_each_entry(e, head, hlist) {
 		if (!strcmp(name, e->name))
 			return e;
 	}
@@ -211,13 +210,12 @@ static struct tracepoint_entry *get_tracepoint(const char *name)
 static struct tracepoint_entry *add_tracepoint(const char *name)
 {
 	struct hlist_head *head;
-	struct hlist_node *node;
 	struct tracepoint_entry *e;
 	size_t name_len = strlen(name) + 1;
 	u32 hash = jhash(name, name_len-1, 0);
 
 	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-	hlist_for_each_entry(e, node, head, hlist) {
+	hlist_for_each_entry(e, head, hlist) {
 		if (!strcmp(name, e->name)) {
 			printk(KERN_NOTICE
 				"tracepoint %s busy\n", name);
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 1744bb80f1fb..394f70b17162 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -34,11 +34,11 @@ EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
 void fire_user_return_notifiers(void)
 {
 	struct user_return_notifier *urn;
-	struct hlist_node *tmp1, *tmp2;
+	struct hlist_node *tmp2;
 	struct hlist_head *head;
 
 	head = &get_cpu_var(return_notifier_list);
-	hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
+	hlist_for_each_entry_safe(urn, tmp2, head, link)
 		urn->on_user_return(urn);
 	put_cpu_var(return_notifier_list);
 }
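As the hunk above shows, the _safe variant still keeps exactly one struct hlist_node * (tmp2 here) for lookahead, because the loop body may remove or free the current entry; only the redundant typed-cursor node went away. A sketch of the surviving signature (struct item and items_release are hypothetical):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            int key;
            struct hlist_node link;
    };

    static void items_release(struct hlist_head *head)
    {
            struct item *it;
            struct hlist_node *tmp;     /* lookahead survives deletion of 'it' */

            hlist_for_each_entry_safe(it, tmp, head, link) {
                    hlist_del(&it->link);
                    kfree(it);
            }
    }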
diff --git a/kernel/user.c b/kernel/user.c
index 57ebfd42023c..e81978e8c03b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -105,9 +105,8 @@ static void uid_hash_remove(struct user_struct *up)
 static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
-	struct hlist_node *h;
 
-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+	hlist_for_each_entry(user, hashent, uidhash_node) {
 		if (uid_eq(user->uid, uid)) {
 			atomic_inc(&user->__count);
 			return user;
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 08b197e8c485..a47fc5de3113 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -30,7 +30,7 @@ static struct uts_namespace *create_uts_ns(void)
 /*
  * Clone a new ns copying an original utsname, setting refcount to 1
  * @old_ns: namespace to clone
- * Return NULL on error (failure to kmalloc), new ns otherwise
+ * Return ERR_PTR(-ENOMEM) on error (failure to kmalloc), new ns otherwise
  */
 static struct uts_namespace *clone_uts_ns(struct user_namespace *user_ns,
 					  struct uts_namespace *old_ns)
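The comment fix above matters to callers: clone_uts_ns() reports failure with the ERR_PTR convention, not NULL, so a plain NULL test would let an error pointer escape. A hedged sketch of the calling side (use_cloned_ns is a hypothetical in-file caller, not from this patch):

    #include <linux/err.h>

    static int use_cloned_ns(struct user_namespace *user_ns,
                             struct uts_namespace *old_ns)
    {
            struct uts_namespace *ns = clone_uts_ns(user_ns, old_ns);

            if (IS_ERR(ns))                 /* not: if (!ns) */
                    return PTR_ERR(ns);     /* -ENOMEM on kmalloc failure */
            /* ... use ns ... */
            return 0;
    }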
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 63da38c2d820..4f69f9a5e221 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -15,6 +15,8 @@
 #include <linux/sysctl.h>
 #include <linux/wait.h>
 
+#ifdef CONFIG_PROC_SYSCTL
+
 static void *get_uts(ctl_table *table, int write)
 {
 	char *which = table->data;
@@ -38,7 +40,6 @@ static void put_uts(ctl_table *table, int write, void *which)
 	up_write(&uts_sem);
 }
 
-#ifdef CONFIG_PROC_SYSCTL
 /*
  * Special case of dostring for the UTS structure. This has locks
  * to observe. Should this be in kernel/sys.c ????
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f4feacad3812..81f2457811eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -251,8 +251,8 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 	for ((pool) = &std_worker_pools(cpu)[0];			\
 	     (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
 
-#define for_each_busy_worker(worker, i, pos, pool)			\
-	hash_for_each(pool->busy_hash, i, pos, worker, hentry)
+#define for_each_busy_worker(worker, i, pool)				\
+	hash_for_each(pool->busy_hash, i, worker, hentry)
 
 static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 				unsigned int sw)
@@ -909,9 +909,8 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool,
 						 struct work_struct *work)
 {
 	struct worker *worker;
-	struct hlist_node *tmp;
 
-	hash_for_each_possible(pool->busy_hash, worker, tmp, hentry,
+	hash_for_each_possible(pool->busy_hash, worker, hentry,
 			       (unsigned long)work)
 		if (worker->current_work == work &&
 		    worker->current_func == work->func)
@@ -1626,7 +1625,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 static void rebind_workers(struct worker_pool *pool)
 {
 	struct worker *worker, *n;
-	struct hlist_node *pos;
 	int i;
 
 	lockdep_assert_held(&pool->assoc_mutex);
@@ -1648,7 +1646,7 @@ static void rebind_workers(struct worker_pool *pool)
 	}
 
 	/* rebind busy workers */
-	for_each_busy_worker(worker, i, pos, pool) {
+	for_each_busy_worker(worker, i, pool) {
 		struct work_struct *rebind_work = &worker->rebind_work;
 		struct workqueue_struct *wq;
 
@@ -3423,7 +3421,6 @@ static void wq_unbind_fn(struct work_struct *work)
 	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
-	struct hlist_node *pos;
 	int i;
 
 	for_each_std_worker_pool(pool, cpu) {
@@ -3442,7 +3439,7 @@ static void wq_unbind_fn(struct work_struct *work)
 	list_for_each_entry(worker, &pool->idle_list, entry)
 		worker->flags |= WORKER_UNBOUND;
 
-	for_each_busy_worker(worker, i, pool)
+	for_each_busy_worker(worker, i, pool)
 		worker->flags |= WORKER_UNBOUND;
 
 	pool->flags |= POOL_DISASSOCIATED;
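The workqueue hunks follow from the same series: the <linux/hashtable.h> wrappers (hash_for_each(), hash_for_each_possible()) sit on top of the hlist iterators, so they lose the node argument too. A minimal sketch of the new lookup shape, modelled loosely on find_worker_executing_work() (struct busy, busy_find, and the table are hypothetical):

    #include <linux/hashtable.h>

    struct busy {
            unsigned long key;
            struct hlist_node hentry;
    };

    static DEFINE_HASHTABLE(busy_hash, 6);      /* 2^6 = 64 buckets */

    static struct busy *busy_find(unsigned long key)
    {
            struct busy *b;

            /* old: hash_for_each_possible(busy_hash, b, tmp, hentry, key) */
            hash_for_each_possible(busy_hash, b, hentry, key)
                    if (b->key == key)
                            return b;
            return NULL;
    }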