Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.locks | 202
-rw-r--r--  kernel/Makefile | 7
-rw-r--r--  kernel/capability.c | 15
-rw-r--r--  kernel/cgroup.c | 8
-rw-r--r--  kernel/cpuset.c | 27
-rw-r--r--  kernel/exit.c | 35
-rw-r--r--  kernel/fork.c | 22
-rw-r--r--  kernel/futex.c | 23
-rw-r--r--  kernel/hrtimer.c | 2
-rw-r--r--  kernel/hung_task.c | 2
-rw-r--r--  kernel/hw_breakpoint.c | 423
-rw-r--r--  kernel/irq/chip.c | 6
-rw-r--r--  kernel/irq/handle.c | 1
-rw-r--r--  kernel/irq/proc.c | 40
-rw-r--r--  kernel/irq/spurious.c | 16
-rw-r--r--  kernel/kallsyms.c | 1
-rw-r--r--  kernel/kgdb.c | 2
-rw-r--r--  kernel/kmod.c | 8
-rw-r--r--  kernel/kprobes.c | 72
-rw-r--r--  kernel/kthread.c | 23
-rw-r--r--  kernel/lockdep.c | 22
-rw-r--r--  kernel/module.c | 5
-rw-r--r--  kernel/mutex-debug.c | 1
-rw-r--r--  kernel/mutex.c | 4
-rw-r--r--  kernel/notifier.c | 2
-rw-r--r--  kernel/panic.c | 3
-rw-r--r--  kernel/params.c | 17
-rw-r--r--  kernel/perf_event.c | 965
-rw-r--r--  kernel/power/Makefile | 2
-rw-r--r--  kernel/power/hibernate.c | 41
-rw-r--r--  kernel/power/main.c | 1
-rw-r--r--  kernel/power/process.c | 14
-rw-r--r--  kernel/power/suspend_test.c | 5
-rw-r--r--  kernel/power/swap.c | 146
-rw-r--r--  kernel/power/swsusp.c | 130
-rw-r--r--  kernel/printk.c | 7
-rw-r--r--  kernel/rcupdate.c | 260
-rw-r--r--  kernel/rcutiny.c | 282
-rw-r--r--  kernel/rcutorture.c | 69
-rw-r--r--  kernel/rcutree.c | 787
-rw-r--r--  kernel/rcutree.h | 160
-rw-r--r--  kernel/rcutree_plugin.h | 442
-rw-r--r--  kernel/rcutree_trace.c | 16
-rw-r--r--  kernel/sched.c | 368
-rw-r--r--  kernel/sched_debug.c | 4
-rw-r--r--  kernel/sched_fair.c | 139
-rw-r--r--  kernel/sched_rt.c | 61
-rw-r--r--  kernel/signal.c | 73
-rw-r--r--  kernel/slow-work-debugfs.c | 227
-rw-r--r--  kernel/slow-work.c | 519
-rw-r--r--  kernel/slow-work.h | 72
-rw-r--r--  kernel/smp.c | 56
-rw-r--r--  kernel/softirq.c | 2
-rw-r--r--  kernel/spinlock.c | 310
-rw-r--r--  kernel/srcu.c | 74
-rw-r--r--  kernel/sys.c | 46
-rw-r--r--  kernel/sys_ni.c | 3
-rw-r--r--  kernel/sysctl.c | 896
-rw-r--r--  kernel/sysctl_binary.c | 1507
-rw-r--r--  kernel/sysctl_check.c | 1378
-rw-r--r--  kernel/time.c | 30
-rw-r--r--  kernel/time/clocksource.c | 6
-rw-r--r--  kernel/time/tick-sched.c | 9
-rw-r--r--  kernel/time/timecompare.c | 6
-rw-r--r--  kernel/time/timekeeping.c | 1
-rw-r--r--  kernel/trace/Kconfig | 38
-rw-r--r--  kernel/trace/Makefile | 2
-rw-r--r--  kernel/trace/blktrace.c | 39
-rw-r--r--  kernel/trace/ftrace.c | 410
-rw-r--r--  kernel/trace/kmemtrace.c | 2
-rw-r--r--  kernel/trace/ring_buffer.c | 38
-rw-r--r--  kernel/trace/ring_buffer_benchmark.c | 85
-rw-r--r--  kernel/trace/trace.c | 55
-rw-r--r--  kernel/trace/trace.h | 80
-rw-r--r--  kernel/trace/trace_branch.c | 8
-rw-r--r--  kernel/trace/trace_clock.c | 8
-rw-r--r--  kernel/trace/trace_entries.h | 16
-rw-r--r--  kernel/trace/trace_event_profile.c | 50
-rw-r--r--  kernel/trace/trace_events.c | 191
-rw-r--r--  kernel/trace/trace_events_filter.c | 426
-rw-r--r--  kernel/trace/trace_export.c | 43
-rw-r--r--  kernel/trace/trace_hw_branches.c | 8
-rw-r--r--  kernel/trace/trace_kprobe.c | 1523
-rw-r--r--  kernel/trace/trace_ksym.c | 550
-rw-r--r--  kernel/trace/trace_output.c | 23
-rw-r--r--  kernel/trace/trace_selftest.c | 55
-rw-r--r--  kernel/trace/trace_syscalls.c | 231
-rw-r--r--  kernel/user-return-notifier.c | 44
-rw-r--r--  kernel/user.c | 2
-rw-r--r--  kernel/utsname_sysctl.c | 31
-rw-r--r--  kernel/workqueue.c | 35
91 files changed, 9640 insertions, 4456 deletions
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
new file mode 100644
index 000000000000..88c92fb44618
--- /dev/null
+++ b/kernel/Kconfig.locks
@@ -0,0 +1,202 @@
1#
2# The ARCH_INLINE foo is necessary because select ignores "depends on"
3#
4config ARCH_INLINE_SPIN_TRYLOCK
5 bool
6
7config ARCH_INLINE_SPIN_TRYLOCK_BH
8 bool
9
10config ARCH_INLINE_SPIN_LOCK
11 bool
12
13config ARCH_INLINE_SPIN_LOCK_BH
14 bool
15
16config ARCH_INLINE_SPIN_LOCK_IRQ
17 bool
18
19config ARCH_INLINE_SPIN_LOCK_IRQSAVE
20 bool
21
22config ARCH_INLINE_SPIN_UNLOCK
23 bool
24
25config ARCH_INLINE_SPIN_UNLOCK_BH
26 bool
27
28config ARCH_INLINE_SPIN_UNLOCK_IRQ
29 bool
30
31config ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
32 bool
33
34
35config ARCH_INLINE_READ_TRYLOCK
36 bool
37
38config ARCH_INLINE_READ_LOCK
39 bool
40
41config ARCH_INLINE_READ_LOCK_BH
42 bool
43
44config ARCH_INLINE_READ_LOCK_IRQ
45 bool
46
47config ARCH_INLINE_READ_LOCK_IRQSAVE
48 bool
49
50config ARCH_INLINE_READ_UNLOCK
51 bool
52
53config ARCH_INLINE_READ_UNLOCK_BH
54 bool
55
56config ARCH_INLINE_READ_UNLOCK_IRQ
57 bool
58
59config ARCH_INLINE_READ_UNLOCK_IRQRESTORE
60 bool
61
62
63config ARCH_INLINE_WRITE_TRYLOCK
64 bool
65
66config ARCH_INLINE_WRITE_LOCK
67 bool
68
69config ARCH_INLINE_WRITE_LOCK_BH
70 bool
71
72config ARCH_INLINE_WRITE_LOCK_IRQ
73 bool
74
75config ARCH_INLINE_WRITE_LOCK_IRQSAVE
76 bool
77
78config ARCH_INLINE_WRITE_UNLOCK
79 bool
80
81config ARCH_INLINE_WRITE_UNLOCK_BH
82 bool
83
84config ARCH_INLINE_WRITE_UNLOCK_IRQ
85 bool
86
87config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
88 bool
89
90#
91# lock_* functions are inlined when:
92# - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y
93#
94# trylock_* functions are inlined when:
95# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
96#
97# unlock and unlock_irq functions are inlined when:
98# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
99# or
100# - DEBUG_SPINLOCK=n and PREEMPT=n
101#
102# unlock_bh and unlock_irqrestore functions are inlined when:
103# - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y
104#
105
106config INLINE_SPIN_TRYLOCK
107 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK
108
109config INLINE_SPIN_TRYLOCK_BH
110 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH
111
112config INLINE_SPIN_LOCK
113 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK
114
115config INLINE_SPIN_LOCK_BH
116 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
117 ARCH_INLINE_SPIN_LOCK_BH
118
119config INLINE_SPIN_LOCK_IRQ
120 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
121 ARCH_INLINE_SPIN_LOCK_IRQ
122
123config INLINE_SPIN_LOCK_IRQSAVE
124 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
125 ARCH_INLINE_SPIN_LOCK_IRQSAVE
126
127config INLINE_SPIN_UNLOCK
128 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK)
129
130config INLINE_SPIN_UNLOCK_BH
131 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH
132
133config INLINE_SPIN_UNLOCK_IRQ
134 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH)
135
136config INLINE_SPIN_UNLOCK_IRQRESTORE
137 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE
138
139
140config INLINE_READ_TRYLOCK
141 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK
142
143config INLINE_READ_LOCK
144 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK
145
146config INLINE_READ_LOCK_BH
147 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
148 ARCH_INLINE_READ_LOCK_BH
149
150config INLINE_READ_LOCK_IRQ
151 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
152 ARCH_INLINE_READ_LOCK_IRQ
153
154config INLINE_READ_LOCK_IRQSAVE
155 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
156 ARCH_INLINE_READ_LOCK_IRQSAVE
157
158config INLINE_READ_UNLOCK
159 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK)
160
161config INLINE_READ_UNLOCK_BH
162 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH
163
164config INLINE_READ_UNLOCK_IRQ
165 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH)
166
167config INLINE_READ_UNLOCK_IRQRESTORE
168 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE
169
170
171config INLINE_WRITE_TRYLOCK
172 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK
173
174config INLINE_WRITE_LOCK
175 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK
176
177config INLINE_WRITE_LOCK_BH
178 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
179 ARCH_INLINE_WRITE_LOCK_BH
180
181config INLINE_WRITE_LOCK_IRQ
182 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
183 ARCH_INLINE_WRITE_LOCK_IRQ
184
185config INLINE_WRITE_LOCK_IRQSAVE
186 def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \
187 ARCH_INLINE_WRITE_LOCK_IRQSAVE
188
189config INLINE_WRITE_UNLOCK
190 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK)
191
192config INLINE_WRITE_UNLOCK_BH
193 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH
194
195config INLINE_WRITE_UNLOCK_IRQ
196 def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH)
197
198config INLINE_WRITE_UNLOCK_IRQRESTORE
199 def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
200
201config MUTEX_SPIN_ON_OWNER
202 def_bool SMP && !DEBUG_MUTEXES && !HAVE_DEFAULT_NO_SPIN_MUTEXES
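
The comment block near the top of Kconfig.locks spells out when each lock operation ends up inline. A minimal sketch of how the generic spinlock header can key off the resulting CONFIG_INLINE_* symbols; this is modelled loosely on include/linux/spinlock_api_smp.h, and the exact identifiers here are illustrative rather than part of this patch:

#ifdef CONFIG_INLINE_SPIN_LOCK
/*
 * INLINE_SPIN_LOCK is set (no DEBUG_SPINLOCK, no GENERIC_LOCKBREAK and
 * the architecture selected ARCH_INLINE_SPIN_LOCK): callers get the
 * lock body expanded inline.
 */
#define _spin_lock(lock) __spin_lock(lock)
#else
/* Otherwise every caller goes through the out-of-line copy in kernel/spinlock.c. */
void __lockfunc _spin_lock(spinlock_t *lock);
#endif
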
diff --git a/kernel/Makefile b/kernel/Makefile
index b8d4cd8ac0b9..864ff75d65f2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-y = sched.o fork.o exec_domain.o panic.o printk.o \ 5obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
6 cpu.o exit.o itimer.o time.o softirq.o resource.o \ 6 cpu.o exit.o itimer.o time.o softirq.o resource.o \
7 sysctl.o capability.o ptrace.o timer.o user.o \ 7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o \
9 rcupdate.o extable.o params.o posix-timers.o \ 9 rcupdate.o extable.o params.o posix-timers.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
@@ -21,6 +21,7 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
21CFLAGS_REMOVE_rtmutex-debug.o = -pg 21CFLAGS_REMOVE_rtmutex-debug.o = -pg
22CFLAGS_REMOVE_cgroup-debug.o = -pg 22CFLAGS_REMOVE_cgroup-debug.o = -pg
23CFLAGS_REMOVE_sched_clock.o = -pg 23CFLAGS_REMOVE_sched_clock.o = -pg
24CFLAGS_REMOVE_perf_event.o = -pg
24endif 25endif
25 26
26obj-$(CONFIG_FREEZER) += freezer.o 27obj-$(CONFIG_FREEZER) += freezer.o
@@ -82,6 +83,7 @@ obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
82obj-$(CONFIG_TREE_RCU) += rcutree.o 83obj-$(CONFIG_TREE_RCU) += rcutree.o
83obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o 84obj-$(CONFIG_TREE_PREEMPT_RCU) += rcutree.o
84obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o 85obj-$(CONFIG_TREE_RCU_TRACE) += rcutree_trace.o
86obj-$(CONFIG_TINY_RCU) += rcutiny.o
85obj-$(CONFIG_RELAY) += relay.o 87obj-$(CONFIG_RELAY) += relay.o
86obj-$(CONFIG_SYSCTL) += utsname_sysctl.o 88obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
87obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o 89obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
@@ -94,7 +96,10 @@ obj-$(CONFIG_X86_DS) += trace/
94obj-$(CONFIG_RING_BUFFER) += trace/ 96obj-$(CONFIG_RING_BUFFER) += trace/
95obj-$(CONFIG_SMP) += sched_cpupri.o 97obj-$(CONFIG_SMP) += sched_cpupri.o
96obj-$(CONFIG_SLOW_WORK) += slow-work.o 98obj-$(CONFIG_SLOW_WORK) += slow-work.o
99obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
97obj-$(CONFIG_PERF_EVENTS) += perf_event.o 100obj-$(CONFIG_PERF_EVENTS) += perf_event.o
101obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
102obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
98 103
99ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) 104ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
100# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is 105# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/capability.c b/kernel/capability.c
index 4e17041963f5..7f876e60521f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -29,7 +29,6 @@ EXPORT_SYMBOL(__cap_empty_set);
29EXPORT_SYMBOL(__cap_full_set); 29EXPORT_SYMBOL(__cap_full_set);
30EXPORT_SYMBOL(__cap_init_eff_set); 30EXPORT_SYMBOL(__cap_init_eff_set);
31 31
32#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
33int file_caps_enabled = 1; 32int file_caps_enabled = 1;
34 33
35static int __init file_caps_disable(char *str) 34static int __init file_caps_disable(char *str)
@@ -38,7 +37,6 @@ static int __init file_caps_disable(char *str)
38 return 1; 37 return 1;
39} 38}
40__setup("no_file_caps", file_caps_disable); 39__setup("no_file_caps", file_caps_disable);
41#endif
42 40
43/* 41/*
44 * More recent versions of libcap are available from: 42 * More recent versions of libcap are available from:
@@ -169,8 +167,8 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
169 kernel_cap_t pE, pI, pP; 167 kernel_cap_t pE, pI, pP;
170 168
171 ret = cap_validate_magic(header, &tocopy); 169 ret = cap_validate_magic(header, &tocopy);
172 if (ret != 0) 170 if ((dataptr == NULL) || (ret != 0))
173 return ret; 171 return ((dataptr == NULL) && (ret == -EINVAL)) ? 0 : ret;
174 172
175 if (get_user(pid, &header->pid)) 173 if (get_user(pid, &header->pid))
176 return -EFAULT; 174 return -EFAULT;
@@ -238,7 +236,7 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
238SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data) 236SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
239{ 237{
240 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S]; 238 struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
241 unsigned i, tocopy; 239 unsigned i, tocopy, copybytes;
242 kernel_cap_t inheritable, permitted, effective; 240 kernel_cap_t inheritable, permitted, effective;
243 struct cred *new; 241 struct cred *new;
244 int ret; 242 int ret;
@@ -255,8 +253,11 @@ SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
255 if (pid != 0 && pid != task_pid_vnr(current)) 253 if (pid != 0 && pid != task_pid_vnr(current))
256 return -EPERM; 254 return -EPERM;
257 255
258 if (copy_from_user(&kdata, data, 256 copybytes = tocopy * sizeof(struct __user_cap_data_struct);
259 tocopy * sizeof(struct __user_cap_data_struct))) 257 if (copybytes > sizeof(kdata))
258 return -EFAULT;
259
260 if (copy_from_user(&kdata, data, copybytes))
260 return -EFAULT; 261 return -EFAULT;
261 262
262 for (i = 0; i < tocopy; i++) { 263 for (i = 0; i < tocopy; i++) {
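
The capset() hunk above bounds the user copy by the size of the on-stack kdata[] array instead of trusting tocopy alone. The same defensive pattern pulled out on its own (identifiers follow the hunk; this is a sketch, not a further change):

        unsigned copybytes = tocopy * sizeof(struct __user_cap_data_struct);

        /* Never copy more bytes than the kernel-side buffer can hold. */
        if (copybytes > sizeof(kdata))
                return -EFAULT;

        if (copy_from_user(&kdata, data, copybytes))
                return -EFAULT;
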
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ca83b73fba19..0249f4be9b5c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1710,14 +1710,13 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
1710 return -EFAULT; 1710 return -EFAULT;
1711 1711
1712 buffer[nbytes] = 0; /* nul-terminate */ 1712 buffer[nbytes] = 0; /* nul-terminate */
1713 strstrip(buffer);
1714 if (cft->write_u64) { 1713 if (cft->write_u64) {
1715 u64 val = simple_strtoull(buffer, &end, 0); 1714 u64 val = simple_strtoull(strstrip(buffer), &end, 0);
1716 if (*end) 1715 if (*end)
1717 return -EINVAL; 1716 return -EINVAL;
1718 retval = cft->write_u64(cgrp, cft, val); 1717 retval = cft->write_u64(cgrp, cft, val);
1719 } else { 1718 } else {
1720 s64 val = simple_strtoll(buffer, &end, 0); 1719 s64 val = simple_strtoll(strstrip(buffer), &end, 0);
1721 if (*end) 1720 if (*end)
1722 return -EINVAL; 1721 return -EINVAL;
1723 retval = cft->write_s64(cgrp, cft, val); 1722 retval = cft->write_s64(cgrp, cft, val);
@@ -1753,8 +1752,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
1753 } 1752 }
1754 1753
1755 buffer[nbytes] = 0; /* nul-terminate */ 1754 buffer[nbytes] = 0; /* nul-terminate */
1756 strstrip(buffer); 1755 retval = cft->write_string(cgrp, cft, strstrip(buffer));
1757 retval = cft->write_string(cgrp, cft, buffer);
1758 if (!retval) 1756 if (!retval)
1759 retval = nbytes; 1757 retval = nbytes;
1760out: 1758out:
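
Both hunks rely on strstrip() trimming trailing whitespace in place and returning a pointer to the first non-blank character, so the stripped string can feed the parser directly. A tiny stand-alone illustration (hypothetical buffer, not from this patch):

        char buf[] = "  42\n";
        char *end;
        u64 val;

        /* strstrip() turns "  42\n" into "42" and returns a pointer to the '4'. */
        val = simple_strtoull(strstrip(buf), &end, 0);
        /* val == 42 and *end == '\0' here. */
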
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b5cb469d2545..3cf2183b472d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -537,8 +537,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
537 * element of the partition (one sched domain) to be passed to 537 * element of the partition (one sched domain) to be passed to
538 * partition_sched_domains(). 538 * partition_sched_domains().
539 */ 539 */
540/* FIXME: see the FIXME in partition_sched_domains() */ 540static int generate_sched_domains(cpumask_var_t **domains,
541static int generate_sched_domains(struct cpumask **domains,
542 struct sched_domain_attr **attributes) 541 struct sched_domain_attr **attributes)
543{ 542{
544 LIST_HEAD(q); /* queue of cpusets to be scanned */ 543 LIST_HEAD(q); /* queue of cpusets to be scanned */
@@ -546,7 +545,7 @@ static int generate_sched_domains(struct cpumask **domains,
546 struct cpuset **csa; /* array of all cpuset ptrs */ 545 struct cpuset **csa; /* array of all cpuset ptrs */
547 int csn; /* how many cpuset ptrs in csa so far */ 546 int csn; /* how many cpuset ptrs in csa so far */
548 int i, j, k; /* indices for partition finding loops */ 547 int i, j, k; /* indices for partition finding loops */
549 struct cpumask *doms; /* resulting partition; i.e. sched domains */ 548 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
550 struct sched_domain_attr *dattr; /* attributes for custom domains */ 549 struct sched_domain_attr *dattr; /* attributes for custom domains */
551 int ndoms = 0; /* number of sched domains in result */ 550 int ndoms = 0; /* number of sched domains in result */
552 int nslot; /* next empty doms[] struct cpumask slot */ 551 int nslot; /* next empty doms[] struct cpumask slot */
@@ -557,7 +556,8 @@ static int generate_sched_domains(struct cpumask **domains,
557 556
558 /* Special case for the 99% of systems with one, full, sched domain */ 557 /* Special case for the 99% of systems with one, full, sched domain */
559 if (is_sched_load_balance(&top_cpuset)) { 558 if (is_sched_load_balance(&top_cpuset)) {
560 doms = kmalloc(cpumask_size(), GFP_KERNEL); 559 ndoms = 1;
560 doms = alloc_sched_domains(ndoms);
561 if (!doms) 561 if (!doms)
562 goto done; 562 goto done;
563 563
@@ -566,9 +566,8 @@ static int generate_sched_domains(struct cpumask **domains,
566 *dattr = SD_ATTR_INIT; 566 *dattr = SD_ATTR_INIT;
567 update_domain_attr_tree(dattr, &top_cpuset); 567 update_domain_attr_tree(dattr, &top_cpuset);
568 } 568 }
569 cpumask_copy(doms, top_cpuset.cpus_allowed); 569 cpumask_copy(doms[0], top_cpuset.cpus_allowed);
570 570
571 ndoms = 1;
572 goto done; 571 goto done;
573 } 572 }
574 573
@@ -636,7 +635,7 @@ restart:
636 * Now we know how many domains to create. 635 * Now we know how many domains to create.
637 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks. 636 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
638 */ 637 */
639 doms = kmalloc(ndoms * cpumask_size(), GFP_KERNEL); 638 doms = alloc_sched_domains(ndoms);
640 if (!doms) 639 if (!doms)
641 goto done; 640 goto done;
642 641
@@ -656,7 +655,7 @@ restart:
656 continue; 655 continue;
657 } 656 }
658 657
659 dp = doms + nslot; 658 dp = doms[nslot];
660 659
661 if (nslot == ndoms) { 660 if (nslot == ndoms) {
662 static int warnings = 10; 661 static int warnings = 10;
@@ -718,7 +717,7 @@ done:
718static void do_rebuild_sched_domains(struct work_struct *unused) 717static void do_rebuild_sched_domains(struct work_struct *unused)
719{ 718{
720 struct sched_domain_attr *attr; 719 struct sched_domain_attr *attr;
721 struct cpumask *doms; 720 cpumask_var_t *doms;
722 int ndoms; 721 int ndoms;
723 722
724 get_online_cpus(); 723 get_online_cpus();
@@ -2052,7 +2051,7 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
2052 unsigned long phase, void *unused_cpu) 2051 unsigned long phase, void *unused_cpu)
2053{ 2052{
2054 struct sched_domain_attr *attr; 2053 struct sched_domain_attr *attr;
2055 struct cpumask *doms; 2054 cpumask_var_t *doms;
2056 int ndoms; 2055 int ndoms;
2057 2056
2058 switch (phase) { 2057 switch (phase) {
@@ -2537,15 +2536,9 @@ const struct file_operations proc_cpuset_operations = {
2537}; 2536};
2538#endif /* CONFIG_PROC_PID_CPUSET */ 2537#endif /* CONFIG_PROC_PID_CPUSET */
2539 2538
2540/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ 2539/* Display task mems_allowed in /proc/<pid>/status file. */
2541void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 2540void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2542{ 2541{
2543 seq_printf(m, "Cpus_allowed:\t");
2544 seq_cpumask(m, &task->cpus_allowed);
2545 seq_printf(m, "\n");
2546 seq_printf(m, "Cpus_allowed_list:\t");
2547 seq_cpumask_list(m, &task->cpus_allowed);
2548 seq_printf(m, "\n");
2549 seq_printf(m, "Mems_allowed:\t"); 2542 seq_printf(m, "Mems_allowed:\t");
2550 seq_nodemask(m, &task->mems_allowed); 2543 seq_nodemask(m, &task->mems_allowed);
2551 seq_printf(m, "\n"); 2544 seq_printf(m, "\n");
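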
diff --git a/kernel/exit.c b/kernel/exit.c
index 5859f598c951..1143012951e9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -49,6 +49,7 @@
49#include <linux/init_task.h> 49#include <linux/init_task.h>
50#include <linux/perf_event.h> 50#include <linux/perf_event.h>
51#include <trace/events/sched.h> 51#include <trace/events/sched.h>
52#include <linux/hw_breakpoint.h>
52 53
53#include <asm/uaccess.h> 54#include <asm/uaccess.h>
54#include <asm/unistd.h> 55#include <asm/unistd.h>
@@ -110,9 +111,9 @@ static void __exit_signal(struct task_struct *tsk)
110 * We won't ever get here for the group leader, since it 111 * We won't ever get here for the group leader, since it
111 * will have been the last reference on the signal_struct. 112 * will have been the last reference on the signal_struct.
112 */ 113 */
113 sig->utime = cputime_add(sig->utime, task_utime(tsk)); 114 sig->utime = cputime_add(sig->utime, tsk->utime);
114 sig->stime = cputime_add(sig->stime, task_stime(tsk)); 115 sig->stime = cputime_add(sig->stime, tsk->stime);
115 sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); 116 sig->gtime = cputime_add(sig->gtime, tsk->gtime);
116 sig->min_flt += tsk->min_flt; 117 sig->min_flt += tsk->min_flt;
117 sig->maj_flt += tsk->maj_flt; 118 sig->maj_flt += tsk->maj_flt;
118 sig->nvcsw += tsk->nvcsw; 119 sig->nvcsw += tsk->nvcsw;
@@ -359,10 +360,8 @@ void __set_special_pids(struct pid *pid)
359{ 360{
360 struct task_struct *curr = current->group_leader; 361 struct task_struct *curr = current->group_leader;
361 362
362 if (task_session(curr) != pid) { 363 if (task_session(curr) != pid)
363 change_pid(curr, PIDTYPE_SID, pid); 364 change_pid(curr, PIDTYPE_SID, pid);
364 proc_sid_connector(curr);
365 }
366 365
367 if (task_pgrp(curr) != pid) 366 if (task_pgrp(curr) != pid)
368 change_pid(curr, PIDTYPE_PGID, pid); 367 change_pid(curr, PIDTYPE_PGID, pid);
@@ -980,6 +979,10 @@ NORET_TYPE void do_exit(long code)
980 proc_exit_connector(tsk); 979 proc_exit_connector(tsk);
981 980
982 /* 981 /*
982 * FIXME: do that only when needed, using sched_exit tracepoint
983 */
984 flush_ptrace_hw_breakpoint(tsk);
985 /*
983 * Flush inherited counters to the parent - before the parent 986 * Flush inherited counters to the parent - before the parent
984 * gets woken up by child-exit notifications. 987 * gets woken up by child-exit notifications.
985 */ 988 */
@@ -991,8 +994,6 @@ NORET_TYPE void do_exit(long code)
991 tsk->mempolicy = NULL; 994 tsk->mempolicy = NULL;
992#endif 995#endif
993#ifdef CONFIG_FUTEX 996#ifdef CONFIG_FUTEX
994 if (unlikely(!list_empty(&tsk->pi_state_list)))
995 exit_pi_state_list(tsk);
996 if (unlikely(current->pi_state_cache)) 997 if (unlikely(current->pi_state_cache))
997 kfree(current->pi_state_cache); 998 kfree(current->pi_state_cache);
998#endif 999#endif
@@ -1008,7 +1009,7 @@ NORET_TYPE void do_exit(long code)
1008 tsk->flags |= PF_EXITPIDONE; 1009 tsk->flags |= PF_EXITPIDONE;
1009 1010
1010 if (tsk->io_context) 1011 if (tsk->io_context)
1011 exit_io_context(); 1012 exit_io_context(tsk);
1012 1013
1013 if (tsk->splice_pipe) 1014 if (tsk->splice_pipe)
1014 __free_pipe_info(tsk->splice_pipe); 1015 __free_pipe_info(tsk->splice_pipe);
@@ -1209,6 +1210,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1209 struct signal_struct *psig; 1210 struct signal_struct *psig;
1210 struct signal_struct *sig; 1211 struct signal_struct *sig;
1211 unsigned long maxrss; 1212 unsigned long maxrss;
1213 cputime_t tgutime, tgstime;
1212 1214
1213 /* 1215 /*
1214 * The resource counters for the group leader are in its 1216 * The resource counters for the group leader are in its
@@ -1224,20 +1226,23 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1224 * need to protect the access to parent->signal fields, 1226 * need to protect the access to parent->signal fields,
1225 * as other threads in the parent group can be right 1227 * as other threads in the parent group can be right
1226 * here reaping other children at the same time. 1228 * here reaping other children at the same time.
1229 *
1230 * We use thread_group_times() to get times for the thread
1231 * group, which consolidates times for all threads in the
1232 * group including the group leader.
1227 */ 1233 */
1234 thread_group_times(p, &tgutime, &tgstime);
1228 spin_lock_irq(&p->real_parent->sighand->siglock); 1235 spin_lock_irq(&p->real_parent->sighand->siglock);
1229 psig = p->real_parent->signal; 1236 psig = p->real_parent->signal;
1230 sig = p->signal; 1237 sig = p->signal;
1231 psig->cutime = 1238 psig->cutime =
1232 cputime_add(psig->cutime, 1239 cputime_add(psig->cutime,
1233 cputime_add(p->utime, 1240 cputime_add(tgutime,
1234 cputime_add(sig->utime, 1241 sig->cutime));
1235 sig->cutime)));
1236 psig->cstime = 1242 psig->cstime =
1237 cputime_add(psig->cstime, 1243 cputime_add(psig->cstime,
1238 cputime_add(p->stime, 1244 cputime_add(tgstime,
1239 cputime_add(sig->stime, 1245 sig->cstime));
1240 sig->cstime)));
1241 psig->cgtime = 1246 psig->cgtime =
1242 cputime_add(psig->cgtime, 1247 cputime_add(psig->cgtime,
1243 cputime_add(p->gtime, 1248 cputime_add(p->gtime,
diff --git a/kernel/fork.c b/kernel/fork.c
index 266c6af6ef1b..1415dc4598ae 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -64,6 +64,7 @@
64#include <linux/magic.h> 64#include <linux/magic.h>
65#include <linux/perf_event.h> 65#include <linux/perf_event.h>
66#include <linux/posix-timers.h> 66#include <linux/posix-timers.h>
67#include <linux/user-return-notifier.h>
67 68
68#include <asm/pgtable.h> 69#include <asm/pgtable.h>
69#include <asm/pgalloc.h> 70#include <asm/pgalloc.h>
@@ -91,7 +92,7 @@ int nr_processes(void)
91 int cpu; 92 int cpu;
92 int total = 0; 93 int total = 0;
93 94
94 for_each_online_cpu(cpu) 95 for_each_possible_cpu(cpu)
95 total += per_cpu(process_counts, cpu); 96 total += per_cpu(process_counts, cpu);
96 97
97 return total; 98 return total;
@@ -249,6 +250,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
249 goto out; 250 goto out;
250 251
251 setup_thread_stack(tsk, orig); 252 setup_thread_stack(tsk, orig);
253 clear_user_return_notifier(tsk);
252 stackend = end_of_stack(tsk); 254 stackend = end_of_stack(tsk);
253 *stackend = STACK_END_MAGIC; /* for overflow detection */ 255 *stackend = STACK_END_MAGIC; /* for overflow detection */
254 256
@@ -570,12 +572,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
570 572
571 /* Get rid of any futexes when releasing the mm */ 573 /* Get rid of any futexes when releasing the mm */
572#ifdef CONFIG_FUTEX 574#ifdef CONFIG_FUTEX
573 if (unlikely(tsk->robust_list)) 575 if (unlikely(tsk->robust_list)) {
574 exit_robust_list(tsk); 576 exit_robust_list(tsk);
577 tsk->robust_list = NULL;
578 }
575#ifdef CONFIG_COMPAT 579#ifdef CONFIG_COMPAT
576 if (unlikely(tsk->compat_robust_list)) 580 if (unlikely(tsk->compat_robust_list)) {
577 compat_exit_robust_list(tsk); 581 compat_exit_robust_list(tsk);
582 tsk->compat_robust_list = NULL;
583 }
578#endif 584#endif
585 if (unlikely(!list_empty(&tsk->pi_state_list)))
586 exit_pi_state_list(tsk);
579#endif 587#endif
580 588
581 /* Get rid of any cached register state */ 589 /* Get rid of any cached register state */
@@ -878,6 +886,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
878 sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; 886 sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
879 sig->gtime = cputime_zero; 887 sig->gtime = cputime_zero;
880 sig->cgtime = cputime_zero; 888 sig->cgtime = cputime_zero;
889#ifndef CONFIG_VIRT_CPU_ACCOUNTING
890 sig->prev_utime = sig->prev_stime = cputime_zero;
891#endif
881 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; 892 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
882 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; 893 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
883 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; 894 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
@@ -1060,8 +1071,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1060 p->gtime = cputime_zero; 1071 p->gtime = cputime_zero;
1061 p->utimescaled = cputime_zero; 1072 p->utimescaled = cputime_zero;
1062 p->stimescaled = cputime_zero; 1073 p->stimescaled = cputime_zero;
1074#ifndef CONFIG_VIRT_CPU_ACCOUNTING
1063 p->prev_utime = cputime_zero; 1075 p->prev_utime = cputime_zero;
1064 p->prev_stime = cputime_zero; 1076 p->prev_stime = cputime_zero;
1077#endif
1065 1078
1066 p->default_timer_slack_ns = current->timer_slack_ns; 1079 p->default_timer_slack_ns = current->timer_slack_ns;
1067 1080
@@ -1304,7 +1317,8 @@ bad_fork_free_pid:
1304 if (pid != &init_struct_pid) 1317 if (pid != &init_struct_pid)
1305 free_pid(pid); 1318 free_pid(pid);
1306bad_fork_cleanup_io: 1319bad_fork_cleanup_io:
1307 put_io_context(p->io_context); 1320 if (p->io_context)
1321 exit_io_context(p);
1308bad_fork_cleanup_namespaces: 1322bad_fork_cleanup_namespaces:
1309 exit_task_namespaces(p); 1323 exit_task_namespaces(p);
1310bad_fork_cleanup_mm: 1324bad_fork_cleanup_mm:
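
nr_processes() now sums over every possible CPU rather than only the online ones: a task forked on a CPU that is later hot-unplugged still exists, but its contribution to process_counts stays on that offline CPU and would otherwise be lost from the total. The resulting per-CPU counter summation, assembled from the hunk above purely as a sketch:

        int nr_processes(void)
        {
                int cpu;
                int total = 0;

                /* Walk offline CPUs too: their per-cpu counts still matter. */
                for_each_possible_cpu(cpu)
                        total += per_cpu(process_counts, cpu);

                return total;
        }
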
diff --git a/kernel/futex.c b/kernel/futex.c
index b911adceb2c4..fb65e822fc41 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -150,7 +150,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
150 */ 150 */
151static inline int match_futex(union futex_key *key1, union futex_key *key2) 151static inline int match_futex(union futex_key *key1, union futex_key *key2)
152{ 152{
153 return (key1->both.word == key2->both.word 153 return (key1 && key2
154 && key1->both.word == key2->both.word
154 && key1->both.ptr == key2->both.ptr 155 && key1->both.ptr == key2->both.ptr
155 && key1->both.offset == key2->both.offset); 156 && key1->both.offset == key2->both.offset);
156} 157}
@@ -916,8 +917,8 @@ retry:
916 hb1 = hash_futex(&key1); 917 hb1 = hash_futex(&key1);
917 hb2 = hash_futex(&key2); 918 hb2 = hash_futex(&key2);
918 919
919 double_lock_hb(hb1, hb2);
920retry_private: 920retry_private:
921 double_lock_hb(hb1, hb2);
921 op_ret = futex_atomic_op_inuser(op, uaddr2); 922 op_ret = futex_atomic_op_inuser(op, uaddr2);
922 if (unlikely(op_ret < 0)) { 923 if (unlikely(op_ret < 0)) {
923 924
@@ -1028,7 +1029,6 @@ static inline
1028void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, 1029void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1029 struct futex_hash_bucket *hb) 1030 struct futex_hash_bucket *hb)
1030{ 1031{
1031 drop_futex_key_refs(&q->key);
1032 get_futex_key_refs(key); 1032 get_futex_key_refs(key);
1033 q->key = *key; 1033 q->key = *key;
1034 1034
@@ -1226,6 +1226,7 @@ retry_private:
1226 */ 1226 */
1227 if (ret == 1) { 1227 if (ret == 1) {
1228 WARN_ON(pi_state); 1228 WARN_ON(pi_state);
1229 drop_count++;
1229 task_count++; 1230 task_count++;
1230 ret = get_futex_value_locked(&curval2, uaddr2); 1231 ret = get_futex_value_locked(&curval2, uaddr2);
1231 if (!ret) 1232 if (!ret)
@@ -1304,6 +1305,7 @@ retry_private:
1304 if (ret == 1) { 1305 if (ret == 1) {
1305 /* We got the lock. */ 1306 /* We got the lock. */
1306 requeue_pi_wake_futex(this, &key2, hb2); 1307 requeue_pi_wake_futex(this, &key2, hb2);
1308 drop_count++;
1307 continue; 1309 continue;
1308 } else if (ret) { 1310 } else if (ret) {
1309 /* -EDEADLK */ 1311 /* -EDEADLK */
@@ -1791,6 +1793,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1791 current->timer_slack_ns); 1793 current->timer_slack_ns);
1792 } 1794 }
1793 1795
1796retry:
1794 /* Prepare to wait on uaddr. */ 1797 /* Prepare to wait on uaddr. */
1795 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); 1798 ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1796 if (ret) 1799 if (ret)
@@ -1808,9 +1811,14 @@ static int futex_wait(u32 __user *uaddr, int fshared,
1808 goto out_put_key; 1811 goto out_put_key;
1809 1812
1810 /* 1813 /*
1811 * We expect signal_pending(current), but another thread may 1814 * We expect signal_pending(current), but we might be the
1812 * have handled it for us already. 1815 * victim of a spurious wakeup as well.
1813 */ 1816 */
1817 if (!signal_pending(current)) {
1818 put_futex_key(fshared, &q.key);
1819 goto retry;
1820 }
1821
1814 ret = -ERESTARTSYS; 1822 ret = -ERESTARTSYS;
1815 if (!abs_time) 1823 if (!abs_time)
1816 goto out_put_key; 1824 goto out_put_key;
@@ -2117,11 +2125,12 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2117 * Unqueue the futex_q and determine which it was. 2125 * Unqueue the futex_q and determine which it was.
2118 */ 2126 */
2119 plist_del(&q->list, &q->list.plist); 2127 plist_del(&q->list, &q->list.plist);
2120 drop_futex_key_refs(&q->key);
2121 2128
2129 /* Handle spurious wakeups gracefully */
2130 ret = -EWOULDBLOCK;
2122 if (timeout && !timeout->task) 2131 if (timeout && !timeout->task)
2123 ret = -ETIMEDOUT; 2132 ret = -ETIMEDOUT;
2124 else 2133 else if (signal_pending(current))
2125 ret = -ERESTARTNOINTR; 2134 ret = -ERESTARTNOINTR;
2126 } 2135 }
2127 return ret; 2136 return ret;
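
The futex_wait() hunks add a retry loop for spurious wakeups: if the waiter comes back without a waker having unqueued it, without an expired timeout and without a pending signal, it drops its key reference and waits again. A condensed sketch of the resulting control flow; the calls not visible in the hunks above (futex_wait_queue_me(), unqueue_me(), the timeout check) are paraphrased from the surrounding function and should be read as an outline, not a quote:

retry:
        /* Read the futex value and queue ourselves on the hash bucket. */
        ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
        if (ret)
                goto out;

        /* Sleep until woken, timed out, or signalled. */
        futex_wait_queue_me(hb, &q, to);

        /* A real waker unqueued us: success. */
        ret = 0;
        if (!unqueue_me(&q))
                goto out_put_key;

        ret = -ETIMEDOUT;
        if (to && !to->task)
                goto out_put_key;

        /*
         * Not woken, not timed out, and no signal pending either:
         * spurious wakeup, so drop the key reference and wait again.
         */
        if (!signal_pending(current)) {
                put_futex_key(fshared, &q.key);
                goto retry;
        }

        ret = -ERESTARTSYS;
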
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c215b74cd953..ede527708123 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -726,8 +726,6 @@ static int hrtimer_switch_to_hres(void)
726 /* "Retrigger" the interrupt to get things going */ 726 /* "Retrigger" the interrupt to get things going */
727 retrigger_next_event(NULL); 727 retrigger_next_event(NULL);
728 local_irq_restore(flags); 728 local_irq_restore(flags);
729 printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
730 smp_processor_id());
731 return 1; 729 return 1;
732} 730}
733 731
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index d4e841747400..0c642d51aac2 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -144,7 +144,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
144 144
145 rcu_read_lock(); 145 rcu_read_lock();
146 do_each_thread(g, t) { 146 do_each_thread(g, t) {
147 if (!--max_count) 147 if (!max_count--)
148 goto unlock; 148 goto unlock;
149 if (!--batch_count) { 149 if (!--batch_count) {
150 batch_count = HUNG_TASK_BATCHING; 150 batch_count = HUNG_TASK_BATCHING;
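
The one-character hung_task change is an off-by-one fix: "!--max_count" decrements before testing and therefore stops one task short of the configured count, while "!max_count--" tests the old value first. A tiny illustration with made-up numbers:

        int count = 2, checked = 0;

        /*
         * With "if (!--count) break;" this loop checks 1 task.
         * With "if (!count--) break;" it checks exactly 2.
         */
        while (1) {
                if (!count--)
                        break;
                checked++;      /* stands in for "examine one task" */
        }
        /* checked == 2 here. */
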
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..cf5ee1628411
--- /dev/null
+++ b/kernel/hw_breakpoint.c
@@ -0,0 +1,423 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2007 Alan Stern
17 * Copyright (C) IBM Corporation, 2009
18 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
19 *
20 * Thanks to Ingo Molnar for his many suggestions.
21 *
22 * Authors: Alan Stern <stern@rowland.harvard.edu>
23 * K.Prasad <prasad@linux.vnet.ibm.com>
24 * Frederic Weisbecker <fweisbec@gmail.com>
25 */
26
27/*
28 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
29 * using the CPU's debug registers.
30 * This file contains the arch-independent routines.
31 */
32
33#include <linux/irqflags.h>
34#include <linux/kallsyms.h>
35#include <linux/notifier.h>
36#include <linux/kprobes.h>
37#include <linux/kdebug.h>
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/percpu.h>
41#include <linux/sched.h>
42#include <linux/init.h>
43#include <linux/smp.h>
44
45#include <linux/hw_breakpoint.h>
46
47/*
48 * Constraints data
49 */
50
51/* Number of pinned cpu breakpoints in a cpu */
52static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned);
53
54/* Number of pinned task breakpoints in a cpu */
55static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]);
56
57/* Number of non-pinned cpu/task breakpoints in a cpu */
58static DEFINE_PER_CPU(unsigned int, nr_bp_flexible);
59
60/* Gather the number of total pinned and un-pinned bp in a cpuset */
61struct bp_busy_slots {
62 unsigned int pinned;
63 unsigned int flexible;
64};
65
66/* Serialize accesses to the above constraints */
67static DEFINE_MUTEX(nr_bp_mutex);
68
69/*
70 * Report the maximum number of pinned breakpoints a task
71 * has on this cpu
72 */
73static unsigned int max_task_bp_pinned(int cpu)
74{
75 int i;
76 unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu);
77
78 for (i = HBP_NUM -1; i >= 0; i--) {
79 if (tsk_pinned[i] > 0)
80 return i + 1;
81 }
82
83 return 0;
84}
85
86/*
87 * Report the number of pinned/un-pinned breakpoints we have in
88 * a given cpu (cpu > -1) or in all of them (cpu = -1).
89 */
90static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu)
91{
92 if (cpu >= 0) {
93 slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu);
94 slots->pinned += max_task_bp_pinned(cpu);
95 slots->flexible = per_cpu(nr_bp_flexible, cpu);
96
97 return;
98 }
99
100 for_each_online_cpu(cpu) {
101 unsigned int nr;
102
103 nr = per_cpu(nr_cpu_bp_pinned, cpu);
104 nr += max_task_bp_pinned(cpu);
105
106 if (nr > slots->pinned)
107 slots->pinned = nr;
108
109 nr = per_cpu(nr_bp_flexible, cpu);
110
111 if (nr > slots->flexible)
112 slots->flexible = nr;
113 }
114}
115
116/*
117 * Add a pinned breakpoint for the given task in our constraint table
118 */
119static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable)
120{
121 int count = 0;
122 struct perf_event *bp;
123 struct perf_event_context *ctx = tsk->perf_event_ctxp;
124 unsigned int *tsk_pinned;
125 struct list_head *list;
126 unsigned long flags;
127
128 if (WARN_ONCE(!ctx, "No perf context for this task"))
129 return;
130
131 list = &ctx->event_list;
132
133 spin_lock_irqsave(&ctx->lock, flags);
134
135 /*
136 * The current breakpoint counter is not included in the list
137 * at the open() callback time
138 */
139 list_for_each_entry(bp, list, event_entry) {
140 if (bp->attr.type == PERF_TYPE_BREAKPOINT)
141 count++;
142 }
143
144 spin_unlock_irqrestore(&ctx->lock, flags);
145
146 if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list"))
147 return;
148
149 tsk_pinned = per_cpu(task_bp_pinned, cpu);
150 if (enable) {
151 tsk_pinned[count]++;
152 if (count > 0)
153 tsk_pinned[count-1]--;
154 } else {
155 tsk_pinned[count]--;
156 if (count > 0)
157 tsk_pinned[count-1]++;
158 }
159}
160
161/*
162 * Add/remove the given breakpoint in our constraint table
163 */
164static void toggle_bp_slot(struct perf_event *bp, bool enable)
165{
166 int cpu = bp->cpu;
167 struct task_struct *tsk = bp->ctx->task;
168
169 /* Pinned counter task profiling */
170 if (tsk) {
171 if (cpu >= 0) {
172 toggle_bp_task_slot(tsk, cpu, enable);
173 return;
174 }
175
176 for_each_online_cpu(cpu)
177 toggle_bp_task_slot(tsk, cpu, enable);
178 return;
179 }
180
181 /* Pinned counter cpu profiling */
182 if (enable)
183 per_cpu(nr_cpu_bp_pinned, bp->cpu)++;
184 else
185 per_cpu(nr_cpu_bp_pinned, bp->cpu)--;
186}
187
188/*
189 * Constraints to check before allowing this new breakpoint counter:
190 *
191 * == Non-pinned counter == (Considered as pinned for now)
192 *
193 * - If attached to a single cpu, check:
194 *
195 * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
196 * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM
197 *
198 * -> If there are already non-pinned counters in this cpu, it means
199 * there is already a free slot for them.
200 * Otherwise, we check that the maximum number of per task
201 * breakpoints (for this cpu) plus the number of per cpu breakpoints
202 * (for this cpu) doesn't cover every register.
203 *
204 * - If attached to every cpus, check:
205 *
206 * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
207 * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM
208 *
209 * -> This is roughly the same, except we check the number of per cpu
210 * bp for every cpu and we keep the max one. Same for the per tasks
211 * breakpoints.
212 *
213 *
214 * == Pinned counter ==
215 *
216 * - If attached to a single cpu, check:
217 *
218 * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
219 * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM
220 *
221 * -> Same checks as before. But now the nr_bp_flexible, if any, must keep
222 * one register at least (or they will never be fed).
223 *
224 * - If attached to every cpus, check:
225 *
226 * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
227 * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM
228 */
229int reserve_bp_slot(struct perf_event *bp)
230{
231 struct bp_busy_slots slots = {0};
232 int ret = 0;
233
234 mutex_lock(&nr_bp_mutex);
235
236 fetch_bp_busy_slots(&slots, bp->cpu);
237
238 /* Flexible counters need to keep at least one slot */
239 if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
240 ret = -ENOSPC;
241 goto end;
242 }
243
244 toggle_bp_slot(bp, true);
245
246end:
247 mutex_unlock(&nr_bp_mutex);
248
249 return ret;
250}
251
252void release_bp_slot(struct perf_event *bp)
253{
254 mutex_lock(&nr_bp_mutex);
255
256 toggle_bp_slot(bp, false);
257
258 mutex_unlock(&nr_bp_mutex);
259}
260
261
262int __register_perf_hw_breakpoint(struct perf_event *bp)
263{
264 int ret;
265
266 ret = reserve_bp_slot(bp);
267 if (ret)
268 return ret;
269
270 /*
271 * Ptrace breakpoints can be temporary perf events only
272 * meant to reserve a slot. In this case, it is created disabled and
273 * we don't want to check the params right now (as we put a null addr)
274 * But perf tools create events as disabled and we want to check
275 * the params for them.
276 * This is a quick hack that will be removed soon, once we remove
277 * the tmp breakpoints from ptrace
278 */
279 if (!bp->attr.disabled || bp->callback == perf_bp_event)
280 ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
281
282 return ret;
283}
284
285int register_perf_hw_breakpoint(struct perf_event *bp)
286{
287 bp->callback = perf_bp_event;
288
289 return __register_perf_hw_breakpoint(bp);
290}
291
292/**
293 * register_user_hw_breakpoint - register a hardware breakpoint for user space
294 * @attr: breakpoint attributes
295 * @triggered: callback to trigger when we hit the breakpoint
296 * @tsk: pointer to 'task_struct' of the process to which the address belongs
297 */
298struct perf_event *
299register_user_hw_breakpoint(struct perf_event_attr *attr,
300 perf_callback_t triggered,
301 struct task_struct *tsk)
302{
303 return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
304}
305EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
306
307/**
308 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
309 * @bp: the breakpoint structure to modify
310 * @attr: new breakpoint attributes
311 * @triggered: callback to trigger when we hit the breakpoint
312 * @tsk: pointer to 'task_struct' of the process to which the address belongs
313 */
314struct perf_event *
315modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr,
316 perf_callback_t triggered,
317 struct task_struct *tsk)
318{
319 /*
320 * FIXME: do it without unregistering
321 * - We don't want to lose our slot
322 * - If the new bp is incorrect, don't lose the older one
323 */
324 unregister_hw_breakpoint(bp);
325
326 return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
327}
328EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
329
330/**
331 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
332 * @bp: the breakpoint structure to unregister
333 */
334void unregister_hw_breakpoint(struct perf_event *bp)
335{
336 if (!bp)
337 return;
338 perf_event_release_kernel(bp);
339}
340EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
341
342/**
343 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
344 * @attr: breakpoint attributes
345 * @triggered: callback to trigger when we hit the breakpoint
346 *
347 * @return a set of per_cpu pointers to perf events
348 */
349struct perf_event **
350register_wide_hw_breakpoint(struct perf_event_attr *attr,
351 perf_callback_t triggered)
352{
353 struct perf_event **cpu_events, **pevent, *bp;
354 long err;
355 int cpu;
356
357 cpu_events = alloc_percpu(typeof(*cpu_events));
358 if (!cpu_events)
359 return ERR_PTR(-ENOMEM);
360
361 for_each_possible_cpu(cpu) {
362 pevent = per_cpu_ptr(cpu_events, cpu);
363 bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);
364
365 *pevent = bp;
366
367 if (IS_ERR(bp)) {
368 err = PTR_ERR(bp);
369 goto fail;
370 }
371 }
372
373 return cpu_events;
374
375fail:
376 for_each_possible_cpu(cpu) {
377 pevent = per_cpu_ptr(cpu_events, cpu);
378 if (IS_ERR(*pevent))
379 break;
380 unregister_hw_breakpoint(*pevent);
381 }
382 free_percpu(cpu_events);
383 /* return the error if any */
384 return ERR_PTR(err);
385}
386EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
387
388/**
389 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
390 * @cpu_events: the per cpu set of events to unregister
391 */
392void unregister_wide_hw_breakpoint(struct perf_event **cpu_events)
393{
394 int cpu;
395 struct perf_event **pevent;
396
397 for_each_possible_cpu(cpu) {
398 pevent = per_cpu_ptr(cpu_events, cpu);
399 unregister_hw_breakpoint(*pevent);
400 }
401 free_percpu(cpu_events);
402}
403EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
404
405static struct notifier_block hw_breakpoint_exceptions_nb = {
406 .notifier_call = hw_breakpoint_exceptions_notify,
407 /* we need to be notified first */
408 .priority = 0x7fffffff
409};
410
411static int __init init_hw_breakpoint(void)
412{
413 return register_die_notifier(&hw_breakpoint_exceptions_nb);
414}
415core_initcall(init_hw_breakpoint);
416
417
418struct pmu perf_ops_bp = {
419 .enable = arch_install_hw_breakpoint,
420 .disable = arch_uninstall_hw_breakpoint,
421 .read = hw_breakpoint_pmu_read,
422 .unthrottle = hw_breakpoint_pmu_unthrottle
423};
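
register_wide_hw_breakpoint() and its unregister counterpart give kernel code a breakpoint slot on every possible CPU. A hedged usage sketch follows; hw_breakpoint_init(), the bp_addr/bp_len/bp_type attribute fields and the perf_callback_t handler signature come from the headers accompanying this series (include/linux/hw_breakpoint.h, include/linux/perf_event.h), not from this file, so treat those details as assumptions:

        static struct perf_event **wp;

        /* Assumed perf_callback_t shape: runs on every breakpoint hit. */
        static void sample_wp_handler(struct perf_event *bp, void *data)
        {
                printk(KERN_INFO "watched symbol was written to\n");
        }

        static int __init sample_wp_init(void)
        {
                struct perf_event_attr attr;

                hw_breakpoint_init(&attr);                       /* assumed helper */
                attr.bp_addr = kallsyms_lookup_name("pid_max");  /* example target */
                attr.bp_len  = HW_BREAKPOINT_LEN_4;
                attr.bp_type = HW_BREAKPOINT_W;                  /* break on write */

                wp = register_wide_hw_breakpoint(&attr, sample_wp_handler);
                if (IS_ERR(wp))
                        return PTR_ERR(wp);
                return 0;
        }

        static void __exit sample_wp_exit(void)
        {
                unregister_wide_hw_breakpoint(wp);
        }
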
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c1660194d115..ba566c261adc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -166,11 +166,11 @@ int set_irq_data(unsigned int irq, void *data)
166EXPORT_SYMBOL(set_irq_data); 166EXPORT_SYMBOL(set_irq_data);
167 167
168/** 168/**
169 * set_irq_data - set irq type data for an irq 169 * set_irq_msi - set MSI descriptor data for an irq
170 * @irq: Interrupt number 170 * @irq: Interrupt number
171 * @entry: Pointer to MSI descriptor data 171 * @entry: Pointer to MSI descriptor data
172 * 172 *
173 * Set the hardware irq controller data for an irq 173 * Set the MSI descriptor entry for an irq
174 */ 174 */
175int set_irq_msi(unsigned int irq, struct msi_desc *entry) 175int set_irq_msi(unsigned int irq, struct msi_desc *entry)
176{ 176{
@@ -590,7 +590,7 @@ out_unlock:
590} 590}
591 591
592/** 592/**
593 * handle_percpu_IRQ - Per CPU local irq handler 593 * handle_percpu_irq - Per CPU local irq handler
594 * @irq: the interrupt number 594 * @irq: the interrupt number
595 * @desc: the interrupt description structure for this irq 595 * @desc: the interrupt description structure for this irq
596 * 596 *
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a81cf80554db..17c71bb565c6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <linux/sched.h>
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <linux/random.h> 17#include <linux/random.h>
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 692363dd591f..0832145fea97 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -136,7 +136,7 @@ out:
136 136
137static int default_affinity_open(struct inode *inode, struct file *file) 137static int default_affinity_open(struct inode *inode, struct file *file)
138{ 138{
139 return single_open(file, default_affinity_show, NULL); 139 return single_open(file, default_affinity_show, PDE(inode)->data);
140} 140}
141 141
142static const struct file_operations default_affinity_proc_fops = { 142static const struct file_operations default_affinity_proc_fops = {
@@ -148,18 +148,28 @@ static const struct file_operations default_affinity_proc_fops = {
148}; 148};
149#endif 149#endif
150 150
151static int irq_spurious_read(char *page, char **start, off_t off, 151static int irq_spurious_proc_show(struct seq_file *m, void *v)
152 int count, int *eof, void *data)
153{ 152{
154 struct irq_desc *desc = irq_to_desc((long) data); 153 struct irq_desc *desc = irq_to_desc((long) m->private);
155 return sprintf(page, "count %u\n" 154
156 "unhandled %u\n" 155 seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
157 "last_unhandled %u ms\n", 156 desc->irq_count, desc->irqs_unhandled,
158 desc->irq_count, 157 jiffies_to_msecs(desc->last_unhandled));
159 desc->irqs_unhandled, 158 return 0;
160 jiffies_to_msecs(desc->last_unhandled)); 159}
160
161static int irq_spurious_proc_open(struct inode *inode, struct file *file)
162{
163 return single_open(file, irq_spurious_proc_show, NULL);
161} 164}
162 165
166static const struct file_operations irq_spurious_proc_fops = {
167 .open = irq_spurious_proc_open,
168 .read = seq_read,
169 .llseek = seq_lseek,
170 .release = single_release,
171};
172
163#define MAX_NAMELEN 128 173#define MAX_NAMELEN 128
164 174
165static int name_unique(unsigned int irq, struct irqaction *new_action) 175static int name_unique(unsigned int irq, struct irqaction *new_action)
@@ -204,7 +214,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
204void register_irq_proc(unsigned int irq, struct irq_desc *desc) 214void register_irq_proc(unsigned int irq, struct irq_desc *desc)
205{ 215{
206 char name [MAX_NAMELEN]; 216 char name [MAX_NAMELEN];
207 struct proc_dir_entry *entry;
208 217
209 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) 218 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
210 return; 219 return;
@@ -214,6 +223,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
214 223
215 /* create /proc/irq/1234 */ 224 /* create /proc/irq/1234 */
216 desc->dir = proc_mkdir(name, root_irq_dir); 225 desc->dir = proc_mkdir(name, root_irq_dir);
226 if (!desc->dir)
227 return;
217 228
218#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
219 /* create /proc/irq/<irq>/smp_affinity */ 230 /* create /proc/irq/<irq>/smp_affinity */
@@ -221,11 +232,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
221 &irq_affinity_proc_fops, (void *)(long)irq); 232 &irq_affinity_proc_fops, (void *)(long)irq);
222#endif 233#endif
223 234
224 entry = create_proc_entry("spurious", 0444, desc->dir); 235 proc_create_data("spurious", 0444, desc->dir,
225 if (entry) { 236 &irq_spurious_proc_fops, (void *)(long)irq);
226 entry->data = (void *)(long)irq;
227 entry->read_proc = irq_spurious_read;
228 }
229} 237}
230 238
231#undef MAX_NAMELEN 239#undef MAX_NAMELEN
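
This hunk converts the old create_proc_entry()/read_proc interface to the seq_file single_open() pattern, passing the irq number through proc_create_data(). The same recipe applies to any small read-only /proc file; a generic sketch (the foo_* names and the "value" payload are illustrative, and <linux/proc_fs.h> plus <linux/seq_file.h> are assumed included):

        static int foo_proc_show(struct seq_file *m, void *v)
        {
                /* m->private carries the data pointer given at creation time. */
                seq_printf(m, "value %ld\n", (long)m->private);
                return 0;
        }

        static int foo_proc_open(struct inode *inode, struct file *file)
        {
                return single_open(file, foo_proc_show, PDE(inode)->data);
        }

        static const struct file_operations foo_proc_fops = {
                .open           = foo_proc_open,
                .read           = seq_read,
                .llseek         = seq_lseek,
                .release        = single_release,
        };

        /* Registration, e.g. from an init path: */
        proc_create_data("foo", 0444, parent_dir, &foo_proc_fops, (void *)42L);
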
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..22b0a6eedf24 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
104 return ok; 104 return ok;
105} 105}
106 106
107static void poll_all_shared_irqs(void) 107static void poll_spurious_irqs(unsigned long dummy)
108{ 108{
109 struct irq_desc *desc; 109 struct irq_desc *desc;
110 int i; 110 int i;
@@ -121,25 +121,15 @@ static void poll_all_shared_irqs(void)
121 if (!(status & IRQ_SPURIOUS_DISABLED)) 121 if (!(status & IRQ_SPURIOUS_DISABLED))
122 continue; 122 continue;
123 123
124 local_irq_disable();
124 try_one_irq(i, desc); 125 try_one_irq(i, desc);
126 local_irq_enable();
125 } 127 }
126}
127
128static void poll_spurious_irqs(unsigned long dummy)
129{
130 poll_all_shared_irqs();
131 128
132 mod_timer(&poll_spurious_irq_timer, 129 mod_timer(&poll_spurious_irq_timer,
133 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 130 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
134} 131}
135 132
136#ifdef CONFIG_DEBUG_SHIRQ
137void debug_poll_all_shared_irqs(void)
138{
139 poll_all_shared_irqs();
140}
141#endif
142
143/* 133/*
144 * If 99,900 of the previous 100,000 interrupts have not been handled 134 * If 99,900 of the previous 100,000 interrupts have not been handled
145 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 135 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 8b6b8b697c68..8e5288a8a355 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -181,6 +181,7 @@ unsigned long kallsyms_lookup_name(const char *name)
181 } 181 }
182 return module_kallsyms_lookup_name(name); 182 return module_kallsyms_lookup_name(name);
183} 183}
184EXPORT_SYMBOL_GPL(kallsyms_lookup_name);
184 185
185int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, 186int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
186 unsigned long), 187 unsigned long),
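
Exporting kallsyms_lookup_name() lets modular code, presumably including the new hardware-breakpoint users elsewhere in this series, resolve a symbol name to an address at run time. Minimal usage sketch (the symbol name is only an example):

        unsigned long addr;

        addr = kallsyms_lookup_name("pid_max");
        if (!addr)
                return -ENOENT;         /* not in this kernel's symbol table */
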
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 9147a3190c9d..7d7014634022 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -870,7 +870,7 @@ static void gdb_cmd_getregs(struct kgdb_state *ks)
870 870
871 /* 871 /*
872 * All threads that don't have debuggerinfo should be 872 * All threads that don't have debuggerinfo should be
873 * in __schedule() sleeping, since all other CPUs 873 * in schedule() sleeping, since all other CPUs
874 * are in kgdb_wait, and thus have debuggerinfo. 874 * are in kgdb_wait, and thus have debuggerinfo.
875 */ 875 */
876 if (local_debuggerinfo) { 876 if (local_debuggerinfo) {
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 9fcb53a11f87..25b103190364 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -80,16 +80,16 @@ int __request_module(bool wait, const char *fmt, ...)
80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */ 80#define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
81 static int kmod_loop_msg; 81 static int kmod_loop_msg;
82 82
83 ret = security_kernel_module_request();
84 if (ret)
85 return ret;
86
87 va_start(args, fmt); 83 va_start(args, fmt);
88 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args); 84 ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
89 va_end(args); 85 va_end(args);
90 if (ret >= MODULE_NAME_LEN) 86 if (ret >= MODULE_NAME_LEN)
91 return -ENAMETOOLONG; 87 return -ENAMETOOLONG;
92 88
89 ret = security_kernel_module_request(module_name);
90 if (ret)
91 return ret;
92
93 /* If modprobe needs a service that is in a module, we get a recursive 93 /* If modprobe needs a service that is in a module, we get a recursive
94 * loop. Limit the number of running kmod threads to max_threads/2 or 94 * loop. Limit the number of running kmod threads to max_threads/2 or
95 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method 95 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 5240d75f4c60..e5342a344c43 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -90,6 +90,9 @@ static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
90 */ 90 */
91static struct kprobe_blackpoint kprobe_blacklist[] = { 91static struct kprobe_blackpoint kprobe_blacklist[] = {
92 {"preempt_schedule",}, 92 {"preempt_schedule",},
93 {"native_get_debugreg",},
94 {"irq_entries_start",},
95 {"common_interrupt",},
93 {NULL} /* Terminator */ 96 {NULL} /* Terminator */
94}; 97};
95 98
@@ -673,6 +676,40 @@ static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
673 return (kprobe_opcode_t *)(((char *)addr) + p->offset); 676 return (kprobe_opcode_t *)(((char *)addr) + p->offset);
674} 677}
675 678
679/* Check passed kprobe is valid and return kprobe in kprobe_table. */
680static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
681{
682 struct kprobe *old_p, *list_p;
683
684 old_p = get_kprobe(p->addr);
685 if (unlikely(!old_p))
686 return NULL;
687
688 if (p != old_p) {
689 list_for_each_entry_rcu(list_p, &old_p->list, list)
690 if (list_p == p)
691 /* kprobe p is a valid probe */
692 goto valid;
693 return NULL;
694 }
695valid:
696 return old_p;
697}
698
699/* Return error if the kprobe is being re-registered */
700static inline int check_kprobe_rereg(struct kprobe *p)
701{
702 int ret = 0;
703 struct kprobe *old_p;
704
705 mutex_lock(&kprobe_mutex);
706 old_p = __get_valid_kprobe(p);
707 if (old_p)
708 ret = -EINVAL;
709 mutex_unlock(&kprobe_mutex);
710 return ret;
711}
712
676int __kprobes register_kprobe(struct kprobe *p) 713int __kprobes register_kprobe(struct kprobe *p)
677{ 714{
678 int ret = 0; 715 int ret = 0;
@@ -685,6 +722,10 @@ int __kprobes register_kprobe(struct kprobe *p)
685 return -EINVAL; 722 return -EINVAL;
686 p->addr = addr; 723 p->addr = addr;
687 724
725 ret = check_kprobe_rereg(p);
726 if (ret)
727 return ret;
728
688 preempt_disable(); 729 preempt_disable();
689 if (!kernel_text_address((unsigned long) p->addr) || 730 if (!kernel_text_address((unsigned long) p->addr) ||
690 in_kprobes_functions((unsigned long) p->addr)) { 731 in_kprobes_functions((unsigned long) p->addr)) {
@@ -754,26 +795,6 @@ out:
754} 795}
755EXPORT_SYMBOL_GPL(register_kprobe); 796EXPORT_SYMBOL_GPL(register_kprobe);
756 797
757/* Check passed kprobe is valid and return kprobe in kprobe_table. */
758static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
759{
760 struct kprobe *old_p, *list_p;
761
762 old_p = get_kprobe(p->addr);
763 if (unlikely(!old_p))
764 return NULL;
765
766 if (p != old_p) {
767 list_for_each_entry_rcu(list_p, &old_p->list, list)
768 if (list_p == p)
769 /* kprobe p is a valid probe */
770 goto valid;
771 return NULL;
772 }
773valid:
774 return old_p;
775}
776
777/* 798/*
778 * Unregister a kprobe without a scheduler synchronization. 799 * Unregister a kprobe without a scheduler synchronization.
779 */ 800 */
@@ -1014,9 +1035,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
1014 /* Pre-allocate memory for max kretprobe instances */ 1035 /* Pre-allocate memory for max kretprobe instances */
1015 if (rp->maxactive <= 0) { 1036 if (rp->maxactive <= 0) {
1016#ifdef CONFIG_PREEMPT 1037#ifdef CONFIG_PREEMPT
1017 rp->maxactive = max(10, 2 * NR_CPUS); 1038 rp->maxactive = max(10, 2 * num_possible_cpus());
1018#else 1039#else
1019 rp->maxactive = NR_CPUS; 1040 rp->maxactive = num_possible_cpus();
1020#endif 1041#endif
1021 } 1042 }
1022 spin_lock_init(&rp->lock); 1043 spin_lock_init(&rp->lock);
@@ -1141,6 +1162,13 @@ static void __kprobes kill_kprobe(struct kprobe *p)
1141 arch_remove_kprobe(p); 1162 arch_remove_kprobe(p);
1142} 1163}
1143 1164
1165void __kprobes dump_kprobe(struct kprobe *kp)
1166{
1167 printk(KERN_WARNING "Dumping kprobe:\n");
1168 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
1169 kp->symbol_name, kp->addr, kp->offset);
1170}
1171
1144/* Module notifier call back, checking kprobes on the module */ 1172/* Module notifier call back, checking kprobes on the module */
1145static int __kprobes kprobes_module_callback(struct notifier_block *nb, 1173static int __kprobes kprobes_module_callback(struct notifier_block *nb,
1146 unsigned long val, void *data) 1174 unsigned long val, void *data)
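The kprobes.c changes above blacklist a few more low-level entry points, make register_kprobe() reject an already-registered probe via check_kprobe_rereg(), size kretprobe instances from num_possible_cpus() instead of NR_CPUS, and add dump_kprobe(). A minimal sketch of the caller-visible effect of the re-registration check; the probe target and module boilerplate are illustrative only:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static struct kprobe example_probe = {
	.symbol_name = "do_fork",	/* illustrative target, not mandated by the patch */
};

static int __init example_init(void)
{
	int ret = register_kprobe(&example_probe);

	if (ret)
		return ret;

	/* Registering the same kprobe object twice is now caught early by
	 * check_kprobe_rereg() and fails instead of corrupting the hash lists. */
	ret = register_kprobe(&example_probe);
	WARN_ON(ret != -EINVAL);

	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");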
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
150EXPORT_SYMBOL(kthread_create); 150EXPORT_SYMBOL(kthread_create);
151 151
152/** 152/**
153 * kthread_bind - bind a just-created kthread to a cpu.
154 * @k: thread created by kthread_create().
155 * @cpu: cpu (might not be online, must be possible) for @k to run on.
156 *
157 * Description: This function is equivalent to set_cpus_allowed(),
158 * except that @cpu doesn't need to be online, and the thread must be
159 * stopped (i.e., just returned from kthread_create()).
160 */
161void kthread_bind(struct task_struct *k, unsigned int cpu)
162{
163 /* Must have done schedule() in kthread() before we set_task_cpu */
164 if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
165 WARN_ON(1);
166 return;
167 }
168 set_task_cpu(k, cpu);
169 k->cpus_allowed = cpumask_of_cpu(cpu);
170 k->rt.nr_cpus_allowed = 1;
171 k->flags |= PF_THREAD_BOUND;
172}
173EXPORT_SYMBOL(kthread_bind);
174
175/**
176 * kthread_stop - stop a thread created by kthread_create(). 153 * kthread_stop - stop a thread created by kthread_create().
177 * @k: thread created by kthread_create(). 154 * @k: thread created by kthread_create().
178 * 155 *
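The hunk above only removes the kthread_bind() definition from kthread.c; the helper appears to be relocated into the scheduler code elsewhere in this series, so the caller-side API is unchanged. A short usage sketch of that unchanged pattern, with illustrative names: create the thread, bind it to a CPU while it is still stopped, then wake it.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_worker(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *start_pinned_worker(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = kthread_create(example_worker, NULL, "example/%u", cpu);
	if (!IS_ERR(tsk)) {
		kthread_bind(tsk, cpu);		/* must precede the first wake-up */
		wake_up_process(tsk);
	}
	return tsk;
}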
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..f5dcd36d3151 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -49,7 +49,7 @@
49#include "lockdep_internals.h" 49#include "lockdep_internals.h"
50 50
51#define CREATE_TRACE_POINTS 51#define CREATE_TRACE_POINTS
52#include <trace/events/lockdep.h> 52#include <trace/events/lock.h>
53 53
54#ifdef CONFIG_PROVE_LOCKING 54#ifdef CONFIG_PROVE_LOCKING
55int prove_locking = 1; 55int prove_locking = 1;
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
142#ifdef CONFIG_LOCK_STAT 142#ifdef CONFIG_LOCK_STAT
143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats); 143static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
144 144
145static inline u64 lockstat_clock(void)
146{
147 return cpu_clock(smp_processor_id());
148}
149
145static int lock_point(unsigned long points[], unsigned long ip) 150static int lock_point(unsigned long points[], unsigned long ip)
146{ 151{
147 int i; 152 int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
158 return i; 163 return i;
159} 164}
160 165
161static void lock_time_inc(struct lock_time *lt, s64 time) 166static void lock_time_inc(struct lock_time *lt, u64 time)
162{ 167{
163 if (time > lt->max) 168 if (time > lt->max)
164 lt->max = time; 169 lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
234static void lock_release_holdtime(struct held_lock *hlock) 239static void lock_release_holdtime(struct held_lock *hlock)
235{ 240{
236 struct lock_class_stats *stats; 241 struct lock_class_stats *stats;
237 s64 holdtime; 242 u64 holdtime;
238 243
239 if (!lock_stat) 244 if (!lock_stat)
240 return; 245 return;
241 246
242 holdtime = sched_clock() - hlock->holdtime_stamp; 247 holdtime = lockstat_clock() - hlock->holdtime_stamp;
243 248
244 stats = get_lock_stats(hlock_class(hlock)); 249 stats = get_lock_stats(hlock_class(hlock));
245 if (hlock->read) 250 if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2792 hlock->references = references; 2797 hlock->references = references;
2793#ifdef CONFIG_LOCK_STAT 2798#ifdef CONFIG_LOCK_STAT
2794 hlock->waittime_stamp = 0; 2799 hlock->waittime_stamp = 0;
2795 hlock->holdtime_stamp = sched_clock(); 2800 hlock->holdtime_stamp = lockstat_clock();
2796#endif 2801#endif
2797 2802
2798 if (check == 2 && !mark_irqflags(curr, hlock)) 2803 if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
3322 if (hlock->instance != lock) 3327 if (hlock->instance != lock)
3323 return; 3328 return;
3324 3329
3325 hlock->waittime_stamp = sched_clock(); 3330 hlock->waittime_stamp = lockstat_clock();
3326 3331
3327 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 3332 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3328 contending_point = lock_point(hlock_class(hlock)->contending_point, 3333 contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3345 struct held_lock *hlock, *prev_hlock; 3350 struct held_lock *hlock, *prev_hlock;
3346 struct lock_class_stats *stats; 3351 struct lock_class_stats *stats;
3347 unsigned int depth; 3352 unsigned int depth;
3348 u64 now; 3353 u64 now, waittime = 0;
3349 s64 waittime = 0;
3350 int i, cpu; 3354 int i, cpu;
3351 3355
3352 depth = curr->lockdep_depth; 3356 depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
3374 3378
3375 cpu = smp_processor_id(); 3379 cpu = smp_processor_id();
3376 if (hlock->waittime_stamp) { 3380 if (hlock->waittime_stamp) {
3377 now = sched_clock(); 3381 now = lockstat_clock();
3378 waittime = now - hlock->waittime_stamp; 3382 waittime = now - hlock->waittime_stamp;
3379 hlock->holdtime_stamp = now; 3383 hlock->holdtime_stamp = now;
3380 } 3384 }
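The lockdep hunks above stop calling sched_clock() directly for lock statistics and funnel every hold/wait timestamp through a per-CPU lockstat_clock(), switching the arithmetic from s64 to u64. A hedged sketch of that measurement pattern with illustrative names, assuming cpu_clock() as the time source:

#include <linux/sched.h>
#include <linux/smp.h>

static u64 example_stat_clock(void)
{
	return cpu_clock(smp_processor_id());
}

static void example_account(u64 *stamp, u64 *total)
{
	u64 now = example_stat_clock();

	*total += now - *stamp;		/* unsigned delta, no negative times */
	*stamp = now;
}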
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..5842a71cf052 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1187,7 +1187,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1187 1187
1188 /* Count loaded sections and allocate structures */ 1188 /* Count loaded sections and allocate structures */
1189 for (i = 0; i < nsect; i++) 1189 for (i = 0; i < nsect; i++)
1190 if (sechdrs[i].sh_flags & SHF_ALLOC) 1190 if (sechdrs[i].sh_flags & SHF_ALLOC
1191 && sechdrs[i].sh_size)
1191 nloaded++; 1192 nloaded++;
1192 size[0] = ALIGN(sizeof(*sect_attrs) 1193 size[0] = ALIGN(sizeof(*sect_attrs)
1193 + nloaded * sizeof(sect_attrs->attrs[0]), 1194 + nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1207,6 +1208,8 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
1207 for (i = 0; i < nsect; i++) { 1208 for (i = 0; i < nsect; i++) {
1208 if (! (sechdrs[i].sh_flags & SHF_ALLOC)) 1209 if (! (sechdrs[i].sh_flags & SHF_ALLOC))
1209 continue; 1210 continue;
1211 if (!sechdrs[i].sh_size)
1212 continue;
1210 sattr->address = sechdrs[i].sh_addr; 1213 sattr->address = sechdrs[i].sh_addr;
1211 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name, 1214 sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
1212 GFP_KERNEL); 1215 GFP_KERNEL);
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 50d022e5a560..ec815a960b5d 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/poison.h> 18#include <linux/poison.h>
19#include <linux/sched.h>
19#include <linux/spinlock.h> 20#include <linux/spinlock.h>
20#include <linux/kallsyms.h> 21#include <linux/kallsyms.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 947b3ad551f8..632f04c57d82 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -148,8 +148,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
148 148
149 preempt_disable(); 149 preempt_disable();
150 mutex_acquire(&lock->dep_map, subclass, 0, ip); 150 mutex_acquire(&lock->dep_map, subclass, 0, ip);
151#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \ 151
152 !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES) 152#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
153 /* 153 /*
154 * Optimistic spinning. 154 * Optimistic spinning.
155 * 155 *
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 61d5aa5eced3..acd24e7643eb 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
558 558
559static ATOMIC_NOTIFIER_HEAD(die_chain); 559static ATOMIC_NOTIFIER_HEAD(die_chain);
560 560
561int notrace notify_die(enum die_val val, const char *str, 561int notrace __kprobes notify_die(enum die_val val, const char *str,
562 struct pt_regs *regs, long err, int trap, int sig) 562 struct pt_regs *regs, long err, int trap, int sig)
563{ 563{
564 struct die_args args = { 564 struct die_args args = {
diff --git a/kernel/panic.c b/kernel/panic.c
index bcdef26e3332..96b45d0b4ba5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -90,6 +90,8 @@ NORET_TYPE void panic(const char * fmt, ...)
90 90
91 atomic_notifier_call_chain(&panic_notifier_list, 0, buf); 91 atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
92 92
93 bust_spinlocks(0);
94
93 if (!panic_blink) 95 if (!panic_blink)
94 panic_blink = no_blink; 96 panic_blink = no_blink;
95 97
@@ -136,7 +138,6 @@ NORET_TYPE void panic(const char * fmt, ...)
136 mdelay(1); 138 mdelay(1);
137 i++; 139 i++;
138 } 140 }
139 bust_spinlocks(0);
140} 141}
141 142
142EXPORT_SYMBOL(panic); 143EXPORT_SYMBOL(panic);
diff --git a/kernel/params.c b/kernel/params.c
index 9da58eabdcb2..d656c276508d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -218,15 +218,11 @@ int param_set_charp(const char *val, struct kernel_param *kp)
218 return -ENOSPC; 218 return -ENOSPC;
219 } 219 }
220 220
221 if (kp->flags & KPARAM_KMALLOCED)
222 kfree(*(char **)kp->arg);
223
224 /* This is a hack. We can't need to strdup in early boot, and we 221 /* This is a hack. We can't need to strdup in early boot, and we
225 * don't need to; this mangled commandline is preserved. */ 222 * don't need to; this mangled commandline is preserved. */
226 if (slab_is_available()) { 223 if (slab_is_available()) {
227 kp->flags |= KPARAM_KMALLOCED;
228 *(char **)kp->arg = kstrdup(val, GFP_KERNEL); 224 *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
229 if (!kp->arg) 225 if (!*(char **)kp->arg)
230 return -ENOMEM; 226 return -ENOMEM;
231 } else 227 } else
232 *(const char **)kp->arg = val; 228 *(const char **)kp->arg = val;
@@ -304,6 +300,7 @@ static int param_array(const char *name,
304 unsigned int min, unsigned int max, 300 unsigned int min, unsigned int max,
305 void *elem, int elemsize, 301 void *elem, int elemsize,
306 int (*set)(const char *, struct kernel_param *kp), 302 int (*set)(const char *, struct kernel_param *kp),
303 u16 flags,
307 unsigned int *num) 304 unsigned int *num)
308{ 305{
309 int ret; 306 int ret;
@@ -313,6 +310,7 @@ static int param_array(const char *name,
313 /* Get the name right for errors. */ 310 /* Get the name right for errors. */
314 kp.name = name; 311 kp.name = name;
315 kp.arg = elem; 312 kp.arg = elem;
313 kp.flags = flags;
316 314
317 /* No equals sign? */ 315 /* No equals sign? */
318 if (!val) { 316 if (!val) {
@@ -358,7 +356,8 @@ int param_array_set(const char *val, struct kernel_param *kp)
358 unsigned int temp_num; 356 unsigned int temp_num;
359 357
360 return param_array(kp->name, val, 1, arr->max, arr->elem, 358 return param_array(kp->name, val, 1, arr->max, arr->elem,
361 arr->elemsize, arr->set, arr->num ?: &temp_num); 359 arr->elemsize, arr->set, kp->flags,
360 arr->num ?: &temp_num);
362} 361}
363 362
364int param_array_get(char *buffer, struct kernel_param *kp) 363int param_array_get(char *buffer, struct kernel_param *kp)
@@ -605,11 +604,7 @@ void module_param_sysfs_remove(struct module *mod)
605 604
606void destroy_params(const struct kernel_param *params, unsigned num) 605void destroy_params(const struct kernel_param *params, unsigned num)
607{ 606{
608 unsigned int i; 607 /* FIXME: This should free kmalloced charp parameters. It doesn't. */
609
610 for (i = 0; i < num; i++)
611 if (params[i].flags & KPARAM_KMALLOCED)
612 kfree(*(char **)params[i].arg);
613} 608}
614 609
615static void __init kernel_add_sysfs_param(const char *name, 610static void __init kernel_add_sysfs_param(const char *name,
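The params.c changes drop the KPARAM_KMALLOCED bookkeeping (leaving the charp free-on-unload as an acknowledged FIXME), fix param_set_charp() to test the freshly assigned *(char **)kp->arg for allocation failure, and pass the flags word down into param_array(). For reference, a hedged sketch of the caller-side API this code services; the parameter name and default are illustrative:

#include <linux/module.h>
#include <linux/moduleparam.h>

static char *example_label = "default";
module_param(example_label, charp, 0644);
MODULE_PARM_DESC(example_label,
	"illustrative string parameter; values written after boot are kstrdup()ed by param_set_charp()");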
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0f86feb6db0c..6b7ddba1dd64 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -20,6 +20,7 @@
20#include <linux/percpu.h> 20#include <linux/percpu.h>
21#include <linux/ptrace.h> 21#include <linux/ptrace.h>
22#include <linux/vmstat.h> 22#include <linux/vmstat.h>
23#include <linux/vmalloc.h>
23#include <linux/hardirq.h> 24#include <linux/hardirq.h>
24#include <linux/rculist.h> 25#include <linux/rculist.h>
25#include <linux/uaccess.h> 26#include <linux/uaccess.h>
@@ -27,6 +28,8 @@
27#include <linux/anon_inodes.h> 28#include <linux/anon_inodes.h>
28#include <linux/kernel_stat.h> 29#include <linux/kernel_stat.h>
29#include <linux/perf_event.h> 30#include <linux/perf_event.h>
31#include <linux/ftrace_event.h>
32#include <linux/hw_breakpoint.h>
30 33
31#include <asm/irq_regs.h> 34#include <asm/irq_regs.h>
32 35
@@ -243,6 +246,49 @@ static void perf_unpin_context(struct perf_event_context *ctx)
243 put_ctx(ctx); 246 put_ctx(ctx);
244} 247}
245 248
249static inline u64 perf_clock(void)
250{
251 return cpu_clock(smp_processor_id());
252}
253
254/*
255 * Update the record of the current time in a context.
256 */
257static void update_context_time(struct perf_event_context *ctx)
258{
259 u64 now = perf_clock();
260
261 ctx->time += now - ctx->timestamp;
262 ctx->timestamp = now;
263}
264
265/*
266 * Update the total_time_enabled and total_time_running fields for a event.
267 */
268static void update_event_times(struct perf_event *event)
269{
270 struct perf_event_context *ctx = event->ctx;
271 u64 run_end;
272
273 if (event->state < PERF_EVENT_STATE_INACTIVE ||
274 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
275 return;
276
277 if (ctx->is_active)
278 run_end = ctx->time;
279 else
280 run_end = event->tstamp_stopped;
281
282 event->total_time_enabled = run_end - event->tstamp_enabled;
283
284 if (event->state == PERF_EVENT_STATE_INACTIVE)
285 run_end = event->tstamp_stopped;
286 else
287 run_end = ctx->time;
288
289 event->total_time_running = run_end - event->tstamp_running;
290}
291
246/* 292/*
247 * Add a event from the lists for its context. 293 * Add a event from the lists for its context.
248 * Must be called with ctx->mutex and ctx->lock held. 294 * Must be called with ctx->mutex and ctx->lock held.
@@ -291,6 +337,18 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
291 if (event->group_leader != event) 337 if (event->group_leader != event)
292 event->group_leader->nr_siblings--; 338 event->group_leader->nr_siblings--;
293 339
340 update_event_times(event);
341
342 /*
343 * If event was in error state, then keep it
344 * that way, otherwise bogus counts will be
345 * returned on read(). The only way to get out
346 * of error state is by explicit re-enabling
347 * of the event
348 */
349 if (event->state > PERF_EVENT_STATE_OFF)
350 event->state = PERF_EVENT_STATE_OFF;
351
294 /* 352 /*
295 * If this was a group event with sibling events then 353 * If this was a group event with sibling events then
296 * upgrade the siblings to singleton events by adding them 354 * upgrade the siblings to singleton events by adding them
@@ -444,50 +502,11 @@ retry:
444 * can remove the event safely, if the call above did not 502 * can remove the event safely, if the call above did not
445 * succeed. 503 * succeed.
446 */ 504 */
447 if (!list_empty(&event->group_entry)) { 505 if (!list_empty(&event->group_entry))
448 list_del_event(event, ctx); 506 list_del_event(event, ctx);
449 }
450 spin_unlock_irq(&ctx->lock); 507 spin_unlock_irq(&ctx->lock);
451} 508}
452 509
453static inline u64 perf_clock(void)
454{
455 return cpu_clock(smp_processor_id());
456}
457
458/*
459 * Update the record of the current time in a context.
460 */
461static void update_context_time(struct perf_event_context *ctx)
462{
463 u64 now = perf_clock();
464
465 ctx->time += now - ctx->timestamp;
466 ctx->timestamp = now;
467}
468
469/*
470 * Update the total_time_enabled and total_time_running fields for a event.
471 */
472static void update_event_times(struct perf_event *event)
473{
474 struct perf_event_context *ctx = event->ctx;
475 u64 run_end;
476
477 if (event->state < PERF_EVENT_STATE_INACTIVE ||
478 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
479 return;
480
481 event->total_time_enabled = ctx->time - event->tstamp_enabled;
482
483 if (event->state == PERF_EVENT_STATE_INACTIVE)
484 run_end = event->tstamp_stopped;
485 else
486 run_end = ctx->time;
487
488 event->total_time_running = run_end - event->tstamp_running;
489}
490
491/* 510/*
492 * Update total_time_enabled and total_time_running for all events in a group. 511 * Update total_time_enabled and total_time_running for all events in a group.
493 */ 512 */
@@ -1031,12 +1050,8 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
1031 1050
1032 perf_disable(); 1051 perf_disable();
1033 if (ctx->nr_active) { 1052 if (ctx->nr_active) {
1034 list_for_each_entry(event, &ctx->group_list, group_entry) { 1053 list_for_each_entry(event, &ctx->group_list, group_entry)
1035 if (event != event->group_leader) 1054 group_sched_out(event, cpuctx, ctx);
1036 event_sched_out(event, cpuctx, ctx);
1037 else
1038 group_sched_out(event, cpuctx, ctx);
1039 }
1040 } 1055 }
1041 perf_enable(); 1056 perf_enable();
1042 out: 1057 out:
@@ -1062,8 +1077,6 @@ static int context_equiv(struct perf_event_context *ctx1,
1062 && !ctx1->pin_count && !ctx2->pin_count; 1077 && !ctx1->pin_count && !ctx2->pin_count;
1063} 1078}
1064 1079
1065static void __perf_event_read(void *event);
1066
1067static void __perf_event_sync_stat(struct perf_event *event, 1080static void __perf_event_sync_stat(struct perf_event *event,
1068 struct perf_event *next_event) 1081 struct perf_event *next_event)
1069{ 1082{
@@ -1081,8 +1094,8 @@ static void __perf_event_sync_stat(struct perf_event *event,
1081 */ 1094 */
1082 switch (event->state) { 1095 switch (event->state) {
1083 case PERF_EVENT_STATE_ACTIVE: 1096 case PERF_EVENT_STATE_ACTIVE:
1084 __perf_event_read(event); 1097 event->pmu->read(event);
1085 break; 1098 /* fall-through */
1086 1099
1087 case PERF_EVENT_STATE_INACTIVE: 1100 case PERF_EVENT_STATE_INACTIVE:
1088 update_event_times(event); 1101 update_event_times(event);
@@ -1121,6 +1134,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
1121 if (!ctx->nr_stat) 1134 if (!ctx->nr_stat)
1122 return; 1135 return;
1123 1136
1137 update_context_time(ctx);
1138
1124 event = list_first_entry(&ctx->event_list, 1139 event = list_first_entry(&ctx->event_list,
1125 struct perf_event, event_entry); 1140 struct perf_event, event_entry);
1126 1141
@@ -1164,8 +1179,6 @@ void perf_event_task_sched_out(struct task_struct *task,
1164 if (likely(!ctx || !cpuctx->task_ctx)) 1179 if (likely(!ctx || !cpuctx->task_ctx))
1165 return; 1180 return;
1166 1181
1167 update_context_time(ctx);
1168
1169 rcu_read_lock(); 1182 rcu_read_lock();
1170 parent = rcu_dereference(ctx->parent_ctx); 1183 parent = rcu_dereference(ctx->parent_ctx);
1171 next_ctx = next->perf_event_ctxp; 1184 next_ctx = next->perf_event_ctxp;
@@ -1258,12 +1271,8 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1258 if (event->cpu != -1 && event->cpu != cpu) 1271 if (event->cpu != -1 && event->cpu != cpu)
1259 continue; 1272 continue;
1260 1273
1261 if (event != event->group_leader) 1274 if (group_can_go_on(event, cpuctx, 1))
1262 event_sched_in(event, cpuctx, ctx, cpu); 1275 group_sched_in(event, cpuctx, ctx, cpu);
1263 else {
1264 if (group_can_go_on(event, cpuctx, 1))
1265 group_sched_in(event, cpuctx, ctx, cpu);
1266 }
1267 1276
1268 /* 1277 /*
1269 * If this pinned group hasn't been scheduled, 1278 * If this pinned group hasn't been scheduled,
@@ -1291,15 +1300,9 @@ __perf_event_sched_in(struct perf_event_context *ctx,
1291 if (event->cpu != -1 && event->cpu != cpu) 1300 if (event->cpu != -1 && event->cpu != cpu)
1292 continue; 1301 continue;
1293 1302
1294 if (event != event->group_leader) { 1303 if (group_can_go_on(event, cpuctx, can_add_hw))
1295 if (event_sched_in(event, cpuctx, ctx, cpu)) 1304 if (group_sched_in(event, cpuctx, ctx, cpu))
1296 can_add_hw = 0; 1305 can_add_hw = 0;
1297 } else {
1298 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1299 if (group_sched_in(event, cpuctx, ctx, cpu))
1300 can_add_hw = 0;
1301 }
1302 }
1303 } 1306 }
1304 perf_enable(); 1307 perf_enable();
1305 out: 1308 out:
@@ -1368,7 +1371,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1368 u64 interrupts, freq; 1371 u64 interrupts, freq;
1369 1372
1370 spin_lock(&ctx->lock); 1373 spin_lock(&ctx->lock);
1371 list_for_each_entry(event, &ctx->group_list, group_entry) { 1374 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1372 if (event->state != PERF_EVENT_STATE_ACTIVE) 1375 if (event->state != PERF_EVENT_STATE_ACTIVE)
1373 continue; 1376 continue;
1374 1377
@@ -1528,7 +1531,6 @@ static void __perf_event_read(void *info)
1528 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); 1531 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1529 struct perf_event *event = info; 1532 struct perf_event *event = info;
1530 struct perf_event_context *ctx = event->ctx; 1533 struct perf_event_context *ctx = event->ctx;
1531 unsigned long flags;
1532 1534
1533 /* 1535 /*
1534 * If this is a task context, we need to check whether it is 1536 * If this is a task context, we need to check whether it is
@@ -1540,12 +1542,12 @@ static void __perf_event_read(void *info)
1540 if (ctx->task && cpuctx->task_ctx != ctx) 1542 if (ctx->task && cpuctx->task_ctx != ctx)
1541 return; 1543 return;
1542 1544
1543 local_irq_save(flags); 1545 spin_lock(&ctx->lock);
1544 if (ctx->is_active) 1546 update_context_time(ctx);
1545 update_context_time(ctx);
1546 event->pmu->read(event);
1547 update_event_times(event); 1547 update_event_times(event);
1548 local_irq_restore(flags); 1548 spin_unlock(&ctx->lock);
1549
1550 event->pmu->read(event);
1549} 1551}
1550 1552
1551static u64 perf_event_read(struct perf_event *event) 1553static u64 perf_event_read(struct perf_event *event)
@@ -1558,7 +1560,13 @@ static u64 perf_event_read(struct perf_event *event)
1558 smp_call_function_single(event->oncpu, 1560 smp_call_function_single(event->oncpu,
1559 __perf_event_read, event, 1); 1561 __perf_event_read, event, 1);
1560 } else if (event->state == PERF_EVENT_STATE_INACTIVE) { 1562 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1563 struct perf_event_context *ctx = event->ctx;
1564 unsigned long flags;
1565
1566 spin_lock_irqsave(&ctx->lock, flags);
1567 update_context_time(ctx);
1561 update_event_times(event); 1568 update_event_times(event);
1569 spin_unlock_irqrestore(&ctx->lock, flags);
1562 } 1570 }
1563 1571
1564 return atomic64_read(&event->count); 1572 return atomic64_read(&event->count);
@@ -1671,6 +1679,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
1671 return ERR_PTR(err); 1679 return ERR_PTR(err);
1672} 1680}
1673 1681
1682static void perf_event_free_filter(struct perf_event *event);
1683
1674static void free_event_rcu(struct rcu_head *head) 1684static void free_event_rcu(struct rcu_head *head)
1675{ 1685{
1676 struct perf_event *event; 1686 struct perf_event *event;
@@ -1678,6 +1688,7 @@ static void free_event_rcu(struct rcu_head *head)
1678 event = container_of(head, struct perf_event, rcu_head); 1688 event = container_of(head, struct perf_event, rcu_head);
1679 if (event->ns) 1689 if (event->ns)
1680 put_pid_ns(event->ns); 1690 put_pid_ns(event->ns);
1691 perf_event_free_filter(event);
1681 kfree(event); 1692 kfree(event);
1682} 1693}
1683 1694
@@ -1709,16 +1720,10 @@ static void free_event(struct perf_event *event)
1709 call_rcu(&event->rcu_head, free_event_rcu); 1720 call_rcu(&event->rcu_head, free_event_rcu);
1710} 1721}
1711 1722
1712/* 1723int perf_event_release_kernel(struct perf_event *event)
1713 * Called when the last reference to the file is gone.
1714 */
1715static int perf_release(struct inode *inode, struct file *file)
1716{ 1724{
1717 struct perf_event *event = file->private_data;
1718 struct perf_event_context *ctx = event->ctx; 1725 struct perf_event_context *ctx = event->ctx;
1719 1726
1720 file->private_data = NULL;
1721
1722 WARN_ON_ONCE(ctx->parent_ctx); 1727 WARN_ON_ONCE(ctx->parent_ctx);
1723 mutex_lock(&ctx->mutex); 1728 mutex_lock(&ctx->mutex);
1724 perf_event_remove_from_context(event); 1729 perf_event_remove_from_context(event);
@@ -1733,6 +1738,19 @@ static int perf_release(struct inode *inode, struct file *file)
1733 1738
1734 return 0; 1739 return 0;
1735} 1740}
1741EXPORT_SYMBOL_GPL(perf_event_release_kernel);
1742
1743/*
1744 * Called when the last reference to the file is gone.
1745 */
1746static int perf_release(struct inode *inode, struct file *file)
1747{
1748 struct perf_event *event = file->private_data;
1749
1750 file->private_data = NULL;
1751
1752 return perf_event_release_kernel(event);
1753}
1736 1754
1737static int perf_event_read_size(struct perf_event *event) 1755static int perf_event_read_size(struct perf_event *event)
1738{ 1756{
@@ -1759,91 +1777,94 @@ static int perf_event_read_size(struct perf_event *event)
1759 return size; 1777 return size;
1760} 1778}
1761 1779
1762static u64 perf_event_read_value(struct perf_event *event) 1780u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
1763{ 1781{
1764 struct perf_event *child; 1782 struct perf_event *child;
1765 u64 total = 0; 1783 u64 total = 0;
1766 1784
1785 *enabled = 0;
1786 *running = 0;
1787
1788 mutex_lock(&event->child_mutex);
1767 total += perf_event_read(event); 1789 total += perf_event_read(event);
1768 list_for_each_entry(child, &event->child_list, child_list) 1790 *enabled += event->total_time_enabled +
1791 atomic64_read(&event->child_total_time_enabled);
1792 *running += event->total_time_running +
1793 atomic64_read(&event->child_total_time_running);
1794
1795 list_for_each_entry(child, &event->child_list, child_list) {
1769 total += perf_event_read(child); 1796 total += perf_event_read(child);
1797 *enabled += child->total_time_enabled;
1798 *running += child->total_time_running;
1799 }
1800 mutex_unlock(&event->child_mutex);
1770 1801
1771 return total; 1802 return total;
1772} 1803}
1773 1804EXPORT_SYMBOL_GPL(perf_event_read_value);
1774static int perf_event_read_entry(struct perf_event *event,
1775 u64 read_format, char __user *buf)
1776{
1777 int n = 0, count = 0;
1778 u64 values[2];
1779
1780 values[n++] = perf_event_read_value(event);
1781 if (read_format & PERF_FORMAT_ID)
1782 values[n++] = primary_event_id(event);
1783
1784 count = n * sizeof(u64);
1785
1786 if (copy_to_user(buf, values, count))
1787 return -EFAULT;
1788
1789 return count;
1790}
1791 1805
1792static int perf_event_read_group(struct perf_event *event, 1806static int perf_event_read_group(struct perf_event *event,
1793 u64 read_format, char __user *buf) 1807 u64 read_format, char __user *buf)
1794{ 1808{
1795 struct perf_event *leader = event->group_leader, *sub; 1809 struct perf_event *leader = event->group_leader, *sub;
1796 int n = 0, size = 0, err = -EFAULT; 1810 int n = 0, size = 0, ret = -EFAULT;
1797 u64 values[3]; 1811 struct perf_event_context *ctx = leader->ctx;
1812 u64 values[5];
1813 u64 count, enabled, running;
1814
1815 mutex_lock(&ctx->mutex);
1816 count = perf_event_read_value(leader, &enabled, &running);
1798 1817
1799 values[n++] = 1 + leader->nr_siblings; 1818 values[n++] = 1 + leader->nr_siblings;
1800 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1819 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1801 values[n++] = leader->total_time_enabled + 1820 values[n++] = enabled;
1802 atomic64_read(&leader->child_total_time_enabled); 1821 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1803 } 1822 values[n++] = running;
1804 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1823 values[n++] = count;
1805 values[n++] = leader->total_time_running + 1824 if (read_format & PERF_FORMAT_ID)
1806 atomic64_read(&leader->child_total_time_running); 1825 values[n++] = primary_event_id(leader);
1807 }
1808 1826
1809 size = n * sizeof(u64); 1827 size = n * sizeof(u64);
1810 1828
1811 if (copy_to_user(buf, values, size)) 1829 if (copy_to_user(buf, values, size))
1812 return -EFAULT; 1830 goto unlock;
1813
1814 err = perf_event_read_entry(leader, read_format, buf + size);
1815 if (err < 0)
1816 return err;
1817 1831
1818 size += err; 1832 ret = size;
1819 1833
1820 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 1834 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1821 err = perf_event_read_entry(sub, read_format, 1835 n = 0;
1822 buf + size); 1836
1823 if (err < 0) 1837 values[n++] = perf_event_read_value(sub, &enabled, &running);
1824 return err; 1838 if (read_format & PERF_FORMAT_ID)
1839 values[n++] = primary_event_id(sub);
1840
1841 size = n * sizeof(u64);
1842
1843 if (copy_to_user(buf + ret, values, size)) {
1844 ret = -EFAULT;
1845 goto unlock;
1846 }
1825 1847
1826 size += err; 1848 ret += size;
1827 } 1849 }
1850unlock:
1851 mutex_unlock(&ctx->mutex);
1828 1852
1829 return size; 1853 return ret;
1830} 1854}
1831 1855
1832static int perf_event_read_one(struct perf_event *event, 1856static int perf_event_read_one(struct perf_event *event,
1833 u64 read_format, char __user *buf) 1857 u64 read_format, char __user *buf)
1834{ 1858{
1859 u64 enabled, running;
1835 u64 values[4]; 1860 u64 values[4];
1836 int n = 0; 1861 int n = 0;
1837 1862
1838 values[n++] = perf_event_read_value(event); 1863 values[n++] = perf_event_read_value(event, &enabled, &running);
1839 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1864 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1840 values[n++] = event->total_time_enabled + 1865 values[n++] = enabled;
1841 atomic64_read(&event->child_total_time_enabled); 1866 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1842 } 1867 values[n++] = running;
1843 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1844 values[n++] = event->total_time_running +
1845 atomic64_read(&event->child_total_time_running);
1846 }
1847 if (read_format & PERF_FORMAT_ID) 1868 if (read_format & PERF_FORMAT_ID)
1848 values[n++] = primary_event_id(event); 1869 values[n++] = primary_event_id(event);
1849 1870
@@ -1874,12 +1895,10 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
1874 return -ENOSPC; 1895 return -ENOSPC;
1875 1896
1876 WARN_ON_ONCE(event->ctx->parent_ctx); 1897 WARN_ON_ONCE(event->ctx->parent_ctx);
1877 mutex_lock(&event->child_mutex);
1878 if (read_format & PERF_FORMAT_GROUP) 1898 if (read_format & PERF_FORMAT_GROUP)
1879 ret = perf_event_read_group(event, read_format, buf); 1899 ret = perf_event_read_group(event, read_format, buf);
1880 else 1900 else
1881 ret = perf_event_read_one(event, read_format, buf); 1901 ret = perf_event_read_one(event, read_format, buf);
1882 mutex_unlock(&event->child_mutex);
1883 1902
1884 return ret; 1903 return ret;
1885} 1904}
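The read-side refactor above introduces perf_event_read_value() with explicit enabled/running out-parameters and rewrites perf_event_read_group() to emit the whole group in one buffer under ctx->mutex instead of child_mutex. A hedged user-space sketch of the resulting PERF_FORMAT_GROUP layout, assuming PERF_FORMAT_TOTAL_TIME_ENABLED, PERF_FORMAT_TOTAL_TIME_RUNNING and PERF_FORMAT_ID are all requested (a field is simply omitted when its flag is not set):

static void parse_group_read(const unsigned long long *buf)
{
	unsigned long long nr      = buf[0];	/* 1 + number of siblings */
	unsigned long long enabled = buf[1];	/* total time enabled     */
	unsigned long long running = buf[2];	/* total time running     */
	unsigned long long i;

	for (i = 0; i < nr; i++) {
		unsigned long long value = buf[3 + 2 * i];	/* leader first, then siblings */
		unsigned long long id    = buf[3 + 2 * i + 1];
		/* consume value/id here */
		(void)value; (void)id;
	}
	(void)enabled; (void)running;
}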
@@ -1987,7 +2006,8 @@ unlock:
1987 return ret; 2006 return ret;
1988} 2007}
1989 2008
1990int perf_event_set_output(struct perf_event *event, int output_fd); 2009static int perf_event_set_output(struct perf_event *event, int output_fd);
2010static int perf_event_set_filter(struct perf_event *event, void __user *arg);
1991 2011
1992static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2012static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1993{ 2013{
@@ -2015,6 +2035,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2015 case PERF_EVENT_IOC_SET_OUTPUT: 2035 case PERF_EVENT_IOC_SET_OUTPUT:
2016 return perf_event_set_output(event, arg); 2036 return perf_event_set_output(event, arg);
2017 2037
2038 case PERF_EVENT_IOC_SET_FILTER:
2039 return perf_event_set_filter(event, (void __user *)arg);
2040
2018 default: 2041 default:
2019 return -ENOTTY; 2042 return -ENOTTY;
2020 } 2043 }
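PERF_EVENT_IOC_SET_FILTER gives user space a way to hand a tracepoint filter string to an open event. A short usage sketch; the filter expression is illustrative and follows the ftrace event-filter syntax:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_event_filter(int perf_fd)
{
	/* only meaningful for PERF_TYPE_TRACEPOINT events */
	return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
}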
@@ -2105,49 +2128,31 @@ unlock:
2105 rcu_read_unlock(); 2128 rcu_read_unlock();
2106} 2129}
2107 2130
2108static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2131static unsigned long perf_data_size(struct perf_mmap_data *data)
2109{ 2132{
2110 struct perf_event *event = vma->vm_file->private_data; 2133 return data->nr_pages << (PAGE_SHIFT + data->data_order);
2111 struct perf_mmap_data *data; 2134}
2112 int ret = VM_FAULT_SIGBUS;
2113
2114 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2115 if (vmf->pgoff == 0)
2116 ret = 0;
2117 return ret;
2118 }
2119
2120 rcu_read_lock();
2121 data = rcu_dereference(event->data);
2122 if (!data)
2123 goto unlock;
2124
2125 if (vmf->pgoff == 0) {
2126 vmf->page = virt_to_page(data->user_page);
2127 } else {
2128 int nr = vmf->pgoff - 1;
2129
2130 if ((unsigned)nr > data->nr_pages)
2131 goto unlock;
2132 2135
2133 if (vmf->flags & FAULT_FLAG_WRITE) 2136#ifndef CONFIG_PERF_USE_VMALLOC
2134 goto unlock;
2135 2137
2136 vmf->page = virt_to_page(data->data_pages[nr]); 2138/*
2137 } 2139 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
2140 */
2138 2141
2139 get_page(vmf->page); 2142static struct page *
2140 vmf->page->mapping = vma->vm_file->f_mapping; 2143perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2141 vmf->page->index = vmf->pgoff; 2144{
2145 if (pgoff > data->nr_pages)
2146 return NULL;
2142 2147
2143 ret = 0; 2148 if (pgoff == 0)
2144unlock: 2149 return virt_to_page(data->user_page);
2145 rcu_read_unlock();
2146 2150
2147 return ret; 2151 return virt_to_page(data->data_pages[pgoff - 1]);
2148} 2152}
2149 2153
2150static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages) 2154static struct perf_mmap_data *
2155perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2151{ 2156{
2152 struct perf_mmap_data *data; 2157 struct perf_mmap_data *data;
2153 unsigned long size; 2158 unsigned long size;
@@ -2172,19 +2177,10 @@ static int perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2172 goto fail_data_pages; 2177 goto fail_data_pages;
2173 } 2178 }
2174 2179
2180 data->data_order = 0;
2175 data->nr_pages = nr_pages; 2181 data->nr_pages = nr_pages;
2176 atomic_set(&data->lock, -1);
2177 2182
2178 if (event->attr.watermark) { 2183 return data;
2179 data->watermark = min_t(long, PAGE_SIZE * nr_pages,
2180 event->attr.wakeup_watermark);
2181 }
2182 if (!data->watermark)
2183 data->watermark = max(PAGE_SIZE, PAGE_SIZE * nr_pages / 4);
2184
2185 rcu_assign_pointer(event->data, data);
2186
2187 return 0;
2188 2184
2189fail_data_pages: 2185fail_data_pages:
2190 for (i--; i >= 0; i--) 2186 for (i--; i >= 0; i--)
@@ -2196,7 +2192,7 @@ fail_user_page:
2196 kfree(data); 2192 kfree(data);
2197 2193
2198fail: 2194fail:
2199 return -ENOMEM; 2195 return NULL;
2200} 2196}
2201 2197
2202static void perf_mmap_free_page(unsigned long addr) 2198static void perf_mmap_free_page(unsigned long addr)
@@ -2207,28 +2203,170 @@ static void perf_mmap_free_page(unsigned long addr)
2207 __free_page(page); 2203 __free_page(page);
2208} 2204}
2209 2205
2210static void __perf_mmap_data_free(struct rcu_head *rcu_head) 2206static void perf_mmap_data_free(struct perf_mmap_data *data)
2211{ 2207{
2212 struct perf_mmap_data *data;
2213 int i; 2208 int i;
2214 2209
2215 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2216
2217 perf_mmap_free_page((unsigned long)data->user_page); 2210 perf_mmap_free_page((unsigned long)data->user_page);
2218 for (i = 0; i < data->nr_pages; i++) 2211 for (i = 0; i < data->nr_pages; i++)
2219 perf_mmap_free_page((unsigned long)data->data_pages[i]); 2212 perf_mmap_free_page((unsigned long)data->data_pages[i]);
2213 kfree(data);
2214}
2215
2216#else
2217
2218/*
2219 * Back perf_mmap() with vmalloc memory.
2220 *
2221 * Required for architectures that have d-cache aliasing issues.
2222 */
2223
2224static struct page *
2225perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2226{
2227 if (pgoff > (1UL << data->data_order))
2228 return NULL;
2229
2230 return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
2231}
2232
2233static void perf_mmap_unmark_page(void *addr)
2234{
2235 struct page *page = vmalloc_to_page(addr);
2236
2237 page->mapping = NULL;
2238}
2239
2240static void perf_mmap_data_free_work(struct work_struct *work)
2241{
2242 struct perf_mmap_data *data;
2243 void *base;
2244 int i, nr;
2245
2246 data = container_of(work, struct perf_mmap_data, work);
2247 nr = 1 << data->data_order;
2248
2249 base = data->user_page;
2250 for (i = 0; i < nr + 1; i++)
2251 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2252
2253 vfree(base);
2254 kfree(data);
2255}
2256
2257static void perf_mmap_data_free(struct perf_mmap_data *data)
2258{
2259 schedule_work(&data->work);
2260}
2261
2262static struct perf_mmap_data *
2263perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2264{
2265 struct perf_mmap_data *data;
2266 unsigned long size;
2267 void *all_buf;
2268
2269 WARN_ON(atomic_read(&event->mmap_count));
2270
2271 size = sizeof(struct perf_mmap_data);
2272 size += sizeof(void *);
2273
2274 data = kzalloc(size, GFP_KERNEL);
2275 if (!data)
2276 goto fail;
2220 2277
2278 INIT_WORK(&data->work, perf_mmap_data_free_work);
2279
2280 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2281 if (!all_buf)
2282 goto fail_all_buf;
2283
2284 data->user_page = all_buf;
2285 data->data_pages[0] = all_buf + PAGE_SIZE;
2286 data->data_order = ilog2(nr_pages);
2287 data->nr_pages = 1;
2288
2289 return data;
2290
2291fail_all_buf:
2221 kfree(data); 2292 kfree(data);
2293
2294fail:
2295 return NULL;
2296}
2297
2298#endif
2299
2300static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2301{
2302 struct perf_event *event = vma->vm_file->private_data;
2303 struct perf_mmap_data *data;
2304 int ret = VM_FAULT_SIGBUS;
2305
2306 if (vmf->flags & FAULT_FLAG_MKWRITE) {
2307 if (vmf->pgoff == 0)
2308 ret = 0;
2309 return ret;
2310 }
2311
2312 rcu_read_lock();
2313 data = rcu_dereference(event->data);
2314 if (!data)
2315 goto unlock;
2316
2317 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2318 goto unlock;
2319
2320 vmf->page = perf_mmap_to_page(data, vmf->pgoff);
2321 if (!vmf->page)
2322 goto unlock;
2323
2324 get_page(vmf->page);
2325 vmf->page->mapping = vma->vm_file->f_mapping;
2326 vmf->page->index = vmf->pgoff;
2327
2328 ret = 0;
2329unlock:
2330 rcu_read_unlock();
2331
2332 return ret;
2222} 2333}
2223 2334
2224static void perf_mmap_data_free(struct perf_event *event) 2335static void
2336perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
2337{
2338 long max_size = perf_data_size(data);
2339
2340 atomic_set(&data->lock, -1);
2341
2342 if (event->attr.watermark) {
2343 data->watermark = min_t(long, max_size,
2344 event->attr.wakeup_watermark);
2345 }
2346
2347 if (!data->watermark)
2348 data->watermark = max_size / 2;
2349
2350
2351 rcu_assign_pointer(event->data, data);
2352}
2353
2354static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
2355{
2356 struct perf_mmap_data *data;
2357
2358 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
2359 perf_mmap_data_free(data);
2360}
2361
2362static void perf_mmap_data_release(struct perf_event *event)
2225{ 2363{
2226 struct perf_mmap_data *data = event->data; 2364 struct perf_mmap_data *data = event->data;
2227 2365
2228 WARN_ON(atomic_read(&event->mmap_count)); 2366 WARN_ON(atomic_read(&event->mmap_count));
2229 2367
2230 rcu_assign_pointer(event->data, NULL); 2368 rcu_assign_pointer(event->data, NULL);
2231 call_rcu(&data->rcu_head, __perf_mmap_data_free); 2369 call_rcu(&data->rcu_head, perf_mmap_data_free_rcu);
2232} 2370}
2233 2371
2234static void perf_mmap_open(struct vm_area_struct *vma) 2372static void perf_mmap_open(struct vm_area_struct *vma)
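The block above splits the mmap buffer allocation into a page-by-page path and a CONFIG_PERF_USE_VMALLOC path for architectures with D-cache aliasing, where the whole buffer is one vmalloc area and data_order records how many real pages each logical data page covers. A small worked sketch of the new size accounting behind perf_data_size():

/* nr_pages << (PAGE_SHIFT + data_order); e.g. nr_pages = 1, data_order = 3
 * and a 4 KiB page give 1 << 15 = 32768 bytes of buffer. */
static unsigned long example_data_size(unsigned long nr_pages,
				       int data_order, int page_shift)
{
	return nr_pages << (page_shift + data_order);
}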
@@ -2244,11 +2382,12 @@ static void perf_mmap_close(struct vm_area_struct *vma)
2244 2382
2245 WARN_ON_ONCE(event->ctx->parent_ctx); 2383 WARN_ON_ONCE(event->ctx->parent_ctx);
2246 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { 2384 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2385 unsigned long size = perf_data_size(event->data);
2247 struct user_struct *user = current_user(); 2386 struct user_struct *user = current_user();
2248 2387
2249 atomic_long_sub(event->data->nr_pages + 1, &user->locked_vm); 2388 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2250 vma->vm_mm->locked_vm -= event->data->nr_locked; 2389 vma->vm_mm->locked_vm -= event->data->nr_locked;
2251 perf_mmap_data_free(event); 2390 perf_mmap_data_release(event);
2252 mutex_unlock(&event->mmap_mutex); 2391 mutex_unlock(&event->mmap_mutex);
2253 } 2392 }
2254} 2393}
@@ -2266,6 +2405,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2266 unsigned long user_locked, user_lock_limit; 2405 unsigned long user_locked, user_lock_limit;
2267 struct user_struct *user = current_user(); 2406 struct user_struct *user = current_user();
2268 unsigned long locked, lock_limit; 2407 unsigned long locked, lock_limit;
2408 struct perf_mmap_data *data;
2269 unsigned long vma_size; 2409 unsigned long vma_size;
2270 unsigned long nr_pages; 2410 unsigned long nr_pages;
2271 long user_extra, extra; 2411 long user_extra, extra;
@@ -2328,10 +2468,15 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2328 } 2468 }
2329 2469
2330 WARN_ON(event->data); 2470 WARN_ON(event->data);
2331 ret = perf_mmap_data_alloc(event, nr_pages); 2471
2332 if (ret) 2472 data = perf_mmap_data_alloc(event, nr_pages);
2473 ret = -ENOMEM;
2474 if (!data)
2333 goto unlock; 2475 goto unlock;
2334 2476
2477 ret = 0;
2478 perf_mmap_data_init(event, data);
2479
2335 atomic_set(&event->mmap_count, 1); 2480 atomic_set(&event->mmap_count, 1);
2336 atomic_long_add(user_extra, &user->locked_vm); 2481 atomic_long_add(user_extra, &user->locked_vm);
2337 vma->vm_mm->locked_vm += extra; 2482 vma->vm_mm->locked_vm += extra;
@@ -2519,7 +2664,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail,
2519 if (!data->writable) 2664 if (!data->writable)
2520 return true; 2665 return true;
2521 2666
2522 mask = (data->nr_pages << PAGE_SHIFT) - 1; 2667 mask = perf_data_size(data) - 1;
2523 2668
2524 offset = (offset - tail) & mask; 2669 offset = (offset - tail) & mask;
2525 head = (head - tail) & mask; 2670 head = (head - tail) & mask;
@@ -2558,20 +2703,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
2558static void perf_output_lock(struct perf_output_handle *handle) 2703static void perf_output_lock(struct perf_output_handle *handle)
2559{ 2704{
2560 struct perf_mmap_data *data = handle->data; 2705 struct perf_mmap_data *data = handle->data;
2561 int cpu; 2706 int cur, cpu = get_cpu();
2562 2707
2563 handle->locked = 0; 2708 handle->locked = 0;
2564 2709
2565 local_irq_save(handle->flags); 2710 for (;;) {
2566 cpu = smp_processor_id(); 2711 cur = atomic_cmpxchg(&data->lock, -1, cpu);
2567 2712 if (cur == -1) {
2568 if (in_nmi() && atomic_read(&data->lock) == cpu) 2713 handle->locked = 1;
2569 return; 2714 break;
2715 }
2716 if (cur == cpu)
2717 break;
2570 2718
2571 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2572 cpu_relax(); 2719 cpu_relax();
2573 2720 }
2574 handle->locked = 1;
2575} 2721}
2576 2722
2577static void perf_output_unlock(struct perf_output_handle *handle) 2723static void perf_output_unlock(struct perf_output_handle *handle)
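perf_output_lock() now pins the CPU with get_cpu() and takes the buffer lock with a cmpxchg loop that recognises recursion: if the owner word already holds this CPU's id (for example an NMI landing inside an existing output section), it proceeds without marking the handle locked. A hedged sketch of that pattern with illustrative names:

#include <asm/atomic.h>
#include <asm/processor.h>

static void example_ring_lock(atomic_t *owner, int *locked, int cpu)
{
	for (;;) {
		int cur = atomic_cmpxchg(owner, -1, cpu);

		if (cur == -1) {	/* lock was free: we own it now */
			*locked = 1;
			break;
		}
		if (cur == cpu)		/* nested on this CPU: do not spin */
			break;

		cpu_relax();		/* held by another CPU: wait */
	}
}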
@@ -2617,14 +2763,14 @@ again:
2617 if (atomic_xchg(&data->wakeup, 0)) 2763 if (atomic_xchg(&data->wakeup, 0))
2618 perf_output_wakeup(handle); 2764 perf_output_wakeup(handle);
2619out: 2765out:
2620 local_irq_restore(handle->flags); 2766 put_cpu();
2621} 2767}
2622 2768
2623void perf_output_copy(struct perf_output_handle *handle, 2769void perf_output_copy(struct perf_output_handle *handle,
2624 const void *buf, unsigned int len) 2770 const void *buf, unsigned int len)
2625{ 2771{
2626 unsigned int pages_mask; 2772 unsigned int pages_mask;
2627 unsigned int offset; 2773 unsigned long offset;
2628 unsigned int size; 2774 unsigned int size;
2629 void **pages; 2775 void **pages;
2630 2776
@@ -2633,12 +2779,14 @@ void perf_output_copy(struct perf_output_handle *handle,
2633 pages = handle->data->data_pages; 2779 pages = handle->data->data_pages;
2634 2780
2635 do { 2781 do {
2636 unsigned int page_offset; 2782 unsigned long page_offset;
2783 unsigned long page_size;
2637 int nr; 2784 int nr;
2638 2785
2639 nr = (offset >> PAGE_SHIFT) & pages_mask; 2786 nr = (offset >> PAGE_SHIFT) & pages_mask;
2640 page_offset = offset & (PAGE_SIZE - 1); 2787 page_size = 1UL << (handle->data->data_order + PAGE_SHIFT);
2641 size = min_t(unsigned int, PAGE_SIZE - page_offset, len); 2788 page_offset = offset & (page_size - 1);
2789 size = min_t(unsigned int, page_size - page_offset, len);
2642 2790
2643 memcpy(pages[nr] + page_offset, buf, size); 2791 memcpy(pages[nr] + page_offset, buf, size);
2644 2792
@@ -3126,15 +3274,10 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
3126{ 3274{
3127 struct perf_event *event; 3275 struct perf_event *event;
3128 3276
3129 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3130 return;
3131
3132 rcu_read_lock();
3133 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3277 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3134 if (perf_event_task_match(event)) 3278 if (perf_event_task_match(event))
3135 perf_event_task_output(event, task_event); 3279 perf_event_task_output(event, task_event);
3136 } 3280 }
3137 rcu_read_unlock();
3138} 3281}
3139 3282
3140static void perf_event_task_event(struct perf_task_event *task_event) 3283static void perf_event_task_event(struct perf_task_event *task_event)
@@ -3142,11 +3285,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
3142 struct perf_cpu_context *cpuctx; 3285 struct perf_cpu_context *cpuctx;
3143 struct perf_event_context *ctx = task_event->task_ctx; 3286 struct perf_event_context *ctx = task_event->task_ctx;
3144 3287
3288 rcu_read_lock();
3145 cpuctx = &get_cpu_var(perf_cpu_context); 3289 cpuctx = &get_cpu_var(perf_cpu_context);
3146 perf_event_task_ctx(&cpuctx->ctx, task_event); 3290 perf_event_task_ctx(&cpuctx->ctx, task_event);
3147 put_cpu_var(perf_cpu_context); 3291 put_cpu_var(perf_cpu_context);
3148 3292
3149 rcu_read_lock();
3150 if (!ctx) 3293 if (!ctx)
3151 ctx = rcu_dereference(task_event->task->perf_event_ctxp); 3294 ctx = rcu_dereference(task_event->task->perf_event_ctxp);
3152 if (ctx) 3295 if (ctx)
@@ -3238,15 +3381,10 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx,
3238{ 3381{
3239 struct perf_event *event; 3382 struct perf_event *event;
3240 3383
3241 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3242 return;
3243
3244 rcu_read_lock();
3245 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3384 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3246 if (perf_event_comm_match(event)) 3385 if (perf_event_comm_match(event))
3247 perf_event_comm_output(event, comm_event); 3386 perf_event_comm_output(event, comm_event);
3248 } 3387 }
3249 rcu_read_unlock();
3250} 3388}
3251 3389
3252static void perf_event_comm_event(struct perf_comm_event *comm_event) 3390static void perf_event_comm_event(struct perf_comm_event *comm_event)
@@ -3257,7 +3395,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
3257 char comm[TASK_COMM_LEN]; 3395 char comm[TASK_COMM_LEN];
3258 3396
3259 memset(comm, 0, sizeof(comm)); 3397 memset(comm, 0, sizeof(comm));
3260 strncpy(comm, comm_event->task->comm, sizeof(comm)); 3398 strlcpy(comm, comm_event->task->comm, sizeof(comm));
3261 size = ALIGN(strlen(comm)+1, sizeof(u64)); 3399 size = ALIGN(strlen(comm)+1, sizeof(u64));
3262 3400
3263 comm_event->comm = comm; 3401 comm_event->comm = comm;
@@ -3265,11 +3403,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
3265 3403
3266 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 3404 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3267 3405
3406 rcu_read_lock();
3268 cpuctx = &get_cpu_var(perf_cpu_context); 3407 cpuctx = &get_cpu_var(perf_cpu_context);
3269 perf_event_comm_ctx(&cpuctx->ctx, comm_event); 3408 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3270 put_cpu_var(perf_cpu_context); 3409 put_cpu_var(perf_cpu_context);
3271 3410
3272 rcu_read_lock();
3273 /* 3411 /*
3274 * doesn't really matter which of the child contexts the 3412 * doesn't really matter which of the child contexts the
3275 * events ends up in. 3413 * events ends up in.
@@ -3362,15 +3500,10 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx,
3362{ 3500{
3363 struct perf_event *event; 3501 struct perf_event *event;
3364 3502
3365 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3366 return;
3367
3368 rcu_read_lock();
3369 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3503 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3370 if (perf_event_mmap_match(event, mmap_event)) 3504 if (perf_event_mmap_match(event, mmap_event))
3371 perf_event_mmap_output(event, mmap_event); 3505 perf_event_mmap_output(event, mmap_event);
3372 } 3506 }
3373 rcu_read_unlock();
3374} 3507}
3375 3508
3376static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 3509static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -3426,11 +3559,11 @@ got_name:
3426 3559
3427 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 3560 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
3428 3561
3562 rcu_read_lock();
3429 cpuctx = &get_cpu_var(perf_cpu_context); 3563 cpuctx = &get_cpu_var(perf_cpu_context);
3430 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); 3564 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
3431 put_cpu_var(perf_cpu_context); 3565 put_cpu_var(perf_cpu_context);
3432 3566
3433 rcu_read_lock();
3434 /* 3567 /*
3435 * doesn't really matter which of the child contexts the 3568 * doesn't really matter which of the child contexts the
3436 * events ends up in. 3569 * events ends up in.
@@ -3569,7 +3702,11 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
3569 perf_event_disable(event); 3702 perf_event_disable(event);
3570 } 3703 }
3571 3704
3572 perf_event_output(event, nmi, data, regs); 3705 if (event->overflow_handler)
3706 event->overflow_handler(event, nmi, data, regs);
3707 else
3708 perf_event_output(event, nmi, data, regs);
3709
3573 return ret; 3710 return ret;
3574} 3711}
3575 3712
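__perf_event_overflow() now defers to event->overflow_handler when one is set, letting in-kernel users consume the overflow themselves instead of emitting a ring-buffer sample. A hedged sketch of a callback with the signature the hook is invoked with above; how such a handler gets attached to an event is outside this hunk:

#include <linux/kernel.h>
#include <linux/perf_event.h>

static void example_overflow_handler(struct perf_event *event, int nmi,
				     struct perf_sample_data *data,
				     struct pt_regs *regs)
{
	/* consume the overflow privately rather than writing a sample */
	pr_debug("perf event %p overflowed (nmi=%d)\n", event, nmi);
}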
@@ -3614,16 +3751,16 @@ again:
3614 return nr; 3751 return nr;
3615} 3752}
3616 3753
3617static void perf_swevent_overflow(struct perf_event *event, 3754static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
3618 int nmi, struct perf_sample_data *data, 3755 int nmi, struct perf_sample_data *data,
3619 struct pt_regs *regs) 3756 struct pt_regs *regs)
3620{ 3757{
3621 struct hw_perf_event *hwc = &event->hw; 3758 struct hw_perf_event *hwc = &event->hw;
3622 int throttle = 0; 3759 int throttle = 0;
3623 u64 overflow;
3624 3760
3625 data->period = event->hw.last_period; 3761 data->period = event->hw.last_period;
3626 overflow = perf_swevent_set_period(event); 3762 if (!overflow)
3763 overflow = perf_swevent_set_period(event);
3627 3764
3628 if (hwc->interrupts == MAX_INTERRUPTS) 3765 if (hwc->interrupts == MAX_INTERRUPTS)
3629 return; 3766 return;
@@ -3656,14 +3793,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
3656 3793
3657 atomic64_add(nr, &event->count); 3794 atomic64_add(nr, &event->count);
3658 3795
3796 if (!regs)
3797 return;
3798
3659 if (!hwc->sample_period) 3799 if (!hwc->sample_period)
3660 return; 3800 return;
3661 3801
3662 if (!regs) 3802 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
3803 return perf_swevent_overflow(event, 1, nmi, data, regs);
3804
3805 if (atomic64_add_negative(nr, &hwc->period_left))
3663 return; 3806 return;
3664 3807
3665 if (!atomic64_add_negative(nr, &hwc->period_left)) 3808 perf_swevent_overflow(event, 0, nmi, data, regs);
3666 perf_swevent_overflow(event, nmi, data, regs);
3667} 3809}
3668 3810
3669static int perf_swevent_is_counting(struct perf_event *event) 3811static int perf_swevent_is_counting(struct perf_event *event)
@@ -3696,25 +3838,44 @@ static int perf_swevent_is_counting(struct perf_event *event)
3696 return 1; 3838 return 1;
3697} 3839}
3698 3840
3841static int perf_tp_event_match(struct perf_event *event,
3842 struct perf_sample_data *data);
3843
3844static int perf_exclude_event(struct perf_event *event,
3845 struct pt_regs *regs)
3846{
3847 if (regs) {
3848 if (event->attr.exclude_user && user_mode(regs))
3849 return 1;
3850
3851 if (event->attr.exclude_kernel && !user_mode(regs))
3852 return 1;
3853 }
3854
3855 return 0;
3856}
3857
3699static int perf_swevent_match(struct perf_event *event, 3858static int perf_swevent_match(struct perf_event *event,
3700 enum perf_type_id type, 3859 enum perf_type_id type,
3701 u32 event_id, struct pt_regs *regs) 3860 u32 event_id,
3861 struct perf_sample_data *data,
3862 struct pt_regs *regs)
3702{ 3863{
3703 if (!perf_swevent_is_counting(event)) 3864 if (!perf_swevent_is_counting(event))
3704 return 0; 3865 return 0;
3705 3866
3706 if (event->attr.type != type) 3867 if (event->attr.type != type)
3707 return 0; 3868 return 0;
3869
3708 if (event->attr.config != event_id) 3870 if (event->attr.config != event_id)
3709 return 0; 3871 return 0;
3710 3872
3711 if (regs) { 3873 if (perf_exclude_event(event, regs))
3712 if (event->attr.exclude_user && user_mode(regs)) 3874 return 0;
3713 return 0;
3714 3875
3715 if (event->attr.exclude_kernel && !user_mode(regs)) 3876 if (event->attr.type == PERF_TYPE_TRACEPOINT &&
3716 return 0; 3877 !perf_tp_event_match(event, data))
3717 } 3878 return 0;
3718 3879
3719 return 1; 3880 return 1;
3720} 3881}
@@ -3727,49 +3888,59 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
3727{ 3888{
3728 struct perf_event *event; 3889 struct perf_event *event;
3729 3890
3730 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3731 return;
3732
3733 rcu_read_lock();
3734 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 3891 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3735 if (perf_swevent_match(event, type, event_id, regs)) 3892 if (perf_swevent_match(event, type, event_id, data, regs))
3736 perf_swevent_add(event, nr, nmi, data, regs); 3893 perf_swevent_add(event, nr, nmi, data, regs);
3737 } 3894 }
3738 rcu_read_unlock();
3739} 3895}
3740 3896
3741static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) 3897int perf_swevent_get_recursion_context(void)
3742{ 3898{
3899 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3900 int rctx;
3901
3743 if (in_nmi()) 3902 if (in_nmi())
3744 return &cpuctx->recursion[3]; 3903 rctx = 3;
3904 else if (in_irq())
3905 rctx = 2;
3906 else if (in_softirq())
3907 rctx = 1;
3908 else
3909 rctx = 0;
3745 3910
3746 if (in_irq()) 3911 if (cpuctx->recursion[rctx]) {
3747 return &cpuctx->recursion[2]; 3912 put_cpu_var(perf_cpu_context);
3913 return -1;
3914 }
3748 3915
3749 if (in_softirq()) 3916 cpuctx->recursion[rctx]++;
3750 return &cpuctx->recursion[1]; 3917 barrier();
3751 3918
3752 return &cpuctx->recursion[0]; 3919 return rctx;
3920}
3921EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
3922
3923void perf_swevent_put_recursion_context(int rctx)
3924{
3925 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3926 barrier();
3927 cpuctx->recursion[rctx]--;
3928 put_cpu_var(perf_cpu_context);
3753} 3929}
3930EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
3754 3931
3755static void do_perf_sw_event(enum perf_type_id type, u32 event_id, 3932static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3756 u64 nr, int nmi, 3933 u64 nr, int nmi,
3757 struct perf_sample_data *data, 3934 struct perf_sample_data *data,
3758 struct pt_regs *regs) 3935 struct pt_regs *regs)
3759{ 3936{
3760 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); 3937 struct perf_cpu_context *cpuctx;
3761 int *recursion = perf_swevent_recursion_context(cpuctx);
3762 struct perf_event_context *ctx; 3938 struct perf_event_context *ctx;
3763 3939
3764 if (*recursion) 3940 cpuctx = &__get_cpu_var(perf_cpu_context);
3765 goto out; 3941 rcu_read_lock();
3766
3767 (*recursion)++;
3768 barrier();
3769
3770 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, 3942 perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
3771 nr, nmi, data, regs); 3943 nr, nmi, data, regs);
3772 rcu_read_lock();
3773 /* 3944 /*
3774 * doesn't really matter which of the child contexts the 3945 * doesn't really matter which of the child contexts the
3775 * events ends up in. 3946 * events ends up in.
@@ -3778,23 +3949,24 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
3778 if (ctx) 3949 if (ctx)
3779 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); 3950 perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
3780 rcu_read_unlock(); 3951 rcu_read_unlock();
3781
3782 barrier();
3783 (*recursion)--;
3784
3785out:
3786 put_cpu_var(perf_cpu_context);
3787} 3952}
3788 3953
3789void __perf_sw_event(u32 event_id, u64 nr, int nmi, 3954void __perf_sw_event(u32 event_id, u64 nr, int nmi,
3790 struct pt_regs *regs, u64 addr) 3955 struct pt_regs *regs, u64 addr)
3791{ 3956{
3792 struct perf_sample_data data = { 3957 struct perf_sample_data data;
3793 .addr = addr, 3958 int rctx;
3794 };
3795 3959
3796 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, 3960 rctx = perf_swevent_get_recursion_context();
3797 &data, regs); 3961 if (rctx < 0)
3962 return;
3963
3964 data.addr = addr;
3965 data.raw = NULL;
3966
3967 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
3968
3969 perf_swevent_put_recursion_context(rctx);
3798} 3970}
3799 3971
3800static void perf_swevent_read(struct perf_event *event) 3972static void perf_swevent_read(struct perf_event *event)
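The recursion guard that used to live inside do_perf_sw_event() is now an explicit get/put pair, keyed by the context the event fires in (task, softirq, hardirq, NMI), so that other entry points can take it themselves. A minimal userspace sketch of the idea, with plain ints standing in for the per-CPU recursion counters and the barriers and preemption handling omitted:

#include <stdio.h>

static int recursion[4];        /* 0: task, 1: softirq, 2: hardirq, 3: NMI */

static int get_recursion_context(int in_nmi, int in_irq, int in_softirq)
{
        int rctx;

        if (in_nmi)
                rctx = 3;
        else if (in_irq)
                rctx = 2;
        else if (in_softirq)
                rctx = 1;
        else
                rctx = 0;

        if (recursion[rctx])
                return -1;      /* already inside a swevent at this level */

        recursion[rctx]++;
        return rctx;
}

static void put_recursion_context(int rctx)
{
        recursion[rctx]--;
}

int main(void)
{
        int rctx = get_recursion_context(0, 0, 0);

        printf("first take: %d\n", rctx);                             /* 0 */
        printf("nested take: %d\n", get_recursion_context(0, 0, 0));  /* -1 */
        put_recursion_context(rctx);
        printf("after put: %d\n", get_recursion_context(0, 0, 0));    /* 0 */
        return 0;
}
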
@@ -3839,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3839 event->pmu->read(event); 4011 event->pmu->read(event);
3840 4012
3841 data.addr = 0; 4013 data.addr = 0;
4014 data.period = event->hw.last_period;
3842 regs = get_irq_regs(); 4015 regs = get_irq_regs();
3843 /* 4016 /*
3844 * In case we exclude kernel IPs or are somehow not in interrupt 4017 * In case we exclude kernel IPs or are somehow not in interrupt
@@ -3849,8 +4022,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3849 regs = task_pt_regs(current); 4022 regs = task_pt_regs(current);
3850 4023
3851 if (regs) { 4024 if (regs) {
3852 if (perf_event_overflow(event, 0, &data, regs)) 4025 if (!(event->attr.exclude_idle && current->pid == 0))
3853 ret = HRTIMER_NORESTART; 4026 if (perf_event_overflow(event, 0, &data, regs))
4027 ret = HRTIMER_NORESTART;
3854 } 4028 }
3855 4029
3856 period = max_t(u64, 10000, event->hw.sample_period); 4030 period = max_t(u64, 10000, event->hw.sample_period);
@@ -3859,6 +4033,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
3859 return ret; 4033 return ret;
3860} 4034}
3861 4035
4036static void perf_swevent_start_hrtimer(struct perf_event *event)
4037{
4038 struct hw_perf_event *hwc = &event->hw;
4039
4040 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4041 hwc->hrtimer.function = perf_swevent_hrtimer;
4042 if (hwc->sample_period) {
4043 u64 period;
4044
4045 if (hwc->remaining) {
4046 if (hwc->remaining < 0)
4047 period = 10000;
4048 else
4049 period = hwc->remaining;
4050 hwc->remaining = 0;
4051 } else {
4052 period = max_t(u64, 10000, hwc->sample_period);
4053 }
4054 __hrtimer_start_range_ns(&hwc->hrtimer,
4055 ns_to_ktime(period), 0,
4056 HRTIMER_MODE_REL, 0);
4057 }
4058}
4059
4060static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4061{
4062 struct hw_perf_event *hwc = &event->hw;
4063
4064 if (hwc->sample_period) {
4065 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4066 hwc->remaining = ktime_to_ns(remaining);
4067
4068 hrtimer_cancel(&hwc->hrtimer);
4069 }
4070}
4071
3862/* 4072/*
3863 * Software event: cpu wall time clock 4073 * Software event: cpu wall time clock
3864 */ 4074 */
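perf_swevent_start_hrtimer() and perf_swevent_cancel_hrtimer() factor the duplicated hrtimer setup out of the cpu-clock and task-clock events and, in addition, carry the time left on the timer across a disable/enable cycle in hwc->remaining. The period selection reduces to the helper sketched below (userspace illustration; the names mirror the kernel fields but this is not kernel code):

#include <stdint.h>
#include <stdio.h>

static uint64_t pick_period(int64_t remaining, uint64_t sample_period)
{
        if (remaining) {
                if (remaining < 0)
                        return 10000;           /* expired while stopped: fire soon */
                return (uint64_t)remaining;     /* resume where we left off */
        }
        return sample_period > 10000 ? sample_period : 10000;  /* max(10000, period) */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)pick_period(0, 5000));      /* 10000 */
        printf("%llu\n", (unsigned long long)pick_period(2500, 50000));  /* 2500 */
        printf("%llu\n", (unsigned long long)pick_period(-1, 50000));    /* 10000 */
        return 0;
}
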
@@ -3881,22 +4091,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
3881 int cpu = raw_smp_processor_id(); 4091 int cpu = raw_smp_processor_id();
3882 4092
3883 atomic64_set(&hwc->prev_count, cpu_clock(cpu)); 4093 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3884 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4094 perf_swevent_start_hrtimer(event);
3885 hwc->hrtimer.function = perf_swevent_hrtimer;
3886 if (hwc->sample_period) {
3887 u64 period = max_t(u64, 10000, hwc->sample_period);
3888 __hrtimer_start_range_ns(&hwc->hrtimer,
3889 ns_to_ktime(period), 0,
3890 HRTIMER_MODE_REL, 0);
3891 }
3892 4095
3893 return 0; 4096 return 0;
3894} 4097}
3895 4098
3896static void cpu_clock_perf_event_disable(struct perf_event *event) 4099static void cpu_clock_perf_event_disable(struct perf_event *event)
3897{ 4100{
3898 if (event->hw.sample_period) 4101 perf_swevent_cancel_hrtimer(event);
3899 hrtimer_cancel(&event->hw.hrtimer);
3900 cpu_clock_perf_event_update(event); 4102 cpu_clock_perf_event_update(event);
3901} 4103}
3902 4104
@@ -3933,22 +4135,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
3933 now = event->ctx->time; 4135 now = event->ctx->time;
3934 4136
3935 atomic64_set(&hwc->prev_count, now); 4137 atomic64_set(&hwc->prev_count, now);
3936 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4138
3937 hwc->hrtimer.function = perf_swevent_hrtimer; 4139 perf_swevent_start_hrtimer(event);
3938 if (hwc->sample_period) {
3939 u64 period = max_t(u64, 10000, hwc->sample_period);
3940 __hrtimer_start_range_ns(&hwc->hrtimer,
3941 ns_to_ktime(period), 0,
3942 HRTIMER_MODE_REL, 0);
3943 }
3944 4140
3945 return 0; 4141 return 0;
3946} 4142}
3947 4143
3948static void task_clock_perf_event_disable(struct perf_event *event) 4144static void task_clock_perf_event_disable(struct perf_event *event)
3949{ 4145{
3950 if (event->hw.sample_period) 4146 perf_swevent_cancel_hrtimer(event);
3951 hrtimer_cancel(&event->hw.hrtimer);
3952 task_clock_perf_event_update(event, event->ctx->time); 4147 task_clock_perf_event_update(event, event->ctx->time);
3953 4148
3954} 4149}
@@ -3976,6 +4171,7 @@ static const struct pmu perf_ops_task_clock = {
3976}; 4171};
3977 4172
3978#ifdef CONFIG_EVENT_PROFILE 4173#ifdef CONFIG_EVENT_PROFILE
4174
3979void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 4175void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3980 int entry_size) 4176 int entry_size)
3981{ 4177{
@@ -3994,13 +4190,21 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
3994 if (!regs) 4190 if (!regs)
3995 regs = task_pt_regs(current); 4191 regs = task_pt_regs(current);
3996 4192
4193 /* Trace events already protected against recursion */
3997 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 4194 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
3998 &data, regs); 4195 &data, regs);
3999} 4196}
4000EXPORT_SYMBOL_GPL(perf_tp_event); 4197EXPORT_SYMBOL_GPL(perf_tp_event);
4001 4198
4002extern int ftrace_profile_enable(int); 4199static int perf_tp_event_match(struct perf_event *event,
4003extern void ftrace_profile_disable(int); 4200 struct perf_sample_data *data)
4201{
4202 void *record = data->raw->data;
4203
4204 if (likely(!event->filter) || filter_match_preds(event->filter, record))
4205 return 1;
4206 return 0;
4207}
4004 4208
4005static void tp_perf_event_destroy(struct perf_event *event) 4209static void tp_perf_event_destroy(struct perf_event *event)
4006{ 4210{
@@ -4025,11 +4229,99 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
4025 4229
4026 return &perf_ops_generic; 4230 return &perf_ops_generic;
4027} 4231}
4232
4233static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4234{
4235 char *filter_str;
4236 int ret;
4237
4238 if (event->attr.type != PERF_TYPE_TRACEPOINT)
4239 return -EINVAL;
4240
4241 filter_str = strndup_user(arg, PAGE_SIZE);
4242 if (IS_ERR(filter_str))
4243 return PTR_ERR(filter_str);
4244
4245 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4246
4247 kfree(filter_str);
4248 return ret;
4249}
4250
4251static void perf_event_free_filter(struct perf_event *event)
4252{
4253 ftrace_profile_free_filter(event);
4254}
4255
4028#else 4256#else
4257
4258static int perf_tp_event_match(struct perf_event *event,
4259 struct perf_sample_data *data)
4260{
4261 return 1;
4262}
4263
4029static const struct pmu *tp_perf_event_init(struct perf_event *event) 4264static const struct pmu *tp_perf_event_init(struct perf_event *event)
4030{ 4265{
4031 return NULL; 4266 return NULL;
4032} 4267}
4268
4269static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4270{
4271 return -ENOENT;
4272}
4273
4274static void perf_event_free_filter(struct perf_event *event)
4275{
4276}
4277
4278#endif /* CONFIG_EVENT_PROFILE */
4279
4280#ifdef CONFIG_HAVE_HW_BREAKPOINT
4281static void bp_perf_event_destroy(struct perf_event *event)
4282{
4283 release_bp_slot(event);
4284}
4285
4286static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4287{
4288 int err;
4289 /*
4290 * The breakpoint is already filled if we haven't created the counter
 4291 * through the perf syscall.
 4292 * FIXME: manage to get triggered to NULL if it comes from syscalls
4293 */
4294 if (!bp->callback)
4295 err = register_perf_hw_breakpoint(bp);
4296 else
4297 err = __register_perf_hw_breakpoint(bp);
4298 if (err)
4299 return ERR_PTR(err);
4300
4301 bp->destroy = bp_perf_event_destroy;
4302
4303 return &perf_ops_bp;
4304}
4305
4306void perf_bp_event(struct perf_event *bp, void *data)
4307{
4308 struct perf_sample_data sample;
4309 struct pt_regs *regs = data;
4310
4311 sample.addr = bp->attr.bp_addr;
4312
4313 if (!perf_exclude_event(bp, regs))
4314 perf_swevent_add(bp, 1, 1, &sample, regs);
4315}
4316#else
4317static const struct pmu *bp_perf_event_init(struct perf_event *bp)
4318{
4319 return NULL;
4320}
4321
4322void perf_bp_event(struct perf_event *bp, void *regs)
4323{
4324}
4033#endif 4325#endif
4034 4326
4035atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; 4327atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
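perf_event_set_filter() lets a tracepoint event carry a filter string, compiled by ftrace_profile_set_filter() and consulted by perf_tp_event_match() before a sample is accepted. A hypothetical userspace caller, assuming the PERF_EVENT_IOC_SET_FILTER ioctl that the rest of this series wires up, and with an illustrative tracepoint id and filter expression:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.size = sizeof(attr);
        attr.config = 1234;             /* tracepoint id from debugfs (illustrative) */
        attr.sample_period = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* Only events matching the filter predicates are counted/sampled. */
        if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0") < 0)
                perror("PERF_EVENT_IOC_SET_FILTER");
        close(fd);
        return 0;
}
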
@@ -4076,6 +4368,8 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
4076 case PERF_COUNT_SW_PAGE_FAULTS_MAJ: 4368 case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
4077 case PERF_COUNT_SW_CONTEXT_SWITCHES: 4369 case PERF_COUNT_SW_CONTEXT_SWITCHES:
4078 case PERF_COUNT_SW_CPU_MIGRATIONS: 4370 case PERF_COUNT_SW_CPU_MIGRATIONS:
4371 case PERF_COUNT_SW_ALIGNMENT_FAULTS:
4372 case PERF_COUNT_SW_EMULATION_FAULTS:
4079 if (!event->parent) { 4373 if (!event->parent) {
4080 atomic_inc(&perf_swevent_enabled[event_id]); 4374 atomic_inc(&perf_swevent_enabled[event_id]);
4081 event->destroy = sw_perf_event_destroy; 4375 event->destroy = sw_perf_event_destroy;
@@ -4096,6 +4390,7 @@ perf_event_alloc(struct perf_event_attr *attr,
4096 struct perf_event_context *ctx, 4390 struct perf_event_context *ctx,
4097 struct perf_event *group_leader, 4391 struct perf_event *group_leader,
4098 struct perf_event *parent_event, 4392 struct perf_event *parent_event,
4393 perf_callback_t callback,
4099 gfp_t gfpflags) 4394 gfp_t gfpflags)
4100{ 4395{
4101 const struct pmu *pmu; 4396 const struct pmu *pmu;
@@ -4138,6 +4433,11 @@ perf_event_alloc(struct perf_event_attr *attr,
4138 4433
4139 event->state = PERF_EVENT_STATE_INACTIVE; 4434 event->state = PERF_EVENT_STATE_INACTIVE;
4140 4435
4436 if (!callback && parent_event)
4437 callback = parent_event->callback;
4438
4439 event->callback = callback;
4440
4141 if (attr->disabled) 4441 if (attr->disabled)
4142 event->state = PERF_EVENT_STATE_OFF; 4442 event->state = PERF_EVENT_STATE_OFF;
4143 4443
@@ -4172,6 +4472,11 @@ perf_event_alloc(struct perf_event_attr *attr,
4172 pmu = tp_perf_event_init(event); 4472 pmu = tp_perf_event_init(event);
4173 break; 4473 break;
4174 4474
4475 case PERF_TYPE_BREAKPOINT:
4476 pmu = bp_perf_event_init(event);
4477 break;
4478
4479
4175 default: 4480 default:
4176 break; 4481 break;
4177 } 4482 }
@@ -4284,7 +4589,7 @@ err_size:
4284 goto out; 4589 goto out;
4285} 4590}
4286 4591
4287int perf_event_set_output(struct perf_event *event, int output_fd) 4592static int perf_event_set_output(struct perf_event *event, int output_fd)
4288{ 4593{
4289 struct perf_event *output_event = NULL; 4594 struct perf_event *output_event = NULL;
4290 struct file *output_file = NULL; 4595 struct file *output_file = NULL;
@@ -4414,7 +4719,7 @@ SYSCALL_DEFINE5(perf_event_open,
4414 } 4719 }
4415 4720
4416 event = perf_event_alloc(&attr, cpu, ctx, group_leader, 4721 event = perf_event_alloc(&attr, cpu, ctx, group_leader,
4417 NULL, GFP_KERNEL); 4722 NULL, NULL, GFP_KERNEL);
4418 err = PTR_ERR(event); 4723 err = PTR_ERR(event);
4419 if (IS_ERR(event)) 4724 if (IS_ERR(event))
4420 goto err_put_context; 4725 goto err_put_context;
@@ -4462,6 +4767,60 @@ err_put_context:
4462 return err; 4767 return err;
4463} 4768}
4464 4769
4770/**
4771 * perf_event_create_kernel_counter
4772 *
4773 * @attr: attributes of the counter to create
 4774 * @cpu: cpu on which the counter is bound
4775 * @pid: task to profile
4776 */
4777struct perf_event *
4778perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
4779 pid_t pid, perf_callback_t callback)
4780{
4781 struct perf_event *event;
4782 struct perf_event_context *ctx;
4783 int err;
4784
4785 /*
4786 * Get the target context (task or percpu):
4787 */
4788
4789 ctx = find_get_context(pid, cpu);
4790 if (IS_ERR(ctx)) {
4791 err = PTR_ERR(ctx);
4792 goto err_exit;
4793 }
4794
4795 event = perf_event_alloc(attr, cpu, ctx, NULL,
4796 NULL, callback, GFP_KERNEL);
4797 if (IS_ERR(event)) {
4798 err = PTR_ERR(event);
4799 goto err_put_context;
4800 }
4801
4802 event->filp = NULL;
4803 WARN_ON_ONCE(ctx->parent_ctx);
4804 mutex_lock(&ctx->mutex);
4805 perf_install_in_context(ctx, event, cpu);
4806 ++ctx->generation;
4807 mutex_unlock(&ctx->mutex);
4808
4809 event->owner = current;
4810 get_task_struct(current);
4811 mutex_lock(&current->perf_event_mutex);
4812 list_add_tail(&event->owner_entry, &current->perf_event_list);
4813 mutex_unlock(&current->perf_event_mutex);
4814
4815 return event;
4816
4817 err_put_context:
4818 put_ctx(ctx);
4819 err_exit:
4820 return ERR_PTR(err);
4821}
4822EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
4823
4465/* 4824/*
4466 * inherit a event from parent task to child task: 4825 * inherit a event from parent task to child task:
4467 */ 4826 */
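perf_event_create_kernel_counter() gives in-kernel users (such as the hw-breakpoint code added elsewhere in this series) a way to set up a counter without going through the perf_event_open() syscall. A hypothetical kernel-side caller, sketched against the signature above with illustrative attribute values and no overflow callback:

#include <linux/perf_event.h>
#include <linux/err.h>

static struct perf_event *sw_event;

static int example_counter_start(void)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_SOFTWARE,
                .config         = PERF_COUNT_SW_CPU_CLOCK,
                .size           = sizeof(attr),
        };

        /* cpu 0, no specific task (pid -1), no callback */
        sw_event = perf_event_create_kernel_counter(&attr, 0, -1, NULL);
        if (IS_ERR(sw_event))
                return PTR_ERR(sw_event);

        return 0;
}
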
@@ -4487,7 +4846,7 @@ inherit_event(struct perf_event *parent_event,
4487 child_event = perf_event_alloc(&parent_event->attr, 4846 child_event = perf_event_alloc(&parent_event->attr,
4488 parent_event->cpu, child_ctx, 4847 parent_event->cpu, child_ctx,
4489 group_leader, parent_event, 4848 group_leader, parent_event,
4490 GFP_KERNEL); 4849 NULL, GFP_KERNEL);
4491 if (IS_ERR(child_event)) 4850 if (IS_ERR(child_event))
4492 return child_event; 4851 return child_event;
4493 get_ctx(child_ctx); 4852 get_ctx(child_ctx);
@@ -4505,6 +4864,8 @@ inherit_event(struct perf_event *parent_event,
4505 if (parent_event->attr.freq) 4864 if (parent_event->attr.freq)
4506 child_event->hw.sample_period = parent_event->hw.sample_period; 4865 child_event->hw.sample_period = parent_event->hw.sample_period;
4507 4866
4867 child_event->overflow_handler = parent_event->overflow_handler;
4868
4508 /* 4869 /*
4509 * Link it up in the child's context: 4870 * Link it up in the child's context:
4510 */ 4871 */
@@ -4594,7 +4955,6 @@ __perf_event_exit_task(struct perf_event *child_event,
4594{ 4955{
4595 struct perf_event *parent_event; 4956 struct perf_event *parent_event;
4596 4957
4597 update_event_times(child_event);
4598 perf_event_remove_from_context(child_event); 4958 perf_event_remove_from_context(child_event);
4599 4959
4600 parent_event = child_event->parent; 4960 parent_event = child_event->parent;
@@ -4646,6 +5006,7 @@ void perf_event_exit_task(struct task_struct *child)
4646 * the events from it. 5006 * the events from it.
4647 */ 5007 */
4648 unclone_ctx(child_ctx); 5008 unclone_ctx(child_ctx);
5009 update_context_time(child_ctx);
4649 spin_unlock_irqrestore(&child_ctx->lock, flags); 5010 spin_unlock_irqrestore(&child_ctx->lock, flags);
4650 5011
4651 /* 5012 /*
@@ -4781,9 +5142,7 @@ int perf_event_init_task(struct task_struct *child)
4781 * We dont have to disable NMIs - we are only looking at 5142 * We dont have to disable NMIs - we are only looking at
4782 * the list, not manipulating it: 5143 * the list, not manipulating it:
4783 */ 5144 */
4784 list_for_each_entry_rcu(event, &parent_ctx->event_list, event_entry) { 5145 list_for_each_entry(event, &parent_ctx->group_list, group_entry) {
4785 if (event != event->group_leader)
4786 continue;
4787 5146
4788 if (!event->attr.inherit) { 5147 if (!event->attr.inherit) {
4789 inherited_all = 0; 5148 inherited_all = 0;
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index c3b81c30e5d5..43191815f874 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PM_SLEEP) += console.o
8obj-$(CONFIG_FREEZER) += process.o 8obj-$(CONFIG_FREEZER) += process.o
9obj-$(CONFIG_SUSPEND) += suspend.o 9obj-$(CONFIG_SUSPEND) += suspend.o
10obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o 10obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
11obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o 11obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o
12obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o 12obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
13 13
14obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o 14obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 04b3a83d686f..bbfe472d7524 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -32,6 +32,7 @@ static int noresume = 0;
32static char resume_file[256] = CONFIG_PM_STD_PARTITION; 32static char resume_file[256] = CONFIG_PM_STD_PARTITION;
33dev_t swsusp_resume_device; 33dev_t swsusp_resume_device;
34sector_t swsusp_resume_block; 34sector_t swsusp_resume_block;
35int in_suspend __nosavedata = 0;
35 36
36enum { 37enum {
37 HIBERNATION_INVALID, 38 HIBERNATION_INVALID,
@@ -202,6 +203,35 @@ static void platform_recover(int platform_mode)
202} 203}
203 204
204/** 205/**
206 * swsusp_show_speed - print the time elapsed between two events.
207 * @start: Starting event.
208 * @stop: Final event.
 209 * @nr_pages: number of pages processed between @start and @stop
 210 * @msg: introductory message to print
211 */
212
213void swsusp_show_speed(struct timeval *start, struct timeval *stop,
214 unsigned nr_pages, char *msg)
215{
216 s64 elapsed_centisecs64;
217 int centisecs;
218 int k;
219 int kps;
220
221 elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
222 do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
223 centisecs = elapsed_centisecs64;
224 if (centisecs == 0)
225 centisecs = 1; /* avoid div-by-zero */
226 k = nr_pages * (PAGE_SIZE / 1024);
227 kps = (k * 100) / centisecs;
228 printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
229 msg, k,
230 centisecs / 100, centisecs % 100,
231 kps / 1000, (kps % 1000) / 10);
232}
233
234/**
205 * create_image - freeze devices that need to be frozen with interrupts 235 * create_image - freeze devices that need to be frozen with interrupts
206 * off, create the hibernation image and thaw those devices. Control 236 * off, create the hibernation image and thaw those devices. Control
207 * reappears in this routine after a restore. 237 * reappears in this routine after a restore.
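swsusp_show_speed() moves here from swsusp.c so the hibernate core can report image I/O throughput directly; the time base is centiseconds and pages are converted to kbytes before the rate is derived. The arithmetic, extracted into a self-contained userspace program (PAGE_SIZE and NSEC_PER_SEC defined locally, values illustrative):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL
#define PAGE_SIZE    4096

static void show_speed(int64_t start_ns, int64_t stop_ns,
                       unsigned nr_pages, const char *msg)
{
        int64_t elapsed = stop_ns - start_ns;
        int centisecs = (int)(elapsed / (NSEC_PER_SEC / 100));
        int k, kps;

        if (centisecs == 0)
                centisecs = 1;  /* avoid div-by-zero */
        k = nr_pages * (PAGE_SIZE / 1024);
        kps = (k * 100) / centisecs;
        printf("PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
               msg, k, centisecs / 100, centisecs % 100,
               kps / 1000, (kps % 1000) / 10);
}

int main(void)
{
        /* 25000 pages written in 2.5 s -> 100000 kbytes at 40.00 MB/s */
        show_speed(0, 2500000000LL, 25000, "Wrote");
        return 0;
}
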
@@ -693,21 +723,22 @@ static int software_resume(void)
693 /* The snapshot device should not be opened while we're running */ 723 /* The snapshot device should not be opened while we're running */
694 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { 724 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
695 error = -EBUSY; 725 error = -EBUSY;
726 swsusp_close(FMODE_READ);
696 goto Unlock; 727 goto Unlock;
697 } 728 }
698 729
699 pm_prepare_console(); 730 pm_prepare_console();
700 error = pm_notifier_call_chain(PM_RESTORE_PREPARE); 731 error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
701 if (error) 732 if (error)
702 goto Finish; 733 goto close_finish;
703 734
704 error = usermodehelper_disable(); 735 error = usermodehelper_disable();
705 if (error) 736 if (error)
706 goto Finish; 737 goto close_finish;
707 738
708 error = create_basic_memory_bitmaps(); 739 error = create_basic_memory_bitmaps();
709 if (error) 740 if (error)
710 goto Finish; 741 goto close_finish;
711 742
712 pr_debug("PM: Preparing processes for restore.\n"); 743 pr_debug("PM: Preparing processes for restore.\n");
713 error = prepare_processes(); 744 error = prepare_processes();
@@ -719,6 +750,7 @@ static int software_resume(void)
719 pr_debug("PM: Reading hibernation image.\n"); 750 pr_debug("PM: Reading hibernation image.\n");
720 751
721 error = swsusp_read(&flags); 752 error = swsusp_read(&flags);
753 swsusp_close(FMODE_READ);
722 if (!error) 754 if (!error)
723 hibernation_restore(flags & SF_PLATFORM_MODE); 755 hibernation_restore(flags & SF_PLATFORM_MODE);
724 756
@@ -737,6 +769,9 @@ static int software_resume(void)
737 mutex_unlock(&pm_mutex); 769 mutex_unlock(&pm_mutex);
738 pr_debug("PM: Resume from disk failed.\n"); 770 pr_debug("PM: Resume from disk failed.\n");
739 return error; 771 return error;
772close_finish:
773 swsusp_close(FMODE_READ);
774 goto Finish;
740} 775}
741 776
742late_initcall(software_resume); 777late_initcall(software_resume);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 347d2cc88cd0..0998c7139053 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -220,6 +220,7 @@ static struct attribute_group attr_group = {
220 220
221#ifdef CONFIG_PM_RUNTIME 221#ifdef CONFIG_PM_RUNTIME
222struct workqueue_struct *pm_wq; 222struct workqueue_struct *pm_wq;
223EXPORT_SYMBOL_GPL(pm_wq);
223 224
224static int __init pm_start_workqueue(void) 225static int __init pm_start_workqueue(void)
225{ 226{
diff --git a/kernel/power/process.c b/kernel/power/process.c
index cc2e55373b68..5ade1bdcf366 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/syscalls.h> 15#include <linux/syscalls.h>
16#include <linux/freezer.h> 16#include <linux/freezer.h>
17#include <linux/delay.h>
17 18
18/* 19/*
19 * Timeout for stopping processes 20 * Timeout for stopping processes
@@ -41,7 +42,7 @@ static int try_to_freeze_tasks(bool sig_only)
41 do_gettimeofday(&start); 42 do_gettimeofday(&start);
42 43
43 end_time = jiffies + TIMEOUT; 44 end_time = jiffies + TIMEOUT;
44 do { 45 while (true) {
45 todo = 0; 46 todo = 0;
46 read_lock(&tasklist_lock); 47 read_lock(&tasklist_lock);
47 do_each_thread(g, p) { 48 do_each_thread(g, p) {
@@ -62,10 +63,15 @@ static int try_to_freeze_tasks(bool sig_only)
62 todo++; 63 todo++;
63 } while_each_thread(g, p); 64 } while_each_thread(g, p);
64 read_unlock(&tasklist_lock); 65 read_unlock(&tasklist_lock);
65 yield(); /* Yield is okay here */ 66 if (!todo || time_after(jiffies, end_time))
66 if (time_after(jiffies, end_time))
67 break; 67 break;
68 } while (todo); 68
69 /*
70 * We need to retry, but first give the freezing tasks some
 71 * time to enter the refrigerator.
72 */
73 msleep(10);
74 }
69 75
70 do_gettimeofday(&end); 76 do_gettimeofday(&end);
71 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); 77 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
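The freezer loop now sleeps for 10 ms between passes instead of yield()ing, and the exit test collapses to a single check: stop once nothing is left to freeze or the timeout expires. The control flow, reduced to a userspace sketch (tasks_left() is a stand-in for the tasklist walk, and the 20 s timeout is illustrative):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static int tasks_left(void)
{
        static int todo = 5;

        return todo ? todo-- : 0;       /* pretend one task freezes per pass */
}

int main(void)
{
        time_t end_time = time(NULL) + 20;      /* stand-in for TIMEOUT */
        int todo;

        while (1) {
                todo = tasks_left();
                if (!todo || time(NULL) > end_time)
                        break;
                /* give freezing tasks some time to enter the refrigerator */
                usleep(10 * 1000);              /* msleep(10) equivalent */
        }
        printf("tasks still to freeze: %d\n", todo);
        return 0;
}
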
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 17d8bb1acf9c..25596e450ac7 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -19,7 +19,7 @@
19 * The time it takes is system-specific though, so when we test this 19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time. 20 * during system bootup we allow a LOT of time.
21 */ 21 */
22#define TEST_SUSPEND_SECONDS 5 22#define TEST_SUSPEND_SECONDS 10
23 23
24static unsigned long suspend_test_start_time; 24static unsigned long suspend_test_start_time;
25 25
@@ -49,7 +49,8 @@ void suspend_test_finish(const char *label)
49 * has some performance issues. The stack dump of a WARN_ON 49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk... 50 * is more likely to get the right attention than a printk...
51 */ 51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label); 52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
53 "Component: %s, time: %u\n", label, msec);
53} 54}
54 55
55/* 56/*
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index b101cdc4df3f..09b2b0ae9e9d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -38,6 +38,107 @@ struct swsusp_header {
38 38
39static struct swsusp_header *swsusp_header; 39static struct swsusp_header *swsusp_header;
40 40
41/**
42 * The following functions are used for tracing the allocated
43 * swap pages, so that they can be freed in case of an error.
44 */
45
46struct swsusp_extent {
47 struct rb_node node;
48 unsigned long start;
49 unsigned long end;
50};
51
52static struct rb_root swsusp_extents = RB_ROOT;
53
54static int swsusp_extents_insert(unsigned long swap_offset)
55{
56 struct rb_node **new = &(swsusp_extents.rb_node);
57 struct rb_node *parent = NULL;
58 struct swsusp_extent *ext;
59
60 /* Figure out where to put the new node */
61 while (*new) {
62 ext = container_of(*new, struct swsusp_extent, node);
63 parent = *new;
64 if (swap_offset < ext->start) {
65 /* Try to merge */
66 if (swap_offset == ext->start - 1) {
67 ext->start--;
68 return 0;
69 }
70 new = &((*new)->rb_left);
71 } else if (swap_offset > ext->end) {
72 /* Try to merge */
73 if (swap_offset == ext->end + 1) {
74 ext->end++;
75 return 0;
76 }
77 new = &((*new)->rb_right);
78 } else {
79 /* It already is in the tree */
80 return -EINVAL;
81 }
82 }
83 /* Add the new node and rebalance the tree. */
84 ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
85 if (!ext)
86 return -ENOMEM;
87
88 ext->start = swap_offset;
89 ext->end = swap_offset;
90 rb_link_node(&ext->node, parent, new);
91 rb_insert_color(&ext->node, &swsusp_extents);
92 return 0;
93}
94
95/**
96 * alloc_swapdev_block - allocate a swap page and register that it has
97 * been allocated, so that it can be freed in case of an error.
98 */
99
100sector_t alloc_swapdev_block(int swap)
101{
102 unsigned long offset;
103
104 offset = swp_offset(get_swap_page_of_type(swap));
105 if (offset) {
106 if (swsusp_extents_insert(offset))
107 swap_free(swp_entry(swap, offset));
108 else
109 return swapdev_block(swap, offset);
110 }
111 return 0;
112}
113
114/**
115 * free_all_swap_pages - free swap pages allocated for saving image data.
 116 * It also frees the extents used to register which swap entries had been
117 * allocated.
118 */
119
120void free_all_swap_pages(int swap)
121{
122 struct rb_node *node;
123
124 while ((node = swsusp_extents.rb_node)) {
125 struct swsusp_extent *ext;
126 unsigned long offset;
127
128 ext = container_of(node, struct swsusp_extent, node);
129 rb_erase(node, &swsusp_extents);
130 for (offset = ext->start; offset <= ext->end; offset++)
131 swap_free(swp_entry(swap, offset));
132
133 kfree(ext);
134 }
135}
136
137int swsusp_swap_in_use(void)
138{
139 return (swsusp_extents.rb_node != NULL);
140}
141
41/* 142/*
42 * General things 143 * General things
43 */ 144 */
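The swap-extent bookkeeping moves from swsusp.c to sit next to its only users here. Every allocated swap offset is recorded in an rbtree of extents, and an offset adjacent to an existing extent is merged into it instead of allocating a new node. The merge test alone, as a small userspace illustration:

#include <stdio.h>

struct extent {
        unsigned long start;
        unsigned long end;
};

/* Returns 1 if @offset extended @ext, 0 if a new extent would be needed. */
static int try_merge(struct extent *ext, unsigned long offset)
{
        if (offset == ext->start - 1) {
                ext->start--;
                return 1;
        }
        if (offset == ext->end + 1) {
                ext->end++;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct extent e = { .start = 100, .end = 105 };

        printf("%d\n", try_merge(&e, 106));     /* 1: extent becomes [100,106] */
        printf("%d\n", try_merge(&e, 99));      /* 1: extent becomes [99,106]  */
        printf("%d\n", try_merge(&e, 50));      /* 0: not adjacent             */
        return 0;
}
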
@@ -314,7 +415,6 @@ static int save_image(struct swap_map_handle *handle,
314{ 415{
315 unsigned int m; 416 unsigned int m;
316 int ret; 417 int ret;
317 int error = 0;
318 int nr_pages; 418 int nr_pages;
319 int err2; 419 int err2;
320 struct bio *bio; 420 struct bio *bio;
@@ -329,26 +429,27 @@ static int save_image(struct swap_map_handle *handle,
329 nr_pages = 0; 429 nr_pages = 0;
330 bio = NULL; 430 bio = NULL;
331 do_gettimeofday(&start); 431 do_gettimeofday(&start);
332 do { 432 while (1) {
333 ret = snapshot_read_next(snapshot, PAGE_SIZE); 433 ret = snapshot_read_next(snapshot, PAGE_SIZE);
334 if (ret > 0) { 434 if (ret <= 0)
335 error = swap_write_page(handle, data_of(*snapshot), 435 break;
336 &bio); 436 ret = swap_write_page(handle, data_of(*snapshot), &bio);
337 if (error) 437 if (ret)
338 break; 438 break;
339 if (!(nr_pages % m)) 439 if (!(nr_pages % m))
340 printk("\b\b\b\b%3d%%", nr_pages / m); 440 printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
341 nr_pages++; 441 nr_pages++;
342 } 442 }
343 } while (ret > 0);
344 err2 = wait_on_bio_chain(&bio); 443 err2 = wait_on_bio_chain(&bio);
345 do_gettimeofday(&stop); 444 do_gettimeofday(&stop);
346 if (!error) 445 if (!ret)
347 error = err2; 446 ret = err2;
348 if (!error) 447 if (!ret)
349 printk("\b\b\b\bdone\n"); 448 printk(KERN_CONT "\b\b\b\bdone\n");
449 else
450 printk(KERN_CONT "\n");
350 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote"); 451 swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
351 return error; 452 return ret;
352} 453}
353 454
354/** 455/**
@@ -536,7 +637,8 @@ static int load_image(struct swap_map_handle *handle,
536 snapshot_write_finalize(snapshot); 637 snapshot_write_finalize(snapshot);
537 if (!snapshot_image_loaded(snapshot)) 638 if (!snapshot_image_loaded(snapshot))
538 error = -ENODATA; 639 error = -ENODATA;
539 } 640 } else
641 printk("\n");
540 swsusp_show_speed(&start, &stop, nr_to_read, "Read"); 642 swsusp_show_speed(&start, &stop, nr_to_read, "Read");
541 return error; 643 return error;
542} 644}
@@ -572,8 +674,6 @@ int swsusp_read(unsigned int *flags_p)
572 error = load_image(&handle, &snapshot, header->pages - 1); 674 error = load_image(&handle, &snapshot, header->pages - 1);
573 release_swap_reader(&handle); 675 release_swap_reader(&handle);
574 676
575 blkdev_put(resume_bdev, FMODE_READ);
576
577 if (!error) 677 if (!error)
578 pr_debug("PM: Image successfully loaded\n"); 678 pr_debug("PM: Image successfully loaded\n");
579 else 679 else
@@ -596,7 +696,7 @@ int swsusp_check(void)
596 error = bio_read_page(swsusp_resume_block, 696 error = bio_read_page(swsusp_resume_block,
597 swsusp_header, NULL); 697 swsusp_header, NULL);
598 if (error) 698 if (error)
599 return error; 699 goto put;
600 700
601 if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) { 701 if (!memcmp(SWSUSP_SIG, swsusp_header->sig, 10)) {
602 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); 702 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
@@ -604,8 +704,10 @@ int swsusp_check(void)
604 error = bio_write_page(swsusp_resume_block, 704 error = bio_write_page(swsusp_resume_block,
605 swsusp_header, NULL); 705 swsusp_header, NULL);
606 } else { 706 } else {
607 return -EINVAL; 707 error = -EINVAL;
608 } 708 }
709
710put:
609 if (error) 711 if (error)
610 blkdev_put(resume_bdev, FMODE_READ); 712 blkdev_put(resume_bdev, FMODE_READ);
611 else 713 else
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 6a07f4dbf2f8..5b3601bd1893 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -56,133 +56,3 @@
56#include "power.h" 56#include "power.h"
57 57
58int in_suspend __nosavedata = 0; 58int in_suspend __nosavedata = 0;
59
60/**
61 * The following functions are used for tracing the allocated
62 * swap pages, so that they can be freed in case of an error.
63 */
64
65struct swsusp_extent {
66 struct rb_node node;
67 unsigned long start;
68 unsigned long end;
69};
70
71static struct rb_root swsusp_extents = RB_ROOT;
72
73static int swsusp_extents_insert(unsigned long swap_offset)
74{
75 struct rb_node **new = &(swsusp_extents.rb_node);
76 struct rb_node *parent = NULL;
77 struct swsusp_extent *ext;
78
79 /* Figure out where to put the new node */
80 while (*new) {
81 ext = container_of(*new, struct swsusp_extent, node);
82 parent = *new;
83 if (swap_offset < ext->start) {
84 /* Try to merge */
85 if (swap_offset == ext->start - 1) {
86 ext->start--;
87 return 0;
88 }
89 new = &((*new)->rb_left);
90 } else if (swap_offset > ext->end) {
91 /* Try to merge */
92 if (swap_offset == ext->end + 1) {
93 ext->end++;
94 return 0;
95 }
96 new = &((*new)->rb_right);
97 } else {
98 /* It already is in the tree */
99 return -EINVAL;
100 }
101 }
102 /* Add the new node and rebalance the tree. */
103 ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
104 if (!ext)
105 return -ENOMEM;
106
107 ext->start = swap_offset;
108 ext->end = swap_offset;
109 rb_link_node(&ext->node, parent, new);
110 rb_insert_color(&ext->node, &swsusp_extents);
111 return 0;
112}
113
114/**
115 * alloc_swapdev_block - allocate a swap page and register that it has
116 * been allocated, so that it can be freed in case of an error.
117 */
118
119sector_t alloc_swapdev_block(int swap)
120{
121 unsigned long offset;
122
123 offset = swp_offset(get_swap_page_of_type(swap));
124 if (offset) {
125 if (swsusp_extents_insert(offset))
126 swap_free(swp_entry(swap, offset));
127 else
128 return swapdev_block(swap, offset);
129 }
130 return 0;
131}
132
133/**
134 * free_all_swap_pages - free swap pages allocated for saving image data.
135 * It also frees the extents used to register which swap entres had been
136 * allocated.
137 */
138
139void free_all_swap_pages(int swap)
140{
141 struct rb_node *node;
142
143 while ((node = swsusp_extents.rb_node)) {
144 struct swsusp_extent *ext;
145 unsigned long offset;
146
147 ext = container_of(node, struct swsusp_extent, node);
148 rb_erase(node, &swsusp_extents);
149 for (offset = ext->start; offset <= ext->end; offset++)
150 swap_free(swp_entry(swap, offset));
151
152 kfree(ext);
153 }
154}
155
156int swsusp_swap_in_use(void)
157{
158 return (swsusp_extents.rb_node != NULL);
159}
160
161/**
162 * swsusp_show_speed - print the time elapsed between two events represented by
163 * @start and @stop
164 *
165 * @nr_pages - number of pages processed between @start and @stop
166 * @msg - introductory message to print
167 */
168
169void swsusp_show_speed(struct timeval *start, struct timeval *stop,
170 unsigned nr_pages, char *msg)
171{
172 s64 elapsed_centisecs64;
173 int centisecs;
174 int k;
175 int kps;
176
177 elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
178 do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
179 centisecs = elapsed_centisecs64;
180 if (centisecs == 0)
181 centisecs = 1; /* avoid div-by-zero */
182 k = nr_pages * (PAGE_SIZE / 1024);
183 kps = (k * 100) / centisecs;
184 printk(KERN_INFO "PM: %s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n",
185 msg, k,
186 centisecs / 100, centisecs % 100,
187 kps / 1000, (kps % 1000) / 10);
188}
diff --git a/kernel/printk.c b/kernel/printk.c
index f38b07f78a4e..b5ac4d99c667 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/kexec.h> 35#include <linux/kexec.h>
36#include <linux/ratelimit.h>
36 37
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
38 39
@@ -1376,11 +1377,11 @@ late_initcall(disable_boot_consoles);
1376 */ 1377 */
1377DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); 1378DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
1378 1379
1379int printk_ratelimit(void) 1380int __printk_ratelimit(const char *func)
1380{ 1381{
1381 return __ratelimit(&printk_ratelimit_state); 1382 return ___ratelimit(&printk_ratelimit_state, func);
1382} 1383}
1383EXPORT_SYMBOL(printk_ratelimit); 1384EXPORT_SYMBOL(__printk_ratelimit);
1384 1385
1385/** 1386/**
1386 * printk_timed_ratelimit - caller-controlled printk ratelimiting 1387 * printk_timed_ratelimit - caller-controlled printk ratelimiting
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 37ac45483082..9b7fd4723878 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -44,23 +44,13 @@
44#include <linux/cpu.h> 44#include <linux/cpu.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h>
48 47
49enum rcu_barrier { 48#ifdef CONFIG_DEBUG_LOCK_ALLOC
50 RCU_BARRIER_STD, 49static struct lock_class_key rcu_lock_key;
51 RCU_BARRIER_BH, 50struct lockdep_map rcu_lock_map =
52 RCU_BARRIER_SCHED, 51 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
53}; 52EXPORT_SYMBOL_GPL(rcu_lock_map);
54 53#endif
55static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
56static atomic_t rcu_barrier_cpu_count;
57static DEFINE_MUTEX(rcu_barrier_mutex);
58static struct completion rcu_barrier_completion;
59int rcu_scheduler_active __read_mostly;
60
61static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
62static struct rcu_head rcu_migrate_head[3];
63static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
64 54
65/* 55/*
66 * Awaken the corresponding synchronize_rcu() instance now that a 56 * Awaken the corresponding synchronize_rcu() instance now that a
@@ -73,241 +63,3 @@ void wakeme_after_rcu(struct rcu_head *head)
73 rcu = container_of(head, struct rcu_synchronize, head); 63 rcu = container_of(head, struct rcu_synchronize, head);
74 complete(&rcu->completion); 64 complete(&rcu->completion);
75} 65}
76
77#ifdef CONFIG_TREE_PREEMPT_RCU
78
79/**
80 * synchronize_rcu - wait until a grace period has elapsed.
81 *
82 * Control will return to the caller some time after a full grace
83 * period has elapsed, in other words after all currently executing RCU
84 * read-side critical sections have completed. RCU read-side critical
85 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
86 * and may be nested.
87 */
88void synchronize_rcu(void)
89{
90 struct rcu_synchronize rcu;
91
92 if (!rcu_scheduler_active)
93 return;
94
95 init_completion(&rcu.completion);
96 /* Will wake me after RCU finished. */
97 call_rcu(&rcu.head, wakeme_after_rcu);
98 /* Wait for it. */
99 wait_for_completion(&rcu.completion);
100}
101EXPORT_SYMBOL_GPL(synchronize_rcu);
102
103#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
104
105/**
106 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
107 *
108 * Control will return to the caller some time after a full rcu-sched
109 * grace period has elapsed, in other words after all currently executing
110 * rcu-sched read-side critical sections have completed. These read-side
111 * critical sections are delimited by rcu_read_lock_sched() and
112 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
113 * local_irq_disable(), and so on may be used in place of
114 * rcu_read_lock_sched().
115 *
116 * This means that all preempt_disable code sequences, including NMI and
117 * hardware-interrupt handlers, in progress on entry will have completed
118 * before this primitive returns. However, this does not guarantee that
119 * softirq handlers will have completed, since in some kernels, these
120 * handlers can run in process context, and can block.
121 *
122 * This primitive provides the guarantees made by the (now removed)
123 * synchronize_kernel() API. In contrast, synchronize_rcu() only
124 * guarantees that rcu_read_lock() sections will have completed.
125 * In "classic RCU", these two guarantees happen to be one and
126 * the same, but can differ in realtime RCU implementations.
127 */
128void synchronize_sched(void)
129{
130 struct rcu_synchronize rcu;
131
132 if (rcu_blocking_is_gp())
133 return;
134
135 init_completion(&rcu.completion);
136 /* Will wake me after RCU finished. */
137 call_rcu_sched(&rcu.head, wakeme_after_rcu);
138 /* Wait for it. */
139 wait_for_completion(&rcu.completion);
140}
141EXPORT_SYMBOL_GPL(synchronize_sched);
142
143/**
144 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
145 *
146 * Control will return to the caller some time after a full rcu_bh grace
147 * period has elapsed, in other words after all currently executing rcu_bh
148 * read-side critical sections have completed. RCU read-side critical
149 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
150 * and may be nested.
151 */
152void synchronize_rcu_bh(void)
153{
154 struct rcu_synchronize rcu;
155
156 if (rcu_blocking_is_gp())
157 return;
158
159 init_completion(&rcu.completion);
160 /* Will wake me after RCU finished. */
161 call_rcu_bh(&rcu.head, wakeme_after_rcu);
162 /* Wait for it. */
163 wait_for_completion(&rcu.completion);
164}
165EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
166
167static void rcu_barrier_callback(struct rcu_head *notused)
168{
169 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
170 complete(&rcu_barrier_completion);
171}
172
173/*
174 * Called with preemption disabled, and from cross-cpu IRQ context.
175 */
176static void rcu_barrier_func(void *type)
177{
178 int cpu = smp_processor_id();
179 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
180
181 atomic_inc(&rcu_barrier_cpu_count);
182 switch ((enum rcu_barrier)type) {
183 case RCU_BARRIER_STD:
184 call_rcu(head, rcu_barrier_callback);
185 break;
186 case RCU_BARRIER_BH:
187 call_rcu_bh(head, rcu_barrier_callback);
188 break;
189 case RCU_BARRIER_SCHED:
190 call_rcu_sched(head, rcu_barrier_callback);
191 break;
192 }
193}
194
195static inline void wait_migrated_callbacks(void)
196{
197 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
198 smp_mb(); /* In case we didn't sleep. */
199}
200
201/*
202 * Orchestrate the specified type of RCU barrier, waiting for all
203 * RCU callbacks of the specified type to complete.
204 */
205static void _rcu_barrier(enum rcu_barrier type)
206{
207 BUG_ON(in_interrupt());
208 /* Take cpucontrol mutex to protect against CPU hotplug */
209 mutex_lock(&rcu_barrier_mutex);
210 init_completion(&rcu_barrier_completion);
211 /*
212 * Initialize rcu_barrier_cpu_count to 1, then invoke
213 * rcu_barrier_func() on each CPU, so that each CPU also has
214 * incremented rcu_barrier_cpu_count. Only then is it safe to
215 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
216 * might complete its grace period before all of the other CPUs
217 * did their increment, causing this function to return too
218 * early.
219 */
220 atomic_set(&rcu_barrier_cpu_count, 1);
221 on_each_cpu(rcu_barrier_func, (void *)type, 1);
222 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
223 complete(&rcu_barrier_completion);
224 wait_for_completion(&rcu_barrier_completion);
225 mutex_unlock(&rcu_barrier_mutex);
226 wait_migrated_callbacks();
227}
228
229/**
230 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
231 */
232void rcu_barrier(void)
233{
234 _rcu_barrier(RCU_BARRIER_STD);
235}
236EXPORT_SYMBOL_GPL(rcu_barrier);
237
238/**
239 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
240 */
241void rcu_barrier_bh(void)
242{
243 _rcu_barrier(RCU_BARRIER_BH);
244}
245EXPORT_SYMBOL_GPL(rcu_barrier_bh);
246
247/**
248 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
249 */
250void rcu_barrier_sched(void)
251{
252 _rcu_barrier(RCU_BARRIER_SCHED);
253}
254EXPORT_SYMBOL_GPL(rcu_barrier_sched);
255
256static void rcu_migrate_callback(struct rcu_head *notused)
257{
258 if (atomic_dec_and_test(&rcu_migrate_type_count))
259 wake_up(&rcu_migrate_wq);
260}
261
262extern int rcu_cpu_notify(struct notifier_block *self,
263 unsigned long action, void *hcpu);
264
265static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
266 unsigned long action, void *hcpu)
267{
268 rcu_cpu_notify(self, action, hcpu);
269 if (action == CPU_DYING) {
270 /*
271 * preempt_disable() in on_each_cpu() prevents stop_machine(),
272 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
273 * returns, all online cpus have queued rcu_barrier_func(),
274 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
275 *
276 * These callbacks ensure _rcu_barrier() waits for all
277 * RCU callbacks of the specified type to complete.
278 */
279 atomic_set(&rcu_migrate_type_count, 3);
280 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
281 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
282 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
283 } else if (action == CPU_DOWN_PREPARE) {
284 /* Don't need to wait until next removal operation. */
285 /* rcu_migrate_head is protected by cpu_add_remove_lock */
286 wait_migrated_callbacks();
287 }
288
289 return NOTIFY_OK;
290}
291
292void __init rcu_init(void)
293{
294 int i;
295
296 __rcu_init();
297 cpu_notifier(rcu_barrier_cpu_hotplug, 0);
298
299 /*
300 * We don't need protection against CPU-hotplug here because
301 * this is called early in boot, before either interrupts
302 * or the scheduler are operational.
303 */
304 for_each_online_cpu(i)
305 rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
306}
307
308void rcu_scheduler_starting(void)
309{
310 WARN_ON(num_online_cpus() != 1);
311 WARN_ON(nr_context_switches() > 0);
312 rcu_scheduler_active = 1;
313}
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
new file mode 100644
index 000000000000..9f6d9ff2572c
--- /dev/null
+++ b/kernel/rcutiny.c
@@ -0,0 +1,282 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 *
22 * For detailed explanation of Read-Copy Update mechanism see -
23 * Documentation/RCU
24 */
25#include <linux/moduleparam.h>
26#include <linux/completion.h>
27#include <linux/interrupt.h>
28#include <linux/notifier.h>
29#include <linux/rcupdate.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/mutex.h>
33#include <linux/sched.h>
34#include <linux/types.h>
35#include <linux/init.h>
36#include <linux/time.h>
37#include <linux/cpu.h>
38
39/* Global control variables for rcupdate callback mechanism. */
40struct rcu_ctrlblk {
41 struct rcu_head *rcucblist; /* List of pending callbacks (CBs). */
42 struct rcu_head **donetail; /* ->next pointer of last "done" CB. */
43 struct rcu_head **curtail; /* ->next pointer of last CB. */
44};
45
46/* Definition for rcupdate control block. */
47static struct rcu_ctrlblk rcu_ctrlblk = {
48 .donetail = &rcu_ctrlblk.rcucblist,
49 .curtail = &rcu_ctrlblk.rcucblist,
50};
51
52static struct rcu_ctrlblk rcu_bh_ctrlblk = {
53 .donetail = &rcu_bh_ctrlblk.rcucblist,
54 .curtail = &rcu_bh_ctrlblk.rcucblist,
55};
56
57#ifdef CONFIG_NO_HZ
58
59static long rcu_dynticks_nesting = 1;
60
61/*
62 * Enter dynticks-idle mode, which is an extended quiescent state
63 * if we have fully entered that mode (i.e., if the new value of
64 * dynticks_nesting is zero).
65 */
66void rcu_enter_nohz(void)
67{
68 if (--rcu_dynticks_nesting == 0)
69 rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
70}
71
72/*
73 * Exit dynticks-idle mode, so that we are no longer in an extended
74 * quiescent state.
75 */
76void rcu_exit_nohz(void)
77{
78 rcu_dynticks_nesting++;
79}
80
81#endif /* #ifdef CONFIG_NO_HZ */
82
83/*
84 * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
85 * Also disable irqs to avoid confusion due to interrupt handlers
86 * invoking call_rcu().
87 */
88static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
89{
90 unsigned long flags;
91
92 local_irq_save(flags);
93 if (rcp->rcucblist != NULL &&
94 rcp->donetail != rcp->curtail) {
95 rcp->donetail = rcp->curtail;
96 local_irq_restore(flags);
97 return 1;
98 }
99 local_irq_restore(flags);
100
101 return 0;
102}
103
104/*
105 * Record an rcu quiescent state. And an rcu_bh quiescent state while we
106 * are at it, given that any rcu quiescent state is also an rcu_bh
107 * quiescent state. Use "+" instead of "||" to defeat short circuiting.
108 */
109void rcu_sched_qs(int cpu)
110{
111 if (rcu_qsctr_help(&rcu_ctrlblk) + rcu_qsctr_help(&rcu_bh_ctrlblk))
112 raise_softirq(RCU_SOFTIRQ);
113}
114
115/*
116 * Record an rcu_bh quiescent state.
117 */
118void rcu_bh_qs(int cpu)
119{
120 if (rcu_qsctr_help(&rcu_bh_ctrlblk))
121 raise_softirq(RCU_SOFTIRQ);
122}
123
124/*
125 * Check to see if the scheduling-clock interrupt came from an extended
126 * quiescent state, and, if so, tell RCU about it.
127 */
128void rcu_check_callbacks(int cpu, int user)
129{
130 if (user ||
131 (idle_cpu(cpu) &&
132 !in_softirq() &&
133 hardirq_count() <= (1 << HARDIRQ_SHIFT)))
134 rcu_sched_qs(cpu);
135 else if (!in_softirq())
136 rcu_bh_qs(cpu);
137}
138
139/*
140 * Helper function for rcu_process_callbacks() that operates on the
141 * specified rcu_ctrlkblk structure.
142 */
143static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
144{
145 struct rcu_head *next, *list;
146 unsigned long flags;
147
148 /* If no RCU callbacks ready to invoke, just return. */
149 if (&rcp->rcucblist == rcp->donetail)
150 return;
151
152 /* Move the ready-to-invoke callbacks to a local list. */
153 local_irq_save(flags);
154 list = rcp->rcucblist;
155 rcp->rcucblist = *rcp->donetail;
156 *rcp->donetail = NULL;
157 if (rcp->curtail == rcp->donetail)
158 rcp->curtail = &rcp->rcucblist;
159 rcp->donetail = &rcp->rcucblist;
160 local_irq_restore(flags);
161
162 /* Invoke the callbacks on the local list. */
163 while (list) {
164 next = list->next;
165 prefetch(next);
166 list->func(list);
167 list = next;
168 }
169}
170
171/*
172 * Invoke any callbacks whose grace period has completed.
173 */
174static void rcu_process_callbacks(struct softirq_action *unused)
175{
176 __rcu_process_callbacks(&rcu_ctrlblk);
177 __rcu_process_callbacks(&rcu_bh_ctrlblk);
178}
179
180/*
181 * Wait for a grace period to elapse. But it is illegal to invoke
182 * synchronize_sched() from within an RCU read-side critical section.
183 * Therefore, any legal call to synchronize_sched() is a quiescent
184 * state, and so on a UP system, synchronize_sched() need do nothing.
185 * Ditto for synchronize_rcu_bh(). (But Lai Jiangshan points out the
186 * benefits of doing might_sleep() to reduce latency.)
187 *
188 * Cool, huh? (Due to Josh Triplett.)
189 *
190 * But we want to make this a static inline later.
191 */
192void synchronize_sched(void)
193{
194 cond_resched();
195}
196EXPORT_SYMBOL_GPL(synchronize_sched);
197
198void synchronize_rcu_bh(void)
199{
200 synchronize_sched();
201}
202EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
203
204/*
205 * Helper function for call_rcu() and call_rcu_bh().
206 */
207static void __call_rcu(struct rcu_head *head,
208 void (*func)(struct rcu_head *rcu),
209 struct rcu_ctrlblk *rcp)
210{
211 unsigned long flags;
212
213 head->func = func;
214 head->next = NULL;
215
216 local_irq_save(flags);
217 *rcp->curtail = head;
218 rcp->curtail = &head->next;
219 local_irq_restore(flags);
220}
221
222/*
223 * Post an RCU callback to be invoked after the end of an RCU grace
224 * period. But since we have but one CPU, that would be after any
225 * quiescent state.
226 */
227void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
228{
229 __call_rcu(head, func, &rcu_ctrlblk);
230}
231EXPORT_SYMBOL_GPL(call_rcu);
232
233/*
234 * Post an RCU bottom-half callback to be invoked after any subsequent
235 * quiescent state.
236 */
237void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
238{
239 __call_rcu(head, func, &rcu_bh_ctrlblk);
240}
241EXPORT_SYMBOL_GPL(call_rcu_bh);
242
243void rcu_barrier(void)
244{
245 struct rcu_synchronize rcu;
246
247 init_completion(&rcu.completion);
248 /* Will wake me after RCU finished. */
249 call_rcu(&rcu.head, wakeme_after_rcu);
250 /* Wait for it. */
251 wait_for_completion(&rcu.completion);
252}
253EXPORT_SYMBOL_GPL(rcu_barrier);
254
255void rcu_barrier_bh(void)
256{
257 struct rcu_synchronize rcu;
258
259 init_completion(&rcu.completion);
260 /* Will wake me after RCU finished. */
261 call_rcu_bh(&rcu.head, wakeme_after_rcu);
262 /* Wait for it. */
263 wait_for_completion(&rcu.completion);
264}
265EXPORT_SYMBOL_GPL(rcu_barrier_bh);
266
267void rcu_barrier_sched(void)
268{
269 struct rcu_synchronize rcu;
270
271 init_completion(&rcu.completion);
272 /* Will wake me after RCU finished. */
273 call_rcu_sched(&rcu.head, wakeme_after_rcu);
274 /* Wait for it. */
275 wait_for_completion(&rcu.completion);
276}
277EXPORT_SYMBOL_GPL(rcu_barrier_sched);
278
279void __init rcu_init(void)
280{
281 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
282}
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 233768f21f97..a621a67ef4e3 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -327,6 +327,11 @@ rcu_torture_cb(struct rcu_head *p)
327 cur_ops->deferred_free(rp); 327 cur_ops->deferred_free(rp);
328} 328}
329 329
330static int rcu_no_completed(void)
331{
332 return 0;
333}
334
330static void rcu_torture_deferred_free(struct rcu_torture *p) 335static void rcu_torture_deferred_free(struct rcu_torture *p)
331{ 336{
332 call_rcu(&p->rtort_rcu, rcu_torture_cb); 337 call_rcu(&p->rtort_rcu, rcu_torture_cb);
@@ -388,6 +393,21 @@ static struct rcu_torture_ops rcu_sync_ops = {
388 .name = "rcu_sync" 393 .name = "rcu_sync"
389}; 394};
390 395
396static struct rcu_torture_ops rcu_expedited_ops = {
397 .init = rcu_sync_torture_init,
398 .cleanup = NULL,
399 .readlock = rcu_torture_read_lock,
400 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
401 .readunlock = rcu_torture_read_unlock,
402 .completed = rcu_no_completed,
403 .deferred_free = rcu_sync_torture_deferred_free,
404 .sync = synchronize_rcu_expedited,
405 .cb_barrier = NULL,
406 .stats = NULL,
407 .irq_capable = 1,
408 .name = "rcu_expedited"
409};
410
391/* 411/*
392 * Definitions for rcu_bh torture testing. 412 * Definitions for rcu_bh torture testing.
393 */ 413 */
@@ -547,6 +567,25 @@ static struct rcu_torture_ops srcu_ops = {
547 .name = "srcu" 567 .name = "srcu"
548}; 568};
549 569
570static void srcu_torture_synchronize_expedited(void)
571{
572 synchronize_srcu_expedited(&srcu_ctl);
573}
574
575static struct rcu_torture_ops srcu_expedited_ops = {
576 .init = srcu_torture_init,
577 .cleanup = srcu_torture_cleanup,
578 .readlock = srcu_torture_read_lock,
579 .read_delay = srcu_read_delay,
580 .readunlock = srcu_torture_read_unlock,
581 .completed = srcu_torture_completed,
582 .deferred_free = rcu_sync_torture_deferred_free,
583 .sync = srcu_torture_synchronize_expedited,
584 .cb_barrier = NULL,
585 .stats = srcu_torture_stats,
586 .name = "srcu_expedited"
587};
588
550/* 589/*
551 * Definitions for sched torture testing. 590 * Definitions for sched torture testing.
552 */ 591 */
@@ -562,11 +601,6 @@ static void sched_torture_read_unlock(int idx)
562 preempt_enable(); 601 preempt_enable();
563} 602}
564 603
565static int sched_torture_completed(void)
566{
567 return 0;
568}
569
570static void rcu_sched_torture_deferred_free(struct rcu_torture *p) 604static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
571{ 605{
572 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); 606 call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
@@ -583,7 +617,7 @@ static struct rcu_torture_ops sched_ops = {
583 .readlock = sched_torture_read_lock, 617 .readlock = sched_torture_read_lock,
584 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 618 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
585 .readunlock = sched_torture_read_unlock, 619 .readunlock = sched_torture_read_unlock,
586 .completed = sched_torture_completed, 620 .completed = rcu_no_completed,
587 .deferred_free = rcu_sched_torture_deferred_free, 621 .deferred_free = rcu_sched_torture_deferred_free,
588 .sync = sched_torture_synchronize, 622 .sync = sched_torture_synchronize,
589 .cb_barrier = rcu_barrier_sched, 623 .cb_barrier = rcu_barrier_sched,
@@ -592,13 +626,13 @@ static struct rcu_torture_ops sched_ops = {
592 .name = "sched" 626 .name = "sched"
593}; 627};
594 628
595static struct rcu_torture_ops sched_ops_sync = { 629static struct rcu_torture_ops sched_sync_ops = {
596 .init = rcu_sync_torture_init, 630 .init = rcu_sync_torture_init,
597 .cleanup = NULL, 631 .cleanup = NULL,
598 .readlock = sched_torture_read_lock, 632 .readlock = sched_torture_read_lock,
599 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 633 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
600 .readunlock = sched_torture_read_unlock, 634 .readunlock = sched_torture_read_unlock,
601 .completed = sched_torture_completed, 635 .completed = rcu_no_completed,
602 .deferred_free = rcu_sync_torture_deferred_free, 636 .deferred_free = rcu_sync_torture_deferred_free,
603 .sync = sched_torture_synchronize, 637 .sync = sched_torture_synchronize,
604 .cb_barrier = NULL, 638 .cb_barrier = NULL,
@@ -606,15 +640,13 @@ static struct rcu_torture_ops sched_ops_sync = {
606 .name = "sched_sync" 640 .name = "sched_sync"
607}; 641};
608 642
609extern int rcu_expedited_torture_stats(char *page);
610
611static struct rcu_torture_ops sched_expedited_ops = { 643static struct rcu_torture_ops sched_expedited_ops = {
612 .init = rcu_sync_torture_init, 644 .init = rcu_sync_torture_init,
613 .cleanup = NULL, 645 .cleanup = NULL,
614 .readlock = sched_torture_read_lock, 646 .readlock = sched_torture_read_lock,
615 .read_delay = rcu_read_delay, /* just reuse rcu's version. */ 647 .read_delay = rcu_read_delay, /* just reuse rcu's version. */
616 .readunlock = sched_torture_read_unlock, 648 .readunlock = sched_torture_read_unlock,
617 .completed = sched_torture_completed, 649 .completed = rcu_no_completed,
618 .deferred_free = rcu_sync_torture_deferred_free, 650 .deferred_free = rcu_sync_torture_deferred_free,
619 .sync = synchronize_sched_expedited, 651 .sync = synchronize_sched_expedited,
620 .cb_barrier = NULL, 652 .cb_barrier = NULL,
@@ -650,7 +682,7 @@ rcu_torture_writer(void *arg)
650 old_rp = rcu_torture_current; 682 old_rp = rcu_torture_current;
651 rp->rtort_mbtest = 1; 683 rp->rtort_mbtest = 1;
652 rcu_assign_pointer(rcu_torture_current, rp); 684 rcu_assign_pointer(rcu_torture_current, rp);
653 smp_wmb(); 685 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
654 if (old_rp) { 686 if (old_rp) {
655 i = old_rp->rtort_pipe_count; 687 i = old_rp->rtort_pipe_count;
656 if (i > RCU_TORTURE_PIPE_LEN) 688 if (i > RCU_TORTURE_PIPE_LEN)
@@ -1099,9 +1131,10 @@ rcu_torture_init(void)
1099 int cpu; 1131 int cpu;
1100 int firsterr = 0; 1132 int firsterr = 0;
1101 static struct rcu_torture_ops *torture_ops[] = 1133 static struct rcu_torture_ops *torture_ops[] =
1102 { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, 1134 { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
1103 &sched_expedited_ops, 1135 &rcu_bh_ops, &rcu_bh_sync_ops,
1104 &srcu_ops, &sched_ops, &sched_ops_sync, }; 1136 &srcu_ops, &srcu_expedited_ops,
1137 &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
1105 1138
1106 mutex_lock(&fullstop_mutex); 1139 mutex_lock(&fullstop_mutex);
1107 1140
@@ -1112,8 +1145,12 @@ rcu_torture_init(void)
1112 break; 1145 break;
1113 } 1146 }
1114 if (i == ARRAY_SIZE(torture_ops)) { 1147 if (i == ARRAY_SIZE(torture_ops)) {
1115 printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n", 1148 printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
1116 torture_type); 1149 torture_type);
1150 printk(KERN_ALERT "rcu-torture types:");
1151 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1152 printk(KERN_ALERT " %s", torture_ops[i]->name);
1153 printk(KERN_ALERT "\n");
1117 mutex_unlock(&fullstop_mutex); 1154 mutex_unlock(&fullstop_mutex);
1118 return -EINVAL; 1155 return -EINVAL;
1119 } 1156 }
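
The ops vector is selected at load time through rcutorture's torture_type module parameter, so the new flavors above can be exercised with, for example, "modprobe rcutorture torture_type=rcu_expedited" or "torture_type=srcu_expedited"; the added printk()s list the valid names whenever an unknown type is requested.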
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52b06f6e158c..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,30 +46,30 @@
46#include <linux/cpu.h> 46#include <linux/cpu.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/kernel_stat.h>
49 50
50#include "rcutree.h" 51#include "rcutree.h"
51 52
52#ifdef CONFIG_DEBUG_LOCK_ALLOC
53static struct lock_class_key rcu_lock_key;
54struct lockdep_map rcu_lock_map =
55 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
56EXPORT_SYMBOL_GPL(rcu_lock_map);
57#endif
58
59/* Data structures. */ 53/* Data structures. */
60 54
55static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
56
61#define RCU_STATE_INITIALIZER(name) { \ 57#define RCU_STATE_INITIALIZER(name) { \
62 .level = { &name.node[0] }, \ 58 .level = { &name.node[0] }, \
63 .levelcnt = { \ 59 .levelcnt = { \
64 NUM_RCU_LVL_0, /* root of hierarchy. */ \ 60 NUM_RCU_LVL_0, /* root of hierarchy. */ \
65 NUM_RCU_LVL_1, \ 61 NUM_RCU_LVL_1, \
66 NUM_RCU_LVL_2, \ 62 NUM_RCU_LVL_2, \
67 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ 63 NUM_RCU_LVL_3, \
64 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
68 }, \ 65 }, \
69 .signaled = RCU_SIGNAL_INIT, \ 66 .signaled = RCU_GP_IDLE, \
70 .gpnum = -300, \ 67 .gpnum = -300, \
71 .completed = -300, \ 68 .completed = -300, \
72 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ 69 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
70 .orphan_cbs_list = NULL, \
71 .orphan_cbs_tail = &name.orphan_cbs_list, \
72 .orphan_qlen = 0, \
73 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ 73 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
74 .n_force_qs = 0, \ 74 .n_force_qs = 0, \
75 .n_force_qs_ngp = 0, \ 75 .n_force_qs_ngp = 0, \
@@ -81,24 +81,18 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
83 83
84extern long rcu_batches_completed_sched(void); 84static int rcu_scheduler_active __read_mostly;
85static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
86static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
87 struct rcu_node *rnp, unsigned long flags);
88static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
89#ifdef CONFIG_HOTPLUG_CPU
90static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
91#endif /* #ifdef CONFIG_HOTPLUG_CPU */
92static void __rcu_process_callbacks(struct rcu_state *rsp,
93 struct rcu_data *rdp);
94static void __call_rcu(struct rcu_head *head,
95 void (*func)(struct rcu_head *rcu),
96 struct rcu_state *rsp);
97static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
98static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
99 int preemptable);
100 85
101#include "rcutree_plugin.h" 86
87/*
88 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
89 * permit this function to be invoked without holding the root rcu_node
90 * structure's ->lock, but of course results can be subject to change.
91 */
92static int rcu_gp_in_progress(struct rcu_state *rsp)
93{
94 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
95}
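
For reference, ACCESS_ONCE() is the kernel's volatile-access wrapper from <linux/compiler.h>, roughly:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

It forces each counter to be read exactly once even though no lock is held, so the comparison may race with a grace period starting or ending; callers of rcu_gp_in_progress() are written to tolerate such stale answers.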
102 96
103/* 97/*
104 * Note a quiescent state. Because we do not need to know 98 * Note a quiescent state. Because we do not need to know
@@ -110,7 +104,7 @@ void rcu_sched_qs(int cpu)
110 struct rcu_data *rdp; 104 struct rcu_data *rdp;
111 105
112 rdp = &per_cpu(rcu_sched_data, cpu); 106 rdp = &per_cpu(rcu_sched_data, cpu);
113 rdp->passed_quiesc_completed = rdp->completed; 107 rdp->passed_quiesc_completed = rdp->gpnum - 1;
114 barrier(); 108 barrier();
115 rdp->passed_quiesc = 1; 109 rdp->passed_quiesc = 1;
116 rcu_preempt_note_context_switch(cpu); 110 rcu_preempt_note_context_switch(cpu);
@@ -121,7 +115,7 @@ void rcu_bh_qs(int cpu)
121 struct rcu_data *rdp; 115 struct rcu_data *rdp;
122 116
123 rdp = &per_cpu(rcu_bh_data, cpu); 117 rdp = &per_cpu(rcu_bh_data, cpu);
124 rdp->passed_quiesc_completed = rdp->completed; 118 rdp->passed_quiesc_completed = rdp->gpnum - 1;
125 barrier(); 119 barrier();
126 rdp->passed_quiesc = 1; 120 rdp->passed_quiesc = 1;
127} 121}
@@ -137,6 +131,10 @@ static int blimit = 10; /* Maximum callbacks per softirq. */
137static int qhimark = 10000; /* If this many pending, ignore blimit. */ 131static int qhimark = 10000; /* If this many pending, ignore blimit. */
138static int qlowmark = 100; /* Once only this many pending, use blimit. */ 132static int qlowmark = 100; /* Once only this many pending, use blimit. */
139 133
134module_param(blimit, int, 0);
135module_param(qhimark, int, 0);
136module_param(qlowmark, int, 0);
137
140static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 138static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
141static int rcu_pending(int cpu); 139static int rcu_pending(int cpu);
142 140
@@ -173,9 +171,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
173static int 171static int
174cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) 172cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
175{ 173{
176 /* ACCESS_ONCE() because we are accessing outside of lock. */ 174 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
177 return *rdp->nxttail[RCU_DONE_TAIL] &&
178 ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
179} 175}
180 176
181/* 177/*
@@ -345,31 +341,12 @@ void rcu_irq_exit(void)
345 set_need_resched(); 341 set_need_resched();
346} 342}
347 343
348/*
349 * Record the specified "completed" value, which is later used to validate
350 * dynticks counter manipulations. Specify "rsp->completed - 1" to
351 * unconditionally invalidate any future dynticks manipulations (which is
352 * useful at the beginning of a grace period).
353 */
354static void dyntick_record_completed(struct rcu_state *rsp, long comp)
355{
356 rsp->dynticks_completed = comp;
357}
358
359#ifdef CONFIG_SMP 344#ifdef CONFIG_SMP
360 345
361/* 346/*
362 * Recall the previously recorded value of the completion for dynticks.
363 */
364static long dyntick_recall_completed(struct rcu_state *rsp)
365{
366 return rsp->dynticks_completed;
367}
368
369/*
370 * Snapshot the specified CPU's dynticks counter so that we can later 347 * Snapshot the specified CPU's dynticks counter so that we can later
371 * credit them with an implicit quiescent state. Return 1 if this CPU 348 * credit them with an implicit quiescent state. Return 1 if this CPU
372 * is already in a quiescent state courtesy of dynticks idle mode. 349 * is in dynticks idle mode, which is an extended quiescent state.
373 */ 350 */
374static int dyntick_save_progress_counter(struct rcu_data *rdp) 351static int dyntick_save_progress_counter(struct rcu_data *rdp)
375{ 352{
@@ -429,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
429 406
430#else /* #ifdef CONFIG_NO_HZ */ 407#else /* #ifdef CONFIG_NO_HZ */
431 408
432static void dyntick_record_completed(struct rcu_state *rsp, long comp)
433{
434}
435
436#ifdef CONFIG_SMP 409#ifdef CONFIG_SMP
437 410
438/*
439 * If there are no dynticks, then the only way that a CPU can passively
440 * be in a quiescent state is to be offline. Unlike dynticks idle, which
441 * is a point in time during the prior (already finished) grace period,
442 * an offline CPU is always in a quiescent state, and thus can be
443 * unconditionally applied. So just return the current value of completed.
444 */
445static long dyntick_recall_completed(struct rcu_state *rsp)
446{
447 return rsp->completed;
448}
449
450static int dyntick_save_progress_counter(struct rcu_data *rdp) 411static int dyntick_save_progress_counter(struct rcu_data *rdp)
451{ 412{
452 return 0; 413 return 0;
@@ -475,30 +436,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
475 long delta; 436 long delta;
476 unsigned long flags; 437 unsigned long flags;
477 struct rcu_node *rnp = rcu_get_root(rsp); 438 struct rcu_node *rnp = rcu_get_root(rsp);
478 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
479 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
480 439
481 /* Only let one CPU complain about others per time interval. */ 440 /* Only let one CPU complain about others per time interval. */
482 441
483 spin_lock_irqsave(&rnp->lock, flags); 442 spin_lock_irqsave(&rnp->lock, flags);
484 delta = jiffies - rsp->jiffies_stall; 443 delta = jiffies - rsp->jiffies_stall;
485 if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { 444 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
486 spin_unlock_irqrestore(&rnp->lock, flags); 445 spin_unlock_irqrestore(&rnp->lock, flags);
487 return; 446 return;
488 } 447 }
489 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 448 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
449
450 /*
451 * Now rat on any tasks that got kicked up to the root rcu_node
452 * due to CPU offlining.
453 */
454 rcu_print_task_stall(rnp);
490 spin_unlock_irqrestore(&rnp->lock, flags); 455 spin_unlock_irqrestore(&rnp->lock, flags);
491 456
492 /* OK, time to rat on our buddy... */ 457 /* OK, time to rat on our buddy... */
493 458
494 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 459 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
495 for (; rnp_cur < rnp_end; rnp_cur++) { 460 rcu_for_each_leaf_node(rsp, rnp) {
496 rcu_print_task_stall(rnp); 461 rcu_print_task_stall(rnp);
497 if (rnp_cur->qsmask == 0) 462 if (rnp->qsmask == 0)
498 continue; 463 continue;
499 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) 464 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
500 if (rnp_cur->qsmask & (1UL << cpu)) 465 if (rnp->qsmask & (1UL << cpu))
501 printk(" %d", rnp_cur->grplo + cpu); 466 printk(" %d", rnp->grplo + cpu);
502 } 467 }
503 printk(" (detected by %d, t=%ld jiffies)\n", 468 printk(" (detected by %d, t=%ld jiffies)\n",
504 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 469 smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -537,8 +502,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
537 /* We haven't checked in, so go dump stack. */ 502 /* We haven't checked in, so go dump stack. */
538 print_cpu_stall(rsp); 503 print_cpu_stall(rsp);
539 504
540 } else if (rsp->gpnum != rsp->completed && 505 } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
541 delta >= RCU_STALL_RAT_DELAY) {
542 506
543 /* They had two time units to dump stack, so complain. */ 507 /* They had two time units to dump stack, so complain. */
544 print_other_cpu_stall(rsp); 508 print_other_cpu_stall(rsp);
@@ -560,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
560/* 524/*
561 * Update CPU-local rcu_data state to record the newly noticed grace period. 525 * Update CPU-local rcu_data state to record the newly noticed grace period.
562 * This is used both when we started the grace period and when we notice 526 * This is used both when we started the grace period and when we notice
563 * that someone else started the grace period. 527 * that someone else started the grace period. The caller must hold the
528 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
529 * and must have irqs disabled.
564 */ 530 */
531static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
532{
533 if (rdp->gpnum != rnp->gpnum) {
534 rdp->qs_pending = 1;
535 rdp->passed_quiesc = 0;
536 rdp->gpnum = rnp->gpnum;
537 }
538}
539
565static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) 540static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
566{ 541{
567 rdp->qs_pending = 1; 542 unsigned long flags;
568 rdp->passed_quiesc = 0; 543 struct rcu_node *rnp;
569 rdp->gpnum = rsp->gpnum; 544
545 local_irq_save(flags);
546 rnp = rdp->mynode;
547 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
548 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
549 local_irq_restore(flags);
550 return;
551 }
552 __note_new_gpnum(rsp, rnp, rdp);
553 spin_unlock_irqrestore(&rnp->lock, flags);
570} 554}
571 555
572/* 556/*
@@ -590,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
590} 574}
591 575
592/* 576/*
577 * Advance this CPU's callbacks, but only if the current grace period
578 * has ended. This may be called only from the CPU to whom the rdp
579 * belongs. In addition, the corresponding leaf rcu_node structure's
580 * ->lock must be held by the caller, with irqs disabled.
581 */
582static void
583__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
584{
585 /* Did another grace period end? */
586 if (rdp->completed != rnp->completed) {
587
588 /* Advance callbacks. No harm if list empty. */
589 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
590 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
591 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
592
593 /* Remember that we saw this grace-period completion. */
594 rdp->completed = rnp->completed;
595 }
596}
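
The tail-pointer shuffling above is easier to follow with the layout of the per-CPU callback list in mind; a sketch (not part of this patch) of how ->nxtlist is carved into segments by the ->nxttail[] pointers:

/*
 *   rdp->nxtlist -> [ DONE cbs | WAIT cbs | NEXT_READY cbs | NEXT cbs ]
 *
 *   nxttail[RCU_DONE_TAIL]        after the last callback whose grace
 *                                 period has completed (ready to invoke)
 *   nxttail[RCU_WAIT_TAIL]        after the last callback waiting on the
 *                                 current grace period
 *   nxttail[RCU_NEXT_READY_TAIL]  after the last callback that will wait
 *                                 on the next grace period
 *   nxttail[RCU_NEXT_TAIL]        after the last callback on the list
 *
 * An empty segment's tail pointer equals the previous segment's, so
 * assignments such as nxttail[RCU_DONE_TAIL] = nxttail[RCU_WAIT_TAIL]
 * promote whole segments without touching the callbacks themselves.
 */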
597
598/*
599 * Advance this CPU's callbacks, but only if the current grace period
600 * has ended. This may be called only from the CPU to whom the rdp
601 * belongs.
602 */
603static void
604rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
605{
606 unsigned long flags;
607 struct rcu_node *rnp;
608
609 local_irq_save(flags);
610 rnp = rdp->mynode;
611 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
612 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
613 local_irq_restore(flags);
614 return;
615 }
616 __rcu_process_gp_end(rsp, rnp, rdp);
617 spin_unlock_irqrestore(&rnp->lock, flags);
618}
619
620/*
621 * Do per-CPU grace-period initialization for running CPU. The caller
622 * must hold the lock of the leaf rcu_node structure corresponding to
623 * this CPU.
624 */
625static void
626rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
627{
628 /* Prior grace period ended, so advance callbacks for current CPU. */
629 __rcu_process_gp_end(rsp, rnp, rdp);
630
631 /*
632 * Because this CPU just now started the new grace period, we know
633 * that all of its callbacks will be covered by this upcoming grace
634 * period, even the ones that were registered arbitrarily recently.
635 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
636 *
637 * Other CPUs cannot be sure exactly when the grace period started.
638 * Therefore, their recently registered callbacks must pass through
639 * an additional RCU_NEXT_READY stage, so that they will be handled
640 * by the next RCU grace period.
641 */
642 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
643 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
644
645 /* Set state so that this CPU will detect the next quiescent state. */
646 __note_new_gpnum(rsp, rnp, rdp);
647}
648
649/*
593 * Start a new RCU grace period if warranted, re-initializing the hierarchy 650 * Start a new RCU grace period if warranted, re-initializing the hierarchy
594 * in preparation for detecting the next grace period. The caller must hold 651 * in preparation for detecting the next grace period. The caller must hold
595 * the root node's ->lock, which is released before return. Hard irqs must 652 * the root node's ->lock, which is released before return. Hard irqs must
@@ -603,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
603 struct rcu_node *rnp = rcu_get_root(rsp); 660 struct rcu_node *rnp = rcu_get_root(rsp);
604 661
605 if (!cpu_needs_another_gp(rsp, rdp)) { 662 if (!cpu_needs_another_gp(rsp, rdp)) {
606 spin_unlock_irqrestore(&rnp->lock, flags); 663 if (rnp->completed == rsp->completed) {
664 spin_unlock_irqrestore(&rnp->lock, flags);
665 return;
666 }
667 spin_unlock(&rnp->lock); /* irqs remain disabled. */
668
669 /*
670 * Propagate new ->completed value to rcu_node structures
671 * so that other CPUs don't have to wait until the start
672 * of the next grace period to process their callbacks.
673 */
674 rcu_for_each_node_breadth_first(rsp, rnp) {
675 spin_lock(&rnp->lock); /* irqs already disabled. */
676 rnp->completed = rsp->completed;
677 spin_unlock(&rnp->lock); /* irqs remain disabled. */
678 }
679 local_irq_restore(flags);
607 return; 680 return;
608 } 681 }
609 682
@@ -613,23 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
613 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ 686 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
614 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 687 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
615 record_gp_stall_check_time(rsp); 688 record_gp_stall_check_time(rsp);
616 dyntick_record_completed(rsp, rsp->completed - 1);
617 note_new_gpnum(rsp, rdp);
618
619 /*
620 * Because we are first, we know that all our callbacks will
621 * be covered by this upcoming grace period, even the ones
622 * that were registered arbitrarily recently.
623 */
624 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
625 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
626 689
627 /* Special-case the common single-level case. */ 690 /* Special-case the common single-level case. */
628 if (NUM_RCU_NODES == 1) { 691 if (NUM_RCU_NODES == 1) {
629 rcu_preempt_check_blocked_tasks(rnp); 692 rcu_preempt_check_blocked_tasks(rnp);
630 rnp->qsmask = rnp->qsmaskinit; 693 rnp->qsmask = rnp->qsmaskinit;
631 rnp->gpnum = rsp->gpnum; 694 rnp->gpnum = rsp->gpnum;
695 rnp->completed = rsp->completed;
632 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ 696 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
697 rcu_start_gp_per_cpu(rsp, rnp, rdp);
633 spin_unlock_irqrestore(&rnp->lock, flags); 698 spin_unlock_irqrestore(&rnp->lock, flags);
634 return; 699 return;
635 } 700 }
@@ -657,70 +722,51 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
657 * one corresponding to this CPU, due to the fact that we have 722 * one corresponding to this CPU, due to the fact that we have
658 * irqs disabled. 723 * irqs disabled.
659 */ 724 */
660 for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) { 725 rcu_for_each_node_breadth_first(rsp, rnp) {
661 spin_lock(&rnp->lock); /* irqs already disabled. */ 726 spin_lock(&rnp->lock); /* irqs already disabled. */
662 rcu_preempt_check_blocked_tasks(rnp); 727 rcu_preempt_check_blocked_tasks(rnp);
663 rnp->qsmask = rnp->qsmaskinit; 728 rnp->qsmask = rnp->qsmaskinit;
664 rnp->gpnum = rsp->gpnum; 729 rnp->gpnum = rsp->gpnum;
665 spin_unlock(&rnp->lock); /* irqs already disabled. */ 730 rnp->completed = rsp->completed;
731 if (rnp == rdp->mynode)
732 rcu_start_gp_per_cpu(rsp, rnp, rdp);
733 spin_unlock(&rnp->lock); /* irqs remain disabled. */
666 } 734 }
667 735
736 rnp = rcu_get_root(rsp);
737 spin_lock(&rnp->lock); /* irqs already disabled. */
668 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ 738 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
739 spin_unlock(&rnp->lock); /* irqs remain disabled. */
669 spin_unlock_irqrestore(&rsp->onofflock, flags); 740 spin_unlock_irqrestore(&rsp->onofflock, flags);
670} 741}
671 742
672/* 743/*
673 * Advance this CPU's callbacks, but only if the current grace period 744 * Report a full set of quiescent states to the specified rcu_state
674 * has ended. This may be called only from the CPU to whom the rdp 745 * data structure. This involves cleaning up after the prior grace
675 * belongs. 746 * period and letting rcu_start_gp() start up the next grace period
676 */ 747 * if one is needed. Note that the caller must hold rnp->lock, as
677static void 748 * required by rcu_start_gp(), which will release it.
678rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
679{
680 long completed_snap;
681 unsigned long flags;
682
683 local_irq_save(flags);
684 completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
685
686 /* Did another grace period end? */
687 if (rdp->completed != completed_snap) {
688
689 /* Advance callbacks. No harm if list empty. */
690 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
691 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
692 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
693
694 /* Remember that we saw this grace-period completion. */
695 rdp->completed = completed_snap;
696 }
697 local_irq_restore(flags);
698}
699
700/*
701 * Clean up after the prior grace period and let rcu_start_gp() start up
702 * the next grace period if one is needed. Note that the caller must
703 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
704 */ 749 */
705static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) 750static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
706 __releases(rnp->lock) 751 __releases(rcu_get_root(rsp)->lock)
707{ 752{
708 WARN_ON_ONCE(rsp->completed == rsp->gpnum); 753 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
709 rsp->completed = rsp->gpnum; 754 rsp->completed = rsp->gpnum;
710 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); 755 rsp->signaled = RCU_GP_IDLE;
711 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ 756 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
712} 757}
713 758
714/* 759/*
715 * Similar to cpu_quiet(), for which it is a helper function. Allows 760 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
716 * a group of CPUs to be quieted at one go, though all the CPUs in the 761 * Allows quiescent states for a group of CPUs to be reported at one go
717 * group must be represented by the same leaf rcu_node structure. 762 * to the specified rcu_node structure, though all the CPUs in the group
718 * That structure's lock must be held upon entry, and it is released 763 * must be represented by the same rcu_node structure (which need not be
719 * before return. 764 * a leaf rcu_node structure, though it often will be). That structure's
765 * lock must be held upon entry, and it is released before return.
720 */ 766 */
721static void 767static void
722cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, 768rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
723 unsigned long flags) 769 struct rcu_node *rnp, unsigned long flags)
724 __releases(rnp->lock) 770 __releases(rnp->lock)
725{ 771{
726 struct rcu_node *rnp_c; 772 struct rcu_node *rnp_c;
@@ -756,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
756 802
757 /* 803 /*
758 * Get here if we are the last CPU to pass through a quiescent 804 * Get here if we are the last CPU to pass through a quiescent
759 * state for this grace period. Invoke cpu_quiet_msk_finish() 805 * state for this grace period. Invoke rcu_report_qs_rsp()
760 * to clean up and start the next grace period if one is needed. 806 * to clean up and start the next grace period if one is needed.
761 */ 807 */
762 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ 808 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
763} 809}
764 810
765/* 811/*
766 * Record a quiescent state for the specified CPU, which must either be 812 * Record a quiescent state for the specified CPU to that CPU's rcu_data
767 * the current CPU. The lastcomp argument is used to make sure we are 813 * structure. This must be either called from the specified CPU, or
768 * still in the grace period of interest. We don't want to end the current 814 * called when the specified CPU is known to be offline (and when it is
769 * grace period based on quiescent states detected in an earlier grace 815 * also known that no other CPU is concurrently trying to help the offline
770 * period! 816 * CPU). The lastcomp argument is used to make sure we are still in the
817 * grace period of interest. We don't want to end the current grace period
818 * based on quiescent states detected in an earlier grace period!
771 */ 819 */
772static void 820static void
773cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) 821rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
774{ 822{
775 unsigned long flags; 823 unsigned long flags;
776 unsigned long mask; 824 unsigned long mask;
@@ -778,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
778 826
779 rnp = rdp->mynode; 827 rnp = rdp->mynode;
780 spin_lock_irqsave(&rnp->lock, flags); 828 spin_lock_irqsave(&rnp->lock, flags);
781 if (lastcomp != ACCESS_ONCE(rsp->completed)) { 829 if (lastcomp != rnp->completed) {
782 830
783 /* 831 /*
784 * Someone beat us to it for this grace period, so leave. 832 * Someone beat us to it for this grace period, so leave.
785 * The race with GP start is resolved by the fact that we 833 * The race with GP start is resolved by the fact that we
786 * hold the leaf rcu_node lock, so that the per-CPU bits 834 * hold the leaf rcu_node lock, so that the per-CPU bits
787 * cannot yet be initialized -- so we would simply find our 835 * cannot yet be initialized -- so we would simply find our
788 * CPU's bit already cleared in cpu_quiet_msk() if this race 836 * CPU's bit already cleared in rcu_report_qs_rnp() if this
789 * occurred. 837 * race occurred.
790 */ 838 */
791 rdp->passed_quiesc = 0; /* try again later! */ 839 rdp->passed_quiesc = 0; /* try again later! */
792 spin_unlock_irqrestore(&rnp->lock, flags); 840 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -804,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
804 */ 852 */
805 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 853 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
806 854
807 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ 855 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
808 } 856 }
809} 857}
810 858
@@ -835,24 +883,73 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
835 if (!rdp->passed_quiesc) 883 if (!rdp->passed_quiesc)
836 return; 884 return;
837 885
838 /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ 886 /*
839 cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); 887 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
888 * judge of that).
889 */
890 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
840} 891}
841 892
842#ifdef CONFIG_HOTPLUG_CPU 893#ifdef CONFIG_HOTPLUG_CPU
843 894
844/* 895/*
896 * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
897 * specified flavor of RCU. The callbacks will be adopted by the next
898 * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
899 * comes first. Because this is invoked from the CPU_DYING notifier,
900 * irqs are already disabled.
901 */
902static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
903{
904 int i;
905 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
906
907 if (rdp->nxtlist == NULL)
908 return; /* irqs disabled, so comparison is stable. */
909 spin_lock(&rsp->onofflock); /* irqs already disabled. */
910 *rsp->orphan_cbs_tail = rdp->nxtlist;
911 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
912 rdp->nxtlist = NULL;
913 for (i = 0; i < RCU_NEXT_SIZE; i++)
914 rdp->nxttail[i] = &rdp->nxtlist;
915 rsp->orphan_qlen += rdp->qlen;
916 rdp->qlen = 0;
917 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
918}
919
920/*
921 * Adopt previously orphaned RCU callbacks.
922 */
923static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
924{
925 unsigned long flags;
926 struct rcu_data *rdp;
927
928 spin_lock_irqsave(&rsp->onofflock, flags);
929 rdp = rsp->rda[smp_processor_id()];
930 if (rsp->orphan_cbs_list == NULL) {
931 spin_unlock_irqrestore(&rsp->onofflock, flags);
932 return;
933 }
934 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
935 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
936 rdp->qlen += rsp->orphan_qlen;
937 rsp->orphan_cbs_list = NULL;
938 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
939 rsp->orphan_qlen = 0;
940 spin_unlock_irqrestore(&rsp->onofflock, flags);
941}
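
Both routines above use the same head/**tail representation as the per-CPU lists, which is what makes donating and adopting callbacks O(1) splices that preserve callback order. A self-contained illustration of the idiom (illustrative code, not a kernel API):

#include <stddef.h>

struct node {
	struct node *next;
};

struct cblist {
	struct node *head;	/* first element, or NULL if empty */
	struct node **tail;	/* &last->next, or &head if empty */
};

/* Append all of src to dst in constant time, preserving order. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
	if (src->head == NULL)
		return;
	*dst->tail = src->head;		/* hook src onto dst's tail */
	dst->tail = src->tail;		/* dst now ends where src did */
	src->head = NULL;		/* leave src empty */
	src->tail = &src->head;
}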
942
943/*
845 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy 944 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
846 * and move all callbacks from the outgoing CPU to the current one. 945 * and move all callbacks from the outgoing CPU to the current one.
847 */ 946 */
848static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) 947static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
849{ 948{
850 int i;
851 unsigned long flags; 949 unsigned long flags;
852 long lastcomp;
853 unsigned long mask; 950 unsigned long mask;
951 int need_report = 0;
854 struct rcu_data *rdp = rsp->rda[cpu]; 952 struct rcu_data *rdp = rsp->rda[cpu];
855 struct rcu_data *rdp_me;
856 struct rcu_node *rnp; 953 struct rcu_node *rnp;
857 954
858 /* Exclude any attempts to start a new grace period. */ 955 /* Exclude any attempts to start a new grace period. */
@@ -865,42 +962,34 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
865 spin_lock(&rnp->lock); /* irqs already disabled. */ 962 spin_lock(&rnp->lock); /* irqs already disabled. */
866 rnp->qsmaskinit &= ~mask; 963 rnp->qsmaskinit &= ~mask;
867 if (rnp->qsmaskinit != 0) { 964 if (rnp->qsmaskinit != 0) {
868 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 965 if (rnp != rdp->mynode)
966 spin_unlock(&rnp->lock); /* irqs remain disabled. */
869 break; 967 break;
870 } 968 }
871 rcu_preempt_offline_tasks(rsp, rnp, rdp); 969 if (rnp == rdp->mynode)
970 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
971 else
972 spin_unlock(&rnp->lock); /* irqs remain disabled. */
872 mask = rnp->grpmask; 973 mask = rnp->grpmask;
873 spin_unlock(&rnp->lock); /* irqs remain disabled. */
874 rnp = rnp->parent; 974 rnp = rnp->parent;
875 } while (rnp != NULL); 975 } while (rnp != NULL);
876 lastcomp = rsp->completed;
877
878 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
879 976
880 /* 977 /*
881 * Move callbacks from the outgoing CPU to the running CPU. 978 * We still hold the leaf rcu_node structure lock here, and
882 * Note that the outgoing CPU is now quiscent, so it is now 979 * irqs are still disabled. The reason for this subterfuge is
883 * (uncharacteristically) safe to access its rcu_data structure. 980 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
884 * Note also that we must carefully retain the order of the 981 * held leads to deadlock.
885 * outgoing CPU's callbacks in order for rcu_barrier() to work
886 * correctly. Finally, note that we start all the callbacks
887 * afresh, even those that have passed through a grace period
888 * and are therefore ready to invoke. The theory is that hotplug
889 * events are rare, and that if they are frequent enough to
890 * indefinitely delay callbacks, you have far worse things to
891 * be worrying about.
892 */ 982 */
893 rdp_me = rsp->rda[smp_processor_id()]; 983 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
894 if (rdp->nxtlist != NULL) { 984 rnp = rdp->mynode;
895 *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; 985 if (need_report & RCU_OFL_TASKS_NORM_GP)
896 rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 986 rcu_report_unblock_qs_rnp(rnp, flags);
897 rdp->nxtlist = NULL; 987 else
898 for (i = 0; i < RCU_NEXT_SIZE; i++) 988 spin_unlock_irqrestore(&rnp->lock, flags);
899 rdp->nxttail[i] = &rdp->nxtlist; 989 if (need_report & RCU_OFL_TASKS_EXP_GP)
900 rdp_me->qlen += rdp->qlen; 990 rcu_report_exp_rnp(rsp, rnp);
901 rdp->qlen = 0; 991
902 } 992 rcu_adopt_orphan_cbs(rsp);
903 local_irq_restore(flags);
904} 993}
905 994
906/* 995/*
@@ -918,6 +1007,14 @@ static void rcu_offline_cpu(int cpu)
918 1007
919#else /* #ifdef CONFIG_HOTPLUG_CPU */ 1008#else /* #ifdef CONFIG_HOTPLUG_CPU */
920 1009
1010static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
1011{
1012}
1013
1014static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
1015{
1016}
1017
921static void rcu_offline_cpu(int cpu) 1018static void rcu_offline_cpu(int cpu)
922{ 1019{
923} 1020}
@@ -928,7 +1025,7 @@ static void rcu_offline_cpu(int cpu)
928 * Invoke any RCU callbacks that have made it to the end of their grace 1025 * Invoke any RCU callbacks that have made it to the end of their grace
929 * period. Throttle as specified by rdp->blimit. 1026
930 */ 1027 */
931static void rcu_do_batch(struct rcu_data *rdp) 1028static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
932{ 1029{
933 unsigned long flags; 1030 unsigned long flags;
934 struct rcu_head *next, *list, **tail; 1031 struct rcu_head *next, *list, **tail;
@@ -981,6 +1078,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
981 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) 1078 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
982 rdp->blimit = blimit; 1079 rdp->blimit = blimit;
983 1080
1081 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1082 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1083 rdp->qlen_last_fqs_check = 0;
1084 rdp->n_force_qs_snap = rsp->n_force_qs;
1085 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1086 rdp->qlen_last_fqs_check = rdp->qlen;
1087
984 local_irq_restore(flags); 1088 local_irq_restore(flags);
985 1089
986 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1090 /* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1050,33 +1154,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1050 int cpu; 1154 int cpu;
1051 unsigned long flags; 1155 unsigned long flags;
1052 unsigned long mask; 1156 unsigned long mask;
1053 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; 1157 struct rcu_node *rnp;
1054 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
1055 1158
1056 for (; rnp_cur < rnp_end; rnp_cur++) { 1159 rcu_for_each_leaf_node(rsp, rnp) {
1057 mask = 0; 1160 mask = 0;
1058 spin_lock_irqsave(&rnp_cur->lock, flags); 1161 spin_lock_irqsave(&rnp->lock, flags);
1059 if (rsp->completed != lastcomp) { 1162 if (rnp->completed != lastcomp) {
1060 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1163 spin_unlock_irqrestore(&rnp->lock, flags);
1061 return 1; 1164 return 1;
1062 } 1165 }
1063 if (rnp_cur->qsmask == 0) { 1166 if (rnp->qsmask == 0) {
1064 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1167 spin_unlock_irqrestore(&rnp->lock, flags);
1065 continue; 1168 continue;
1066 } 1169 }
1067 cpu = rnp_cur->grplo; 1170 cpu = rnp->grplo;
1068 bit = 1; 1171 bit = 1;
1069 for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) { 1172 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
1070 if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1173 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1071 mask |= bit; 1174 mask |= bit;
1072 } 1175 }
1073 if (mask != 0 && rsp->completed == lastcomp) { 1176 if (mask != 0 && rnp->completed == lastcomp) {
1074 1177
1075 /* cpu_quiet_msk() releases rnp_cur->lock. */ 1178 /* rcu_report_qs_rnp() releases rnp->lock. */
1076 cpu_quiet_msk(mask, rsp, rnp_cur, flags); 1179 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1077 continue; 1180 continue;
1078 } 1181 }
1079 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1182 spin_unlock_irqrestore(&rnp->lock, flags);
1080 } 1183 }
1081 return 0; 1184 return 0;
1082} 1185}
@@ -1091,8 +1194,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1091 long lastcomp; 1194 long lastcomp;
1092 struct rcu_node *rnp = rcu_get_root(rsp); 1195 struct rcu_node *rnp = rcu_get_root(rsp);
1093 u8 signaled; 1196 u8 signaled;
1197 u8 forcenow;
1094 1198
1095 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) 1199 if (!rcu_gp_in_progress(rsp))
1096 return; /* No grace period in progress, nothing to force. */ 1200 return; /* No grace period in progress, nothing to force. */
1097 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { 1201 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
1098 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ 1202 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
@@ -1103,19 +1207,20 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1103 goto unlock_ret; /* no emergency and done recently. */ 1207 goto unlock_ret; /* no emergency and done recently. */
1104 rsp->n_force_qs++; 1208 rsp->n_force_qs++;
1105 spin_lock(&rnp->lock); 1209 spin_lock(&rnp->lock);
1106 lastcomp = rsp->completed; 1210 lastcomp = rsp->gpnum - 1;
1107 signaled = rsp->signaled; 1211 signaled = rsp->signaled;
1108 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 1212 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1109 if (lastcomp == rsp->gpnum) { 1213 if (!rcu_gp_in_progress(rsp)) {
1110 rsp->n_force_qs_ngp++; 1214 rsp->n_force_qs_ngp++;
1111 spin_unlock(&rnp->lock); 1215 spin_unlock(&rnp->lock);
1112 goto unlock_ret; /* no GP in progress, time updated. */ 1216 goto unlock_ret; /* no GP in progress, time updated. */
1113 } 1217 }
1114 spin_unlock(&rnp->lock); 1218 spin_unlock(&rnp->lock);
1115 switch (signaled) { 1219 switch (signaled) {
1220 case RCU_GP_IDLE:
1116 case RCU_GP_INIT: 1221 case RCU_GP_INIT:
1117 1222
1118 break; /* grace period still initializing, ignore. */ 1223 break; /* grace period idle or initializing, ignore. */
1119 1224
1120 case RCU_SAVE_DYNTICK: 1225 case RCU_SAVE_DYNTICK:
1121 1226
@@ -1126,20 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1126 if (rcu_process_dyntick(rsp, lastcomp, 1231 if (rcu_process_dyntick(rsp, lastcomp,
1127 dyntick_save_progress_counter)) 1232 dyntick_save_progress_counter))
1128 goto unlock_ret; 1233 goto unlock_ret;
1234 /* fall into next case. */
1235
1236 case RCU_SAVE_COMPLETED:
1129 1237
1130 /* Update state, record completion counter. */ 1238 /* Update state, record completion counter. */
1239 forcenow = 0;
1131 spin_lock(&rnp->lock); 1240 spin_lock(&rnp->lock);
1132 if (lastcomp == rsp->completed) { 1241 if (lastcomp + 1 == rsp->gpnum &&
1242 lastcomp == rsp->completed &&
1243 rsp->signaled == signaled) {
1133 rsp->signaled = RCU_FORCE_QS; 1244 rsp->signaled = RCU_FORCE_QS;
1134 dyntick_record_completed(rsp, lastcomp); 1245 rsp->completed_fqs = lastcomp;
1246 forcenow = signaled == RCU_SAVE_COMPLETED;
1135 } 1247 }
1136 spin_unlock(&rnp->lock); 1248 spin_unlock(&rnp->lock);
1137 break; 1249 if (!forcenow)
1250 break;
1251 /* fall into next case. */
1138 1252
1139 case RCU_FORCE_QS: 1253 case RCU_FORCE_QS:
1140 1254
1141 /* Check dyntick-idle state, send IPI to laggarts. */ 1255 /* Check dyntick-idle state, send IPI to laggarts. */
1142 if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), 1256 if (rcu_process_dyntick(rsp, rsp->completed_fqs,
1143 rcu_implicit_dynticks_qs)) 1257 rcu_implicit_dynticks_qs))
1144 goto unlock_ret; 1258 goto unlock_ret;
1145 1259
@@ -1195,7 +1309,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1195 } 1309 }
1196 1310
1197 /* If there are callbacks ready, invoke them. */ 1311 /* If there are callbacks ready, invoke them. */
1198 rcu_do_batch(rdp); 1312 rcu_do_batch(rsp, rdp);
1199} 1313}
1200 1314
1201/* 1315/*
@@ -1251,7 +1365,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1251 rdp->nxttail[RCU_NEXT_TAIL] = &head->next; 1365 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1252 1366
1253 /* Start a new grace period if one not already started. */ 1367 /* Start a new grace period if one not already started. */
1254 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { 1368 if (!rcu_gp_in_progress(rsp)) {
1255 unsigned long nestflag; 1369 unsigned long nestflag;
1256 struct rcu_node *rnp_root = rcu_get_root(rsp); 1370 struct rcu_node *rnp_root = rcu_get_root(rsp);
1257 1371
@@ -1259,10 +1373,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1259 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ 1373 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
1260 } 1374 }
1261 1375
1262 /* Force the grace period if too many callbacks or too long waiting. */ 1376 /*
1263 if (unlikely(++rdp->qlen > qhimark)) { 1377 * Force the grace period if too many callbacks or too long waiting.
1378 * Enforce hysteresis, and don't invoke force_quiescent_state()
1379 * if some other CPU has recently done so. Also, don't bother
1380 * invoking force_quiescent_state() if the newly enqueued callback
1381 * is the only one waiting for a grace period to complete.
1382 */
1383 if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
1264 rdp->blimit = LONG_MAX; 1384 rdp->blimit = LONG_MAX;
1265 force_quiescent_state(rsp, 0); 1385 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1386 *rdp->nxttail[RCU_DONE_TAIL] != head)
1387 force_quiescent_state(rsp, 0);
1388 rdp->n_force_qs_snap = rsp->n_force_qs;
1389 rdp->qlen_last_fqs_check = rdp->qlen;
1266 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) 1390 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
1267 force_quiescent_state(rsp, 1); 1391 force_quiescent_state(rsp, 1);
1268 local_irq_restore(flags); 1392 local_irq_restore(flags);
@@ -1286,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1286} 1410}
1287EXPORT_SYMBOL_GPL(call_rcu_bh); 1411EXPORT_SYMBOL_GPL(call_rcu_bh);
1288 1412
1413/**
1414 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1415 *
1416 * Control will return to the caller some time after a full rcu-sched
1417 * grace period has elapsed, in other words after all currently executing
1418 * rcu-sched read-side critical sections have completed. These read-side
1419 * critical sections are delimited by rcu_read_lock_sched() and
1420 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1421 * local_irq_disable(), and so on may be used in place of
1422 * rcu_read_lock_sched().
1423 *
1424 * This means that all preempt_disable code sequences, including NMI and
1425 * hardware-interrupt handlers, in progress on entry will have completed
1426 * before this primitive returns. However, this does not guarantee that
1427 * softirq handlers will have completed, since in some kernels, these
1428 * handlers can run in process context, and can block.
1429 *
1430 * This primitive provides the guarantees made by the (now removed)
1431 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1432 * guarantees that rcu_read_lock() sections will have completed.
1433 * In "classic RCU", these two guarantees happen to be one and
1434 * the same, but can differ in realtime RCU implementations.
1435 */
1436void synchronize_sched(void)
1437{
1438 struct rcu_synchronize rcu;
1439
1440 if (rcu_blocking_is_gp())
1441 return;
1442
1443 init_completion(&rcu.completion);
1444 /* Will wake me after RCU finished. */
1445 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1446 /* Wait for it. */
1447 wait_for_completion(&rcu.completion);
1448}
1449EXPORT_SYMBOL_GPL(synchronize_sched);
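
A minimal sketch of the usage pattern the comment above describes: readers rely on preempt_disable() as the read-side critical section, and the updater unpublishes a pointer and waits for synchronize_sched() before freeing it (struct foo and gbl_foo are illustrative names, not part of this patch):

struct foo {
	int a;
};
static struct foo *gbl_foo;

static int reader(void)
{
	struct foo *p;
	int val;

	preempt_disable();		/* acts as the sched read-side critical section */
	p = rcu_dereference(gbl_foo);
	val = p ? p->a : -1;
	preempt_enable();
	return val;
}

static void retire(void)
{
	struct foo *old = gbl_foo;

	rcu_assign_pointer(gbl_foo, NULL);
	synchronize_sched();		/* all preempt-disabled readers are done */
	kfree(old);			/* now safe to free */
}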
1450
1451/**
1452 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1453 *
1454 * Control will return to the caller some time after a full rcu_bh grace
1455 * period has elapsed, in other words after all currently executing rcu_bh
1456 * read-side critical sections have completed. RCU read-side critical
1457 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1458 * and may be nested.
1459 */
1460void synchronize_rcu_bh(void)
1461{
1462 struct rcu_synchronize rcu;
1463
1464 if (rcu_blocking_is_gp())
1465 return;
1466
1467 init_completion(&rcu.completion);
1468 /* Will wake me after RCU finished. */
1469 call_rcu_bh(&rcu.head, wakeme_after_rcu);
1470 /* Wait for it. */
1471 wait_for_completion(&rcu.completion);
1472}
1473EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
1474
1289/* 1475/*
1290 * Check to see if there is any immediate RCU-related work to be done 1476 * Check to see if there is any immediate RCU-related work to be done
1291 * by the current CPU, for the specified type of RCU, returning 1 if so. 1477 * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -1295,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
1295 */ 1481 */
1296static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 1482static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1297{ 1483{
1484 struct rcu_node *rnp = rdp->mynode;
1485
1298 rdp->n_rcu_pending++; 1486 rdp->n_rcu_pending++;
1299 1487
1300 /* Check for CPU stalls, if enabled. */ 1488 /* Check for CPU stalls, if enabled. */
@@ -1319,19 +1507,19 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1319 } 1507 }
1320 1508
1321 /* Has another RCU grace period completed? */ 1509 /* Has another RCU grace period completed? */
1322 if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ 1510 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
1323 rdp->n_rp_gp_completed++; 1511 rdp->n_rp_gp_completed++;
1324 return 1; 1512 return 1;
1325 } 1513 }
1326 1514
1327 /* Has a new RCU grace period started? */ 1515 /* Has a new RCU grace period started? */
1328 if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ 1516 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
1329 rdp->n_rp_gp_started++; 1517 rdp->n_rp_gp_started++;
1330 return 1; 1518 return 1;
1331 } 1519 }
1332 1520
1333 /* Has an RCU GP gone long enough to send resched IPIs &c? */ 1521 /* Has an RCU GP gone long enough to send resched IPIs &c? */
1334 if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && 1522 if (rcu_gp_in_progress(rsp) &&
1335 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { 1523 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
1336 rdp->n_rp_need_fqs++; 1524 rdp->n_rp_need_fqs++;
1337 return 1; 1525 return 1;
@@ -1369,6 +1557,97 @@ int rcu_needs_cpu(int cpu)
1369} 1557}
1370 1558
1371/* 1559/*
1560 * This function is invoked towards the end of the scheduler's initialization
1561 * process. Before this is called, the idle task might contain
1562 * RCU read-side critical sections (during which time, this idle
1563 * task is booting the system). After this function is called, the
1564 * idle tasks are prohibited from containing RCU read-side critical
1565 * sections.
1566 */
1567void rcu_scheduler_starting(void)
1568{
1569 WARN_ON(num_online_cpus() != 1);
1570 WARN_ON(nr_context_switches() > 0);
1571 rcu_scheduler_active = 1;
1572}
1573
1574static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1575static atomic_t rcu_barrier_cpu_count;
1576static DEFINE_MUTEX(rcu_barrier_mutex);
1577static struct completion rcu_barrier_completion;
1578
1579static void rcu_barrier_callback(struct rcu_head *notused)
1580{
1581 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1582 complete(&rcu_barrier_completion);
1583}
1584
1585/*
1586 * Called with preemption disabled, and from cross-cpu IRQ context.
1587 */
1588static void rcu_barrier_func(void *type)
1589{
1590 int cpu = smp_processor_id();
1591 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
1592 void (*call_rcu_func)(struct rcu_head *head,
1593 void (*func)(struct rcu_head *head));
1594
1595 atomic_inc(&rcu_barrier_cpu_count);
1596 call_rcu_func = type;
1597 call_rcu_func(head, rcu_barrier_callback);
1598}
1599
1600/*
1601 * Orchestrate the specified type of RCU barrier, waiting for all
1602 * RCU callbacks of the specified type to complete.
1603 */
1604static void _rcu_barrier(struct rcu_state *rsp,
1605 void (*call_rcu_func)(struct rcu_head *head,
1606 void (*func)(struct rcu_head *head)))
1607{
1608 BUG_ON(in_interrupt());
1609 /* Take mutex to serialize concurrent rcu_barrier() requests. */
1610 mutex_lock(&rcu_barrier_mutex);
1611 init_completion(&rcu_barrier_completion);
1612 /*
1613 * Initialize rcu_barrier_cpu_count to 1, then invoke
1614 * rcu_barrier_func() on each CPU, so that each CPU also has
1615 * incremented rcu_barrier_cpu_count. Only then is it safe to
1616 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
1617 * might complete its grace period before all of the other CPUs
1618 * did their increment, causing this function to return too
1619 * early.
1620 */
1621 atomic_set(&rcu_barrier_cpu_count, 1);
1622 preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
1623 rcu_adopt_orphan_cbs(rsp);
1624 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
1625 preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
1626 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1627 complete(&rcu_barrier_completion);
1628 wait_for_completion(&rcu_barrier_completion);
1629 mutex_unlock(&rcu_barrier_mutex);
1630}
1631
1632/**
1633 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
1634 */
1635void rcu_barrier_bh(void)
1636{
1637 _rcu_barrier(&rcu_bh_state, call_rcu_bh);
1638}
1639EXPORT_SYMBOL_GPL(rcu_barrier_bh);
1640
1641/**
1642 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
1643 */
1644void rcu_barrier_sched(void)
1645{
1646 _rcu_barrier(&rcu_sched_state, call_rcu_sched);
1647}
1648EXPORT_SYMBOL_GPL(rcu_barrier_sched);
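
These barriers are typically used in module-unload paths: once no new callbacks can be posted, the exit handler waits for the in-flight ones before their callback functions and data disappear. A hedged sketch, where the example_* names are placeholders rather than real APIs:

static void __exit example_exit(void)
{
	example_unregister();		/* stop queueing new call_rcu_sched() callbacks (placeholder) */
	rcu_barrier_sched();		/* wait for callbacks already queued on all CPUs */
	kmem_cache_destroy(example_cache);	/* now safe to tear down backing storage */
}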
1649
1650/*
1372 * Do boot-time initialization of a CPU's per-CPU RCU data. 1651 * Do boot-time initialization of a CPU's per-CPU RCU data.
1373 */ 1652 */
1374static void __init 1653static void __init
@@ -1403,21 +1682,18 @@ static void __cpuinit
1403rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) 1682rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1404{ 1683{
1405 unsigned long flags; 1684 unsigned long flags;
1406 long lastcomp;
1407 unsigned long mask; 1685 unsigned long mask;
1408 struct rcu_data *rdp = rsp->rda[cpu]; 1686 struct rcu_data *rdp = rsp->rda[cpu];
1409 struct rcu_node *rnp = rcu_get_root(rsp); 1687 struct rcu_node *rnp = rcu_get_root(rsp);
1410 1688
1411 /* Set up local state, ensuring consistent view of global state. */ 1689 /* Set up local state, ensuring consistent view of global state. */
1412 spin_lock_irqsave(&rnp->lock, flags); 1690 spin_lock_irqsave(&rnp->lock, flags);
1413 lastcomp = rsp->completed;
1414 rdp->completed = lastcomp;
1415 rdp->gpnum = lastcomp;
1416 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1691 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1417 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1692 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1418 rdp->beenonline = 1; /* We have now been online. */ 1693 rdp->beenonline = 1; /* We have now been online. */
1419 rdp->preemptable = preemptable; 1694 rdp->preemptable = preemptable;
1420 rdp->passed_quiesc_completed = lastcomp - 1; 1695 rdp->qlen_last_fqs_check = 0;
1696 rdp->n_force_qs_snap = rsp->n_force_qs;
1421 rdp->blimit = blimit; 1697 rdp->blimit = blimit;
1422 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1698 spin_unlock(&rnp->lock); /* irqs remain disabled. */
1423 1699
@@ -1437,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1437 spin_lock(&rnp->lock); /* irqs already disabled. */ 1713 spin_lock(&rnp->lock); /* irqs already disabled. */
1438 rnp->qsmaskinit |= mask; 1714 rnp->qsmaskinit |= mask;
1439 mask = rnp->grpmask; 1715 mask = rnp->grpmask;
1716 if (rnp == rdp->mynode) {
1717 rdp->gpnum = rnp->completed; /* if GP in progress... */
1718 rdp->completed = rnp->completed;
1719 rdp->passed_quiesc_completed = rnp->completed - 1;
1720 }
1440 spin_unlock(&rnp->lock); /* irqs already disabled. */ 1721 spin_unlock(&rnp->lock); /* irqs already disabled. */
1441 rnp = rnp->parent; 1722 rnp = rnp->parent;
1442 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 1723 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
@@ -1454,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu)
1454/* 1735/*
1455 * Handle CPU online/offline notification events. 1736 * Handle CPU online/offline notification events.
1456 */ 1737 */
1457int __cpuinit rcu_cpu_notify(struct notifier_block *self, 1738static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1458 unsigned long action, void *hcpu) 1739 unsigned long action, void *hcpu)
1459{ 1740{
1460 long cpu = (long)hcpu; 1741 long cpu = (long)hcpu;
1461 1742
@@ -1464,6 +1745,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1464 case CPU_UP_PREPARE_FROZEN: 1745 case CPU_UP_PREPARE_FROZEN:
1465 rcu_online_cpu(cpu); 1746 rcu_online_cpu(cpu);
1466 break; 1747 break;
1748 case CPU_DYING:
1749 case CPU_DYING_FROZEN:
1750 /*
1751 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
1752 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
1753 * returns, all online cpus have queued rcu_barrier_func().
1754 * The dying CPU clears its cpu_online_mask bit and
1755 * moves all of its RCU callbacks to ->orphan_cbs_list
1756 * in the context of stop_machine(), so subsequent calls
1757 * to _rcu_barrier() will adopt these callbacks and only
1758 * then queue rcu_barrier_func() on all remaining CPUs.
1759 */
1760 rcu_send_cbs_to_orphanage(&rcu_bh_state);
1761 rcu_send_cbs_to_orphanage(&rcu_sched_state);
1762 rcu_preempt_send_cbs_to_orphanage();
1763 break;
1467 case CPU_DEAD: 1764 case CPU_DEAD:
1468 case CPU_DEAD_FROZEN: 1765 case CPU_DEAD_FROZEN:
1469 case CPU_UP_CANCELED: 1766 case CPU_UP_CANCELED:
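The "moves all of its RCU callbacks to ->orphan_cbs_list" step above is, at bottom, a splice of one singly linked, tail-pointer callback list onto another. A stand-alone sketch of that list shape (hypothetical names, not the kernel's rcu_head plumbing):

#include <stddef.h>
#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

/* A list kept the way the per-CPU callback lists are kept: a head pointer
 * plus a tail pointer that points at the last ->next, or at the head when
 * the list is empty. */
struct cblist {
	struct cb *head;
	struct cb **tail;
};

static void cblist_init(struct cblist *l)
{
	l->head = NULL;
	l->tail = &l->head;
}

static void cblist_enqueue(struct cblist *l, struct cb *c)
{
	c->next = NULL;
	*l->tail = c;
	l->tail = &c->next;
}

/* "Send to orphanage": move everything from @src onto the end of @dst. */
static void cblist_splice(struct cblist *dst, struct cblist *src)
{
	if (src->head == NULL)
		return;
	*dst->tail = src->head;
	dst->tail = src->tail;
	cblist_init(src);
}

int main(void)
{
	struct cb a = { .id = 1 }, b = { .id = 2 };
	struct cblist dying, orphans;
	struct cb *c;

	cblist_init(&dying);
	cblist_init(&orphans);
	cblist_enqueue(&dying, &a);
	cblist_enqueue(&dying, &b);
	cblist_splice(&orphans, &dying);	/* the dying CPU's callbacks survive here */
	for (c = orphans.head; c; c = c->next)
		printf("orphaned callback %d\n", c->id);
	return 0;
}

Adoption by a surviving CPU is the mirror image: splice the orphan list onto that CPU's own list before new barrier callbacks are queued.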
@@ -1527,6 +1824,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1527 rnp = rsp->level[i]; 1824 rnp = rsp->level[i];
1528 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1825 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1529 spin_lock_init(&rnp->lock); 1826 spin_lock_init(&rnp->lock);
1827 lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
1530 rnp->gpnum = 0; 1828 rnp->gpnum = 0;
1531 rnp->qsmask = 0; 1829 rnp->qsmask = 0;
1532 rnp->qsmaskinit = 0; 1830 rnp->qsmaskinit = 0;
@@ -1547,6 +1845,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1547 rnp->level = i; 1845 rnp->level = i;
1548 INIT_LIST_HEAD(&rnp->blocked_tasks[0]); 1846 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1549 INIT_LIST_HEAD(&rnp->blocked_tasks[1]); 1847 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1848 INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
1849 INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
1550 } 1850 }
1551 } 1851 }
1552} 1852}
@@ -1558,6 +1858,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1558 */ 1858 */
1559#define RCU_INIT_FLAVOR(rsp, rcu_data) \ 1859#define RCU_INIT_FLAVOR(rsp, rcu_data) \
1560do { \ 1860do { \
1861 int i; \
1862 int j; \
1863 struct rcu_node *rnp; \
1864 \
1561 rcu_init_one(rsp); \ 1865 rcu_init_one(rsp); \
1562 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ 1866 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
1563 j = 0; \ 1867 j = 0; \
@@ -1570,41 +1874,30 @@ do { \
1570 } \ 1874 } \
1571} while (0) 1875} while (0)
1572 1876
1573#ifdef CONFIG_TREE_PREEMPT_RCU 1877void __init rcu_init(void)
1574
1575void __init __rcu_init_preempt(void)
1576{
1577 int i; /* All used by RCU_INIT_FLAVOR(). */
1578 int j;
1579 struct rcu_node *rnp;
1580
1581 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
1582}
1583
1584#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1585
1586void __init __rcu_init_preempt(void)
1587{ 1878{
1588} 1879 int i;
1589
1590#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1591
1592void __init __rcu_init(void)
1593{
1594 int i; /* All used by RCU_INIT_FLAVOR(). */
1595 int j;
1596 struct rcu_node *rnp;
1597 1880
1598 rcu_bootup_announce(); 1881 rcu_bootup_announce();
1599#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1882#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1600 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1883 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1601#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 1884#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1885#if NUM_RCU_LVL_4 != 0
1886 printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
1887#endif /* #if NUM_RCU_LVL_4 != 0 */
1602 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); 1888 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
1603 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); 1889 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
1604 __rcu_init_preempt(); 1890 __rcu_init_preempt();
1605 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1891 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1892
1893 /*
1894 * We don't need protection against CPU-hotplug here because
1895 * this is called early in boot, before either interrupts
1896 * or the scheduler are operational.
1897 */
1898 cpu_notifier(rcu_cpu_notify, 0);
1899 for_each_online_cpu(i)
1900 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i);
1606} 1901}
1607 1902
1608module_param(blimit, int, 0); 1903#include "rcutree_plugin.h"
1609module_param(qhimark, int, 0);
1610module_param(qlowmark, int, 0);
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 8e8287a983c2..d2a0046f63b2 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -34,10 +34,11 @@
34 * In practice, this has not been tested, so there is probably some 34 * In practice, this has not been tested, so there is probably some
35 * bug somewhere. 35 * bug somewhere.
36 */ 36 */
37#define MAX_RCU_LVLS 3 37#define MAX_RCU_LVLS 4
38#define RCU_FANOUT (CONFIG_RCU_FANOUT) 38#define RCU_FANOUT (CONFIG_RCU_FANOUT)
39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT) 39#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT) 40#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
41#define RCU_FANOUT_FOURTH (RCU_FANOUT_CUBE * RCU_FANOUT)
41 42
42#if NR_CPUS <= RCU_FANOUT 43#if NR_CPUS <= RCU_FANOUT
43# define NUM_RCU_LVLS 1 44# define NUM_RCU_LVLS 1
@@ -45,23 +46,33 @@
45# define NUM_RCU_LVL_1 (NR_CPUS) 46# define NUM_RCU_LVL_1 (NR_CPUS)
46# define NUM_RCU_LVL_2 0 47# define NUM_RCU_LVL_2 0
47# define NUM_RCU_LVL_3 0 48# define NUM_RCU_LVL_3 0
49# define NUM_RCU_LVL_4 0
48#elif NR_CPUS <= RCU_FANOUT_SQ 50#elif NR_CPUS <= RCU_FANOUT_SQ
49# define NUM_RCU_LVLS 2 51# define NUM_RCU_LVLS 2
50# define NUM_RCU_LVL_0 1 52# define NUM_RCU_LVL_0 1
51# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT) 53# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
52# define NUM_RCU_LVL_2 (NR_CPUS) 54# define NUM_RCU_LVL_2 (NR_CPUS)
53# define NUM_RCU_LVL_3 0 55# define NUM_RCU_LVL_3 0
56# define NUM_RCU_LVL_4 0
54#elif NR_CPUS <= RCU_FANOUT_CUBE 57#elif NR_CPUS <= RCU_FANOUT_CUBE
55# define NUM_RCU_LVLS 3 58# define NUM_RCU_LVLS 3
56# define NUM_RCU_LVL_0 1 59# define NUM_RCU_LVL_0 1
57# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ) 60# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
58# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT)) 61# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
59# define NUM_RCU_LVL_3 NR_CPUS 62# define NUM_RCU_LVL_3 NR_CPUS
63# define NUM_RCU_LVL_4 0
64#elif NR_CPUS <= RCU_FANOUT_FOURTH
65# define NUM_RCU_LVLS 4
66# define NUM_RCU_LVL_0 1
67# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_CUBE)
68# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_SQ)
69# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT)
70# define NUM_RCU_LVL_4 NR_CPUS
60#else 71#else
61# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS" 72# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
62#endif /* #if (NR_CPUS) <= RCU_FANOUT */ 73#endif /* #if (NR_CPUS) <= RCU_FANOUT */
63 74
64#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3) 75#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
65#define NUM_RCU_NODES (RCU_SUM - NR_CPUS) 76#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
66 77
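A worked example of the geometry (assuming CONFIG_RCU_FANOUT=64 and NR_CPUS=4096; illustrative numbers, not from this patch): the bottom "level" counts per-CPU rcu_data slots rather than rcu_node structures, which is why NR_CPUS is subtracted again at the end.

/* Worked example: 64-way fanout, 4096 possible CPUs. */
#define EX_FANOUT	64
#define EX_NR_CPUS	4096
#define EX_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* 4096 <= 64*64, so two rcu_node levels suffice. */
#define EX_LVL_0	1					/* the root rcu_node */
#define EX_LVL_1	EX_DIV_ROUND_UP(EX_NR_CPUS, EX_FANOUT)	/* 64 leaf rcu_nodes */
#define EX_LVL_2	EX_NR_CPUS				/* 4096 rcu_data, one per CPU */

#define EX_SUM		(EX_LVL_0 + EX_LVL_1 + EX_LVL_2)	/* 4161 */
#define EX_NUM_NODES	(EX_SUM - EX_NR_CPUS)			/* 65 rcu_node structures */

int main(void)
{
	/* One root fanning out to 64 leaves, each leaf covering 64 CPUs. */
	return !(EX_NUM_NODES == 65);
}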
67/* 78/*
@@ -79,24 +90,67 @@ struct rcu_dynticks {
79 * Definition for node within the RCU grace-period-detection hierarchy. 90 * Definition for node within the RCU grace-period-detection hierarchy.
80 */ 91 */
81struct rcu_node { 92struct rcu_node {
82 spinlock_t lock; 93 spinlock_t lock; /* Root rcu_node's lock protects some */
94 /* rcu_state fields as well as following. */
83 long gpnum; /* Current grace period for this node. */ 95 long gpnum; /* Current grace period for this node. */
84 /* This will either be equal to or one */ 96 /* This will either be equal to or one */
85 /* behind the root rcu_node's gpnum. */ 97 /* behind the root rcu_node's gpnum. */
98 long completed; /* Last grace period completed for this node. */
99 /* This will either be equal to or one */
100 /* behind the root rcu_node's gpnum. */
86 unsigned long qsmask; /* CPUs or groups that need to switch in */ 101 unsigned long qsmask; /* CPUs or groups that need to switch in */
87 /* order for current grace period to proceed.*/ 102 /* order for current grace period to proceed.*/
103 /* In leaf rcu_node, each bit corresponds to */
104 /* an rcu_data structure, otherwise, each */
105 /* bit corresponds to a child rcu_node */
106 /* structure. */
107 unsigned long expmask; /* Groups that have ->blocked_tasks[] */
108 /* elements that need to drain to allow the */
109 /* current expedited grace period to */
110 /* complete (only for TREE_PREEMPT_RCU). */
88 unsigned long qsmaskinit; 111 unsigned long qsmaskinit;
89 /* Per-GP initialization for qsmask. */ 112 /* Per-GP initial value for qsmask & expmask. */
90 unsigned long grpmask; /* Mask to apply to parent qsmask. */ 113 unsigned long grpmask; /* Mask to apply to parent qsmask. */
114 /* Only one bit will be set in this mask. */
91 int grplo; /* lowest-numbered CPU or group here. */ 115 int grplo; /* lowest-numbered CPU or group here. */
92 int grphi; /* highest-numbered CPU or group here. */ 116 int grphi; /* highest-numbered CPU or group here. */
93 u8 grpnum; /* CPU/group number for next level up. */ 117 u8 grpnum; /* CPU/group number for next level up. */
94 u8 level; /* root is at level 0. */ 118 u8 level; /* root is at level 0. */
95 struct rcu_node *parent; 119 struct rcu_node *parent;
96 struct list_head blocked_tasks[2]; 120 struct list_head blocked_tasks[4];
97 /* Tasks blocked in RCU read-side critsect. */ 121 /* Tasks blocked in RCU read-side critsect. */
 122 /* Tasks blocking grace period number x */
 123 /* (->gpnum) sit on element (x & 0x1); */
 124 /* elements 2 and 3 hold the expedited sets. */

98} ____cacheline_internodealigned_in_smp; 125} ____cacheline_internodealigned_in_smp;
99 126
127/*
128 * Do a full breadth-first scan of the rcu_node structures for the
129 * specified rcu_state structure.
130 */
131#define rcu_for_each_node_breadth_first(rsp, rnp) \
132 for ((rnp) = &(rsp)->node[0]; \
133 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
134
135/*
136 * Do a breadth-first scan of the non-leaf rcu_node structures for the
137 * specified rcu_state structure. Note that if there is a singleton
138 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
139 */
140#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
141 for ((rnp) = &(rsp)->node[0]; \
142 (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
143
144/*
145 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
146 * structure. Note that if there is a singleton rcu_node tree with but
147 * one rcu_node structure, this loop -will- visit the rcu_node structure.
148 * It is still a leaf node, even if it is also the root node.
149 */
150#define rcu_for_each_leaf_node(rsp, rnp) \
151 for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
152 (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
153
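These three macros work because the node[] array is laid out breadth-first and ->level[NUM_RCU_LVLS - 1] points at the first leaf, so "non-leaf" is everything before that pointer and "leaf" is everything from it to the end. A toy mock of that partition (hypothetical ex_ names mirroring the macros above, shrunk to one root and four leaves):

#include <stdio.h>

#define EX_NUM_LVLS	2
#define EX_NUM_NODES	5

struct ex_node { int level; };

struct ex_state {
	struct ex_node node[EX_NUM_NODES];	/* breadth-first: root first */
	struct ex_node *level[EX_NUM_LVLS];	/* first node of each level  */
};

#define ex_for_each_node(sp, np) \
	for ((np) = &(sp)->node[0]; (np) < &(sp)->node[EX_NUM_NODES]; (np)++)
#define ex_for_each_nonleaf_node(sp, np) \
	for ((np) = &(sp)->node[0]; (np) < (sp)->level[EX_NUM_LVLS - 1]; (np)++)
#define ex_for_each_leaf_node(sp, np) \
	for ((np) = (sp)->level[EX_NUM_LVLS - 1]; (np) < &(sp)->node[EX_NUM_NODES]; (np)++)

int main(void)
{
	struct ex_state s;
	struct ex_node *np;
	int i;

	for (i = 0; i < EX_NUM_NODES; i++)
		s.node[i].level = (i == 0) ? 0 : 1;
	s.level[0] = &s.node[0];	/* the root level */
	s.level[1] = &s.node[1];	/* first leaf; everything after it is a leaf too */

	ex_for_each_nonleaf_node(&s, np)
		printf("non-leaf at index %ld\n", (long)(np - s.node));	/* prints 0 only */
	ex_for_each_leaf_node(&s, np)
		printf("leaf at index %ld\n", (long)(np - s.node));	/* prints 1..4 */
	return 0;
}

With a singleton tree, level[NUM_RCU_LVLS - 1] equals &node[0], so the non-leaf scan is empty and the leaf scan still visits the root, exactly as the comments above state.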
100/* Index values for nxttail array in struct rcu_data. */ 154/* Index values for nxttail array in struct rcu_data. */
101#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */ 155#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
102#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */ 156#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
@@ -126,23 +180,30 @@ struct rcu_data {
126 * Any of the partitions might be empty, in which case the 180 * Any of the partitions might be empty, in which case the
127 * pointer to that partition will be equal to the pointer for 181 * pointer to that partition will be equal to the pointer for
128 * the following partition. When the list is empty, all of 182 * the following partition. When the list is empty, all of
129 * the nxttail elements point to nxtlist, which is NULL. 183 * the nxttail elements point to the ->nxtlist pointer itself,
184 * which in that case is NULL.
130 * 185 *
131 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
132 * Entries that might have arrived after current GP ended
133 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
134 * Entries known to have arrived before current GP ended
135 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
136 * Entries that batch # <= ->completed - 1: waiting for current GP
137 * [nxtlist, *nxttail[RCU_DONE_TAIL]): 186 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
138 * Entries that batch # <= ->completed 187 * Entries that batch # <= ->completed
139 * The grace period for these entries has completed, and 188 * The grace period for these entries has completed, and
140 * the other grace-period-completed entries may be moved 189 * the other grace-period-completed entries may be moved
141 * here temporarily in rcu_process_callbacks(). 190 * here temporarily in rcu_process_callbacks().
191 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
192 * Entries that batch # <= ->completed - 1: waiting for current GP
193 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
194 * Entries known to have arrived before current GP ended
195 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
196 * Entries that might have arrived after current GP ended
197 * Note that the value of *nxttail[RCU_NEXT_TAIL] will
198 * always be NULL, as this is the end of the list.
142 */ 199 */
143 struct rcu_head *nxtlist; 200 struct rcu_head *nxtlist;
144 struct rcu_head **nxttail[RCU_NEXT_SIZE]; 201 struct rcu_head **nxttail[RCU_NEXT_SIZE];
145 long qlen; /* # of queued callbacks */ 202 long qlen; /* # of queued callbacks */
203 long qlen_last_fqs_check;
204 /* qlen at last check for QS forcing */
205 unsigned long n_force_qs_snap;
206 /* did other CPU force QS recently? */
146 long blimit; /* Upper limit on a processed batch */ 207 long blimit; /* Upper limit on a processed batch */
147 208
148#ifdef CONFIG_NO_HZ 209#ifdef CONFIG_NO_HZ
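The four-way partition described above is easiest to see with the pointers drawn out: one singly linked list, four tail pointers marking segment boundaries, and empty segments sharing the same pointer. A toy sketch of that shape (hypothetical names; the real list links through rcu_head and lives in per-CPU rcu_data):

#include <stddef.h>
#include <stdio.h>

enum { DONE_TAIL, WAIT_TAIL, NEXT_READY_TAIL, NEXT_TAIL, NSEG };

struct node {
	struct node *next;
};

struct seglist {
	struct node *list;		/* like ->nxtlist */
	struct node **tail[NSEG];	/* like ->nxttail[] */
};

static void seglist_init(struct seglist *l)
{
	int i;

	l->list = NULL;
	for (i = 0; i < NSEG; i++)
		l->tail[i] = &l->list;	/* empty list: every tail points at ->list */
}

/* New callbacks always go in at the very end, via the NEXT_TAIL pointer. */
static void seglist_enqueue(struct seglist *l, struct node *n)
{
	n->next = NULL;
	*l->tail[NEXT_TAIL] = n;
	l->tail[NEXT_TAIL] = &n->next;
}

/* When a grace period ends, whole segments advance just by copying tails. */
static void seglist_advance(struct seglist *l)
{
	l->tail[DONE_TAIL] = l->tail[WAIT_TAIL];
	l->tail[WAIT_TAIL] = l->tail[NEXT_READY_TAIL];
	l->tail[NEXT_READY_TAIL] = l->tail[NEXT_TAIL];
}

int main(void)
{
	struct seglist l;
	struct node n1, n2;

	seglist_init(&l);
	seglist_enqueue(&l, &n1);
	seglist_enqueue(&l, &n2);
	seglist_advance(&l);	/* n1, n2 now fall in the WAIT..NEXT_READY segment */
	printf("done segment empty: %d\n", l.tail[DONE_TAIL] == &l.list);
	return 0;
}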
@@ -173,13 +234,15 @@ struct rcu_data {
173}; 234};
174 235
175/* Values for signaled field in struct rcu_state. */ 236/* Values for signaled field in struct rcu_state. */
176#define RCU_GP_INIT 0 /* Grace period being initialized. */ 237#define RCU_GP_IDLE 0 /* No grace period in progress. */
177#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */ 238#define RCU_GP_INIT 1 /* Grace period being initialized. */
178#define RCU_FORCE_QS 2 /* Need to force quiescent state. */ 239#define RCU_SAVE_DYNTICK 2 /* Need to scan dyntick state. */
240#define RCU_SAVE_COMPLETED 3 /* Need to save rsp->completed. */
241#define RCU_FORCE_QS 4 /* Need to force quiescent state. */
179#ifdef CONFIG_NO_HZ 242#ifdef CONFIG_NO_HZ
180#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK 243#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
181#else /* #ifdef CONFIG_NO_HZ */ 244#else /* #ifdef CONFIG_NO_HZ */
182#define RCU_SIGNAL_INIT RCU_FORCE_QS 245#define RCU_SIGNAL_INIT RCU_SAVE_COMPLETED
183#endif /* #else #ifdef CONFIG_NO_HZ */ 246#endif /* #else #ifdef CONFIG_NO_HZ */
184 247
185#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */ 248#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
@@ -216,10 +279,23 @@ struct rcu_state {
216 /* Force QS state. */ 279 /* Force QS state. */
217 long gpnum; /* Current gp number. */ 280 long gpnum; /* Current gp number. */
218 long completed; /* # of last completed gp. */ 281 long completed; /* # of last completed gp. */
282
283 /* End of fields guarded by root rcu_node's lock. */
284
219 spinlock_t onofflock; /* exclude on/offline and */ 285 spinlock_t onofflock; /* exclude on/offline and */
220 /* starting new GP. */ 286 /* starting new GP. Also */
287 /* protects the following */
288 /* orphan_cbs fields. */
289 struct rcu_head *orphan_cbs_list; /* list of rcu_head structs */
290 /* orphaned by all CPUs in */
291 /* a given leaf rcu_node */
292 /* going offline. */
293 struct rcu_head **orphan_cbs_tail; /* And tail pointer. */
294 long orphan_qlen; /* Number of orphaned cbs. */
221 spinlock_t fqslock; /* Only one task forcing */ 295 spinlock_t fqslock; /* Only one task forcing */
222 /* quiescent states. */ 296 /* quiescent states. */
297 long completed_fqs; /* Value of completed @ snap. */
298 /* Protected by fqslock. */
223 unsigned long jiffies_force_qs; /* Time at which to invoke */ 299 unsigned long jiffies_force_qs; /* Time at which to invoke */
224 /* force_quiescent_state(). */ 300 /* force_quiescent_state(). */
225 unsigned long n_force_qs; /* Number of calls to */ 301 unsigned long n_force_qs; /* Number of calls to */
@@ -234,11 +310,15 @@ struct rcu_state {
234 unsigned long jiffies_stall; /* Time at which to check */ 310 unsigned long jiffies_stall; /* Time at which to check */
235 /* for CPU stalls. */ 311 /* for CPU stalls. */
236#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 312#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
237#ifdef CONFIG_NO_HZ
238 long dynticks_completed; /* Value of completed @ snap. */
239#endif /* #ifdef CONFIG_NO_HZ */
240}; 313};
241 314
315/* Return values for rcu_preempt_offline_tasks(). */
316
317#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
318 /* GP were moved to root. */
319#define RCU_OFL_TASKS_EXP_GP 0x2 /* Tasks blocking expedited */
320 /* GP were moved to root. */
321
242#ifdef RCU_TREE_NONCORE 322#ifdef RCU_TREE_NONCORE
243 323
244/* 324/*
@@ -255,5 +335,37 @@ extern struct rcu_state rcu_preempt_state;
255DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data); 335DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
256#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 336#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
257 337
258#endif /* #ifdef RCU_TREE_NONCORE */ 338#else /* #ifdef RCU_TREE_NONCORE */
339
340/* Forward declarations for rcutree_plugin.h */
341static void rcu_bootup_announce(void);
342long rcu_batches_completed(void);
343static void rcu_preempt_note_context_switch(int cpu);
344static int rcu_preempted_readers(struct rcu_node *rnp);
345#ifdef CONFIG_HOTPLUG_CPU
346static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
347 unsigned long flags);
348#endif /* #ifdef CONFIG_HOTPLUG_CPU */
349#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
350static void rcu_print_task_stall(struct rcu_node *rnp);
351#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
352static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
353#ifdef CONFIG_HOTPLUG_CPU
354static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
355 struct rcu_node *rnp,
356 struct rcu_data *rdp);
357static void rcu_preempt_offline_cpu(int cpu);
358#endif /* #ifdef CONFIG_HOTPLUG_CPU */
359static void rcu_preempt_check_callbacks(int cpu);
360static void rcu_preempt_process_callbacks(void);
361void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
362#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
363static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
364#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
365static int rcu_preempt_pending(int cpu);
366static int rcu_preempt_needs_cpu(int cpu);
367static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
368static void rcu_preempt_send_cbs_to_orphanage(void);
369static void __init __rcu_init_preempt(void);
259 370
371#endif /* #else #ifdef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 1cee04f627eb..37fbccdf41d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,16 +24,19 @@
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> 24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
25 */ 25 */
26 26
27#include <linux/delay.h>
27 28
28#ifdef CONFIG_TREE_PREEMPT_RCU 29#ifdef CONFIG_TREE_PREEMPT_RCU
29 30
30struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); 31struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
31DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); 32DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
32 33
34static int rcu_preempted_readers_exp(struct rcu_node *rnp);
35
33/* 36/*
34 * Tell them what RCU they are running. 37 * Tell them what RCU they are running.
35 */ 38 */
36static inline void rcu_bootup_announce(void) 39static void __init rcu_bootup_announce(void)
37{ 40{
38 printk(KERN_INFO 41 printk(KERN_INFO
39 "Experimental preemptable hierarchical RCU implementation.\n"); 42 "Experimental preemptable hierarchical RCU implementation.\n");
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
67static void rcu_preempt_qs(int cpu) 70static void rcu_preempt_qs(int cpu)
68{ 71{
69 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); 72 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
70 rdp->passed_quiesc_completed = rdp->completed; 73 rdp->passed_quiesc_completed = rdp->gpnum - 1;
71 barrier(); 74 barrier();
72 rdp->passed_quiesc = 1; 75 rdp->passed_quiesc = 1;
73} 76}
@@ -150,11 +153,65 @@ void __rcu_read_lock(void)
150} 153}
151EXPORT_SYMBOL_GPL(__rcu_read_lock); 154EXPORT_SYMBOL_GPL(__rcu_read_lock);
152 155
156/*
157 * Check for preempted RCU readers blocking the current grace period
158 * for the specified rcu_node structure. If the caller needs a reliable
159 * answer, it must hold the rcu_node's ->lock.
160 */
161static int rcu_preempted_readers(struct rcu_node *rnp)
162{
163 int phase = rnp->gpnum & 0x1;
164
165 return !list_empty(&rnp->blocked_tasks[phase]) ||
166 !list_empty(&rnp->blocked_tasks[phase + 2]);
167}
168
169/*
170 * Record a quiescent state for all tasks that were previously queued
171 * on the specified rcu_node structure and that were blocking the current
172 * RCU grace period. The caller must hold the specified rnp->lock with
173 * irqs disabled, and this lock is released upon return, but irqs remain
174 * disabled.
175 */
176static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
177 __releases(rnp->lock)
178{
179 unsigned long mask;
180 struct rcu_node *rnp_p;
181
182 if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
183 spin_unlock_irqrestore(&rnp->lock, flags);
184 return; /* Still need more quiescent states! */
185 }
186
187 rnp_p = rnp->parent;
188 if (rnp_p == NULL) {
189 /*
190 * Either there is only one rcu_node in the tree,
191 * or tasks were kicked up to root rcu_node due to
192 * CPUs going offline.
193 */
194 rcu_report_qs_rsp(&rcu_preempt_state, flags);
195 return;
196 }
197
198 /* Report up the rest of the hierarchy. */
199 mask = rnp->grpmask;
200 spin_unlock(&rnp->lock); /* irqs remain disabled. */
201 spin_lock(&rnp_p->lock); /* irqs already disabled. */
202 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
203}
204
205/*
206 * Handle special cases during rcu_read_unlock(), such as needing to
207 * notify RCU core processing or task having blocked during the RCU
208 * read-side critical section.
209 */
153static void rcu_read_unlock_special(struct task_struct *t) 210static void rcu_read_unlock_special(struct task_struct *t)
154{ 211{
155 int empty; 212 int empty;
213 int empty_exp;
156 unsigned long flags; 214 unsigned long flags;
157 unsigned long mask;
158 struct rcu_node *rnp; 215 struct rcu_node *rnp;
159 int special; 216 int special;
160 217
@@ -196,37 +253,31 @@ static void rcu_read_unlock_special(struct task_struct *t)
196 break; 253 break;
197 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 254 spin_unlock(&rnp->lock); /* irqs remain disabled. */
198 } 255 }
199 empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); 256 empty = !rcu_preempted_readers(rnp);
257 empty_exp = !rcu_preempted_readers_exp(rnp);
258 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
200 list_del_init(&t->rcu_node_entry); 259 list_del_init(&t->rcu_node_entry);
201 t->rcu_blocked_node = NULL; 260 t->rcu_blocked_node = NULL;
202 261
203 /* 262 /*
204 * If this was the last task on the current list, and if 263 * If this was the last task on the current list, and if
205 * we aren't waiting on any CPUs, report the quiescent state. 264 * we aren't waiting on any CPUs, report the quiescent state.
206 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() 265 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
207 * drop rnp->lock and restore irq.
208 */ 266 */
209 if (!empty && rnp->qsmask == 0 && 267 if (empty)
210 list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
211 struct rcu_node *rnp_p;
212
213 if (rnp->parent == NULL) {
214 /* Only one rcu_node in the tree. */
215 cpu_quiet_msk_finish(&rcu_preempt_state, flags);
216 return;
217 }
218 /* Report up the rest of the hierarchy. */
219 mask = rnp->grpmask;
220 spin_unlock_irqrestore(&rnp->lock, flags); 268 spin_unlock_irqrestore(&rnp->lock, flags);
221 rnp_p = rnp->parent; 269 else
222 spin_lock_irqsave(&rnp_p->lock, flags); 270 rcu_report_unblock_qs_rnp(rnp, flags);
223 WARN_ON_ONCE(rnp->qsmask); 271
224 cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); 272 /*
225 return; 273 * If this was the last task on the expedited lists,
226 } 274 * then we need to report up the rcu_node hierarchy.
227 spin_unlock(&rnp->lock); 275 */
276 if (!empty_exp && !rcu_preempted_readers_exp(rnp))
277 rcu_report_exp_rnp(&rcu_preempt_state, rnp);
278 } else {
279 local_irq_restore(flags);
228 } 280 }
229 local_irq_restore(flags);
230} 281}
231 282
232/* 283/*
@@ -257,12 +308,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
257{ 308{
258 unsigned long flags; 309 unsigned long flags;
259 struct list_head *lp; 310 struct list_head *lp;
260 int phase = rnp->gpnum & 0x1; 311 int phase;
261 struct task_struct *t; 312 struct task_struct *t;
262 313
263 if (!list_empty(&rnp->blocked_tasks[phase])) { 314 if (rcu_preempted_readers(rnp)) {
264 spin_lock_irqsave(&rnp->lock, flags); 315 spin_lock_irqsave(&rnp->lock, flags);
265 phase = rnp->gpnum & 0x1; /* re-read under lock. */ 316 phase = rnp->gpnum & 0x1;
266 lp = &rnp->blocked_tasks[phase]; 317 lp = &rnp->blocked_tasks[phase];
267 list_for_each_entry(t, lp, rcu_node_entry) 318 list_for_each_entry(t, lp, rcu_node_entry)
268 printk(" P%d", t->pid); 319 printk(" P%d", t->pid);
@@ -281,20 +332,10 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
281 */ 332 */
282static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) 333static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
283{ 334{
284 WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])); 335 WARN_ON_ONCE(rcu_preempted_readers(rnp));
285 WARN_ON_ONCE(rnp->qsmask); 336 WARN_ON_ONCE(rnp->qsmask);
286} 337}
287 338
288/*
289 * Check for preempted RCU readers for the specified rcu_node structure.
290 * If the caller needs a reliable answer, it must hold the rcu_node's
291 * >lock.
292 */
293static int rcu_preempted_readers(struct rcu_node *rnp)
294{
295 return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
296}
297
298#ifdef CONFIG_HOTPLUG_CPU 339#ifdef CONFIG_HOTPLUG_CPU
299 340
300/* 341/*
@@ -303,26 +344,34 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
303 * rcu_node. The reason for not just moving them to the immediate 344 * rcu_node. The reason for not just moving them to the immediate
304 * parent is to remove the need for rcu_read_unlock_special() to 345 * parent is to remove the need for rcu_read_unlock_special() to
305 * make more than two attempts to acquire the target rcu_node's lock. 346 * make more than two attempts to acquire the target rcu_node's lock.
 347 * Returns a bitmask: RCU_OFL_TASKS_NORM_GP if tasks on this
 348 * rcu_node structure were blocking the current normal grace
 349 * period, RCU_OFL_TASKS_EXP_GP if they were blocking the current
 350 * expedited grace period, and zero if no tasks were blocking
 351 * either kind of grace period.
306 * 352 *
307 * The caller must hold rnp->lock with irqs disabled. 353 * The caller must hold rnp->lock with irqs disabled.
308 */ 354 */
309static void rcu_preempt_offline_tasks(struct rcu_state *rsp, 355static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
310 struct rcu_node *rnp, 356 struct rcu_node *rnp,
311 struct rcu_data *rdp) 357 struct rcu_data *rdp)
312{ 358{
313 int i; 359 int i;
314 struct list_head *lp; 360 struct list_head *lp;
315 struct list_head *lp_root; 361 struct list_head *lp_root;
362 int retval = 0;
316 struct rcu_node *rnp_root = rcu_get_root(rsp); 363 struct rcu_node *rnp_root = rcu_get_root(rsp);
317 struct task_struct *tp; 364 struct task_struct *tp;
318 365
319 if (rnp == rnp_root) { 366 if (rnp == rnp_root) {
320 WARN_ONCE(1, "Last CPU thought to be offlined?"); 367 WARN_ONCE(1, "Last CPU thought to be offlined?");
321 return; /* Shouldn't happen: at least one CPU online. */ 368 return 0; /* Shouldn't happen: at least one CPU online. */
322 } 369 }
323 WARN_ON_ONCE(rnp != rdp->mynode && 370 WARN_ON_ONCE(rnp != rdp->mynode &&
324 (!list_empty(&rnp->blocked_tasks[0]) || 371 (!list_empty(&rnp->blocked_tasks[0]) ||
325 !list_empty(&rnp->blocked_tasks[1]))); 372 !list_empty(&rnp->blocked_tasks[1]) ||
373 !list_empty(&rnp->blocked_tasks[2]) ||
374 !list_empty(&rnp->blocked_tasks[3])));
326 375
327 /* 376 /*
328 * Move tasks up to root rcu_node. Rely on the fact that the 377 * Move tasks up to root rcu_node. Rely on the fact that the
@@ -330,7 +379,11 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
330 * rcu_nodes in terms of gp_num value. This fact allows us to 379 * rcu_nodes in terms of gp_num value. This fact allows us to
331 * move the blocked_tasks[] array directly, element by element. 380 * move the blocked_tasks[] array directly, element by element.
332 */ 381 */
333 for (i = 0; i < 2; i++) { 382 if (rcu_preempted_readers(rnp))
383 retval |= RCU_OFL_TASKS_NORM_GP;
384 if (rcu_preempted_readers_exp(rnp))
385 retval |= RCU_OFL_TASKS_EXP_GP;
386 for (i = 0; i < 4; i++) {
334 lp = &rnp->blocked_tasks[i]; 387 lp = &rnp->blocked_tasks[i];
335 lp_root = &rnp_root->blocked_tasks[i]; 388 lp_root = &rnp_root->blocked_tasks[i];
336 while (!list_empty(lp)) { 389 while (!list_empty(lp)) {
@@ -342,6 +395,7 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
342 spin_unlock(&rnp_root->lock); /* irqs remain disabled */ 395 spin_unlock(&rnp_root->lock); /* irqs remain disabled */
343 } 396 }
344 } 397 }
398 return retval;
345} 399}
346 400
347/* 401/*
@@ -392,6 +446,186 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
392} 446}
393EXPORT_SYMBOL_GPL(call_rcu); 447EXPORT_SYMBOL_GPL(call_rcu);
394 448
449/**
450 * synchronize_rcu - wait until a grace period has elapsed.
451 *
452 * Control will return to the caller some time after a full grace
453 * period has elapsed, in other words after all currently executing RCU
454 * read-side critical sections have completed. RCU read-side critical
455 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
456 * and may be nested.
457 */
458void synchronize_rcu(void)
459{
460 struct rcu_synchronize rcu;
461
462 if (!rcu_scheduler_active)
463 return;
464
465 init_completion(&rcu.completion);
466 /* Will wake me after RCU finished. */
467 call_rcu(&rcu.head, wakeme_after_rcu);
468 /* Wait for it. */
469 wait_for_completion(&rcu.completion);
470}
471EXPORT_SYMBOL_GPL(synchronize_rcu);
472
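For reference, the usual update-side pairing with synchronize_rcu() is: publish the new version, wait out a grace period, then free the old version that readers may still be referencing. A hypothetical sketch of that pattern (example code, not part of this patch):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>

struct conf {
	int threshold;
};

static struct conf *cur_conf;
static DEFINE_SPINLOCK(conf_lock);	/* serializes updaters only */

int conf_read_threshold(void)
{
	struct conf *c;
	int ret = -1;

	rcu_read_lock();
	c = rcu_dereference(cur_conf);
	if (c)
		ret = c->threshold;
	rcu_read_unlock();
	return ret;
}

int conf_set_threshold(int threshold)
{
	struct conf *newc, *oldc;

	newc = kmalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->threshold = threshold;

	spin_lock(&conf_lock);
	oldc = cur_conf;
	rcu_assign_pointer(cur_conf, newc);	/* readers now see the new version */
	spin_unlock(&conf_lock);

	synchronize_rcu();	/* wait out readers that might still hold oldc */
	kfree(oldc);
	return 0;
}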
473static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
474static long sync_rcu_preempt_exp_count;
475static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
476
477/*
478 * Return non-zero if there are any tasks in RCU read-side critical
479 * sections blocking the current preemptible-RCU expedited grace period.
480 * If there is no preemptible-RCU expedited grace period currently in
481 * progress, returns zero unconditionally.
482 */
483static int rcu_preempted_readers_exp(struct rcu_node *rnp)
484{
485 return !list_empty(&rnp->blocked_tasks[2]) ||
486 !list_empty(&rnp->blocked_tasks[3]);
487}
488
489/*
490 * return non-zero if there is no RCU expedited grace period in progress
491 * for the specified rcu_node structure, in other words, if all CPUs and
492 * tasks covered by the specified rcu_node structure have done their bit
493 * for the current expedited grace period. Works only for preemptible
 494 * RCU -- other RCU implementations use other means.
495 *
496 * Caller must hold sync_rcu_preempt_exp_mutex.
497 */
498static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
499{
500 return !rcu_preempted_readers_exp(rnp) &&
501 ACCESS_ONCE(rnp->expmask) == 0;
502}
503
504/*
505 * Report the exit from RCU read-side critical section for the last task
506 * that queued itself during or before the current expedited preemptible-RCU
507 * grace period. This event is reported either to the rcu_node structure on
508 * which the task was queued or to one of that rcu_node structure's ancestors,
509 * recursively up the tree. (Calm down, calm down, we do the recursion
510 * iteratively!)
511 *
512 * Caller must hold sync_rcu_preempt_exp_mutex.
513 */
514static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
515{
516 unsigned long flags;
517 unsigned long mask;
518
519 spin_lock_irqsave(&rnp->lock, flags);
520 for (;;) {
521 if (!sync_rcu_preempt_exp_done(rnp))
522 break;
523 if (rnp->parent == NULL) {
524 wake_up(&sync_rcu_preempt_exp_wq);
525 break;
526 }
527 mask = rnp->grpmask;
528 spin_unlock(&rnp->lock); /* irqs remain disabled */
529 rnp = rnp->parent;
530 spin_lock(&rnp->lock); /* irqs already disabled */
531 rnp->expmask &= ~mask;
532 }
533 spin_unlock_irqrestore(&rnp->lock, flags);
534}
535
536/*
537 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
538 * grace period for the specified rcu_node structure. If there are no such
539 * tasks, report it up the rcu_node hierarchy.
540 *
541 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
542 */
543static void
544sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
545{
546 int must_wait;
547
548 spin_lock(&rnp->lock); /* irqs already disabled */
549 list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
550 list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
551 must_wait = rcu_preempted_readers_exp(rnp);
552 spin_unlock(&rnp->lock); /* irqs remain disabled */
553 if (!must_wait)
554 rcu_report_exp_rnp(rsp, rnp);
555}
556
557/*
558 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
559 * is to invoke synchronize_sched_expedited() to push all the tasks to
560 * the ->blocked_tasks[] lists, move all entries from the first set of
561 * ->blocked_tasks[] lists to the second set, and finally wait for this
562 * second set to drain.
563 */
564void synchronize_rcu_expedited(void)
565{
566 unsigned long flags;
567 struct rcu_node *rnp;
568 struct rcu_state *rsp = &rcu_preempt_state;
569 long snap;
570 int trycount = 0;
571
572 smp_mb(); /* Caller's modifications seen first by other CPUs. */
573 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
574 smp_mb(); /* Above access cannot bleed into critical section. */
575
576 /*
577 * Acquire lock, falling back to synchronize_rcu() if too many
578 * lock-acquisition failures. Of course, if someone does the
579 * expedited grace period for us, just leave.
580 */
581 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
582 if (trycount++ < 10)
583 udelay(trycount * num_online_cpus());
584 else {
585 synchronize_rcu();
586 return;
587 }
588 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
589 goto mb_ret; /* Others did our work for us. */
590 }
591 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
592 goto unlock_mb_ret; /* Others did our work for us. */
593
594 /* force all RCU readers onto blocked_tasks[]. */
595 synchronize_sched_expedited();
596
597 spin_lock_irqsave(&rsp->onofflock, flags);
598
599 /* Initialize ->expmask for all non-leaf rcu_node structures. */
600 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
601 spin_lock(&rnp->lock); /* irqs already disabled. */
602 rnp->expmask = rnp->qsmaskinit;
603 spin_unlock(&rnp->lock); /* irqs remain disabled. */
604 }
605
606 /* Snapshot current state of ->blocked_tasks[] lists. */
607 rcu_for_each_leaf_node(rsp, rnp)
608 sync_rcu_preempt_exp_init(rsp, rnp);
609 if (NUM_RCU_NODES > 1)
610 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
611
612 spin_unlock_irqrestore(&rsp->onofflock, flags);
613
614 /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
615 rnp = rcu_get_root(rsp);
616 wait_event(sync_rcu_preempt_exp_wq,
617 sync_rcu_preempt_exp_done(rnp));
618
619 /* Clean up and exit. */
620 smp_mb(); /* ensure expedited GP seen before counter increment. */
621 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
622unlock_mb_ret:
623 mutex_unlock(&sync_rcu_preempt_exp_mutex);
624mb_ret:
625 smp_mb(); /* ensure subsequent action seen after grace period. */
626}
627EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
628
395/* 629/*
396 * Check to see if there is any immediate preemptable-RCU-related work 630 * Check to see if there is any immediate preemptable-RCU-related work
397 * to be done. 631 * to be done.
@@ -410,6 +644,15 @@ static int rcu_preempt_needs_cpu(int cpu)
410 return !!per_cpu(rcu_preempt_data, cpu).nxtlist; 644 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
411} 645}
412 646
647/**
648 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
649 */
650void rcu_barrier(void)
651{
652 _rcu_barrier(&rcu_preempt_state, call_rcu);
653}
654EXPORT_SYMBOL_GPL(rcu_barrier);
655
413/* 656/*
414 * Initialize preemptable RCU's per-CPU data. 657 * Initialize preemptable RCU's per-CPU data.
415 */ 658 */
@@ -419,6 +662,22 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
419} 662}
420 663
421/* 664/*
665 * Move preemptable RCU's callbacks to ->orphan_cbs_list.
666 */
667static void rcu_preempt_send_cbs_to_orphanage(void)
668{
669 rcu_send_cbs_to_orphanage(&rcu_preempt_state);
670}
671
672/*
673 * Initialize preemptable RCU's state structures.
674 */
675static void __init __rcu_init_preempt(void)
676{
677 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
678}
679
680/*
422 * Check for a task exiting while in a preemptable-RCU read-side 681 * Check for a task exiting while in a preemptable-RCU read-side
423 * critical section, clean up if so. No need to issue warnings, 682 * critical section, clean up if so. No need to issue warnings,
424 * as debug_check_no_locks_held() already does this if lockdep 683 * as debug_check_no_locks_held() already does this if lockdep
@@ -439,7 +698,7 @@ void exit_rcu(void)
439/* 698/*
440 * Tell them what RCU they are running. 699 * Tell them what RCU they are running.
441 */ 700 */
442static inline void rcu_bootup_announce(void) 701static void __init rcu_bootup_announce(void)
443{ 702{
444 printk(KERN_INFO "Hierarchical RCU implementation.\n"); 703 printk(KERN_INFO "Hierarchical RCU implementation.\n");
445} 704}
@@ -461,6 +720,25 @@ static void rcu_preempt_note_context_switch(int cpu)
461{ 720{
462} 721}
463 722
723/*
724 * Because preemptable RCU does not exist, there are never any preempted
725 * RCU readers.
726 */
727static int rcu_preempted_readers(struct rcu_node *rnp)
728{
729 return 0;
730}
731
732#ifdef CONFIG_HOTPLUG_CPU
733
734/* Because preemptible RCU does not exist, no quieting of tasks. */
735static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
736{
737 spin_unlock_irqrestore(&rnp->lock, flags);
738}
739
740#endif /* #ifdef CONFIG_HOTPLUG_CPU */
741
464#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 742#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
465 743
466/* 744/*
@@ -483,25 +761,19 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
483 WARN_ON_ONCE(rnp->qsmask); 761 WARN_ON_ONCE(rnp->qsmask);
484} 762}
485 763
486/*
487 * Because preemptable RCU does not exist, there are never any preempted
488 * RCU readers.
489 */
490static int rcu_preempted_readers(struct rcu_node *rnp)
491{
492 return 0;
493}
494
495#ifdef CONFIG_HOTPLUG_CPU 764#ifdef CONFIG_HOTPLUG_CPU
496 765
497/* 766/*
498 * Because preemptable RCU does not exist, it never needs to migrate 767 * Because preemptable RCU does not exist, it never needs to migrate
499 * tasks that were blocked within RCU read-side critical sections. 768 * tasks that were blocked within RCU read-side critical sections, and
769 * such non-existent tasks cannot possibly have been blocking the current
770 * grace period.
500 */ 771 */
501static void rcu_preempt_offline_tasks(struct rcu_state *rsp, 772static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
502 struct rcu_node *rnp, 773 struct rcu_node *rnp,
503 struct rcu_data *rdp) 774 struct rcu_data *rdp)
504{ 775{
776 return 0;
505} 777}
506 778
507/* 779/*
@@ -518,7 +790,7 @@ static void rcu_preempt_offline_cpu(int cpu)
518 * Because preemptable RCU does not exist, it never has any callbacks 790 * Because preemptable RCU does not exist, it never has any callbacks
519 * to check. 791 * to check.
520 */ 792 */
521void rcu_preempt_check_callbacks(int cpu) 793static void rcu_preempt_check_callbacks(int cpu)
522{ 794{
523} 795}
524 796
@@ -526,7 +798,7 @@ void rcu_preempt_check_callbacks(int cpu)
526 * Because preemptable RCU does not exist, it never has any callbacks 798 * Because preemptable RCU does not exist, it never has any callbacks
527 * to process. 799 * to process.
528 */ 800 */
529void rcu_preempt_process_callbacks(void) 801static void rcu_preempt_process_callbacks(void)
530{ 802{
531} 803}
532 804
@@ -540,6 +812,30 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
540EXPORT_SYMBOL_GPL(call_rcu); 812EXPORT_SYMBOL_GPL(call_rcu);
541 813
542/* 814/*
815 * Wait for an rcu-preempt grace period, but make it happen quickly.
816 * But because preemptable RCU does not exist, map to rcu-sched.
817 */
818void synchronize_rcu_expedited(void)
819{
820 synchronize_sched_expedited();
821}
822EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
823
824#ifdef CONFIG_HOTPLUG_CPU
825
826/*
827 * Because preemptable RCU does not exist, there is never any need to
828 * report on tasks preempted in RCU read-side critical sections during
829 * expedited RCU grace periods.
830 */
831static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
832{
833 return;
834}
835
836#endif /* #ifdef CONFIG_HOTPLUG_CPU */
837
838/*
543 * Because preemptable RCU does not exist, it never has any work to do. 839 * Because preemptable RCU does not exist, it never has any work to do.
544 */ 840 */
545static int rcu_preempt_pending(int cpu) 841static int rcu_preempt_pending(int cpu)
@@ -556,6 +852,16 @@ static int rcu_preempt_needs_cpu(int cpu)
556} 852}
557 853
558/* 854/*
855 * Because preemptable RCU does not exist, rcu_barrier() is just
856 * another name for rcu_barrier_sched().
857 */
858void rcu_barrier(void)
859{
860 rcu_barrier_sched();
861}
862EXPORT_SYMBOL_GPL(rcu_barrier);
863
864/*
559 * Because preemptable RCU does not exist, there is no per-CPU 865 * Because preemptable RCU does not exist, there is no per-CPU
560 * data to initialize. 866 * data to initialize.
561 */ 867 */
@@ -563,4 +869,18 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
563{ 869{
564} 870}
565 871
872/*
873 * Because there is no preemptable RCU, there are no callbacks to move.
874 */
875static void rcu_preempt_send_cbs_to_orphanage(void)
876{
877}
878
879/*
880 * Because preemptable RCU does not exist, it need not be initialized.
881 */
882static void __init __rcu_init_preempt(void)
883{
884}
885
566#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 886#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 179e6ad80dc0..9d2c88423b31 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -155,24 +155,32 @@ static const struct file_operations rcudata_csv_fops = {
155 155
156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp) 156static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
157{ 157{
158 long gpnum;
158 int level = 0; 159 int level = 0;
160 int phase;
159 struct rcu_node *rnp; 161 struct rcu_node *rnp;
160 162
163 gpnum = rsp->gpnum;
161 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x " 164 seq_printf(m, "c=%ld g=%ld s=%d jfq=%ld j=%x "
162 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu\n", 165 "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld\n",
163 rsp->completed, rsp->gpnum, rsp->signaled, 166 rsp->completed, gpnum, rsp->signaled,
164 (long)(rsp->jiffies_force_qs - jiffies), 167 (long)(rsp->jiffies_force_qs - jiffies),
165 (int)(jiffies & 0xffff), 168 (int)(jiffies & 0xffff),
166 rsp->n_force_qs, rsp->n_force_qs_ngp, 169 rsp->n_force_qs, rsp->n_force_qs_ngp,
167 rsp->n_force_qs - rsp->n_force_qs_ngp, 170 rsp->n_force_qs - rsp->n_force_qs_ngp,
168 rsp->n_force_qs_lh); 171 rsp->n_force_qs_lh, rsp->orphan_qlen);
169 for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) { 172 for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
170 if (rnp->level != level) { 173 if (rnp->level != level) {
171 seq_puts(m, "\n"); 174 seq_puts(m, "\n");
172 level = rnp->level; 175 level = rnp->level;
173 } 176 }
174 seq_printf(m, "%lx/%lx %d:%d ^%d ", 177 phase = gpnum & 0x1;
178 seq_printf(m, "%lx/%lx %c%c>%c%c %d:%d ^%d ",
175 rnp->qsmask, rnp->qsmaskinit, 179 rnp->qsmask, rnp->qsmaskinit,
180 "T."[list_empty(&rnp->blocked_tasks[phase])],
181 "E."[list_empty(&rnp->blocked_tasks[phase + 2])],
182 "T."[list_empty(&rnp->blocked_tasks[!phase])],
183 "E."[list_empty(&rnp->blocked_tasks[!phase + 2])],
176 rnp->grplo, rnp->grphi, rnp->grpnum); 184 rnp->grplo, rnp->grphi, rnp->grpnum);
177 } 185 }
178 seq_puts(m, "\n"); 186 seq_puts(m, "\n");
diff --git a/kernel/sched.c b/kernel/sched.c
index 1535f3884b88..e7f2cfa6a257 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
309 */ 309 */
310static DEFINE_SPINLOCK(task_group_lock); 310static DEFINE_SPINLOCK(task_group_lock);
311 311
312#ifdef CONFIG_FAIR_GROUP_SCHED
313
312#ifdef CONFIG_SMP 314#ifdef CONFIG_SMP
313static int root_task_group_empty(void) 315static int root_task_group_empty(void)
314{ 316{
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
316} 318}
317#endif 319#endif
318 320
319#ifdef CONFIG_FAIR_GROUP_SCHED
320#ifdef CONFIG_USER_SCHED 321#ifdef CONFIG_USER_SCHED
321# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 322# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
322#else /* !CONFIG_USER_SCHED */ 323#else /* !CONFIG_USER_SCHED */
@@ -534,14 +535,12 @@ struct rq {
534 #define CPU_LOAD_IDX_MAX 5 535 #define CPU_LOAD_IDX_MAX 5
535 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 536 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
536#ifdef CONFIG_NO_HZ 537#ifdef CONFIG_NO_HZ
537 unsigned long last_tick_seen;
538 unsigned char in_nohz_recently; 538 unsigned char in_nohz_recently;
539#endif 539#endif
540 /* capture load from *all* tasks on this cpu: */ 540 /* capture load from *all* tasks on this cpu: */
541 struct load_weight load; 541 struct load_weight load;
542 unsigned long nr_load_updates; 542 unsigned long nr_load_updates;
543 u64 nr_switches; 543 u64 nr_switches;
544 u64 nr_migrations_in;
545 544
546 struct cfs_rq cfs; 545 struct cfs_rq cfs;
547 struct rt_rq rt; 546 struct rt_rq rt;
@@ -590,6 +589,8 @@ struct rq {
590 589
591 u64 rt_avg; 590 u64 rt_avg;
592 u64 age_stamp; 591 u64 age_stamp;
592 u64 idle_stamp;
593 u64 avg_idle;
593#endif 594#endif
594 595
595 /* calc_load related fields */ 596 /* calc_load related fields */
@@ -676,6 +677,7 @@ inline void update_rq_clock(struct rq *rq)
676 677
677/** 678/**
678 * runqueue_is_locked 679 * runqueue_is_locked
680 * @cpu: the processor in question.
679 * 681 *
680 * Returns true if the current cpu runqueue is locked. 682 * Returns true if the current cpu runqueue is locked.
681 * This interface allows printk to be called with the runqueue lock 683 * This interface allows printk to be called with the runqueue lock
@@ -770,7 +772,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
770 if (!sched_feat_names[i]) 772 if (!sched_feat_names[i])
771 return -EINVAL; 773 return -EINVAL;
772 774
773 filp->f_pos += cnt; 775 *ppos += cnt;
774 776
775 return cnt; 777 return cnt;
776} 778}
@@ -1563,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
1563 1565
1564#ifdef CONFIG_FAIR_GROUP_SCHED 1566#ifdef CONFIG_FAIR_GROUP_SCHED
1565 1567
1566struct update_shares_data { 1568static __read_mostly unsigned long *update_shares_data;
1567 unsigned long rq_weight[NR_CPUS];
1568};
1569
1570static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
1571 1569
1572static void __set_se_shares(struct sched_entity *se, unsigned long shares); 1570static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1573 1571
@@ -1577,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1577static void update_group_shares_cpu(struct task_group *tg, int cpu, 1575static void update_group_shares_cpu(struct task_group *tg, int cpu,
1578 unsigned long sd_shares, 1576 unsigned long sd_shares,
1579 unsigned long sd_rq_weight, 1577 unsigned long sd_rq_weight,
1580 struct update_shares_data *usd) 1578 unsigned long *usd_rq_weight)
1581{ 1579{
1582 unsigned long shares, rq_weight; 1580 unsigned long shares, rq_weight;
1583 int boost = 0; 1581 int boost = 0;
1584 1582
1585 rq_weight = usd->rq_weight[cpu]; 1583 rq_weight = usd_rq_weight[cpu];
1586 if (!rq_weight) { 1584 if (!rq_weight) {
1587 boost = 1; 1585 boost = 1;
1588 rq_weight = NICE_0_LOAD; 1586 rq_weight = NICE_0_LOAD;
@@ -1617,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
1617static int tg_shares_up(struct task_group *tg, void *data) 1615static int tg_shares_up(struct task_group *tg, void *data)
1618{ 1616{
1619 unsigned long weight, rq_weight = 0, shares = 0; 1617 unsigned long weight, rq_weight = 0, shares = 0;
1620 struct update_shares_data *usd; 1618 unsigned long *usd_rq_weight;
1621 struct sched_domain *sd = data; 1619 struct sched_domain *sd = data;
1622 unsigned long flags; 1620 unsigned long flags;
1623 int i; 1621 int i;
@@ -1626,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
1626 return 0; 1624 return 0;
1627 1625
1628 local_irq_save(flags); 1626 local_irq_save(flags);
1629 usd = &__get_cpu_var(update_shares_data); 1627 usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
1630 1628
1631 for_each_cpu(i, sched_domain_span(sd)) { 1629 for_each_cpu(i, sched_domain_span(sd)) {
1632 weight = tg->cfs_rq[i]->load.weight; 1630 weight = tg->cfs_rq[i]->load.weight;
1633 usd->rq_weight[i] = weight; 1631 usd_rq_weight[i] = weight;
1634 1632
1635 /* 1633 /*
1636 * If there are currently no tasks on the cpu pretend there 1634 * If there are currently no tasks on the cpu pretend there
@@ -1651,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
1651 shares = tg->shares; 1649 shares = tg->shares;
1652 1650
1653 for_each_cpu(i, sched_domain_span(sd)) 1651 for_each_cpu(i, sched_domain_span(sd))
1654 update_group_shares_cpu(tg, i, shares, rq_weight, usd); 1652 update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
1655 1653
1656 local_irq_restore(flags); 1654 local_irq_restore(flags);
1657 1655
@@ -1995,6 +1993,39 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1995 p->sched_class->prio_changed(rq, p, oldprio, running); 1993 p->sched_class->prio_changed(rq, p, oldprio, running);
1996} 1994}
1997 1995
1996/**
1997 * kthread_bind - bind a just-created kthread to a cpu.
1998 * @p: thread created by kthread_create().
 1999 * @cpu: cpu (might not be online, must be possible) for @p to run on.
2000 *
2001 * Description: This function is equivalent to set_cpus_allowed(),
2002 * except that @cpu doesn't need to be online, and the thread must be
2003 * stopped (i.e., just returned from kthread_create()).
2004 *
2005 * Function lives here instead of kthread.c because it messes with
2006 * scheduler internals which require locking.
2007 */
2008void kthread_bind(struct task_struct *p, unsigned int cpu)
2009{
2010 struct rq *rq = cpu_rq(cpu);
2011 unsigned long flags;
2012
2013 /* Must have done schedule() in kthread() before we set_task_cpu */
2014 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
2015 WARN_ON(1);
2016 return;
2017 }
2018
2019 spin_lock_irqsave(&rq->lock, flags);
2020 update_rq_clock(rq);
2021 set_task_cpu(p, cpu);
2022 p->cpus_allowed = cpumask_of_cpu(cpu);
2023 p->rt.nr_cpus_allowed = 1;
2024 p->flags |= PF_THREAD_BOUND;
2025 spin_unlock_irqrestore(&rq->lock, flags);
2026}
2027EXPORT_SYMBOL(kthread_bind);
2028
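One plausible way to use the function added above, sketched from its kernel-doc (hypothetical worker, not code from this patch): create the thread stopped, bind it while it is still stopped, then wake it.

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static int my_worker(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* placeholder work loop */
	return 0;
}

static struct task_struct *start_bound_worker(unsigned int cpu)
{
	struct task_struct *t;

	t = kthread_create(my_worker, NULL, "my_worker/%u", cpu);
	if (IS_ERR(t))
		return t;
	kthread_bind(t, cpu);	/* legal only while the thread is still stopped */
	wake_up_process(t);	/* now it runs, pinned to @cpu */
	return t;
}

Binding after wake_up_process() would trip the wait_task_inactive() check above, which is why the order matters.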
1998#ifdef CONFIG_SMP 2029#ifdef CONFIG_SMP
1999/* 2030/*
2000 * Is this task likely cache-hot: 2031 * Is this task likely cache-hot:
@@ -2007,7 +2038,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2007 /* 2038 /*
2008 * Buddy candidates are cache hot: 2039 * Buddy candidates are cache hot:
2009 */ 2040 */
2010 if (sched_feat(CACHE_HOT_BUDDY) && 2041 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2011 (&p->se == cfs_rq_of(&p->se)->next || 2042 (&p->se == cfs_rq_of(&p->se)->next ||
2012 &p->se == cfs_rq_of(&p->se)->last)) 2043 &p->se == cfs_rq_of(&p->se)->last))
2013 return 1; 2044 return 1;
@@ -2048,7 +2079,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2048#endif 2079#endif
2049 if (old_cpu != new_cpu) { 2080 if (old_cpu != new_cpu) {
2050 p->se.nr_migrations++; 2081 p->se.nr_migrations++;
2051 new_rq->nr_migrations_in++;
2052#ifdef CONFIG_SCHEDSTATS 2082#ifdef CONFIG_SCHEDSTATS
2053 if (task_hot(p, old_rq->clock, NULL)) 2083 if (task_hot(p, old_rq->clock, NULL))
2054 schedstat_inc(p, se.nr_forced2_migrations); 2084 schedstat_inc(p, se.nr_forced2_migrations);
@@ -2085,6 +2115,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2085 * it is sufficient to simply update the task's cpu field. 2115 * it is sufficient to simply update the task's cpu field.
2086 */ 2116 */
2087 if (!p->se.on_rq && !task_running(rq, p)) { 2117 if (!p->se.on_rq && !task_running(rq, p)) {
2118 update_rq_clock(rq);
2088 set_task_cpu(p, dest_cpu); 2119 set_task_cpu(p, dest_cpu);
2089 return 0; 2120 return 0;
2090 } 2121 }
@@ -2311,7 +2342,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2311{ 2342{
2312 int cpu, orig_cpu, this_cpu, success = 0; 2343 int cpu, orig_cpu, this_cpu, success = 0;
2313 unsigned long flags; 2344 unsigned long flags;
2314 struct rq *rq; 2345 struct rq *rq, *orig_rq;
2315 2346
2316 if (!sched_feat(SYNC_WAKEUPS)) 2347 if (!sched_feat(SYNC_WAKEUPS))
2317 wake_flags &= ~WF_SYNC; 2348 wake_flags &= ~WF_SYNC;
@@ -2319,7 +2350,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2319 this_cpu = get_cpu(); 2350 this_cpu = get_cpu();
2320 2351
2321 smp_wmb(); 2352 smp_wmb();
2322 rq = task_rq_lock(p, &flags); 2353 rq = orig_rq = task_rq_lock(p, &flags);
2323 update_rq_clock(rq); 2354 update_rq_clock(rq);
2324 if (!(p->state & state)) 2355 if (!(p->state & state))
2325 goto out; 2356 goto out;
@@ -2346,10 +2377,15 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2346 task_rq_unlock(rq, &flags); 2377 task_rq_unlock(rq, &flags);
2347 2378
2348 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2379 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2349 if (cpu != orig_cpu) 2380 if (cpu != orig_cpu) {
2381 local_irq_save(flags);
2382 rq = cpu_rq(cpu);
2383 update_rq_clock(rq);
2350 set_task_cpu(p, cpu); 2384 set_task_cpu(p, cpu);
2351 2385 local_irq_restore(flags);
2386 }
2352 rq = task_rq_lock(p, &flags); 2387 rq = task_rq_lock(p, &flags);
2388
2353 WARN_ON(p->state != TASK_WAKING); 2389 WARN_ON(p->state != TASK_WAKING);
2354 cpu = task_cpu(p); 2390 cpu = task_cpu(p);
2355 2391
@@ -2406,6 +2442,17 @@ out_running:
2406#ifdef CONFIG_SMP 2442#ifdef CONFIG_SMP
2407 if (p->sched_class->task_wake_up) 2443 if (p->sched_class->task_wake_up)
2408 p->sched_class->task_wake_up(rq, p); 2444 p->sched_class->task_wake_up(rq, p);
2445
2446 if (unlikely(rq->idle_stamp)) {
2447 u64 delta = rq->clock - rq->idle_stamp;
2448 u64 max = 2*sysctl_sched_migration_cost;
2449
2450 if (delta > max)
2451 rq->avg_idle = max;
2452 else
2453 update_avg(&rq->avg_idle, delta);
2454 rq->idle_stamp = 0;
2455 }
2409#endif 2456#endif
2410out: 2457out:
2411 task_rq_unlock(rq, &flags); 2458 task_rq_unlock(rq, &flags);
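Editorial note on the hunk above: the new idle_stamp/avg_idle pair lets the wakeup path record how long the CPU had been idle before this wakeup, folding the sample into a decaying average that is clamped at twice sysctl_sched_migration_cost; idle_balance() (changed later in this patch) then skips newidle balancing when that average is below the migration cost. A minimal userspace sketch of the bookkeeping, assuming a simple 1/8-weight exponential average for update_avg() (the weight and the 500us cost below are made-up stand-ins, not the kernel's values):

#include <stdint.h>
#include <stdio.h>

/* stand-in for sysctl_sched_migration_cost, in nanoseconds (assumed value) */
static const uint64_t migration_cost_ns = 500000;

/* 1/8-weight exponential moving average (assumed weight) */
static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = (int64_t)sample - (int64_t)*avg;
        *avg += diff >> 3;
}

/* wakeup side: fold the just-ended idle period into avg_idle */
static void note_wakeup(uint64_t *avg_idle, uint64_t idle_stamp, uint64_t now)
{
        uint64_t delta = now - idle_stamp;
        uint64_t max = 2 * migration_cost_ns;

        if (delta > max)
                *avg_idle = max;        /* clamp very long idle periods */
        else
                update_avg(avg_idle, delta);
}

/* newidle side: only pull work if idling long enough to amortize a migration */
static int worth_balancing(uint64_t avg_idle)
{
        return avg_idle >= migration_cost_ns;
}

int main(void)
{
        uint64_t avg_idle = 2 * migration_cost_ns;
        uint64_t now = 0;

        /* a burst of very short idle periods drags the average down */
        for (int i = 0; i < 20; i++) {
                uint64_t idle_stamp = now;

                now += 10000;           /* idle for 10us */
                note_wakeup(&avg_idle, idle_stamp, now);
                now += 100000;          /* then run for 100us */
        }
        printf("avg_idle=%llu ns, balance? %d\n",
               (unsigned long long)avg_idle, worth_balancing(avg_idle));
        return 0;
}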
@@ -2511,26 +2558,22 @@ static void __sched_fork(struct task_struct *p)
2511void sched_fork(struct task_struct *p, int clone_flags) 2558void sched_fork(struct task_struct *p, int clone_flags)
2512{ 2559{
2513 int cpu = get_cpu(); 2560 int cpu = get_cpu();
2561 unsigned long flags;
2514 2562
2515 __sched_fork(p); 2563 __sched_fork(p);
2516 2564
2517 /* 2565 /*
2518 * Make sure we do not leak PI boosting priority to the child.
2519 */
2520 p->prio = current->normal_prio;
2521
2522 /*
2523 * Revert to default priority/policy on fork if requested. 2566 * Revert to default priority/policy on fork if requested.
2524 */ 2567 */
2525 if (unlikely(p->sched_reset_on_fork)) { 2568 if (unlikely(p->sched_reset_on_fork)) {
2526 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) 2569 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
2527 p->policy = SCHED_NORMAL; 2570 p->policy = SCHED_NORMAL;
2528 2571 p->normal_prio = p->static_prio;
2529 if (p->normal_prio < DEFAULT_PRIO) 2572 }
2530 p->prio = DEFAULT_PRIO;
2531 2573
2532 if (PRIO_TO_NICE(p->static_prio) < 0) { 2574 if (PRIO_TO_NICE(p->static_prio) < 0) {
2533 p->static_prio = NICE_TO_PRIO(0); 2575 p->static_prio = NICE_TO_PRIO(0);
2576 p->normal_prio = p->static_prio;
2534 set_load_weight(p); 2577 set_load_weight(p);
2535 } 2578 }
2536 2579
@@ -2541,13 +2584,21 @@ void sched_fork(struct task_struct *p, int clone_flags)
2541 p->sched_reset_on_fork = 0; 2584 p->sched_reset_on_fork = 0;
2542 } 2585 }
2543 2586
2587 /*
2588 * Make sure we do not leak PI boosting priority to the child.
2589 */
2590 p->prio = current->normal_prio;
2591
2544 if (!rt_prio(p->prio)) 2592 if (!rt_prio(p->prio))
2545 p->sched_class = &fair_sched_class; 2593 p->sched_class = &fair_sched_class;
2546 2594
2547#ifdef CONFIG_SMP 2595#ifdef CONFIG_SMP
2548 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0); 2596 cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
2549#endif 2597#endif
2598 local_irq_save(flags);
2599 update_rq_clock(cpu_rq(cpu));
2550 set_task_cpu(p, cpu); 2600 set_task_cpu(p, cpu);
2601 local_irq_restore(flags);
2551 2602
2552#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 2603#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2553 if (likely(sched_info_on())) 2604 if (likely(sched_info_on()))
@@ -2581,8 +2632,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2581 BUG_ON(p->state != TASK_RUNNING); 2632 BUG_ON(p->state != TASK_RUNNING);
2582 update_rq_clock(rq); 2633 update_rq_clock(rq);
2583 2634
2584 p->prio = effective_prio(p);
2585
2586 if (!p->sched_class->task_new || !current->se.on_rq) { 2635 if (!p->sched_class->task_new || !current->se.on_rq) {
2587 activate_task(rq, p, 0); 2636 activate_task(rq, p, 0);
2588 } else { 2637 } else {
@@ -2816,14 +2865,14 @@ context_switch(struct rq *rq, struct task_struct *prev,
2816 */ 2865 */
2817 arch_start_context_switch(prev); 2866 arch_start_context_switch(prev);
2818 2867
2819 if (unlikely(!mm)) { 2868 if (likely(!mm)) {
2820 next->active_mm = oldmm; 2869 next->active_mm = oldmm;
2821 atomic_inc(&oldmm->mm_count); 2870 atomic_inc(&oldmm->mm_count);
2822 enter_lazy_tlb(oldmm, next); 2871 enter_lazy_tlb(oldmm, next);
2823 } else 2872 } else
2824 switch_mm(oldmm, mm, next); 2873 switch_mm(oldmm, mm, next);
2825 2874
2826 if (unlikely(!prev->mm)) { 2875 if (likely(!prev->mm)) {
2827 prev->active_mm = NULL; 2876 prev->active_mm = NULL;
2828 rq->prev_mm = oldmm; 2877 rq->prev_mm = oldmm;
2829 } 2878 }
@@ -2986,15 +3035,6 @@ static void calc_load_account_active(struct rq *this_rq)
2986} 3035}
2987 3036
2988/* 3037/*
2989 * Externally visible per-cpu scheduler statistics:
2990 * cpu_nr_migrations(cpu) - number of migrations into that cpu
2991 */
2992u64 cpu_nr_migrations(int cpu)
2993{
2994 return cpu_rq(cpu)->nr_migrations_in;
2995}
2996
2997/*
2998 * Update rq->cpu_load[] statistics. This function is usually called every 3038 * Update rq->cpu_load[] statistics. This function is usually called every
2999 * scheduler tick (TICK_NSEC). 3039 * scheduler tick (TICK_NSEC).
3000 */ 3040 */
@@ -3658,6 +3698,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
3658 3698
3659/** 3699/**
3660 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 3700 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3701 * @sd: The sched_domain whose statistics are to be updated.
3661 * @group: sched_group whose statistics are to be updated. 3702 * @group: sched_group whose statistics are to be updated.
3662 * @this_cpu: Cpu for which load balance is currently performed. 3703 * @this_cpu: Cpu for which load balance is currently performed.
3663 * @idle: Idle status of this_cpu 3704 * @idle: Idle status of this_cpu
@@ -4093,7 +4134,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
4093 unsigned long flags; 4134 unsigned long flags;
4094 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); 4135 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4095 4136
4096 cpumask_setall(cpus); 4137 cpumask_copy(cpus, cpu_online_mask);
4097 4138
4098 /* 4139 /*
4099 * When power savings policy is enabled for the parent domain, idle 4140 * When power savings policy is enabled for the parent domain, idle
@@ -4256,7 +4297,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
4256 int all_pinned = 0; 4297 int all_pinned = 0;
4257 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask); 4298 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4258 4299
4259 cpumask_setall(cpus); 4300 cpumask_copy(cpus, cpu_online_mask);
4260 4301
4261 /* 4302 /*
4262 * When power savings policy is enabled for the parent domain, idle 4303 * When power savings policy is enabled for the parent domain, idle
@@ -4396,6 +4437,11 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
4396 int pulled_task = 0; 4437 int pulled_task = 0;
4397 unsigned long next_balance = jiffies + HZ; 4438 unsigned long next_balance = jiffies + HZ;
4398 4439
4440 this_rq->idle_stamp = this_rq->clock;
4441
4442 if (this_rq->avg_idle < sysctl_sched_migration_cost)
4443 return;
4444
4399 for_each_domain(this_cpu, sd) { 4445 for_each_domain(this_cpu, sd) {
4400 unsigned long interval; 4446 unsigned long interval;
4401 4447
@@ -4410,8 +4456,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
4410 interval = msecs_to_jiffies(sd->balance_interval); 4456 interval = msecs_to_jiffies(sd->balance_interval);
4411 if (time_after(next_balance, sd->last_balance + interval)) 4457 if (time_after(next_balance, sd->last_balance + interval))
4412 next_balance = sd->last_balance + interval; 4458 next_balance = sd->last_balance + interval;
4413 if (pulled_task) 4459 if (pulled_task) {
4460 this_rq->idle_stamp = 0;
4414 break; 4461 break;
4462 }
4415 } 4463 }
4416 if (pulled_task || time_after(jiffies, this_rq->next_balance)) { 4464 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
4417 /* 4465 /*
@@ -5013,8 +5061,13 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
5013 p->gtime = cputime_add(p->gtime, cputime); 5061 p->gtime = cputime_add(p->gtime, cputime);
5014 5062
5015 /* Add guest time to cpustat. */ 5063 /* Add guest time to cpustat. */
5016 cpustat->user = cputime64_add(cpustat->user, tmp); 5064 if (TASK_NICE(p) > 0) {
5017 cpustat->guest = cputime64_add(cpustat->guest, tmp); 5065 cpustat->nice = cputime64_add(cpustat->nice, tmp);
5066 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
5067 } else {
5068 cpustat->user = cputime64_add(cpustat->user, tmp);
5069 cpustat->guest = cputime64_add(cpustat->guest, tmp);
5070 }
5018} 5071}
5019 5072
5020/* 5073/*
@@ -5129,60 +5182,86 @@ void account_idle_ticks(unsigned long ticks)
5129 * Use precise platform statistics if available: 5182 * Use precise platform statistics if available:
5130 */ 5183 */
5131#ifdef CONFIG_VIRT_CPU_ACCOUNTING 5184#ifdef CONFIG_VIRT_CPU_ACCOUNTING
5132cputime_t task_utime(struct task_struct *p) 5185void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5133{ 5186{
5134 return p->utime; 5187 *ut = p->utime;
5188 *st = p->stime;
5135} 5189}
5136 5190
5137cputime_t task_stime(struct task_struct *p) 5191void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5138{ 5192{
5139 return p->stime; 5193 struct task_cputime cputime;
5194
5195 thread_group_cputime(p, &cputime);
5196
5197 *ut = cputime.utime;
5198 *st = cputime.stime;
5140} 5199}
5141#else 5200#else
5142cputime_t task_utime(struct task_struct *p) 5201
5202#ifndef nsecs_to_cputime
5203# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
5204#endif
5205
5206void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5143{ 5207{
5144 clock_t utime = cputime_to_clock_t(p->utime), 5208 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
5145 total = utime + cputime_to_clock_t(p->stime);
5146 u64 temp;
5147 5209
5148 /* 5210 /*
5149 * Use CFS's precise accounting: 5211 * Use CFS's precise accounting:
5150 */ 5212 */
5151 temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime); 5213 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
5152 5214
5153 if (total) { 5215 if (total) {
5154 temp *= utime; 5216 u64 temp;
5217
5218 temp = (u64)(rtime * utime);
5155 do_div(temp, total); 5219 do_div(temp, total);
5156 } 5220 utime = (cputime_t)temp;
5157 utime = (clock_t)temp; 5221 } else
5222 utime = rtime;
5158 5223
5159 p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime)); 5224 /*
5160 return p->prev_utime; 5225 * Compare with previous values, to keep monotonicity:
5226 */
5227 p->prev_utime = max(p->prev_utime, utime);
5228 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
5229
5230 *ut = p->prev_utime;
5231 *st = p->prev_stime;
5161} 5232}
5162 5233
5163cputime_t task_stime(struct task_struct *p) 5234/*
5235 * Must be called with siglock held.
5236 */
5237void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
5164{ 5238{
5165 clock_t stime; 5239 struct signal_struct *sig = p->signal;
5240 struct task_cputime cputime;
5241 cputime_t rtime, utime, total;
5166 5242
5167 /* 5243 thread_group_cputime(p, &cputime);
5168 * Use CFS's precise accounting. (we subtract utime from
5169 * the total, to make sure the total observed by userspace
5170 * grows monotonically - apps rely on that):
5171 */
5172 stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
5173 cputime_to_clock_t(task_utime(p));
5174 5244
5175 if (stime >= 0) 5245 total = cputime_add(cputime.utime, cputime.stime);
5176 p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime)); 5246 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
5177 5247
5178 return p->prev_stime; 5248 if (total) {
5179} 5249 u64 temp;
5180#endif
5181 5250
5182inline cputime_t task_gtime(struct task_struct *p) 5251 temp = (u64)(rtime * cputime.utime);
5183{ 5252 do_div(temp, total);
5184 return p->gtime; 5253 utime = (cputime_t)temp;
5254 } else
5255 utime = rtime;
5256
5257 sig->prev_utime = max(sig->prev_utime, utime);
5258 sig->prev_stime = max(sig->prev_stime,
5259 cputime_sub(rtime, sig->prev_utime));
5260
5261 *ut = sig->prev_utime;
5262 *st = sig->prev_stime;
5185} 5263}
5264#endif
5186 5265
5187/* 5266/*
5188 * This function gets called by the timer code, with HZ frequency. 5267 * This function gets called by the timer code, with HZ frequency.
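Editorial note on the hunk above: the rewritten task_times() distributes the precise CFS runtime (rtime) between user and system time in the ratio suggested by the tick-sampled utime/stime, then clamps both results against the previously reported values so userspace never sees them go backwards. A small standalone model of that arithmetic, with plain integers standing in for cputime_t (a sketch of the idea, not the kernel code):

#include <stdint.h>
#include <stdio.h>

struct times {
        uint64_t utime, stime;           /* tick-sampled split */
        uint64_t sum_exec_runtime;       /* precise CFS runtime */
        uint64_t prev_utime, prev_stime; /* last values reported */
};

static void task_times_model(struct times *t, uint64_t *ut, uint64_t *st)
{
        uint64_t rtime = t->sum_exec_runtime;
        uint64_t total = t->utime + t->stime;
        uint64_t utime;

        if (total)
                utime = rtime * t->utime / total; /* scale by the sampled ratio */
        else
                utime = rtime;

        /* keep both values monotonic across calls */
        if (utime > t->prev_utime)
                t->prev_utime = utime;
        if (rtime - t->prev_utime > t->prev_stime)
                t->prev_stime = rtime - t->prev_utime;

        *ut = t->prev_utime;
        *st = t->prev_stime;
}

int main(void)
{
        struct times t = { .utime = 30, .stime = 10, .sum_exec_runtime = 100 };
        uint64_t ut, st;

        task_times_model(&t, &ut, &st); /* 100 split 3:1 -> 75/25 */
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)ut, (unsigned long long)st);
        return 0;
}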
@@ -5448,7 +5527,7 @@ need_resched_nonpreemptible:
5448} 5527}
5449EXPORT_SYMBOL(schedule); 5528EXPORT_SYMBOL(schedule);
5450 5529
5451#ifdef CONFIG_SMP 5530#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
5452/* 5531/*
5453 * Look out! "owner" is an entirely speculative pointer 5532 * Look out! "owner" is an entirely speculative pointer
5454 * access and not reliable. 5533 * access and not reliable.
@@ -6142,22 +6221,14 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
6142 BUG_ON(p->se.on_rq); 6221 BUG_ON(p->se.on_rq);
6143 6222
6144 p->policy = policy; 6223 p->policy = policy;
6145 switch (p->policy) {
6146 case SCHED_NORMAL:
6147 case SCHED_BATCH:
6148 case SCHED_IDLE:
6149 p->sched_class = &fair_sched_class;
6150 break;
6151 case SCHED_FIFO:
6152 case SCHED_RR:
6153 p->sched_class = &rt_sched_class;
6154 break;
6155 }
6156
6157 p->rt_priority = prio; 6224 p->rt_priority = prio;
6158 p->normal_prio = normal_prio(p); 6225 p->normal_prio = normal_prio(p);
6159 /* we are holding p->pi_lock already */ 6226 /* we are holding p->pi_lock already */
6160 p->prio = rt_mutex_getprio(p); 6227 p->prio = rt_mutex_getprio(p);
6228 if (rt_prio(p->prio))
6229 p->sched_class = &rt_sched_class;
6230 else
6231 p->sched_class = &fair_sched_class;
6161 set_load_weight(p); 6232 set_load_weight(p);
6162} 6233}
6163 6234
@@ -6720,9 +6791,6 @@ EXPORT_SYMBOL(yield);
6720/* 6791/*
6721 * This task is about to go to sleep on IO. Increment rq->nr_iowait so 6792 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
6722 * that process accounting knows that this is a task in IO wait state. 6793 * that process accounting knows that this is a task in IO wait state.
6723 *
6724 * But don't do that if it is a deliberate, throttling IO wait (this task
6725 * has set its backing_dev_info: the queue against which it should throttle)
6726 */ 6794 */
6727void __sched io_schedule(void) 6795void __sched io_schedule(void)
6728{ 6796{
@@ -6905,7 +6973,7 @@ void show_state_filter(unsigned long state_filter)
6905 /* 6973 /*
6906 * Only show locks if all tasks are dumped: 6974 * Only show locks if all tasks are dumped:
6907 */ 6975 */
6908 if (state_filter == -1) 6976 if (!state_filter)
6909 debug_show_all_locks(); 6977 debug_show_all_locks();
6910} 6978}
6911 6979
@@ -7376,17 +7444,16 @@ static struct ctl_table sd_ctl_dir[] = {
7376 .procname = "sched_domain", 7444 .procname = "sched_domain",
7377 .mode = 0555, 7445 .mode = 0555,
7378 }, 7446 },
7379 {0, }, 7447 {}
7380}; 7448};
7381 7449
7382static struct ctl_table sd_ctl_root[] = { 7450static struct ctl_table sd_ctl_root[] = {
7383 { 7451 {
7384 .ctl_name = CTL_KERN,
7385 .procname = "kernel", 7452 .procname = "kernel",
7386 .mode = 0555, 7453 .mode = 0555,
7387 .child = sd_ctl_dir, 7454 .child = sd_ctl_dir,
7388 }, 7455 },
7389 {0, }, 7456 {}
7390}; 7457};
7391 7458
7392static struct ctl_table *sd_alloc_ctl_entry(int n) 7459static struct ctl_table *sd_alloc_ctl_entry(int n)
@@ -7710,6 +7777,16 @@ early_initcall(migration_init);
7710 7777
7711#ifdef CONFIG_SCHED_DEBUG 7778#ifdef CONFIG_SCHED_DEBUG
7712 7779
7780static __read_mostly int sched_domain_debug_enabled;
7781
7782static int __init sched_domain_debug_setup(char *str)
7783{
7784 sched_domain_debug_enabled = 1;
7785
7786 return 0;
7787}
7788early_param("sched_debug", sched_domain_debug_setup);
7789
7713static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 7790static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
7714 struct cpumask *groupmask) 7791 struct cpumask *groupmask)
7715{ 7792{
@@ -7796,6 +7873,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
7796 cpumask_var_t groupmask; 7873 cpumask_var_t groupmask;
7797 int level = 0; 7874 int level = 0;
7798 7875
7876 if (!sched_domain_debug_enabled)
7877 return;
7878
7799 if (!sd) { 7879 if (!sd) {
7800 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); 7880 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
7801 return; 7881 return;
@@ -7875,6 +7955,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
7875 7955
7876static void free_rootdomain(struct root_domain *rd) 7956static void free_rootdomain(struct root_domain *rd)
7877{ 7957{
7958 synchronize_sched();
7959
7878 cpupri_cleanup(&rd->cpupri); 7960 cpupri_cleanup(&rd->cpupri);
7879 7961
7880 free_cpumask_var(rd->rto_mask); 7962 free_cpumask_var(rd->rto_mask);
@@ -8015,6 +8097,7 @@ static cpumask_var_t cpu_isolated_map;
8015/* Setup the mask of cpus configured for isolated domains */ 8097/* Setup the mask of cpus configured for isolated domains */
8016static int __init isolated_cpu_setup(char *str) 8098static int __init isolated_cpu_setup(char *str)
8017{ 8099{
8100 alloc_bootmem_cpumask_var(&cpu_isolated_map);
8018 cpulist_parse(str, cpu_isolated_map); 8101 cpulist_parse(str, cpu_isolated_map);
8019 return 1; 8102 return 1;
8020} 8103}
@@ -8851,7 +8934,7 @@ static int build_sched_domains(const struct cpumask *cpu_map)
8851 return __build_sched_domains(cpu_map, NULL); 8934 return __build_sched_domains(cpu_map, NULL);
8852} 8935}
8853 8936
8854static struct cpumask *doms_cur; /* current sched domains */ 8937static cpumask_var_t *doms_cur; /* current sched domains */
8855static int ndoms_cur; /* number of sched domains in 'doms_cur' */ 8938static int ndoms_cur; /* number of sched domains in 'doms_cur' */
8856static struct sched_domain_attr *dattr_cur; 8939static struct sched_domain_attr *dattr_cur;
8857 /* attributes of custom domains in 'doms_cur' */ 8940
@@ -8873,6 +8956,31 @@ int __attribute__((weak)) arch_update_cpu_topology(void)
8873 return 0; 8956 return 0;
8874} 8957}
8875 8958
8959cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
8960{
8961 int i;
8962 cpumask_var_t *doms;
8963
8964 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
8965 if (!doms)
8966 return NULL;
8967 for (i = 0; i < ndoms; i++) {
8968 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
8969 free_sched_domains(doms, i);
8970 return NULL;
8971 }
8972 }
8973 return doms;
8974}
8975
8976void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
8977{
8978 unsigned int i;
8979 for (i = 0; i < ndoms; i++)
8980 free_cpumask_var(doms[i]);
8981 kfree(doms);
8982}
8983
8876/* 8984/*
8877 * Set up scheduler domains and groups. Callers must hold the hotplug lock. 8985 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
8878 * For now this just excludes isolated cpus, but could be used to 8986 * For now this just excludes isolated cpus, but could be used to
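Editorial note on the hunk above: alloc_sched_domains() follows the usual allocate-or-roll-back pattern, where a failure on any element of the array releases exactly the elements obtained so far before reporting failure. The same pattern in a self-contained userspace form, with malloc/calloc and a made-up struct mask standing in for cpumask_var_t (illustrative only):

#include <stdlib.h>

/* stand-in for a cpumask: a small fixed-size bitmap */
struct mask { unsigned long bits[4]; };

static void free_masks(struct mask **masks, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++)
                free(masks[i]);
        free(masks);
}

static struct mask **alloc_masks(unsigned int n)
{
        struct mask **masks;
        unsigned int i;

        masks = malloc(n * sizeof(*masks));
        if (!masks)
                return NULL;
        for (i = 0; i < n; i++) {
                masks[i] = calloc(1, sizeof(**masks));
                if (!masks[i]) {
                        /* roll back the i entries already allocated */
                        free_masks(masks, i);
                        return NULL;
                }
        }
        return masks;
}

int main(void)
{
        struct mask **doms = alloc_masks(4);

        if (doms)
                free_masks(doms, 4);
        return 0;
}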
@@ -8884,12 +8992,12 @@ static int arch_init_sched_domains(const struct cpumask *cpu_map)
8884 8992
8885 arch_update_cpu_topology(); 8993 arch_update_cpu_topology();
8886 ndoms_cur = 1; 8994 ndoms_cur = 1;
8887 doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); 8995 doms_cur = alloc_sched_domains(ndoms_cur);
8888 if (!doms_cur) 8996 if (!doms_cur)
8889 doms_cur = fallback_doms; 8997 doms_cur = &fallback_doms;
8890 cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); 8998 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
8891 dattr_cur = NULL; 8999 dattr_cur = NULL;
8892 err = build_sched_domains(doms_cur); 9000 err = build_sched_domains(doms_cur[0]);
8893 register_sched_domain_sysctl(); 9001 register_sched_domain_sysctl();
8894 9002
8895 return err; 9003 return err;
@@ -8939,19 +9047,19 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
8939 * doms_new[] to the current sched domain partitioning, doms_cur[]. 9047 * doms_new[] to the current sched domain partitioning, doms_cur[].
8940 * It destroys each deleted domain and builds each new domain. 9048 * It destroys each deleted domain and builds each new domain.
8941 * 9049 *
8942 * 'doms_new' is an array of cpumask's of length 'ndoms_new'. 9050 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
8943 * The masks don't intersect (don't overlap.) We should setup one 9051 * The masks don't intersect (don't overlap.) We should setup one
8944 * sched domain for each mask. CPUs not in any of the cpumasks will 9052 * sched domain for each mask. CPUs not in any of the cpumasks will
8945 * not be load balanced. If the same cpumask appears both in the 9053 * not be load balanced. If the same cpumask appears both in the
8946 * current 'doms_cur' domains and in the new 'doms_new', we can leave 9054 * current 'doms_cur' domains and in the new 'doms_new', we can leave
8947 * it as it is. 9055 * it as it is.
8948 * 9056 *
8949 * The passed in 'doms_new' should be kmalloc'd. This routine takes 9057 * The passed in 'doms_new' should be allocated using
8950 * ownership of it and will kfree it when done with it. If the caller 9058 * alloc_sched_domains. This routine takes ownership of it and will
8951 * failed the kmalloc call, then it can pass in doms_new == NULL && 9059 * free_sched_domains it when done with it. If the caller failed the
8952 * ndoms_new == 1, and partition_sched_domains() will fallback to 9060 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
8953 * the single partition 'fallback_doms', it also forces the domains 9061 * and partition_sched_domains() will fallback to the single partition
8954 * to be rebuilt. 9062 * 'fallback_doms', it also forces the domains to be rebuilt.
8955 * 9063 *
8956 * If doms_new == NULL it will be replaced with cpu_online_mask. 9064 * If doms_new == NULL it will be replaced with cpu_online_mask.
8957 * ndoms_new == 0 is a special case for destroying existing domains, 9065 * ndoms_new == 0 is a special case for destroying existing domains,
@@ -8959,8 +9067,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
8959 * 9067 *
8960 * Call with hotplug lock held 9068 * Call with hotplug lock held
8961 */ 9069 */
8962/* FIXME: Change to struct cpumask *doms_new[] */ 9070void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
8963void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
8964 struct sched_domain_attr *dattr_new) 9071 struct sched_domain_attr *dattr_new)
8965{ 9072{
8966 int i, j, n; 9073 int i, j, n;
@@ -8979,40 +9086,40 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
8979 /* Destroy deleted domains */ 9086 /* Destroy deleted domains */
8980 for (i = 0; i < ndoms_cur; i++) { 9087 for (i = 0; i < ndoms_cur; i++) {
8981 for (j = 0; j < n && !new_topology; j++) { 9088 for (j = 0; j < n && !new_topology; j++) {
8982 if (cpumask_equal(&doms_cur[i], &doms_new[j]) 9089 if (cpumask_equal(doms_cur[i], doms_new[j])
8983 && dattrs_equal(dattr_cur, i, dattr_new, j)) 9090 && dattrs_equal(dattr_cur, i, dattr_new, j))
8984 goto match1; 9091 goto match1;
8985 } 9092 }
8986 /* no match - a current sched domain not in new doms_new[] */ 9093 /* no match - a current sched domain not in new doms_new[] */
8987 detach_destroy_domains(doms_cur + i); 9094 detach_destroy_domains(doms_cur[i]);
8988match1: 9095match1:
8989 ; 9096 ;
8990 } 9097 }
8991 9098
8992 if (doms_new == NULL) { 9099 if (doms_new == NULL) {
8993 ndoms_cur = 0; 9100 ndoms_cur = 0;
8994 doms_new = fallback_doms; 9101 doms_new = &fallback_doms;
8995 cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); 9102 cpumask_andnot(doms_new[0], cpu_online_mask, cpu_isolated_map);
8996 WARN_ON_ONCE(dattr_new); 9103 WARN_ON_ONCE(dattr_new);
8997 } 9104 }
8998 9105
8999 /* Build new domains */ 9106 /* Build new domains */
9000 for (i = 0; i < ndoms_new; i++) { 9107 for (i = 0; i < ndoms_new; i++) {
9001 for (j = 0; j < ndoms_cur && !new_topology; j++) { 9108 for (j = 0; j < ndoms_cur && !new_topology; j++) {
9002 if (cpumask_equal(&doms_new[i], &doms_cur[j]) 9109 if (cpumask_equal(doms_new[i], doms_cur[j])
9003 && dattrs_equal(dattr_new, i, dattr_cur, j)) 9110 && dattrs_equal(dattr_new, i, dattr_cur, j))
9004 goto match2; 9111 goto match2;
9005 } 9112 }
9006 /* no match - add a new doms_new */ 9113 /* no match - add a new doms_new */
9007 __build_sched_domains(doms_new + i, 9114 __build_sched_domains(doms_new[i],
9008 dattr_new ? dattr_new + i : NULL); 9115 dattr_new ? dattr_new + i : NULL);
9009match2: 9116match2:
9010 ; 9117 ;
9011 } 9118 }
9012 9119
9013 /* Remember the new sched domains */ 9120 /* Remember the new sched domains */
9014 if (doms_cur != fallback_doms) 9121 if (doms_cur != &fallback_doms)
9015 kfree(doms_cur); 9122 free_sched_domains(doms_cur, ndoms_cur);
9016 kfree(dattr_cur); /* kfree(NULL) is safe */ 9123 kfree(dattr_cur); /* kfree(NULL) is safe */
9017 doms_cur = doms_new; 9124 doms_cur = doms_new;
9018 dattr_cur = dattr_new; 9125 dattr_cur = dattr_new;
@@ -9334,10 +9441,6 @@ void __init sched_init(void)
9334#ifdef CONFIG_CPUMASK_OFFSTACK 9441#ifdef CONFIG_CPUMASK_OFFSTACK
9335 alloc_size += num_possible_cpus() * cpumask_size(); 9442 alloc_size += num_possible_cpus() * cpumask_size();
9336#endif 9443#endif
9337 /*
9338 * As sched_init() is called before page_alloc is setup,
9339 * we use alloc_bootmem().
9340 */
9341 if (alloc_size) { 9444 if (alloc_size) {
9342 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); 9445 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
9343 9446
@@ -9406,6 +9509,10 @@ void __init sched_init(void)
9406#endif /* CONFIG_USER_SCHED */ 9509#endif /* CONFIG_USER_SCHED */
9407#endif /* CONFIG_GROUP_SCHED */ 9510#endif /* CONFIG_GROUP_SCHED */
9408 9511
9512#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
9513 update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
9514 __alignof__(unsigned long));
9515#endif
9409 for_each_possible_cpu(i) { 9516 for_each_possible_cpu(i) {
9410 struct rq *rq; 9517 struct rq *rq;
9411 9518
@@ -9488,6 +9595,8 @@ void __init sched_init(void)
9488 rq->cpu = i; 9595 rq->cpu = i;
9489 rq->online = 0; 9596 rq->online = 0;
9490 rq->migration_thread = NULL; 9597 rq->migration_thread = NULL;
9598 rq->idle_stamp = 0;
9599 rq->avg_idle = 2*sysctl_sched_migration_cost;
9491 INIT_LIST_HEAD(&rq->migration_queue); 9600 INIT_LIST_HEAD(&rq->migration_queue);
9492 rq_attach_root(rq, &def_root_domain); 9601 rq_attach_root(rq, &def_root_domain);
9493#endif 9602#endif
@@ -9531,13 +9640,15 @@ void __init sched_init(void)
9531 current->sched_class = &fair_sched_class; 9640 current->sched_class = &fair_sched_class;
9532 9641
9533 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ 9642 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
9534 alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT); 9643 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
9535#ifdef CONFIG_SMP 9644#ifdef CONFIG_SMP
9536#ifdef CONFIG_NO_HZ 9645#ifdef CONFIG_NO_HZ
9537 alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT); 9646 zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
9538 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT); 9647 alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
9539#endif 9648#endif
9540 alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); 9649 /* May be allocated at isolcpus cmdline parse time */
9650 if (cpu_isolated_map == NULL)
9651 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
9541#endif /* SMP */ 9652#endif /* SMP */
9542 9653
9543 perf_event_init(); 9654 perf_event_init();
@@ -10867,6 +10978,7 @@ void synchronize_sched_expedited(void)
10867 spin_unlock_irqrestore(&rq->lock, flags); 10978 spin_unlock_irqrestore(&rq->lock, flags);
10868 } 10979 }
10869 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 10980 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
10981 synchronize_sched_expedited_count++;
10870 mutex_unlock(&rcu_sched_expedited_mutex); 10982 mutex_unlock(&rcu_sched_expedited_mutex);
10871 put_online_cpus(); 10983 put_online_cpus();
10872 if (need_full_sync) 10984 if (need_full_sync)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index efb84409bc43..6988cf08f705 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -285,12 +285,16 @@ static void print_cpu(struct seq_file *m, int cpu)
285 285
286#ifdef CONFIG_SCHEDSTATS 286#ifdef CONFIG_SCHEDSTATS
287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
288#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
288 289
289 P(yld_count); 290 P(yld_count);
290 291
291 P(sched_switch); 292 P(sched_switch);
292 P(sched_count); 293 P(sched_count);
293 P(sched_goidle); 294 P(sched_goidle);
295#ifdef CONFIG_SMP
296 P64(avg_idle);
297#endif
294 298
295 P(ttwu_count); 299 P(ttwu_count);
296 P(ttwu_local); 300 P(ttwu_local);
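Editorial note on the sched_debug.c change above: the new P64() macro uses the same trick as P(), where the member name is both stringized with '#' to produce the printed label and pasted into the expression that reads the field. A tiny standalone illustration of that idiom with a hypothetical stats struct (the names here are invented, not the scheduler's):

#include <stdio.h>

struct stats {
        int yld_count;
        long long avg_idle;
};

/* print ".name : value", deriving the label from the member name itself */
#define P(s, n)   printf(" .%-30s: %d\n", #n, (s)->n)
#define P64(s, n) printf(" .%-30s: %lld\n", #n, (s)->n)

int main(void)
{
        struct stats st = { .yld_count = 3, .avg_idle = 1000000LL };

        P(&st, yld_count);
        P64(&st, avg_idle);
        return 0;
}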
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4e777b47eeda..f61837ad336d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
822 * re-elected due to buddy favours. 822 * re-elected due to buddy favours.
823 */ 823 */
824 clear_buddies(cfs_rq, curr); 824 clear_buddies(cfs_rq, curr);
825 return;
826 }
827
828 /*
829 * Ensure that a task that missed wakeup preemption by a
830 * narrow margin doesn't have to wait for a full slice.
831 * This also mitigates buddy induced latencies under load.
832 */
833 if (!sched_feat(WAKEUP_PREEMPT))
834 return;
835
836 if (delta_exec < sysctl_sched_min_granularity)
837 return;
838
839 if (cfs_rq->nr_running > 1) {
840 struct sched_entity *se = __pick_next_entity(cfs_rq);
841 s64 delta = curr->vruntime - se->vruntime;
842
843 if (delta > ideal_runtime)
844 resched_task(rq_of(cfs_rq)->curr);
825 } 845 }
826} 846}
827 847
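Editorial note on the hunk above: the added tail of check_preempt_tick() reschedules the running entity when the leftmost waiter's vruntime trails it by more than one ideal slice, so a task that narrowly lost wakeup preemption does not have to sit out a full slice. A one-function model of that comparison (illustrative only, values invented):

#include <stdint.h>
#include <stdio.h>

/*
 * Preempt the runner if the leftmost waiter has fallen more than one
 * ideal slice behind in vruntime.
 */
static int should_preempt(int64_t curr_vruntime, int64_t next_vruntime,
                          int64_t ideal_runtime)
{
        int64_t delta = curr_vruntime - next_vruntime;

        return delta > ideal_runtime;
}

int main(void)
{
        /* waiter is 7ms behind, ideal slice is 4ms -> preempt */
        printf("%d\n", should_preempt(10000000, 3000000, 4000000));
        /* waiter is only 1ms behind -> let the runner keep its slice */
        printf("%d\n", should_preempt(10000000, 9000000, 4000000));
        return 0;
}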
@@ -861,12 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
861static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) 881static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
862{ 882{
863 struct sched_entity *se = __pick_next_entity(cfs_rq); 883 struct sched_entity *se = __pick_next_entity(cfs_rq);
884 struct sched_entity *left = se;
864 885
865 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1) 886 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
866 return cfs_rq->next; 887 se = cfs_rq->next;
867 888
868 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1) 889 /*
869 return cfs_rq->last; 890 * Prefer last buddy, try to return the CPU to a preempted task.
891 */
892 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
893 se = cfs_rq->last;
894
895 clear_buddies(cfs_rq, se);
870 896
871 return se; 897 return se;
872} 898}
@@ -1319,6 +1345,37 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1319} 1345}
1320 1346
1321/* 1347/*
1348 * Try and locate an idle CPU in the sched_domain.
1349 */
1350static int
1351select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
1352{
1353 int cpu = smp_processor_id();
1354 int prev_cpu = task_cpu(p);
1355 int i;
1356
1357 /*
1358 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE
1359 * test in select_task_rq_fair) and the prev_cpu is idle then that's
1360 * always a better target than the current cpu.
1361 */
1362 if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running)
1363 return prev_cpu;
1364
1365 /*
1366 * Otherwise, iterate the domain and find an eligible idle cpu.
1367 */
1368 for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1369 if (!cpu_rq(i)->cfs.nr_running) {
1370 target = i;
1371 break;
1372 }
1373 }
1374
1375 return target;
1376}
1377
1378/*
1322 * sched_balance_self: balance the current task (running on cpu) in domains 1379 * sched_balance_self: balance the current task (running on cpu) in domains
1323 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and 1380 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1324 * SD_BALANCE_EXEC. 1381 * SD_BALANCE_EXEC.
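Editorial note on the hunk above: select_idle_sibling() prefers waking the task where nothing is runnable, taking the previous CPU if it is idle, otherwise the first allowed idle CPU in the domain, otherwise the original target. A userspace sketch of that selection with plain arrays standing in for runqueues and cpumasks (the data layout is invented; the preference order mirrors the hunk above):

#include <stdio.h>

#define NR_CPUS 8

/*
 * Pick a wakeup CPU: keep "target" unless the task's previous CPU or some
 * other allowed CPU in the domain is idle (has nothing runnable).
 */
static int select_idle_sibling_model(const int nr_running[NR_CPUS],
                                     const int allowed[NR_CPUS],
                                     int target, int this_cpu, int prev_cpu)
{
        int i;

        /* the waker's CPU was chosen but the task's old CPU is idle: go back */
        if (target == this_cpu && nr_running[prev_cpu] == 0)
                return prev_cpu;

        /* otherwise scan the domain for any allowed idle CPU */
        for (i = 0; i < NR_CPUS; i++) {
                if (allowed[i] && nr_running[i] == 0)
                        return i;
        }

        return target;
}

int main(void)
{
        int nr_running[NR_CPUS] = { 2, 1, 0, 3, 1, 1, 1, 1 };
        int allowed[NR_CPUS]    = { 1, 1, 1, 1, 1, 1, 1, 1 };

        /* prev_cpu 1 is busy, so the scan settles on idle cpu 2 */
        printf("wake on cpu %d\n",
               select_idle_sibling_model(nr_running, allowed, 0, 0, 1));
        return 0;
}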
@@ -1372,11 +1429,35 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1372 want_sd = 0; 1429 want_sd = 0;
1373 } 1430 }
1374 1431
1375 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && 1432 /*
1376 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { 1433 * While iterating the domains looking for a spanning
1434 * WAKE_AFFINE domain, adjust the affine target to any idle cpu
1435 * in cache sharing domains along the way.
1436 */
1437 if (want_affine) {
1438 int target = -1;
1377 1439
1378 affine_sd = tmp; 1440 /*
1379 want_affine = 0; 1441 * If both cpu and prev_cpu are part of this domain,
1442 * cpu is a valid SD_WAKE_AFFINE target.
1443 */
1444 if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
1445 target = cpu;
1446
1447 /*
1448 * If there's an idle sibling in this domain, make that
1449 * the wake_affine target instead of the current cpu.
1450 */
1451 if (tmp->flags & SD_PREFER_SIBLING)
1452 target = select_idle_sibling(p, tmp, target);
1453
1454 if (target >= 0) {
1455 if (tmp->flags & SD_WAKE_AFFINE) {
1456 affine_sd = tmp;
1457 want_affine = 0;
1458 }
1459 cpu = target;
1460 }
1380 } 1461 }
1381 1462
1382 if (!want_sd && !want_affine) 1463 if (!want_sd && !want_affine)
@@ -1568,6 +1649,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1568 struct sched_entity *se = &curr->se, *pse = &p->se; 1649 struct sched_entity *se = &curr->se, *pse = &p->se;
1569 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1650 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1570 int sync = wake_flags & WF_SYNC; 1651 int sync = wake_flags & WF_SYNC;
1652 int scale = cfs_rq->nr_running >= sched_nr_latency;
1571 1653
1572 update_curr(cfs_rq); 1654 update_curr(cfs_rq);
1573 1655
@@ -1582,18 +1664,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1582 if (unlikely(se == pse)) 1664 if (unlikely(se == pse))
1583 return; 1665 return;
1584 1666
1585 /* 1667 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
1586 * Only set the backward buddy when the current task is still on the
1587 * rq. This can happen when a wakeup gets interleaved with schedule on
1588 * the ->pre_schedule() or idle_balance() point, either of which can
1589 * drop the rq lock.
1590 *
1591 * Also, during early boot the idle thread is in the fair class, for
1592 * obvious reasons its a bad idea to schedule back to the idle thread.
1593 */
1594 if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
1595 set_last_buddy(se);
1596 if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
1597 set_next_buddy(pse); 1668 set_next_buddy(pse);
1598 1669
1599 /* 1670 /*
@@ -1639,8 +1710,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1639 1710
1640 BUG_ON(!pse); 1711 BUG_ON(!pse);
1641 1712
1642 if (wakeup_preempt_entity(se, pse) == 1) 1713 if (wakeup_preempt_entity(se, pse) == 1) {
1643 resched_task(curr); 1714 resched_task(curr);
1715 /*
1716 * Only set the backward buddy when the current task is still
1717 * on the rq. This can happen when a wakeup gets interleaved
1718 * with schedule on the ->pre_schedule() or idle_balance()
 1719 * point, either of which can drop the rq lock.
1720 *
1721 * Also, during early boot the idle thread is in the fair class,
 1722 * for obvious reasons it's a bad idea to schedule back to it.
1723 */
1724 if (unlikely(!se->on_rq || curr == rq->idle))
1725 return;
1726 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1727 set_last_buddy(se);
1728 }
1644} 1729}
1645 1730
1646static struct task_struct *pick_next_task_fair(struct rq *rq) 1731static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1649,21 +1734,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
1649 struct cfs_rq *cfs_rq = &rq->cfs; 1734 struct cfs_rq *cfs_rq = &rq->cfs;
1650 struct sched_entity *se; 1735 struct sched_entity *se;
1651 1736
1652 if (unlikely(!cfs_rq->nr_running)) 1737 if (!cfs_rq->nr_running)
1653 return NULL; 1738 return NULL;
1654 1739
1655 do { 1740 do {
1656 se = pick_next_entity(cfs_rq); 1741 se = pick_next_entity(cfs_rq);
1657 /*
1658 * If se was a buddy, clear it so that it will have to earn
1659 * the favour again.
1660 *
1661 * If se was not a buddy, clear the buddies because neither
1662 * was elegible to run, let them earn it again.
1663 *
1664 * IOW. unconditionally clear buddies.
1665 */
1666 __clear_buddies(cfs_rq, NULL);
1667 set_next_entity(cfs_rq, se); 1742 set_next_entity(cfs_rq, se);
1668 cfs_rq = group_cfs_rq(se); 1743 cfs_rq = group_cfs_rq(se);
1669 } while (cfs_rq); 1744 } while (cfs_rq);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a4d790cddb19..5c5fef378415 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1153,29 +1153,12 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1153 1153
1154static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1154static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1155 1155
1156static inline int pick_optimal_cpu(int this_cpu,
1157 const struct cpumask *mask)
1158{
1159 int first;
1160
1161 /* "this_cpu" is cheaper to preempt than a remote processor */
1162 if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
1163 return this_cpu;
1164
1165 first = cpumask_first(mask);
1166 if (first < nr_cpu_ids)
1167 return first;
1168
1169 return -1;
1170}
1171
1172static int find_lowest_rq(struct task_struct *task) 1156static int find_lowest_rq(struct task_struct *task)
1173{ 1157{
1174 struct sched_domain *sd; 1158 struct sched_domain *sd;
1175 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); 1159 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1176 int this_cpu = smp_processor_id(); 1160 int this_cpu = smp_processor_id();
1177 int cpu = task_cpu(task); 1161 int cpu = task_cpu(task);
1178 cpumask_var_t domain_mask;
1179 1162
1180 if (task->rt.nr_cpus_allowed == 1) 1163 if (task->rt.nr_cpus_allowed == 1)
1181 return -1; /* No other targets possible */ 1164 return -1; /* No other targets possible */
@@ -1198,28 +1181,26 @@ static int find_lowest_rq(struct task_struct *task)
1198 * Otherwise, we consult the sched_domains span maps to figure 1181 * Otherwise, we consult the sched_domains span maps to figure
1199 * out which cpu is logically closest to our hot cache data. 1182 * out which cpu is logically closest to our hot cache data.
1200 */ 1183 */
1201 if (this_cpu == cpu) 1184 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1202 this_cpu = -1; /* Skip this_cpu opt if the same */ 1185 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1203
1204 if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
1205 for_each_domain(cpu, sd) {
1206 if (sd->flags & SD_WAKE_AFFINE) {
1207 int best_cpu;
1208 1186
1209 cpumask_and(domain_mask, 1187 for_each_domain(cpu, sd) {
1210 sched_domain_span(sd), 1188 if (sd->flags & SD_WAKE_AFFINE) {
1211 lowest_mask); 1189 int best_cpu;
1212 1190
1213 best_cpu = pick_optimal_cpu(this_cpu, 1191 /*
1214 domain_mask); 1192 * "this_cpu" is cheaper to preempt than a
1215 1193 * remote processor.
1216 if (best_cpu != -1) { 1194 */
1217 free_cpumask_var(domain_mask); 1195 if (this_cpu != -1 &&
1218 return best_cpu; 1196 cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
1219 } 1197 return this_cpu;
1220 } 1198
1199 best_cpu = cpumask_first_and(lowest_mask,
1200 sched_domain_span(sd));
1201 if (best_cpu < nr_cpu_ids)
1202 return best_cpu;
1221 } 1203 }
1222 free_cpumask_var(domain_mask);
1223 } 1204 }
1224 1205
1225 /* 1206 /*
@@ -1227,7 +1208,13 @@ static int find_lowest_rq(struct task_struct *task)
1227 * just give the caller *something* to work with from the compatible 1208 * just give the caller *something* to work with from the compatible
1228 * locations. 1209 * locations.
1229 */ 1210 */
1230 return pick_optimal_cpu(this_cpu, lowest_mask); 1211 if (this_cpu != -1)
1212 return this_cpu;
1213
1214 cpu = cpumask_any(lowest_mask);
1215 if (cpu < nr_cpu_ids)
1216 return cpu;
1217 return -1;
1231} 1218}
1232 1219
1233/* Will lock the rq it finds */ 1220/* Will lock the rq it finds */
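Editorial note on the sched_rt.c change above: with pick_optimal_cpu() and the temporary domain_mask gone, find_lowest_rq() checks candidates directly against lowest_mask, preferring this_cpu when it is itself a lowest-priority CPU inside the domain, then the first lowest-priority CPU the domain spans, and finally any lowest-priority CPU at all. A compact bitmask model of that preference cascade for a single domain (illustrative only, not the kernel's cpumask API):

#include <stdio.h>

/* each mask is a bitmap of CPUs, bit i == CPU i */
static int first_cpu_and(unsigned long a, unsigned long b)
{
        unsigned long both = a & b;
        int i;

        for (i = 0; i < (int)(sizeof(both) * 8); i++)
                if (both & (1UL << i))
                        return i;
        return -1;
}

static int find_lowest_cpu(unsigned long lowest_mask,
                           unsigned long domain_span, int this_cpu)
{
        int cpu;

        /* this_cpu is cheapest to preempt if it is already a lowest CPU */
        if ((lowest_mask & (1UL << this_cpu)) &&
            (domain_span & (1UL << this_cpu)))
                return this_cpu;

        /* otherwise the first lowest-priority CPU the domain spans */
        cpu = first_cpu_and(lowest_mask, domain_span);
        if (cpu >= 0)
                return cpu;

        /* last resort: this_cpu if it qualifies, else any lowest CPU */
        if (lowest_mask & (1UL << this_cpu))
                return this_cpu;
        return first_cpu_and(lowest_mask, ~0UL);
}

int main(void)
{
        /* lowest-priority CPUs are 2 and 5; the domain spans CPUs 0-3 */
        printf("cpu %d\n", find_lowest_cpu(0x24UL, 0x0fUL, 0));
        return 0;
}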
diff --git a/kernel/signal.c b/kernel/signal.c
index 6705320784fd..6b982f2cf524 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,12 +22,14 @@
22#include <linux/ptrace.h> 22#include <linux/ptrace.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/signalfd.h> 24#include <linux/signalfd.h>
25#include <linux/ratelimit.h>
25#include <linux/tracehook.h> 26#include <linux/tracehook.h>
26#include <linux/capability.h> 27#include <linux/capability.h>
27#include <linux/freezer.h> 28#include <linux/freezer.h>
28#include <linux/pid_namespace.h> 29#include <linux/pid_namespace.h>
29#include <linux/nsproxy.h> 30#include <linux/nsproxy.h>
30#include <trace/events/sched.h> 31#define CREATE_TRACE_POINTS
32#include <trace/events/signal.h>
31 33
32#include <asm/param.h> 34#include <asm/param.h>
33#include <asm/uaccess.h> 35#include <asm/uaccess.h>
@@ -41,6 +43,8 @@
41 43
42static struct kmem_cache *sigqueue_cachep; 44static struct kmem_cache *sigqueue_cachep;
43 45
46int print_fatal_signals __read_mostly;
47
44static void __user *sig_handler(struct task_struct *t, int sig) 48static void __user *sig_handler(struct task_struct *t, int sig)
45{ 49{
46 return t->sighand->action[sig - 1].sa.sa_handler; 50 return t->sighand->action[sig - 1].sa.sa_handler;
@@ -159,7 +163,7 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
159{ 163{
160 unsigned long i, *s, *m, x; 164 unsigned long i, *s, *m, x;
161 int sig = 0; 165 int sig = 0;
162 166
163 s = pending->signal.sig; 167 s = pending->signal.sig;
164 m = mask->sig; 168 m = mask->sig;
165 switch (_NSIG_WORDS) { 169 switch (_NSIG_WORDS) {
@@ -184,17 +188,31 @@ int next_signal(struct sigpending *pending, sigset_t *mask)
184 sig = ffz(~x) + 1; 188 sig = ffz(~x) + 1;
185 break; 189 break;
186 } 190 }
187 191
188 return sig; 192 return sig;
189} 193}
190 194
195static inline void print_dropped_signal(int sig)
196{
197 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
198
199 if (!print_fatal_signals)
200 return;
201
202 if (!__ratelimit(&ratelimit_state))
203 return;
204
205 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
206 current->comm, current->pid, sig);
207}
208
191/* 209/*
192 * allocate a new signal queue record 210 * allocate a new signal queue record
193 * - this may be called without locks if and only if t == current, otherwise an 211 * - this may be called without locks if and only if t == current, otherwise an
194 * appropriate lock must be held to stop the target task from exiting 212
195 */ 213 */
196static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, 214static struct sigqueue *
197 int override_rlimit) 215__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
198{ 216{
199 struct sigqueue *q = NULL; 217 struct sigqueue *q = NULL;
200 struct user_struct *user; 218 struct user_struct *user;
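Editorial note on the hunk above: print_dropped_signal() throttles its diagnostic with DEFINE_RATELIMIT_STATE/__ratelimit, allowing at most a burst of reports per interval rather than one line per dropped signal. The behaviour can be modelled in userspace with a simple window-and-burst counter (a sketch of the semantics, not the linux/ratelimit.h implementation; the 5s/10 values echo the ones used above):

#include <stdio.h>
#include <time.h>

struct ratelimit {
        time_t interval;        /* window length in seconds */
        int burst;              /* messages allowed per window */
        time_t begin;
        int printed;
};

/* return 1 if the caller may emit a message now */
static int ratelimit_ok(struct ratelimit *rs)
{
        time_t now = time(NULL);

        if (rs->begin == 0 || now - rs->begin >= rs->interval) {
                rs->begin = now;        /* start a new window */
                rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
                rs->printed++;
                return 1;
        }
        return 0;
}

int main(void)
{
        struct ratelimit rs = { .interval = 5, .burst = 10 };
        int i, suppressed = 0;

        for (i = 0; i < 1000; i++) {
                if (ratelimit_ok(&rs))
                        printf("dropped signal %d\n", i);
                else
                        suppressed++;
        }
        printf("%d messages suppressed\n", suppressed);
        return 0;
}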
@@ -207,10 +225,15 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
207 */ 225 */
208 user = get_uid(__task_cred(t)->user); 226 user = get_uid(__task_cred(t)->user);
209 atomic_inc(&user->sigpending); 227 atomic_inc(&user->sigpending);
228
210 if (override_rlimit || 229 if (override_rlimit ||
211 atomic_read(&user->sigpending) <= 230 atomic_read(&user->sigpending) <=
212 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) 231 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
213 q = kmem_cache_alloc(sigqueue_cachep, flags); 232 q = kmem_cache_alloc(sigqueue_cachep, flags);
233 } else {
234 print_dropped_signal(sig);
235 }
236
214 if (unlikely(q == NULL)) { 237 if (unlikely(q == NULL)) {
215 atomic_dec(&user->sigpending); 238 atomic_dec(&user->sigpending);
216 free_uid(user); 239 free_uid(user);
@@ -834,7 +857,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
834 struct sigqueue *q; 857 struct sigqueue *q;
835 int override_rlimit; 858 int override_rlimit;
836 859
837 trace_sched_signal_send(sig, t); 860 trace_signal_generate(sig, info, t);
838 861
839 assert_spin_locked(&t->sighand->siglock); 862 assert_spin_locked(&t->sighand->siglock);
840 863
@@ -869,7 +892,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
869 else 892 else
870 override_rlimit = 0; 893 override_rlimit = 0;
871 894
872 q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE, 895 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
873 override_rlimit); 896 override_rlimit);
874 if (q) { 897 if (q) {
875 list_add_tail(&q->list, &pending->list); 898 list_add_tail(&q->list, &pending->list);
@@ -896,12 +919,21 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
896 break; 919 break;
897 } 920 }
898 } else if (!is_si_special(info)) { 921 } else if (!is_si_special(info)) {
899 if (sig >= SIGRTMIN && info->si_code != SI_USER) 922 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
900 /* 923 /*
901 * Queue overflow, abort. We may abort if the signal was rt 924 * Queue overflow, abort. We may abort if the
902 * and sent by user using something other than kill(). 925 * signal was rt and sent by user using something
903 */ 926 * other than kill().
927 */
928 trace_signal_overflow_fail(sig, group, info);
904 return -EAGAIN; 929 return -EAGAIN;
930 } else {
931 /*
932 * This is a silent loss of information. We still
933 * send the signal, but the *info bits are lost.
934 */
935 trace_signal_lose_info(sig, group, info);
936 }
905 } 937 }
906 938
907out_set: 939out_set:
@@ -925,8 +957,6 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
925 return __send_signal(sig, info, t, group, from_ancestor_ns); 957 return __send_signal(sig, info, t, group, from_ancestor_ns);
926} 958}
927 959
928int print_fatal_signals;
929
930static void print_fatal_signal(struct pt_regs *regs, int signr) 960static void print_fatal_signal(struct pt_regs *regs, int signr)
931{ 961{
932 printk("%s/%d: potentially unexpected fatal signal %d.\n", 962 printk("%s/%d: potentially unexpected fatal signal %d.\n",
@@ -1293,19 +1323,19 @@ EXPORT_SYMBOL(kill_pid);
1293 * These functions support sending signals using preallocated sigqueue 1323 * These functions support sending signals using preallocated sigqueue
1294 * structures. This is needed "because realtime applications cannot 1324 * structures. This is needed "because realtime applications cannot
1295 * afford to lose notifications of asynchronous events, like timer 1325 * afford to lose notifications of asynchronous events, like timer
1296 * expirations or I/O completions". In the case of Posix Timers 1326 * expirations or I/O completions". In the case of Posix Timers
1297 * we allocate the sigqueue structure from the timer_create. If this 1327 * we allocate the sigqueue structure from the timer_create. If this
1298 * allocation fails we are able to report the failure to the application 1328 * allocation fails we are able to report the failure to the application
1299 * with an EAGAIN error. 1329 * with an EAGAIN error.
1300 */ 1330 */
1301
1302struct sigqueue *sigqueue_alloc(void) 1331struct sigqueue *sigqueue_alloc(void)
1303{ 1332{
1304 struct sigqueue *q; 1333 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1305 1334
1306 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) 1335 if (q)
1307 q->flags |= SIGQUEUE_PREALLOC; 1336 q->flags |= SIGQUEUE_PREALLOC;
1308 return(q); 1337
1338 return q;
1309} 1339}
1310 1340
1311void sigqueue_free(struct sigqueue *q) 1341void sigqueue_free(struct sigqueue *q)
@@ -1839,6 +1869,9 @@ relock:
1839 ka = &sighand->action[signr-1]; 1869 ka = &sighand->action[signr-1];
1840 } 1870 }
1841 1871
1872 /* Trace actually delivered signals. */
1873 trace_signal_deliver(signr, info, ka);
1874
1842 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ 1875 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1843 continue; 1876 continue;
1844 if (ka->sa.sa_handler != SIG_DFL) { 1877 if (ka->sa.sa_handler != SIG_DFL) {
diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c
new file mode 100644
index 000000000000..e45c43645298
--- /dev/null
+++ b/kernel/slow-work-debugfs.c
@@ -0,0 +1,227 @@
1/* Slow work debugging
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/slow-work.h>
14#include <linux/fs.h>
15#include <linux/time.h>
16#include <linux/seq_file.h>
17#include "slow-work.h"
18
19#define ITERATOR_SHIFT (BITS_PER_LONG - 4)
20#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
21#define ITERATOR_COUNTER (~ITERATOR_SELECTOR)
22
23void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
24{
25 seq_puts(m, "Slow-work: New thread");
26}
27
28/*
29 * Render the time mark field on a work item into a 5-char time with units plus
30 * a space
31 */
32static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
33{
34 struct timespec now, diff;
35
36 now = CURRENT_TIME;
37 diff = timespec_sub(now, work->mark);
38
39 if (diff.tv_sec < 0)
40 seq_puts(m, " -ve ");
41 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
42 seq_printf(m, "%3luns ", diff.tv_nsec);
43 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
44 seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
45 else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
46 seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
47 else if (diff.tv_sec <= 1)
48 seq_puts(m, " 1s ");
49 else if (diff.tv_sec < 60)
50 seq_printf(m, "%4lus ", diff.tv_sec);
51 else if (diff.tv_sec < 60 * 60)
52 seq_printf(m, "%4lum ", diff.tv_sec / 60);
53 else if (diff.tv_sec < 60 * 60 * 24)
54 seq_printf(m, "%4luh ", diff.tv_sec / 3600);
55 else
56 seq_puts(m, "exces ");
57}
58
59/*
60 * Describe a slow work item for debugfs
61 */
62static int slow_work_runqueue_show(struct seq_file *m, void *v)
63{
64 struct slow_work *work;
65 struct list_head *p = v;
66 unsigned long id;
67
68 switch ((unsigned long) v) {
69 case 1:
70 seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n");
71 return 0;
72 case 2:
73 seq_puts(m, "=== ===== ================ == ===== ==========\n");
74 return 0;
75
76 case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
77 id = (unsigned long) v - 3;
78
79 read_lock(&slow_work_execs_lock);
80 work = slow_work_execs[id];
81 if (work) {
82 smp_read_barrier_depends();
83
84 seq_printf(m, "%3lu %5d %16p %2lx ",
85 id, slow_work_pids[id], work, work->flags);
86 slow_work_print_mark(m, work);
87
88 if (work->ops->desc)
89 work->ops->desc(work, m);
90 seq_putc(m, '\n');
91 }
92 read_unlock(&slow_work_execs_lock);
93 return 0;
94
95 default:
96 work = list_entry(p, struct slow_work, link);
97 seq_printf(m, "%3s - %16p %2lx ",
98 work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
99 work, work->flags);
100 slow_work_print_mark(m, work);
101
102 if (work->ops->desc)
103 work->ops->desc(work, m);
104 seq_putc(m, '\n');
105 return 0;
106 }
107}
108
109/*
110 * map the iterator to a work item
111 */
112static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
113{
114 struct list_head *p;
115 unsigned long count, id;
116
117 switch (*_pos >> ITERATOR_SHIFT) {
118 case 0x0:
119 if (*_pos == 0)
120 *_pos = 1;
121 if (*_pos < 3)
122 return (void *)(unsigned long) *_pos;
123 if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
124 for (id = *_pos - 3;
125 id < SLOW_WORK_THREAD_LIMIT;
126 id++, (*_pos)++)
127 if (slow_work_execs[id])
128 return (void *)(unsigned long) *_pos;
129 *_pos = 0x1UL << ITERATOR_SHIFT;
130
131 case 0x1:
132 count = *_pos & ITERATOR_COUNTER;
133 list_for_each(p, &slow_work_queue) {
134 if (count == 0)
135 return p;
136 count--;
137 }
138 *_pos = 0x2UL << ITERATOR_SHIFT;
139
140 case 0x2:
141 count = *_pos & ITERATOR_COUNTER;
142 list_for_each(p, &vslow_work_queue) {
143 if (count == 0)
144 return p;
145 count--;
146 }
147 *_pos = 0x3UL << ITERATOR_SHIFT;
148
149 default:
150 return NULL;
151 }
152}
153
154/*
155 * set up the iterator to start reading from the first line
156 */
157static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
158{
159 spin_lock_irq(&slow_work_queue_lock);
160 return slow_work_runqueue_index(m, _pos);
161}
162
163/*
164 * move to the next line
165 */
166static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
167{
168 struct list_head *p = v;
169 unsigned long selector = *_pos >> ITERATOR_SHIFT;
170
171 (*_pos)++;
172 switch (selector) {
173 case 0x0:
174 return slow_work_runqueue_index(m, _pos);
175
176 case 0x1:
177 if (*_pos >> ITERATOR_SHIFT == 0x1) {
178 p = p->next;
179 if (p != &slow_work_queue)
180 return p;
181 }
182 *_pos = 0x2UL << ITERATOR_SHIFT;
183 p = &vslow_work_queue;
184
185 case 0x2:
186 if (*_pos >> ITERATOR_SHIFT == 0x2) {
187 p = p->next;
188 if (p != &vslow_work_queue)
189 return p;
190 }
191 *_pos = 0x3UL << ITERATOR_SHIFT;
192
193 default:
194 return NULL;
195 }
196}
197
198/*
199 * clean up after reading
200 */
201static void slow_work_runqueue_stop(struct seq_file *m, void *v)
202{
203 spin_unlock_irq(&slow_work_queue_lock);
204}
205
206static const struct seq_operations slow_work_runqueue_ops = {
207 .start = slow_work_runqueue_start,
208 .stop = slow_work_runqueue_stop,
209 .next = slow_work_runqueue_next,
210 .show = slow_work_runqueue_show,
211};
212
213/*
214 * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents
215 */
216static int slow_work_runqueue_open(struct inode *inode, struct file *file)
217{
218 return seq_open(file, &slow_work_runqueue_ops);
219}
220
221const struct file_operations slow_work_runqueue_fops = {
222 .owner = THIS_MODULE,
223 .open = slow_work_runqueue_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = seq_release,
227};
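Editorial note on the new debugfs file above: the runqueue iterator encodes its whole position in a single loff_t, with the top four bits (ITERATOR_SELECTOR) choosing the phase (0 walks the header lines and the per-thread execution slots, 1 the slow queue, 2 the very-slow queue, 3 terminates) and the remaining bits (ITERATOR_COUNTER) counting entries within that phase. A standalone demonstration of the encode/decode arithmetic, reusing the same shift and mask (the example values are made up):

#include <stdio.h>

#define ITERATOR_SHIFT    (sizeof(unsigned long) * 8 - 4)
#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT)
#define ITERATOR_COUNTER  (~ITERATOR_SELECTOR)

int main(void)
{
        /* phase 2 (the very-slow queue), entry number 7 within it */
        unsigned long pos = (0x2UL << ITERATOR_SHIFT) | 7;

        printf("selector=%lu counter=%lu\n",
               pos >> ITERATOR_SHIFT, pos & ITERATOR_COUNTER);
        return 0;
}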
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf4..7494bbf5a270 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -16,11 +16,8 @@
16#include <linux/kthread.h> 16#include <linux/kthread.h>
17#include <linux/freezer.h> 17#include <linux/freezer.h>
18#include <linux/wait.h> 18#include <linux/wait.h>
19 19#include <linux/debugfs.h>
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of 20#include "slow-work.h"
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24 21
25static void slow_work_cull_timeout(unsigned long); 22static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long); 23static void slow_work_oom_timeout(unsigned long);
@@ -46,13 +43,12 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
46 43
47#ifdef CONFIG_SYSCTL 44#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2; 45static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255; 46static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
50static const int slow_work_min_vslow = 1; 47static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99; 48static const int slow_work_max_vslow = 99;
52 49
53ctl_table slow_work_sysctls[] = { 50ctl_table slow_work_sysctls[] = {
54 { 51 {
55 .ctl_name = CTL_UNNUMBERED,
56 .procname = "min-threads", 52 .procname = "min-threads",
57 .data = &slow_work_min_threads, 53 .data = &slow_work_min_threads,
58 .maxlen = sizeof(unsigned), 54 .maxlen = sizeof(unsigned),
@@ -62,7 +58,6 @@ ctl_table slow_work_sysctls[] = {
62 .extra2 = &slow_work_max_threads, 58 .extra2 = &slow_work_max_threads,
63 }, 59 },
64 { 60 {
65 .ctl_name = CTL_UNNUMBERED,
66 .procname = "max-threads", 61 .procname = "max-threads",
67 .data = &slow_work_max_threads, 62 .data = &slow_work_max_threads,
68 .maxlen = sizeof(unsigned), 63 .maxlen = sizeof(unsigned),
@@ -72,16 +67,15 @@ ctl_table slow_work_sysctls[] = {
72 .extra2 = (void *) &slow_work_max_max_threads, 67 .extra2 = (void *) &slow_work_max_max_threads,
73 }, 68 },
74 { 69 {
75 .ctl_name = CTL_UNNUMBERED,
76 .procname = "vslow-percentage", 70 .procname = "vslow-percentage",
77 .data = &vslow_work_proportion, 71 .data = &vslow_work_proportion,
78 .maxlen = sizeof(unsigned), 72 .maxlen = sizeof(unsigned),
79 .mode = 0644, 73 .mode = 0644,
80 .proc_handler = &proc_dointvec_minmax, 74 .proc_handler = proc_dointvec_minmax,
81 .extra1 = (void *) &slow_work_min_vslow, 75 .extra1 = (void *) &slow_work_min_vslow,
82 .extra2 = (void *) &slow_work_max_vslow, 76 .extra2 = (void *) &slow_work_max_vslow,
83 }, 77 },
84 { .ctl_name = 0 } 78 {}
85}; 79};
86#endif 80#endif
87 81
@@ -98,6 +92,56 @@ static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */ 92static struct slow_work slow_work_new_thread; /* new thread starter */
99 93
100/* 94/*
95 * slow work ID allocation (use slow_work_queue_lock)
96 */
97static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
98
99/*
100 * Unregistration tracking to prevent put_ref() from disappearing during module
101 * unload
102 */
103#ifdef CONFIG_MODULES
104static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
105static struct module *slow_work_unreg_module;
106static struct slow_work *slow_work_unreg_work_item;
107static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
108static DEFINE_MUTEX(slow_work_unreg_sync_lock);
109
110static void slow_work_set_thread_processing(int id, struct slow_work *work)
111{
112 if (work)
113 slow_work_thread_processing[id] = work->owner;
114}
115static void slow_work_done_thread_processing(int id, struct slow_work *work)
116{
117 struct module *module = slow_work_thread_processing[id];
118
119 slow_work_thread_processing[id] = NULL;
120 smp_mb();
121 if (slow_work_unreg_work_item == work ||
122 slow_work_unreg_module == module)
123 wake_up_all(&slow_work_unreg_wq);
124}
125static void slow_work_clear_thread_processing(int id)
126{
127 slow_work_thread_processing[id] = NULL;
128}
129#else
130static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
131static void slow_work_done_thread_processing(int id, struct slow_work *work) {}
132static void slow_work_clear_thread_processing(int id) {}
133#endif
134
135/*
 136 * Data for tracking currently executing items for indication through debugfs
137 */
138#ifdef CONFIG_SLOW_WORK_DEBUG
139struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
140pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
141DEFINE_RWLOCK(slow_work_execs_lock);
142#endif
143
144/*
101 * The queues of work items and the lock governing access to them. These are 145 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues 146 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs. 147 * as the number of threads bears no relation to the number of CPUs.
@@ -105,9 +149,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
105 * There are two queues of work items: one for slow work items, and one for 149 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items. 150 * very slow work items.
107 */ 151 */
108static LIST_HEAD(slow_work_queue); 152LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue); 153LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock); 154DEFINE_SPINLOCK(slow_work_queue_lock);
155
156/*
157 * The following are two wait queues that get pinged when a work item is placed
158 * on an empty queue. These allow work items that are hogging a thread by
159 * sleeping in a way that could be deferred to yield their thread and enqueue
160 * themselves.
161 */
162static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
163static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
111 164
112/* 165/*
113 * The thread controls. A variable used to signal to the threads that they 166 * The thread controls. A variable used to signal to the threads that they
@@ -126,6 +179,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
126static int slow_work_user_count; 179static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock); 180static DEFINE_MUTEX(slow_work_user_lock);
128 181
182static inline int slow_work_get_ref(struct slow_work *work)
183{
184 if (work->ops->get_ref)
185 return work->ops->get_ref(work);
186
187 return 0;
188}
189
190static inline void slow_work_put_ref(struct slow_work *work)
191{
192 if (work->ops->put_ref)
193 work->ops->put_ref(work);
194}
195
129/* 196/*
130 * Calculate the maximum number of active threads in the pool that are 197 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items. 198 * permitted to process very slow work items.
@@ -149,7 +216,7 @@ static unsigned slow_work_calc_vsmax(void)
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed 216 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do. 217 * it, false if there was nothing to do.
151 */ 218 */
152static bool slow_work_execute(void) 219static noinline bool slow_work_execute(int id)
153{ 220{
154 struct slow_work *work = NULL; 221 struct slow_work *work = NULL;
155 unsigned vsmax; 222 unsigned vsmax;
@@ -186,6 +253,13 @@ static bool slow_work_execute(void)
186 } else { 253 } else {
187 very_slow = false; /* avoid the compiler warning */ 254 very_slow = false; /* avoid the compiler warning */
188 } 255 }
256
257 slow_work_set_thread_processing(id, work);
258 if (work) {
259 slow_work_mark_time(work);
260 slow_work_begin_exec(id, work);
261 }
262
189 spin_unlock_irq(&slow_work_queue_lock); 263 spin_unlock_irq(&slow_work_queue_lock);
190 264
191 if (!work) 265 if (!work)
@@ -194,12 +268,19 @@ static bool slow_work_execute(void)
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) 268 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG(); 269 BUG();
196 270
197 work->ops->execute(work); 271 /* don't execute if the work is in the process of being cancelled */
272 if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
273 work->ops->execute(work);
198 274
199 if (very_slow) 275 if (very_slow)
200 atomic_dec(&vslow_work_executing_count); 276 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); 277 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202 278
279 /* wake up anyone waiting for this work to be complete */
280 wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
281
282 slow_work_end_exec(id, work);
283
203 /* if someone tried to enqueue the item whilst we were executing it, 284 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to 285 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously 286 * execute it simultaneously
@@ -219,7 +300,10 @@ static bool slow_work_execute(void)
219 spin_unlock_irq(&slow_work_queue_lock); 300 spin_unlock_irq(&slow_work_queue_lock);
220 } 301 }
221 302
222 work->ops->put_ref(work); 303 /* sort out the race between module unloading and put_ref() */
304 slow_work_put_ref(work);
305 slow_work_done_thread_processing(id, work);
306
223 return true; 307 return true;
224 308
225auto_requeue: 309auto_requeue:
@@ -227,15 +311,61 @@ auto_requeue:
227 * - we transfer our ref on the item back to the appropriate queue 311 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already 312 * - don't wake another thread up as we're awake already
229 */ 313 */
314 slow_work_mark_time(work);
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 315 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue); 316 list_add_tail(&work->link, &vslow_work_queue);
232 else 317 else
233 list_add_tail(&work->link, &slow_work_queue); 318 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock); 319 spin_unlock_irq(&slow_work_queue_lock);
320 slow_work_clear_thread_processing(id);
235 return true; 321 return true;
236} 322}
237 323
238/** 324/**
325 * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
 326 * @work: The work item under execution that wants to sleep
 327 * @_timeout: Scheduler sleep timeout
328 *
329 * Allow a requeueable work item to sleep on a slow-work processor thread until
330 * that thread is needed to do some other work or the sleep is interrupted by
331 * some other event.
332 *
333 * The caller must set up a wake up event before calling this and must have set
334 * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
335 * condition before calling this function as no test is made here.
336 *
337 * False is returned if there is nothing on the queue; true is returned if the
338 * work item should be requeued
339 */
340bool slow_work_sleep_till_thread_needed(struct slow_work *work,
341 signed long *_timeout)
342{
343 wait_queue_head_t *wfo_wq;
344 struct list_head *queue;
345
346 DEFINE_WAIT(wait);
347
348 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
349 wfo_wq = &vslow_work_queue_waits_for_occupation;
350 queue = &vslow_work_queue;
351 } else {
352 wfo_wq = &slow_work_queue_waits_for_occupation;
353 queue = &slow_work_queue;
354 }
355
356 if (!list_empty(queue))
357 return true;
358
359 add_wait_queue_exclusive(wfo_wq, &wait);
360 if (list_empty(queue))
361 *_timeout = schedule_timeout(*_timeout);
362 finish_wait(wfo_wq, &wait);
363
364 return !list_empty(queue);
365}
366EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
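
A minimal usage sketch, not part of the patch: an ->execute() routine that waits for a hypothetical my_object to become ready, yielding its thread when another item needs it. my_object, my_object_ready() and obj->waitq are illustrative names only.

static void my_work_execute(struct slow_work *work)
{
	struct my_object *obj = container_of(work, struct my_object, slow_work);
	signed long timeout = 5 * HZ;
	DEFINE_WAIT(wait);

	while (!my_object_ready(obj)) {
		/* set up our own wake-up event and sleep mode first, as the
		 * comment above requires */
		prepare_to_wait(&obj->waitq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_object_ready(obj))
			break;
		if (slow_work_sleep_till_thread_needed(work, &timeout)) {
			/* another item wants this thread: re-enqueueing whilst
			 * EXECUTING is set merely marks the item for requeue
			 * after we return */
			finish_wait(&obj->waitq, &wait);
			slow_work_enqueue(work);
			return;
		}
	}
	finish_wait(&obj->waitq, &wait);

	/* ... the actual slow work ... */
}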
367
368/**
239 * slow_work_enqueue - Schedule a slow work item for processing 369 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue 370 * @work: The work item to queue
241 * 371 *
@@ -260,16 +390,22 @@ auto_requeue:
260 * allowed to pick items to execute. This ensures that very slow items won't 390 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow. 391 * overly block ones that are just ordinarily slow.
262 * 392 *
263 * Returns 0 if successful, -EAGAIN if not. 393 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
 394 * attempted to be queued)
264 */ 395 */
265int slow_work_enqueue(struct slow_work *work) 396int slow_work_enqueue(struct slow_work *work)
266{ 397{
398 wait_queue_head_t *wfo_wq;
399 struct list_head *queue;
267 unsigned long flags; 400 unsigned long flags;
401 int ret;
402
403 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
404 return -ECANCELED;
268 405
269 BUG_ON(slow_work_user_count <= 0); 406 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work); 407 BUG_ON(!work);
271 BUG_ON(!work->ops); 408 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273 409
274 /* when honouring an enqueue request, we only promise that we will run 410 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once 411 * the work function in the future; we do not promise to run it once
@@ -280,8 +416,19 @@ int slow_work_enqueue(struct slow_work *work)
280 * maintaining our promise 416 * maintaining our promise
281 */ 417 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { 418 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
419 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
420 wfo_wq = &vslow_work_queue_waits_for_occupation;
421 queue = &vslow_work_queue;
422 } else {
423 wfo_wq = &slow_work_queue_waits_for_occupation;
424 queue = &slow_work_queue;
425 }
426
283 spin_lock_irqsave(&slow_work_queue_lock, flags); 427 spin_lock_irqsave(&slow_work_queue_lock, flags);
284 428
429 if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
430 goto cancelled;
431
285 /* we promise that we will not attempt to execute the work 432 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously 433 * function in more than one thread simultaneously
287 * 434 *
@@ -299,25 +446,221 @@ int slow_work_enqueue(struct slow_work *work)
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { 446 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); 447 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else { 448 } else {
302 if (work->ops->get_ref(work) < 0) 449 ret = slow_work_get_ref(work);
303 goto cant_get_ref; 450 if (ret < 0)
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) 451 goto failed;
305 list_add_tail(&work->link, &vslow_work_queue); 452 slow_work_mark_time(work);
306 else 453 list_add_tail(&work->link, queue);
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq); 454 wake_up(&slow_work_thread_wq);
455
456 /* if someone who could be requeued is sleeping on a
457 * thread, then ask them to yield their thread */
458 if (work->link.prev == queue)
459 wake_up(wfo_wq);
309 } 460 }
310 461
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 462 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 } 463 }
313 return 0; 464 return 0;
314 465
315cant_get_ref: 466cancelled:
467 ret = -ECANCELED;
468failed:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags); 469 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN; 470 return ret;
318} 471}
319EXPORT_SYMBOL(slow_work_enqueue); 472EXPORT_SYMBOL(slow_work_enqueue);
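
For context, a minimal caller under the revised rules might look like the sketch below (not part of the patch): get_ref/put_ref are now optional and the ops table names its owning module. my_execute, my_ops, my_item and my_kick are illustrative.

static void my_execute(struct slow_work *work)
{
	/* long-running, sleepable processing goes here */
}

static const struct slow_work_ops my_ops = {
	.owner   = THIS_MODULE,
	.execute = my_execute,	/* no get_ref/put_ref needed any more */
};

static struct slow_work my_item;

static int my_kick(void)
{
	slow_work_init(&my_item, &my_ops);

	/* -EAGAIN: a reference could not be taken;
	 * -ECANCELED: the item is currently being cancelled */
	return slow_work_enqueue(&my_item);
}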
320 473
474static int slow_work_wait(void *word)
475{
476 schedule();
477 return 0;
478}
479
480/**
481 * slow_work_cancel - Cancel a slow work item
482 * @work: The work item to cancel
483 *
484 * This function will cancel a previously enqueued work item. If we cannot
485 * cancel the work item, it is guarenteed to have run when this function
486 * returns.
487 */
488void slow_work_cancel(struct slow_work *work)
489{
490 bool wait = true, put = false;
491
492 set_bit(SLOW_WORK_CANCELLING, &work->flags);
493 smp_mb();
494
495 /* if the work item is a delayed work item with an active timer, we
496 * need to wait for the timer to finish _before_ getting the spinlock,
497 * lest we deadlock against the timer routine
498 *
499 * the timer routine will leave DELAYED set if it notices the
500 * CANCELLING flag in time
501 */
502 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
503 struct delayed_slow_work *dwork =
504 container_of(work, struct delayed_slow_work, work);
505 del_timer_sync(&dwork->timer);
506 }
507
508 spin_lock_irq(&slow_work_queue_lock);
509
510 if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
511 /* the timer routine aborted or never happened, so we are left
512 * holding the timer's reference on the item and should just
513 * drop the pending flag and wait for any ongoing execution to
514 * finish */
515 struct delayed_slow_work *dwork =
516 container_of(work, struct delayed_slow_work, work);
517
518 BUG_ON(timer_pending(&dwork->timer));
519 BUG_ON(!list_empty(&work->link));
520
521 clear_bit(SLOW_WORK_DELAYED, &work->flags);
522 put = true;
523 clear_bit(SLOW_WORK_PENDING, &work->flags);
524
525 } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
526 !list_empty(&work->link)) {
527 /* the link in the pending queue holds a reference on the item
528 * that we will need to release */
529 list_del_init(&work->link);
530 wait = false;
531 put = true;
532 clear_bit(SLOW_WORK_PENDING, &work->flags);
533
534 } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
535 /* the executor is holding our only reference on the item, so
536 * we merely need to wait for it to finish executing */
537 clear_bit(SLOW_WORK_PENDING, &work->flags);
538 }
539
540 spin_unlock_irq(&slow_work_queue_lock);
541
542 /* the EXECUTING flag is set by the executor whilst the spinlock is set
543 * and before the item is dequeued - so assuming the above doesn't
544 * actually dequeue it, simply waiting for the EXECUTING flag to be
545 * released here should be sufficient */
546 if (wait)
547 wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
548 TASK_UNINTERRUPTIBLE);
549
550 clear_bit(SLOW_WORK_CANCELLING, &work->flags);
551 if (put)
552 slow_work_put_ref(work);
553}
554EXPORT_SYMBOL(slow_work_cancel);
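
A short sketch of the intended teardown pattern (illustrative only; struct my_object is not from the patch):

static void my_object_destroy(struct my_object *obj)
{
	/* blocks until the item is neither queued nor executing, so it must
	 * not be called from the item's own ->execute() routine */
	slow_work_cancel(&obj->slow_work);
	kfree(obj);
}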
555
556/*
557 * Handle expiry of the delay timer, indicating that a delayed slow work item
558 * should now be queued if not cancelled
559 */
560static void delayed_slow_work_timer(unsigned long data)
561{
562 wait_queue_head_t *wfo_wq;
563 struct list_head *queue;
564 struct slow_work *work = (struct slow_work *) data;
565 unsigned long flags;
566 bool queued = false, put = false, first = false;
567
568 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
569 wfo_wq = &vslow_work_queue_waits_for_occupation;
570 queue = &vslow_work_queue;
571 } else {
572 wfo_wq = &slow_work_queue_waits_for_occupation;
573 queue = &slow_work_queue;
574 }
575
576 spin_lock_irqsave(&slow_work_queue_lock, flags);
577 if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
578 clear_bit(SLOW_WORK_DELAYED, &work->flags);
579
580 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
581 /* we discard the reference the timer was holding in
582 * favour of the one the executor holds */
583 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
584 put = true;
585 } else {
586 slow_work_mark_time(work);
587 list_add_tail(&work->link, queue);
588 queued = true;
589 if (work->link.prev == queue)
590 first = true;
591 }
592 }
593
594 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
595 if (put)
596 slow_work_put_ref(work);
597 if (first)
598 wake_up(wfo_wq);
599 if (queued)
600 wake_up(&slow_work_thread_wq);
601}
602
603/**
604 * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
605 * @dwork: The delayed work item to queue
606 * @delay: When to start executing the work, in jiffies from now
607 *
608 * This is similar to slow_work_enqueue(), but it adds a delay before the work
609 * is actually queued for processing.
610 *
611 * The item can have delayed processing requested on it whilst it is being
612 * executed. The delay will begin immediately, and if it expires before the
613 * item finishes executing, the item will be placed back on the queue when it
614 * has done executing.
615 */
616int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
617 unsigned long delay)
618{
619 struct slow_work *work = &dwork->work;
620 unsigned long flags;
621 int ret;
622
623 if (delay == 0)
624 return slow_work_enqueue(&dwork->work);
625
626 BUG_ON(slow_work_user_count <= 0);
627 BUG_ON(!work);
628 BUG_ON(!work->ops);
629
630 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
631 return -ECANCELED;
632
633 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
634 spin_lock_irqsave(&slow_work_queue_lock, flags);
635
636 if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
637 goto cancelled;
638
639 /* the timer holds a reference whilst it is pending */
 640 ret = slow_work_get_ref(work);
641 if (ret < 0)
642 goto cant_get_ref;
643
644 if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
645 BUG();
646 dwork->timer.expires = jiffies + delay;
647 dwork->timer.data = (unsigned long) work;
648 dwork->timer.function = delayed_slow_work_timer;
649 add_timer(&dwork->timer);
650
651 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
652 }
653
654 return 0;
655
656cancelled:
657 ret = -ECANCELED;
658cant_get_ref:
659 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
660 return ret;
661}
662EXPORT_SYMBOL(delayed_slow_work_enqueue);
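
A sketch of the delayed variant, assuming the matching delayed_slow_work_init() helper from the header; my_retry, my_try_operation() and the ten-second backoff are illustrative:

static struct delayed_slow_work my_retry;

static void my_retry_execute(struct slow_work *work)
{
	if (!my_try_operation())
		delayed_slow_work_enqueue(&my_retry, 10 * HZ);	/* back off */
}

static const struct slow_work_ops my_retry_ops = {
	.owner   = THIS_MODULE,
	.execute = my_retry_execute,
};

static void my_start_retrying(void)
{
	delayed_slow_work_init(&my_retry, &my_retry_ops);
	delayed_slow_work_enqueue(&my_retry, 10 * HZ);
}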
663
321/* 664/*
322 * Schedule a cull of the thread pool at some time in the near future 665 * Schedule a cull of the thread pool at some time in the near future
323 */ 666 */
@@ -368,13 +711,23 @@ static inline bool slow_work_available(int vsmax)
368 */ 711 */
369static int slow_work_thread(void *_data) 712static int slow_work_thread(void *_data)
370{ 713{
371 int vsmax; 714 int vsmax, id;
372 715
373 DEFINE_WAIT(wait); 716 DEFINE_WAIT(wait);
374 717
375 set_freezable(); 718 set_freezable();
376 set_user_nice(current, -5); 719 set_user_nice(current, -5);
377 720
721 /* allocate ourselves an ID */
722 spin_lock_irq(&slow_work_queue_lock);
723 id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
724 BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
725 __set_bit(id, slow_work_ids);
726 slow_work_set_thread_pid(id, current->pid);
727 spin_unlock_irq(&slow_work_queue_lock);
728
729 sprintf(current->comm, "kslowd%03u", id);
730
378 for (;;) { 731 for (;;) {
379 vsmax = vslow_work_proportion; 732 vsmax = vslow_work_proportion;
380 vsmax *= atomic_read(&slow_work_thread_count); 733 vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +748,7 @@ static int slow_work_thread(void *_data)
395 vsmax *= atomic_read(&slow_work_thread_count); 748 vsmax *= atomic_read(&slow_work_thread_count);
396 vsmax /= 100; 749 vsmax /= 100;
397 750
398 if (slow_work_available(vsmax) && slow_work_execute()) { 751 if (slow_work_available(vsmax) && slow_work_execute(id)) {
399 cond_resched(); 752 cond_resched();
400 if (list_empty(&slow_work_queue) && 753 if (list_empty(&slow_work_queue) &&
401 list_empty(&vslow_work_queue) && 754 list_empty(&vslow_work_queue) &&
@@ -412,6 +765,11 @@ static int slow_work_thread(void *_data)
412 break; 765 break;
413 } 766 }
414 767
768 spin_lock_irq(&slow_work_queue_lock);
769 slow_work_set_thread_pid(id, 0);
770 __clear_bit(id, slow_work_ids);
771 spin_unlock_irq(&slow_work_queue_lock);
772
415 if (atomic_dec_and_test(&slow_work_thread_count)) 773 if (atomic_dec_and_test(&slow_work_thread_count))
416 complete_and_exit(&slow_work_last_thread_exited, 0); 774 complete_and_exit(&slow_work_last_thread_exited, 0);
417 return 0; 775 return 0;
@@ -427,21 +785,6 @@ static void slow_work_cull_timeout(unsigned long data)
427} 785}
428 786
429/* 787/*
430 * Get a reference on slow work thread starter
431 */
432static int slow_work_new_thread_get_ref(struct slow_work *work)
433{
434 return 0;
435}
436
437/*
438 * Drop a reference on slow work thread starter
439 */
440static void slow_work_new_thread_put_ref(struct slow_work *work)
441{
442}
443
444/*
445 * Start a new slow work thread 788 * Start a new slow work thread
446 */ 789 */
447static void slow_work_new_thread_execute(struct slow_work *work) 790static void slow_work_new_thread_execute(struct slow_work *work)
@@ -475,9 +818,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
475} 818}
476 819
477static const struct slow_work_ops slow_work_new_thread_ops = { 820static const struct slow_work_ops slow_work_new_thread_ops = {
478 .get_ref = slow_work_new_thread_get_ref, 821 .owner = THIS_MODULE,
479 .put_ref = slow_work_new_thread_put_ref,
480 .execute = slow_work_new_thread_execute, 822 .execute = slow_work_new_thread_execute,
823#ifdef CONFIG_SLOW_WORK_DEBUG
824 .desc = slow_work_new_thread_desc,
825#endif
481}; 826};
482 827
483/* 828/*
@@ -546,12 +891,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
546 891
547/** 892/**
548 * slow_work_register_user - Register a user of the facility 893 * slow_work_register_user - Register a user of the facility
894 * @module: The module about to make use of the facility
549 * 895 *
550 * Register a user of the facility, starting up the initial threads if there 896 * Register a user of the facility, starting up the initial threads if there
551 * aren't any other users at this point. This will return 0 if successful, or 897 * aren't any other users at this point. This will return 0 if successful, or
552 * an error if not. 898 * an error if not.
553 */ 899 */
554int slow_work_register_user(void) 900int slow_work_register_user(struct module *module)
555{ 901{
556 struct task_struct *p; 902 struct task_struct *p;
557 int loop; 903 int loop;
@@ -598,14 +944,81 @@ error:
598} 944}
599EXPORT_SYMBOL(slow_work_register_user); 945EXPORT_SYMBOL(slow_work_register_user);
600 946
947/*
948 * wait for all outstanding items from the calling module to complete
949 * - note that more items may be queued whilst we're waiting
950 */
951static void slow_work_wait_for_items(struct module *module)
952{
953#ifdef CONFIG_MODULES
954 DECLARE_WAITQUEUE(myself, current);
955 struct slow_work *work;
956 int loop;
957
958 mutex_lock(&slow_work_unreg_sync_lock);
959 add_wait_queue(&slow_work_unreg_wq, &myself);
960
961 for (;;) {
962 spin_lock_irq(&slow_work_queue_lock);
963
964 /* first of all, we wait for the last queued item in each list
965 * to be processed */
966 list_for_each_entry_reverse(work, &vslow_work_queue, link) {
967 if (work->owner == module) {
968 set_current_state(TASK_UNINTERRUPTIBLE);
969 slow_work_unreg_work_item = work;
970 goto do_wait;
971 }
972 }
973 list_for_each_entry_reverse(work, &slow_work_queue, link) {
974 if (work->owner == module) {
975 set_current_state(TASK_UNINTERRUPTIBLE);
976 slow_work_unreg_work_item = work;
977 goto do_wait;
978 }
979 }
980
981 /* then we wait for the items being processed to finish */
982 slow_work_unreg_module = module;
983 smp_mb();
984 for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
985 if (slow_work_thread_processing[loop] == module)
986 goto do_wait;
987 }
988 spin_unlock_irq(&slow_work_queue_lock);
989 break; /* okay, we're done */
990
991 do_wait:
992 spin_unlock_irq(&slow_work_queue_lock);
993 schedule();
994 slow_work_unreg_work_item = NULL;
995 slow_work_unreg_module = NULL;
996 }
997
998 remove_wait_queue(&slow_work_unreg_wq, &myself);
999 mutex_unlock(&slow_work_unreg_sync_lock);
1000#endif /* CONFIG_MODULES */
1001}
1002
601/** 1003/**
602 * slow_work_unregister_user - Unregister a user of the facility 1004 * slow_work_unregister_user - Unregister a user of the facility
1005 * @module: The module whose items should be cleared
603 * 1006 *
604 * Unregister a user of the facility, killing all the threads if this was the 1007 * Unregister a user of the facility, killing all the threads if this was the
605 * last one. 1008 * last one.
1009 *
1010 * This waits for all the work items belonging to the nominated module to go
1011 * away before proceeding.
606 */ 1012 */
607void slow_work_unregister_user(void) 1013void slow_work_unregister_user(struct module *module)
608{ 1014{
1015 /* first of all, wait for all outstanding items from the calling module
1016 * to complete */
1017 if (module)
1018 slow_work_wait_for_items(module);
1019
1020 /* then we can actually go about shutting down the facility if need
1021 * be */
609 mutex_lock(&slow_work_user_lock); 1022 mutex_lock(&slow_work_user_lock);
610 1023
611 BUG_ON(slow_work_user_count <= 0); 1024 BUG_ON(slow_work_user_count <= 0);
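
The new interface is used roughly as in the sketch below (not from the patch): passing the module pointer lets unregistration wait for that module's outstanding items.

static int __init my_module_init(void)
{
	return slow_work_register_user(THIS_MODULE);
}

static void __exit my_module_exit(void)
{
	/* waits for all of this module's queued and executing items first */
	slow_work_unregister_user(THIS_MODULE);
}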
@@ -639,6 +1052,16 @@ static int __init init_slow_work(void)
639 if (slow_work_max_max_threads < nr_cpus * 2) 1052 if (slow_work_max_max_threads < nr_cpus * 2)
640 slow_work_max_max_threads = nr_cpus * 2; 1053 slow_work_max_max_threads = nr_cpus * 2;
641#endif 1054#endif
1055#ifdef CONFIG_SLOW_WORK_DEBUG
1056 {
1057 struct dentry *dbdir;
1058
1059 dbdir = debugfs_create_dir("slow_work", NULL);
1060 if (dbdir && !IS_ERR(dbdir))
1061 debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
1062 NULL, &slow_work_runqueue_fops);
1063 }
1064#endif
642 return 0; 1065 return 0;
643} 1066}
644 1067
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644
index 000000000000..321f3c59d732
--- /dev/null
+++ b/kernel/slow-work.h
@@ -0,0 +1,72 @@
1/* Slow work private definitions
2 *
3 * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
13 * things to do */
14#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
15 * OOM */
16
17#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */
18
19/*
20 * slow-work.c
21 */
22#ifdef CONFIG_SLOW_WORK_DEBUG
23extern struct slow_work *slow_work_execs[];
24extern pid_t slow_work_pids[];
25extern rwlock_t slow_work_execs_lock;
26#endif
27
28extern struct list_head slow_work_queue;
29extern struct list_head vslow_work_queue;
30extern spinlock_t slow_work_queue_lock;
31
32/*
33 * slow-work-debugfs.c
34 */
35#ifdef CONFIG_SLOW_WORK_DEBUG
36extern const struct file_operations slow_work_runqueue_fops;
37
38extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
39#endif
40
41/*
42 * Helper functions
43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{
 46#ifdef CONFIG_SLOW_WORK_DEBUG
47 slow_work_pids[id] = pid;
48#endif
49}
50
51static inline void slow_work_mark_time(struct slow_work *work)
52{
 53#ifdef CONFIG_SLOW_WORK_DEBUG
54 work->mark = CURRENT_TIME;
55#endif
56}
57
58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{
 60#ifdef CONFIG_SLOW_WORK_DEBUG
61 slow_work_execs[id] = work;
62#endif
63}
64
65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{
 67#ifdef CONFIG_SLOW_WORK_DEBUG
68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock);
71#endif
72}
diff --git a/kernel/smp.c b/kernel/smp.c
index c9d1c7835c2f..a8c76069cf50 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
265 * @info: An arbitrary pointer to pass to the function. 265 * @info: An arbitrary pointer to pass to the function.
266 * @wait: If true, wait until function has completed on other CPUs. 266 * @wait: If true, wait until function has completed on other CPUs.
267 * 267 *
268 * Returns 0 on success, else a negative status code. Note that @wait 268 * Returns 0 on success, else a negative status code.
269 * will be implicitly turned on in case of allocation failures, since
270 * we fall back to on-stack allocation.
271 */ 269 */
272int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 270int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
273 int wait) 271 int wait)
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
321} 319}
322EXPORT_SYMBOL(smp_call_function_single); 320EXPORT_SYMBOL(smp_call_function_single);
323 321
322/*
323 * smp_call_function_any - Run a function on any of the given cpus
324 * @mask: The mask of cpus it can run on.
325 * @func: The function to run. This must be fast and non-blocking.
326 * @info: An arbitrary pointer to pass to the function.
327 * @wait: If true, wait until function has completed.
328 *
329 * Returns 0 on success, else a negative status code (if no cpus were online).
330 * Note that @wait will be implicitly turned on in case of allocation failures,
331 * since we fall back to on-stack allocation.
332 *
333 * Selection preference:
334 * 1) current cpu if in @mask
335 * 2) any cpu of current node if in @mask
336 * 3) any other online cpu in @mask
337 */
338int smp_call_function_any(const struct cpumask *mask,
339 void (*func)(void *info), void *info, int wait)
340{
341 unsigned int cpu;
342 const struct cpumask *nodemask;
343 int ret;
344
345 /* Try for same CPU (cheapest) */
346 cpu = get_cpu();
347 if (cpumask_test_cpu(cpu, mask))
348 goto call;
349
350 /* Try for same node. */
351 nodemask = cpumask_of_node(cpu);
352 for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
353 cpu = cpumask_next_and(cpu, nodemask, mask)) {
354 if (cpu_online(cpu))
355 goto call;
356 }
357
358 /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
359 cpu = cpumask_any_and(mask, cpu_online_mask);
360call:
361 ret = smp_call_function_single(cpu, func, info, wait);
362 put_cpu();
363 return ret;
364}
365EXPORT_SYMBOL_GPL(smp_call_function_any);
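
A usage sketch (read_my_counter() and the per-package counter are illustrative, not from the patch): sample a value on whichever CPU in the mask is cheapest to reach.

static void read_my_counter(void *info)
{
	*(u64 *)info = my_read_hw_counter();	/* e.g. a per-package counter */
}

static int sample_counter(const struct cpumask *mask, u64 *val)
{
	/* prefers the current CPU, then the local node, then any online CPU */
	return smp_call_function_any(mask, read_my_counter, val, 1);
}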
366
324/** 367/**
325 * __smp_call_function_single(): Run a function on another CPU 368 * __smp_call_function_single(): Run a function on another CPU
326 * @cpu: The CPU to run on. 369 * @cpu: The CPU to run on.
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
355 * @wait: If true, wait (atomically) until function has completed 398 * @wait: If true, wait (atomically) until function has completed
356 * on other CPUs. 399 * on other CPUs.
357 * 400 *
358 * If @wait is true, then returns once @func has returned. Note that @wait 401 * If @wait is true, then returns once @func has returned.
359 * will be implicitly turned on in case of allocation failures, since
360 * we fall back to on-stack allocation.
361 * 402 *
362 * You must not call this function with disabled interrupts or from a 403 * You must not call this function with disabled interrupts or from a
363 * hardware interrupt handler or from a bottom half handler. Preemption 404 * hardware interrupt handler or from a bottom half handler. Preemption
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many);
443 * Returns 0. 484 * Returns 0.
444 * 485 *
445 * If @wait is true, then returns once @func has returned; otherwise 486 * If @wait is true, then returns once @func has returned; otherwise
446 * it returns just before the target cpu calls @func. In case of allocation 487 * it returns just before the target cpu calls @func.
447 * failure, @wait will be implicitly turned on.
448 * 488 *
449 * You must not call this function with disabled interrupts or from a 489 * You must not call this function with disabled interrupts or from a
450 * hardware interrupt handler or from a bottom half handler. 490 * hardware interrupt handler or from a bottom half handler.
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f8749e5216e0..21939d9e830e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -302,9 +302,9 @@ void irq_exit(void)
302 if (!in_interrupt() && local_softirq_pending()) 302 if (!in_interrupt() && local_softirq_pending())
303 invoke_softirq(); 303 invoke_softirq();
304 304
305 rcu_irq_exit();
305#ifdef CONFIG_NO_HZ 306#ifdef CONFIG_NO_HZ
306 /* Make sure that timer wheel updates are propagated */ 307 /* Make sure that timer wheel updates are propagated */
307 rcu_irq_exit();
308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched()) 308 if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
309 tick_nohz_stop_sched_tick(0); 309 tick_nohz_stop_sched_tick(0);
310#endif 310#endif
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..41e042219ff6 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,145 +21,28 @@
21#include <linux/debug_locks.h> 21#include <linux/debug_locks.h>
22#include <linux/module.h> 22#include <linux/module.h>
23 23
24#ifndef _spin_trylock
25int __lockfunc _spin_trylock(spinlock_t *lock)
26{
27 return __spin_trylock(lock);
28}
29EXPORT_SYMBOL(_spin_trylock);
30#endif
31
32#ifndef _read_trylock
33int __lockfunc _read_trylock(rwlock_t *lock)
34{
35 return __read_trylock(lock);
36}
37EXPORT_SYMBOL(_read_trylock);
38#endif
39
40#ifndef _write_trylock
41int __lockfunc _write_trylock(rwlock_t *lock)
42{
43 return __write_trylock(lock);
44}
45EXPORT_SYMBOL(_write_trylock);
46#endif
47
48/* 24/*
49 * If lockdep is enabled then we use the non-preemption spin-ops 25 * If lockdep is enabled then we use the non-preemption spin-ops
50 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are 26 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
51 * not re-enabled during lock-acquire (which the preempt-spin-ops do): 27 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
52 */ 28 */
53#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 29#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
54
55#ifndef _read_lock
56void __lockfunc _read_lock(rwlock_t *lock)
57{
58 __read_lock(lock);
59}
60EXPORT_SYMBOL(_read_lock);
61#endif
62
63#ifndef _spin_lock_irqsave
64unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
65{
66 return __spin_lock_irqsave(lock);
67}
68EXPORT_SYMBOL(_spin_lock_irqsave);
69#endif
70
71#ifndef _spin_lock_irq
72void __lockfunc _spin_lock_irq(spinlock_t *lock)
73{
74 __spin_lock_irq(lock);
75}
76EXPORT_SYMBOL(_spin_lock_irq);
77#endif
78
79#ifndef _spin_lock_bh
80void __lockfunc _spin_lock_bh(spinlock_t *lock)
81{
82 __spin_lock_bh(lock);
83}
84EXPORT_SYMBOL(_spin_lock_bh);
85#endif
86
87#ifndef _read_lock_irqsave
88unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
89{
90 return __read_lock_irqsave(lock);
91}
92EXPORT_SYMBOL(_read_lock_irqsave);
93#endif
94
95#ifndef _read_lock_irq
96void __lockfunc _read_lock_irq(rwlock_t *lock)
97{
98 __read_lock_irq(lock);
99}
100EXPORT_SYMBOL(_read_lock_irq);
101#endif
102
103#ifndef _read_lock_bh
104void __lockfunc _read_lock_bh(rwlock_t *lock)
105{
106 __read_lock_bh(lock);
107}
108EXPORT_SYMBOL(_read_lock_bh);
109#endif
110
111#ifndef _write_lock_irqsave
112unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
113{
114 return __write_lock_irqsave(lock);
115}
116EXPORT_SYMBOL(_write_lock_irqsave);
117#endif
118
119#ifndef _write_lock_irq
120void __lockfunc _write_lock_irq(rwlock_t *lock)
121{
122 __write_lock_irq(lock);
123}
124EXPORT_SYMBOL(_write_lock_irq);
125#endif
126
127#ifndef _write_lock_bh
128void __lockfunc _write_lock_bh(rwlock_t *lock)
129{
130 __write_lock_bh(lock);
131}
132EXPORT_SYMBOL(_write_lock_bh);
133#endif
134
135#ifndef _spin_lock
136void __lockfunc _spin_lock(spinlock_t *lock)
137{
138 __spin_lock(lock);
139}
140EXPORT_SYMBOL(_spin_lock);
141#endif
142
143#ifndef _write_lock
144void __lockfunc _write_lock(rwlock_t *lock)
145{
146 __write_lock(lock);
147}
148EXPORT_SYMBOL(_write_lock);
149#endif
150
151#else /* CONFIG_PREEMPT: */
152
153/* 30/*
31 * The __lock_function inlines are taken from
32 * include/linux/spinlock_api_smp.h
33 */
34#else
35/*
36 * We build the __lock_function inlines here. They are too large for
37 * inlining all over the place, but here is only one user per function
38 * which embedds them into the calling _lock_function below.
39 *
154 * This could be a long-held lock. We both prepare to spin for a long 40 * This could be a long-held lock. We both prepare to spin for a long
155 * time (making _this_ CPU preemptable if possible), and we also signal 41 * time (making _this_ CPU preemptable if possible), and we also signal
156 * towards that other CPU that it should break the lock ASAP. 42 * towards that other CPU that it should break the lock ASAP.
157 *
158 * (We do this in a function because inlining it would be excessive.)
159 */ 43 */
160
161#define BUILD_LOCK_OPS(op, locktype) \ 44#define BUILD_LOCK_OPS(op, locktype) \
162void __lockfunc _##op##_lock(locktype##_t *lock) \ 45void __lockfunc __##op##_lock(locktype##_t *lock) \
163{ \ 46{ \
164 for (;;) { \ 47 for (;;) { \
165 preempt_disable(); \ 48 preempt_disable(); \
@@ -175,9 +58,7 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
175 (lock)->break_lock = 0; \ 58 (lock)->break_lock = 0; \
176} \ 59} \
177 \ 60 \
178EXPORT_SYMBOL(_##op##_lock); \ 61unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
179 \
180unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
181{ \ 62{ \
182 unsigned long flags; \ 63 unsigned long flags; \
183 \ 64 \
@@ -198,16 +79,12 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
198 return flags; \ 79 return flags; \
199} \ 80} \
200 \ 81 \
201EXPORT_SYMBOL(_##op##_lock_irqsave); \ 82void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
202 \
203void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
204{ \ 83{ \
205 _##op##_lock_irqsave(lock); \ 84 _##op##_lock_irqsave(lock); \
206} \ 85} \
207 \ 86 \
208EXPORT_SYMBOL(_##op##_lock_irq); \ 87void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
209 \
210void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
211{ \ 88{ \
212 unsigned long flags; \ 89 unsigned long flags; \
213 \ 90 \
@@ -220,23 +97,21 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
220 local_bh_disable(); \ 97 local_bh_disable(); \
221 local_irq_restore(flags); \ 98 local_irq_restore(flags); \
222} \ 99} \
223 \
224EXPORT_SYMBOL(_##op##_lock_bh)
225 100
226/* 101/*
227 * Build preemption-friendly versions of the following 102 * Build preemption-friendly versions of the following
228 * lock-spinning functions: 103 * lock-spinning functions:
229 * 104 *
230 * _[spin|read|write]_lock() 105 * __[spin|read|write]_lock()
231 * _[spin|read|write]_lock_irq() 106 * __[spin|read|write]_lock_irq()
232 * _[spin|read|write]_lock_irqsave() 107 * __[spin|read|write]_lock_irqsave()
233 * _[spin|read|write]_lock_bh() 108 * __[spin|read|write]_lock_bh()
234 */ 109 */
235BUILD_LOCK_OPS(spin, spinlock); 110BUILD_LOCK_OPS(spin, spinlock);
236BUILD_LOCK_OPS(read, rwlock); 111BUILD_LOCK_OPS(read, rwlock);
237BUILD_LOCK_OPS(write, rwlock); 112BUILD_LOCK_OPS(write, rwlock);
238 113
239#endif /* CONFIG_PREEMPT */ 114#endif
240 115
241#ifdef CONFIG_DEBUG_LOCK_ALLOC 116#ifdef CONFIG_DEBUG_LOCK_ALLOC
242 117
@@ -248,7 +123,8 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
248} 123}
249EXPORT_SYMBOL(_spin_lock_nested); 124EXPORT_SYMBOL(_spin_lock_nested);
250 125
251unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 126unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
127 int subclass)
252{ 128{
253 unsigned long flags; 129 unsigned long flags;
254 130
@@ -272,7 +148,127 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
272 148
273#endif 149#endif
274 150
275#ifndef _spin_unlock 151#ifndef CONFIG_INLINE_SPIN_TRYLOCK
152int __lockfunc _spin_trylock(spinlock_t *lock)
153{
154 return __spin_trylock(lock);
155}
156EXPORT_SYMBOL(_spin_trylock);
157#endif
158
159#ifndef CONFIG_INLINE_READ_TRYLOCK
160int __lockfunc _read_trylock(rwlock_t *lock)
161{
162 return __read_trylock(lock);
163}
164EXPORT_SYMBOL(_read_trylock);
165#endif
166
167#ifndef CONFIG_INLINE_WRITE_TRYLOCK
168int __lockfunc _write_trylock(rwlock_t *lock)
169{
170 return __write_trylock(lock);
171}
172EXPORT_SYMBOL(_write_trylock);
173#endif
174
175#ifndef CONFIG_INLINE_READ_LOCK
176void __lockfunc _read_lock(rwlock_t *lock)
177{
178 __read_lock(lock);
179}
180EXPORT_SYMBOL(_read_lock);
181#endif
182
183#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
184unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
185{
186 return __spin_lock_irqsave(lock);
187}
188EXPORT_SYMBOL(_spin_lock_irqsave);
189#endif
190
191#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
192void __lockfunc _spin_lock_irq(spinlock_t *lock)
193{
194 __spin_lock_irq(lock);
195}
196EXPORT_SYMBOL(_spin_lock_irq);
197#endif
198
199#ifndef CONFIG_INLINE_SPIN_LOCK_BH
200void __lockfunc _spin_lock_bh(spinlock_t *lock)
201{
202 __spin_lock_bh(lock);
203}
204EXPORT_SYMBOL(_spin_lock_bh);
205#endif
206
207#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
208unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
209{
210 return __read_lock_irqsave(lock);
211}
212EXPORT_SYMBOL(_read_lock_irqsave);
213#endif
214
215#ifndef CONFIG_INLINE_READ_LOCK_IRQ
216void __lockfunc _read_lock_irq(rwlock_t *lock)
217{
218 __read_lock_irq(lock);
219}
220EXPORT_SYMBOL(_read_lock_irq);
221#endif
222
223#ifndef CONFIG_INLINE_READ_LOCK_BH
224void __lockfunc _read_lock_bh(rwlock_t *lock)
225{
226 __read_lock_bh(lock);
227}
228EXPORT_SYMBOL(_read_lock_bh);
229#endif
230
231#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
232unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
233{
234 return __write_lock_irqsave(lock);
235}
236EXPORT_SYMBOL(_write_lock_irqsave);
237#endif
238
239#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
240void __lockfunc _write_lock_irq(rwlock_t *lock)
241{
242 __write_lock_irq(lock);
243}
244EXPORT_SYMBOL(_write_lock_irq);
245#endif
246
247#ifndef CONFIG_INLINE_WRITE_LOCK_BH
248void __lockfunc _write_lock_bh(rwlock_t *lock)
249{
250 __write_lock_bh(lock);
251}
252EXPORT_SYMBOL(_write_lock_bh);
253#endif
254
255#ifndef CONFIG_INLINE_SPIN_LOCK
256void __lockfunc _spin_lock(spinlock_t *lock)
257{
258 __spin_lock(lock);
259}
260EXPORT_SYMBOL(_spin_lock);
261#endif
262
263#ifndef CONFIG_INLINE_WRITE_LOCK
264void __lockfunc _write_lock(rwlock_t *lock)
265{
266 __write_lock(lock);
267}
268EXPORT_SYMBOL(_write_lock);
269#endif
270
271#ifndef CONFIG_INLINE_SPIN_UNLOCK
276void __lockfunc _spin_unlock(spinlock_t *lock) 272void __lockfunc _spin_unlock(spinlock_t *lock)
277{ 273{
278 __spin_unlock(lock); 274 __spin_unlock(lock);
@@ -280,7 +276,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
280EXPORT_SYMBOL(_spin_unlock); 276EXPORT_SYMBOL(_spin_unlock);
281#endif 277#endif
282 278
283#ifndef _write_unlock 279#ifndef CONFIG_INLINE_WRITE_UNLOCK
284void __lockfunc _write_unlock(rwlock_t *lock) 280void __lockfunc _write_unlock(rwlock_t *lock)
285{ 281{
286 __write_unlock(lock); 282 __write_unlock(lock);
@@ -288,7 +284,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
288EXPORT_SYMBOL(_write_unlock); 284EXPORT_SYMBOL(_write_unlock);
289#endif 285#endif
290 286
291#ifndef _read_unlock 287#ifndef CONFIG_INLINE_READ_UNLOCK
292void __lockfunc _read_unlock(rwlock_t *lock) 288void __lockfunc _read_unlock(rwlock_t *lock)
293{ 289{
294 __read_unlock(lock); 290 __read_unlock(lock);
@@ -296,7 +292,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
296EXPORT_SYMBOL(_read_unlock); 292EXPORT_SYMBOL(_read_unlock);
297#endif 293#endif
298 294
299#ifndef _spin_unlock_irqrestore 295#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
300void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 296void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
301{ 297{
302 __spin_unlock_irqrestore(lock, flags); 298 __spin_unlock_irqrestore(lock, flags);
@@ -304,7 +300,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
304EXPORT_SYMBOL(_spin_unlock_irqrestore); 300EXPORT_SYMBOL(_spin_unlock_irqrestore);
305#endif 301#endif
306 302
307#ifndef _spin_unlock_irq 303#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
308void __lockfunc _spin_unlock_irq(spinlock_t *lock) 304void __lockfunc _spin_unlock_irq(spinlock_t *lock)
309{ 305{
310 __spin_unlock_irq(lock); 306 __spin_unlock_irq(lock);
@@ -312,7 +308,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
312EXPORT_SYMBOL(_spin_unlock_irq); 308EXPORT_SYMBOL(_spin_unlock_irq);
313#endif 309#endif
314 310
315#ifndef _spin_unlock_bh 311#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
316void __lockfunc _spin_unlock_bh(spinlock_t *lock) 312void __lockfunc _spin_unlock_bh(spinlock_t *lock)
317{ 313{
318 __spin_unlock_bh(lock); 314 __spin_unlock_bh(lock);
@@ -320,7 +316,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
320EXPORT_SYMBOL(_spin_unlock_bh); 316EXPORT_SYMBOL(_spin_unlock_bh);
321#endif 317#endif
322 318
323#ifndef _read_unlock_irqrestore 319#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
324void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 320void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
325{ 321{
326 __read_unlock_irqrestore(lock, flags); 322 __read_unlock_irqrestore(lock, flags);
@@ -328,7 +324,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
328EXPORT_SYMBOL(_read_unlock_irqrestore); 324EXPORT_SYMBOL(_read_unlock_irqrestore);
329#endif 325#endif
330 326
331#ifndef _read_unlock_irq 327#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
332void __lockfunc _read_unlock_irq(rwlock_t *lock) 328void __lockfunc _read_unlock_irq(rwlock_t *lock)
333{ 329{
334 __read_unlock_irq(lock); 330 __read_unlock_irq(lock);
@@ -336,7 +332,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
336EXPORT_SYMBOL(_read_unlock_irq); 332EXPORT_SYMBOL(_read_unlock_irq);
337#endif 333#endif
338 334
339#ifndef _read_unlock_bh 335#ifndef CONFIG_INLINE_READ_UNLOCK_BH
340void __lockfunc _read_unlock_bh(rwlock_t *lock) 336void __lockfunc _read_unlock_bh(rwlock_t *lock)
341{ 337{
342 __read_unlock_bh(lock); 338 __read_unlock_bh(lock);
@@ -344,7 +340,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
344EXPORT_SYMBOL(_read_unlock_bh); 340EXPORT_SYMBOL(_read_unlock_bh);
345#endif 341#endif
346 342
347#ifndef _write_unlock_irqrestore 343#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
348void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 344void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
349{ 345{
350 __write_unlock_irqrestore(lock, flags); 346 __write_unlock_irqrestore(lock, flags);
@@ -352,7 +348,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
352EXPORT_SYMBOL(_write_unlock_irqrestore); 348EXPORT_SYMBOL(_write_unlock_irqrestore);
353#endif 349#endif
354 350
355#ifndef _write_unlock_irq 351#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
356void __lockfunc _write_unlock_irq(rwlock_t *lock) 352void __lockfunc _write_unlock_irq(rwlock_t *lock)
357{ 353{
358 __write_unlock_irq(lock); 354 __write_unlock_irq(lock);
@@ -360,7 +356,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
360EXPORT_SYMBOL(_write_unlock_irq); 356EXPORT_SYMBOL(_write_unlock_irq);
361#endif 357#endif
362 358
363#ifndef _write_unlock_bh 359#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
364void __lockfunc _write_unlock_bh(rwlock_t *lock) 360void __lockfunc _write_unlock_bh(rwlock_t *lock)
365{ 361{
366 __write_unlock_bh(lock); 362 __write_unlock_bh(lock);
@@ -368,7 +364,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
368EXPORT_SYMBOL(_write_unlock_bh); 364EXPORT_SYMBOL(_write_unlock_bh);
369#endif 365#endif
370 366
371#ifndef _spin_trylock_bh 367#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
372int __lockfunc _spin_trylock_bh(spinlock_t *lock) 368int __lockfunc _spin_trylock_bh(spinlock_t *lock)
373{ 369{
374 return __spin_trylock_bh(lock); 370 return __spin_trylock_bh(lock);
diff --git a/kernel/srcu.c b/kernel/srcu.c
index b0aeeaf22ce4..818d7d9aa03c 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -49,6 +49,7 @@ int init_srcu_struct(struct srcu_struct *sp)
49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array); 49 sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
50 return (sp->per_cpu_ref ? 0 : -ENOMEM); 50 return (sp->per_cpu_ref ? 0 : -ENOMEM);
51} 51}
52EXPORT_SYMBOL_GPL(init_srcu_struct);
52 53
53/* 54/*
54 * srcu_readers_active_idx -- returns approximate number of readers 55 * srcu_readers_active_idx -- returns approximate number of readers
@@ -97,6 +98,7 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
97 free_percpu(sp->per_cpu_ref); 98 free_percpu(sp->per_cpu_ref);
98 sp->per_cpu_ref = NULL; 99 sp->per_cpu_ref = NULL;
99} 100}
101EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
100 102
101/** 103/**
102 * srcu_read_lock - register a new reader for an SRCU-protected structure. 104 * srcu_read_lock - register a new reader for an SRCU-protected structure.
@@ -118,6 +120,7 @@ int srcu_read_lock(struct srcu_struct *sp)
118 preempt_enable(); 120 preempt_enable();
119 return idx; 121 return idx;
120} 122}
123EXPORT_SYMBOL_GPL(srcu_read_lock);
121 124
122/** 125/**
123 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. 126 * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
@@ -136,22 +139,12 @@ void srcu_read_unlock(struct srcu_struct *sp, int idx)
136 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--; 139 per_cpu_ptr(sp->per_cpu_ref, smp_processor_id())->c[idx]--;
137 preempt_enable(); 140 preempt_enable();
138} 141}
142EXPORT_SYMBOL_GPL(srcu_read_unlock);
139 143
140/** 144/*
141 * synchronize_srcu - wait for prior SRCU read-side critical-section completion 145 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
142 * @sp: srcu_struct with which to synchronize.
143 *
144 * Flip the completed counter, and wait for the old count to drain to zero.
145 * As with classic RCU, the updater must use some separate means of
146 * synchronizing concurrent updates. Can block; must be called from
147 * process context.
148 *
 149 * Note that it is illegal to call synchronize_srcu() from the corresponding
150 * SRCU read-side critical section; doing so will result in deadlock.
151 * However, it is perfectly legal to call synchronize_srcu() on one
152 * srcu_struct from some other srcu_struct's read-side critical section.
153 */ 146 */
154void synchronize_srcu(struct srcu_struct *sp) 147void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
155{ 148{
156 int idx; 149 int idx;
157 150
@@ -173,7 +166,7 @@ void synchronize_srcu(struct srcu_struct *sp)
173 return; 166 return;
174 } 167 }
175 168
176 synchronize_sched(); /* Force memory barrier on all CPUs. */ 169 sync_func(); /* Force memory barrier on all CPUs. */
177 170
178 /* 171 /*
179 * The preceding synchronize_sched() ensures that any CPU that 172 * The preceding synchronize_sched() ensures that any CPU that
@@ -190,7 +183,7 @@ void synchronize_srcu(struct srcu_struct *sp)
190 idx = sp->completed & 0x1; 183 idx = sp->completed & 0x1;
191 sp->completed++; 184 sp->completed++;
192 185
193 synchronize_sched(); /* Force memory barrier on all CPUs. */ 186 sync_func(); /* Force memory barrier on all CPUs. */
194 187
195 /* 188 /*
196 * At this point, because of the preceding synchronize_sched(), 189 * At this point, because of the preceding synchronize_sched(),
@@ -203,7 +196,7 @@ void synchronize_srcu(struct srcu_struct *sp)
203 while (srcu_readers_active_idx(sp, idx)) 196 while (srcu_readers_active_idx(sp, idx))
204 schedule_timeout_interruptible(1); 197 schedule_timeout_interruptible(1);
205 198
206 synchronize_sched(); /* Force memory barrier on all CPUs. */ 199 sync_func(); /* Force memory barrier on all CPUs. */
207 200
208 /* 201 /*
209 * The preceding synchronize_sched() forces all srcu_read_unlock() 202 * The preceding synchronize_sched() forces all srcu_read_unlock()
@@ -237,6 +230,47 @@ void synchronize_srcu(struct srcu_struct *sp)
237} 230}
238 231
239/** 232/**
233 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
234 * @sp: srcu_struct with which to synchronize.
235 *
236 * Flip the completed counter, and wait for the old count to drain to zero.
237 * As with classic RCU, the updater must use some separate means of
238 * synchronizing concurrent updates. Can block; must be called from
239 * process context.
240 *
241 * Note that it is illegal to call synchronize_srcu() from the corresponding
242 * SRCU read-side critical section; doing so will result in deadlock.
243 * However, it is perfectly legal to call synchronize_srcu() on one
244 * srcu_struct from some other srcu_struct's read-side critical section.
245 */
246void synchronize_srcu(struct srcu_struct *sp)
247{
248 __synchronize_srcu(sp, synchronize_sched);
249}
250EXPORT_SYMBOL_GPL(synchronize_srcu);
251
252/**
253 * synchronize_srcu_expedited - like synchronize_srcu, but less patient
254 * @sp: srcu_struct with which to synchronize.
255 *
256 * Flip the completed counter, and wait for the old count to drain to zero.
257 * As with classic RCU, the updater must use some separate means of
258 * synchronizing concurrent updates. Can block; must be called from
259 * process context.
260 *
261 * Note that it is illegal to call synchronize_srcu_expedited()
262 * from the corresponding SRCU read-side critical section; doing so
263 * will result in deadlock. However, it is perfectly legal to call
264 * synchronize_srcu_expedited() on one srcu_struct from some other
265 * srcu_struct's read-side critical section.
266 */
267void synchronize_srcu_expedited(struct srcu_struct *sp)
268{
269 __synchronize_srcu(sp, synchronize_sched_expedited);
270}
271EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
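
A minimal updater sketch (my_srcu, my_ptr and struct my_data are illustrative): the expedited form trades extra CPU disturbance for a much shorter grace period. Readers bracket their accesses with srcu_read_lock(&my_srcu)/srcu_read_unlock() as before.

static struct srcu_struct my_srcu;
static struct my_data *my_ptr;

static void my_update(struct my_data *new)
{
	struct my_data *old = my_ptr;

	rcu_assign_pointer(my_ptr, new);
	synchronize_srcu_expedited(&my_srcu);	/* wait for old readers, quickly */
	kfree(old);
}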
272
273/**
240 * srcu_batches_completed - return batches completed. 274 * srcu_batches_completed - return batches completed.
241 * @sp: srcu_struct on which to report batch completion. 275 * @sp: srcu_struct on which to report batch completion.
242 * 276 *
@@ -248,10 +282,4 @@ long srcu_batches_completed(struct srcu_struct *sp)
248{ 282{
249 return sp->completed; 283 return sp->completed;
250} 284}
251
252EXPORT_SYMBOL_GPL(init_srcu_struct);
253EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
254EXPORT_SYMBOL_GPL(srcu_read_lock);
255EXPORT_SYMBOL_GPL(srcu_read_unlock);
256EXPORT_SYMBOL_GPL(synchronize_srcu);
257EXPORT_SYMBOL_GPL(srcu_batches_completed); 285EXPORT_SYMBOL_GPL(srcu_batches_completed);
diff --git a/kernel/sys.c b/kernel/sys.c
index 255475d163e0..9968c5fb55b9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -911,16 +911,15 @@ change_okay:
911 911
912void do_sys_times(struct tms *tms) 912void do_sys_times(struct tms *tms)
913{ 913{
914 struct task_cputime cputime; 914 cputime_t tgutime, tgstime, cutime, cstime;
915 cputime_t cutime, cstime;
916 915
917 thread_group_cputime(current, &cputime);
918 spin_lock_irq(&current->sighand->siglock); 916 spin_lock_irq(&current->sighand->siglock);
917 thread_group_times(current, &tgutime, &tgstime);
919 cutime = current->signal->cutime; 918 cutime = current->signal->cutime;
920 cstime = current->signal->cstime; 919 cstime = current->signal->cstime;
921 spin_unlock_irq(&current->sighand->siglock); 920 spin_unlock_irq(&current->sighand->siglock);
922 tms->tms_utime = cputime_to_clock_t(cputime.utime); 921 tms->tms_utime = cputime_to_clock_t(tgutime);
923 tms->tms_stime = cputime_to_clock_t(cputime.stime); 922 tms->tms_stime = cputime_to_clock_t(tgstime);
924 tms->tms_cutime = cputime_to_clock_t(cutime); 923 tms->tms_cutime = cputime_to_clock_t(cutime);
925 tms->tms_cstime = cputime_to_clock_t(cstime); 924 tms->tms_cstime = cputime_to_clock_t(cstime);
926} 925}
@@ -1110,6 +1109,8 @@ SYSCALL_DEFINE0(setsid)
1110 err = session; 1109 err = session;
1111out: 1110out:
1112 write_unlock_irq(&tasklist_lock); 1111 write_unlock_irq(&tasklist_lock);
1112 if (err > 0)
1113 proc_sid_connector(group_leader);
1113 return err; 1114 return err;
1114} 1115}
1115 1116
@@ -1336,16 +1337,14 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1336{ 1337{
1337 struct task_struct *t; 1338 struct task_struct *t;
1338 unsigned long flags; 1339 unsigned long flags;
1339 cputime_t utime, stime; 1340 cputime_t tgutime, tgstime, utime, stime;
1340 struct task_cputime cputime;
1341 unsigned long maxrss = 0; 1341 unsigned long maxrss = 0;
1342 1342
1343 memset((char *) r, 0, sizeof *r); 1343 memset((char *) r, 0, sizeof *r);
1344 utime = stime = cputime_zero; 1344 utime = stime = cputime_zero;
1345 1345
1346 if (who == RUSAGE_THREAD) { 1346 if (who == RUSAGE_THREAD) {
1347 utime = task_utime(current); 1347 task_times(current, &utime, &stime);
1348 stime = task_stime(current);
1349 accumulate_thread_rusage(p, r); 1348 accumulate_thread_rusage(p, r);
1350 maxrss = p->signal->maxrss; 1349 maxrss = p->signal->maxrss;
1351 goto out; 1350 goto out;
@@ -1371,9 +1370,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1371 break; 1370 break;
1372 1371
1373 case RUSAGE_SELF: 1372 case RUSAGE_SELF:
1374 thread_group_cputime(p, &cputime); 1373 thread_group_times(p, &tgutime, &tgstime);
1375 utime = cputime_add(utime, cputime.utime); 1374 utime = cputime_add(utime, tgutime);
1376 stime = cputime_add(stime, cputime.stime); 1375 stime = cputime_add(stime, tgstime);
1377 r->ru_nvcsw += p->signal->nvcsw; 1376 r->ru_nvcsw += p->signal->nvcsw;
1378 r->ru_nivcsw += p->signal->nivcsw; 1377 r->ru_nivcsw += p->signal->nivcsw;
1379 r->ru_minflt += p->signal->min_flt; 1378 r->ru_minflt += p->signal->min_flt;
@@ -1546,24 +1545,37 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1546 if (arg4 | arg5) 1545 if (arg4 | arg5)
1547 return -EINVAL; 1546 return -EINVAL;
1548 switch (arg2) { 1547 switch (arg2) {
1549 case 0: 1548 case PR_MCE_KILL_CLEAR:
1550 if (arg3 != 0) 1549 if (arg3 != 0)
1551 return -EINVAL; 1550 return -EINVAL;
1552 current->flags &= ~PF_MCE_PROCESS; 1551 current->flags &= ~PF_MCE_PROCESS;
1553 break; 1552 break;
1554 case 1: 1553 case PR_MCE_KILL_SET:
1555 current->flags |= PF_MCE_PROCESS; 1554 current->flags |= PF_MCE_PROCESS;
1556 if (arg3 != 0) 1555 if (arg3 == PR_MCE_KILL_EARLY)
1557 current->flags |= PF_MCE_EARLY; 1556 current->flags |= PF_MCE_EARLY;
1558 else 1557 else if (arg3 == PR_MCE_KILL_LATE)
1559 current->flags &= ~PF_MCE_EARLY; 1558 current->flags &= ~PF_MCE_EARLY;
1559 else if (arg3 == PR_MCE_KILL_DEFAULT)
1560 current->flags &=
1561 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
1562 else
1563 return -EINVAL;
1560 break; 1564 break;
1561 default: 1565 default:
1562 return -EINVAL; 1566 return -EINVAL;
1563 } 1567 }
1564 error = 0; 1568 error = 0;
1565 break; 1569 break;
1566 1570 case PR_MCE_KILL_GET:
1571 if (arg2 | arg3 | arg4 | arg5)
1572 return -EINVAL;
1573 if (current->flags & PF_MCE_PROCESS)
1574 error = (current->flags & PF_MCE_EARLY) ?
1575 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
1576 else
1577 error = PR_MCE_KILL_DEFAULT;
1578 break;
1567 default: 1579 default:
1568 error = -EINVAL; 1580 error = -EINVAL;
1569 break; 1581 break;
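[Editor's sketch] The prctl() hunk above replaces the bare 0/1 sub-command values with PR_MCE_KILL_* names and adds a PR_MCE_KILL_GET query. A hypothetical userspace sketch of the resulting interface; the fallback #defines mirror the values used by this series and are only present in case the installed headers predate it.

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_MCE_KILL			/* fallbacks for older headers */
	#define PR_MCE_KILL		33
	#define PR_MCE_KILL_CLEAR	0
	#define PR_MCE_KILL_SET		1
	#define PR_MCE_KILL_LATE	0
	#define PR_MCE_KILL_EARLY	1
	#define PR_MCE_KILL_DEFAULT	2
	#define PR_MCE_KILL_GET		34
	#endif

	int main(void)
	{
		/* Opt this process in to early memory-failure kills. */
		if (prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0))
			perror("PR_MCE_KILL");

		/* Query the current policy; unused arguments must be zero. */
		printf("policy = %d\n", prctl(PR_MCE_KILL_GET, 0, 0, 0, 0));
		return 0;
	}
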
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e06d0b8d1951..695384f12a7d 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -48,8 +48,10 @@ cond_syscall(sys_shutdown);
48cond_syscall(sys_sendmsg); 48cond_syscall(sys_sendmsg);
49cond_syscall(compat_sys_sendmsg); 49cond_syscall(compat_sys_sendmsg);
50cond_syscall(sys_recvmsg); 50cond_syscall(sys_recvmsg);
51cond_syscall(sys_recvmmsg);
51cond_syscall(compat_sys_recvmsg); 52cond_syscall(compat_sys_recvmsg);
52cond_syscall(compat_sys_recvfrom); 53cond_syscall(compat_sys_recvfrom);
54cond_syscall(compat_sys_recvmmsg);
53cond_syscall(sys_socketcall); 55cond_syscall(sys_socketcall);
54cond_syscall(sys_futex); 56cond_syscall(sys_futex);
55cond_syscall(compat_sys_futex); 57cond_syscall(compat_sys_futex);
@@ -139,7 +141,6 @@ cond_syscall(sys_pciconfig_read);
139cond_syscall(sys_pciconfig_write); 141cond_syscall(sys_pciconfig_write);
140cond_syscall(sys_pciconfig_iobase); 142cond_syscall(sys_pciconfig_iobase);
141cond_syscall(sys32_ipc); 143cond_syscall(sys32_ipc);
142cond_syscall(sys32_sysctl);
143cond_syscall(ppc_rtas); 144cond_syscall(ppc_rtas);
144cond_syscall(sys_spu_run); 145cond_syscall(sys_spu_run);
145cond_syscall(sys_spu_create); 146cond_syscall(sys_spu_create);
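[Editor's sketch] cond_syscall() lets an optional syscall entry resolve to sys_ni_syscall() whenever the real implementation is not built in, which is why sys_recvmmsg can be listed here unconditionally. Below is a userspace-compilable sketch of that weak-alias idea; it is not the kernel's actual per-architecture macro.

	#include <errno.h>
	#include <stdio.h>

	/* Fallback that every "not implemented" syscall shares. */
	long sys_ni_syscall(void)
	{
		return -ENOSYS;
	}

	/* Weak alias: unless a strong sys_recvmmsg() is linked in, calls fall
	 * through to sys_ni_syscall().  cond_syscall() achieves the same
	 * effect in the kernel via assembler/linker directives. */
	long sys_recvmmsg(void) __attribute__((weak, alias("sys_ni_syscall")));

	int main(void)
	{
		printf("sys_recvmmsg() -> %ld\n", sys_recvmmsg());
		return 0;
	}
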
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d949c517412..9327a26765c5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -27,7 +27,6 @@
27#include <linux/security.h> 27#include <linux/security.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/kmemcheck.h> 29#include <linux/kmemcheck.h>
30#include <linux/smp_lock.h>
31#include <linux/fs.h> 30#include <linux/fs.h>
32#include <linux/init.h> 31#include <linux/init.h>
33#include <linux/kernel.h> 32#include <linux/kernel.h>
@@ -36,6 +35,7 @@
36#include <linux/sysrq.h> 35#include <linux/sysrq.h>
37#include <linux/highuid.h> 36#include <linux/highuid.h>
38#include <linux/writeback.h> 37#include <linux/writeback.h>
38#include <linux/ratelimit.h>
39#include <linux/hugetlb.h> 39#include <linux/hugetlb.h>
40#include <linux/initrd.h> 40#include <linux/initrd.h>
41#include <linux/key.h> 41#include <linux/key.h>
@@ -60,7 +60,6 @@
60#include <asm/io.h> 60#include <asm/io.h>
61#endif 61#endif
62 62
63static int deprecated_sysctl_warning(struct __sysctl_args *args);
64 63
65#if defined(CONFIG_SYSCTL) 64#if defined(CONFIG_SYSCTL)
66 65
@@ -158,6 +157,8 @@ extern int no_unaligned_warning;
158extern int unaligned_dump_stack; 157extern int unaligned_dump_stack;
159#endif 158#endif
160 159
160extern struct ratelimit_state printk_ratelimit_state;
161
161#ifdef CONFIG_RT_MUTEXES 162#ifdef CONFIG_RT_MUTEXES
162extern int max_lock_depth; 163extern int max_lock_depth;
163#endif 164#endif
@@ -207,31 +208,26 @@ extern int lock_stat;
207 208
208static struct ctl_table root_table[] = { 209static struct ctl_table root_table[] = {
209 { 210 {
210 .ctl_name = CTL_KERN,
211 .procname = "kernel", 211 .procname = "kernel",
212 .mode = 0555, 212 .mode = 0555,
213 .child = kern_table, 213 .child = kern_table,
214 }, 214 },
215 { 215 {
216 .ctl_name = CTL_VM,
217 .procname = "vm", 216 .procname = "vm",
218 .mode = 0555, 217 .mode = 0555,
219 .child = vm_table, 218 .child = vm_table,
220 }, 219 },
221 { 220 {
222 .ctl_name = CTL_FS,
223 .procname = "fs", 221 .procname = "fs",
224 .mode = 0555, 222 .mode = 0555,
225 .child = fs_table, 223 .child = fs_table,
226 }, 224 },
227 { 225 {
228 .ctl_name = CTL_DEBUG,
229 .procname = "debug", 226 .procname = "debug",
230 .mode = 0555, 227 .mode = 0555,
231 .child = debug_table, 228 .child = debug_table,
232 }, 229 },
233 { 230 {
234 .ctl_name = CTL_DEV,
235 .procname = "dev", 231 .procname = "dev",
236 .mode = 0555, 232 .mode = 0555,
237 .child = dev_table, 233 .child = dev_table,
@@ -240,7 +236,7 @@ static struct ctl_table root_table[] = {
240 * NOTE: do not add new entries to this table unless you have read 236 * NOTE: do not add new entries to this table unless you have read
241 * Documentation/sysctl/ctl_unnumbered.txt 237 * Documentation/sysctl/ctl_unnumbered.txt
242 */ 238 */
243 { .ctl_name = 0 } 239 { }
244}; 240};
245 241
246#ifdef CONFIG_SCHED_DEBUG 242#ifdef CONFIG_SCHED_DEBUG
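[Editor's sketch] The sysctl.c hunks above and below repeat one mechanical conversion: the binary .ctl_name numbers and .strategy callbacks are dropped, proc handlers are referenced without a leading '&', and tables end with a bare { } sentinel instead of { .ctl_name = 0 }. A sketch of a table entry in the new style; my_knob and my_table are hypothetical names.

	#include <linux/sysctl.h>

	static int my_knob;			/* hypothetical tunable */

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_knob",
			.data		= &my_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,	/* plain function name, no '&' */
		},
		{ }				/* bare sentinel, formerly { .ctl_name = 0 } */
	};
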
@@ -252,192 +248,166 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
252 248
253static struct ctl_table kern_table[] = { 249static struct ctl_table kern_table[] = {
254 { 250 {
255 .ctl_name = CTL_UNNUMBERED,
256 .procname = "sched_child_runs_first", 251 .procname = "sched_child_runs_first",
257 .data = &sysctl_sched_child_runs_first, 252 .data = &sysctl_sched_child_runs_first,
258 .maxlen = sizeof(unsigned int), 253 .maxlen = sizeof(unsigned int),
259 .mode = 0644, 254 .mode = 0644,
260 .proc_handler = &proc_dointvec, 255 .proc_handler = proc_dointvec,
261 }, 256 },
262#ifdef CONFIG_SCHED_DEBUG 257#ifdef CONFIG_SCHED_DEBUG
263 { 258 {
264 .ctl_name = CTL_UNNUMBERED,
265 .procname = "sched_min_granularity_ns", 259 .procname = "sched_min_granularity_ns",
266 .data = &sysctl_sched_min_granularity, 260 .data = &sysctl_sched_min_granularity,
267 .maxlen = sizeof(unsigned int), 261 .maxlen = sizeof(unsigned int),
268 .mode = 0644, 262 .mode = 0644,
269 .proc_handler = &sched_nr_latency_handler, 263 .proc_handler = sched_nr_latency_handler,
270 .strategy = &sysctl_intvec,
271 .extra1 = &min_sched_granularity_ns, 264 .extra1 = &min_sched_granularity_ns,
272 .extra2 = &max_sched_granularity_ns, 265 .extra2 = &max_sched_granularity_ns,
273 }, 266 },
274 { 267 {
275 .ctl_name = CTL_UNNUMBERED,
276 .procname = "sched_latency_ns", 268 .procname = "sched_latency_ns",
277 .data = &sysctl_sched_latency, 269 .data = &sysctl_sched_latency,
278 .maxlen = sizeof(unsigned int), 270 .maxlen = sizeof(unsigned int),
279 .mode = 0644, 271 .mode = 0644,
280 .proc_handler = &sched_nr_latency_handler, 272 .proc_handler = sched_nr_latency_handler,
281 .strategy = &sysctl_intvec,
282 .extra1 = &min_sched_granularity_ns, 273 .extra1 = &min_sched_granularity_ns,
283 .extra2 = &max_sched_granularity_ns, 274 .extra2 = &max_sched_granularity_ns,
284 }, 275 },
285 { 276 {
286 .ctl_name = CTL_UNNUMBERED,
287 .procname = "sched_wakeup_granularity_ns", 277 .procname = "sched_wakeup_granularity_ns",
288 .data = &sysctl_sched_wakeup_granularity, 278 .data = &sysctl_sched_wakeup_granularity,
289 .maxlen = sizeof(unsigned int), 279 .maxlen = sizeof(unsigned int),
290 .mode = 0644, 280 .mode = 0644,
291 .proc_handler = &proc_dointvec_minmax, 281 .proc_handler = proc_dointvec_minmax,
292 .strategy = &sysctl_intvec,
293 .extra1 = &min_wakeup_granularity_ns, 282 .extra1 = &min_wakeup_granularity_ns,
294 .extra2 = &max_wakeup_granularity_ns, 283 .extra2 = &max_wakeup_granularity_ns,
295 }, 284 },
296 { 285 {
297 .ctl_name = CTL_UNNUMBERED,
298 .procname = "sched_shares_ratelimit", 286 .procname = "sched_shares_ratelimit",
299 .data = &sysctl_sched_shares_ratelimit, 287 .data = &sysctl_sched_shares_ratelimit,
300 .maxlen = sizeof(unsigned int), 288 .maxlen = sizeof(unsigned int),
301 .mode = 0644, 289 .mode = 0644,
302 .proc_handler = &proc_dointvec, 290 .proc_handler = proc_dointvec,
303 }, 291 },
304 { 292 {
305 .ctl_name = CTL_UNNUMBERED,
306 .procname = "sched_shares_thresh", 293 .procname = "sched_shares_thresh",
307 .data = &sysctl_sched_shares_thresh, 294 .data = &sysctl_sched_shares_thresh,
308 .maxlen = sizeof(unsigned int), 295 .maxlen = sizeof(unsigned int),
309 .mode = 0644, 296 .mode = 0644,
310 .proc_handler = &proc_dointvec_minmax, 297 .proc_handler = proc_dointvec_minmax,
311 .strategy = &sysctl_intvec,
312 .extra1 = &zero, 298 .extra1 = &zero,
313 }, 299 },
314 { 300 {
315 .ctl_name = CTL_UNNUMBERED,
316 .procname = "sched_features", 301 .procname = "sched_features",
317 .data = &sysctl_sched_features, 302 .data = &sysctl_sched_features,
318 .maxlen = sizeof(unsigned int), 303 .maxlen = sizeof(unsigned int),
319 .mode = 0644, 304 .mode = 0644,
320 .proc_handler = &proc_dointvec, 305 .proc_handler = proc_dointvec,
321 }, 306 },
322 { 307 {
323 .ctl_name = CTL_UNNUMBERED,
324 .procname = "sched_migration_cost", 308 .procname = "sched_migration_cost",
325 .data = &sysctl_sched_migration_cost, 309 .data = &sysctl_sched_migration_cost,
326 .maxlen = sizeof(unsigned int), 310 .maxlen = sizeof(unsigned int),
327 .mode = 0644, 311 .mode = 0644,
328 .proc_handler = &proc_dointvec, 312 .proc_handler = proc_dointvec,
329 }, 313 },
330 { 314 {
331 .ctl_name = CTL_UNNUMBERED,
332 .procname = "sched_nr_migrate", 315 .procname = "sched_nr_migrate",
333 .data = &sysctl_sched_nr_migrate, 316 .data = &sysctl_sched_nr_migrate,
334 .maxlen = sizeof(unsigned int), 317 .maxlen = sizeof(unsigned int),
335 .mode = 0644, 318 .mode = 0644,
336 .proc_handler = &proc_dointvec, 319 .proc_handler = proc_dointvec,
337 }, 320 },
338 { 321 {
339 .ctl_name = CTL_UNNUMBERED,
340 .procname = "sched_time_avg", 322 .procname = "sched_time_avg",
341 .data = &sysctl_sched_time_avg, 323 .data = &sysctl_sched_time_avg,
342 .maxlen = sizeof(unsigned int), 324 .maxlen = sizeof(unsigned int),
343 .mode = 0644, 325 .mode = 0644,
344 .proc_handler = &proc_dointvec, 326 .proc_handler = proc_dointvec,
345 }, 327 },
346 { 328 {
347 .ctl_name = CTL_UNNUMBERED,
348 .procname = "timer_migration", 329 .procname = "timer_migration",
349 .data = &sysctl_timer_migration, 330 .data = &sysctl_timer_migration,
350 .maxlen = sizeof(unsigned int), 331 .maxlen = sizeof(unsigned int),
351 .mode = 0644, 332 .mode = 0644,
352 .proc_handler = &proc_dointvec_minmax, 333 .proc_handler = proc_dointvec_minmax,
353 .strategy = &sysctl_intvec,
354 .extra1 = &zero, 334 .extra1 = &zero,
355 .extra2 = &one, 335 .extra2 = &one,
356 }, 336 },
357#endif 337#endif
358 { 338 {
359 .ctl_name = CTL_UNNUMBERED,
360 .procname = "sched_rt_period_us", 339 .procname = "sched_rt_period_us",
361 .data = &sysctl_sched_rt_period, 340 .data = &sysctl_sched_rt_period,
362 .maxlen = sizeof(unsigned int), 341 .maxlen = sizeof(unsigned int),
363 .mode = 0644, 342 .mode = 0644,
364 .proc_handler = &sched_rt_handler, 343 .proc_handler = sched_rt_handler,
365 }, 344 },
366 { 345 {
367 .ctl_name = CTL_UNNUMBERED,
368 .procname = "sched_rt_runtime_us", 346 .procname = "sched_rt_runtime_us",
369 .data = &sysctl_sched_rt_runtime, 347 .data = &sysctl_sched_rt_runtime,
370 .maxlen = sizeof(int), 348 .maxlen = sizeof(int),
371 .mode = 0644, 349 .mode = 0644,
372 .proc_handler = &sched_rt_handler, 350 .proc_handler = sched_rt_handler,
373 }, 351 },
374 { 352 {
375 .ctl_name = CTL_UNNUMBERED,
376 .procname = "sched_compat_yield", 353 .procname = "sched_compat_yield",
377 .data = &sysctl_sched_compat_yield, 354 .data = &sysctl_sched_compat_yield,
378 .maxlen = sizeof(unsigned int), 355 .maxlen = sizeof(unsigned int),
379 .mode = 0644, 356 .mode = 0644,
380 .proc_handler = &proc_dointvec, 357 .proc_handler = proc_dointvec,
381 }, 358 },
382#ifdef CONFIG_PROVE_LOCKING 359#ifdef CONFIG_PROVE_LOCKING
383 { 360 {
384 .ctl_name = CTL_UNNUMBERED,
385 .procname = "prove_locking", 361 .procname = "prove_locking",
386 .data = &prove_locking, 362 .data = &prove_locking,
387 .maxlen = sizeof(int), 363 .maxlen = sizeof(int),
388 .mode = 0644, 364 .mode = 0644,
389 .proc_handler = &proc_dointvec, 365 .proc_handler = proc_dointvec,
390 }, 366 },
391#endif 367#endif
392#ifdef CONFIG_LOCK_STAT 368#ifdef CONFIG_LOCK_STAT
393 { 369 {
394 .ctl_name = CTL_UNNUMBERED,
395 .procname = "lock_stat", 370 .procname = "lock_stat",
396 .data = &lock_stat, 371 .data = &lock_stat,
397 .maxlen = sizeof(int), 372 .maxlen = sizeof(int),
398 .mode = 0644, 373 .mode = 0644,
399 .proc_handler = &proc_dointvec, 374 .proc_handler = proc_dointvec,
400 }, 375 },
401#endif 376#endif
402 { 377 {
403 .ctl_name = KERN_PANIC,
404 .procname = "panic", 378 .procname = "panic",
405 .data = &panic_timeout, 379 .data = &panic_timeout,
406 .maxlen = sizeof(int), 380 .maxlen = sizeof(int),
407 .mode = 0644, 381 .mode = 0644,
408 .proc_handler = &proc_dointvec, 382 .proc_handler = proc_dointvec,
409 }, 383 },
410 { 384 {
411 .ctl_name = KERN_CORE_USES_PID,
412 .procname = "core_uses_pid", 385 .procname = "core_uses_pid",
413 .data = &core_uses_pid, 386 .data = &core_uses_pid,
414 .maxlen = sizeof(int), 387 .maxlen = sizeof(int),
415 .mode = 0644, 388 .mode = 0644,
416 .proc_handler = &proc_dointvec, 389 .proc_handler = proc_dointvec,
417 }, 390 },
418 { 391 {
419 .ctl_name = KERN_CORE_PATTERN,
420 .procname = "core_pattern", 392 .procname = "core_pattern",
421 .data = core_pattern, 393 .data = core_pattern,
422 .maxlen = CORENAME_MAX_SIZE, 394 .maxlen = CORENAME_MAX_SIZE,
423 .mode = 0644, 395 .mode = 0644,
424 .proc_handler = &proc_dostring, 396 .proc_handler = proc_dostring,
425 .strategy = &sysctl_string,
426 }, 397 },
427 { 398 {
428 .ctl_name = CTL_UNNUMBERED,
429 .procname = "core_pipe_limit", 399 .procname = "core_pipe_limit",
430 .data = &core_pipe_limit, 400 .data = &core_pipe_limit,
431 .maxlen = sizeof(unsigned int), 401 .maxlen = sizeof(unsigned int),
432 .mode = 0644, 402 .mode = 0644,
433 .proc_handler = &proc_dointvec, 403 .proc_handler = proc_dointvec,
434 }, 404 },
435#ifdef CONFIG_PROC_SYSCTL 405#ifdef CONFIG_PROC_SYSCTL
436 { 406 {
437 .procname = "tainted", 407 .procname = "tainted",
438 .maxlen = sizeof(long), 408 .maxlen = sizeof(long),
439 .mode = 0644, 409 .mode = 0644,
440 .proc_handler = &proc_taint, 410 .proc_handler = proc_taint,
441 }, 411 },
442#endif 412#endif
443#ifdef CONFIG_LATENCYTOP 413#ifdef CONFIG_LATENCYTOP
@@ -446,181 +416,160 @@ static struct ctl_table kern_table[] = {
446 .data = &latencytop_enabled, 416 .data = &latencytop_enabled,
447 .maxlen = sizeof(int), 417 .maxlen = sizeof(int),
448 .mode = 0644, 418 .mode = 0644,
449 .proc_handler = &proc_dointvec, 419 .proc_handler = proc_dointvec,
450 }, 420 },
451#endif 421#endif
452#ifdef CONFIG_BLK_DEV_INITRD 422#ifdef CONFIG_BLK_DEV_INITRD
453 { 423 {
454 .ctl_name = KERN_REALROOTDEV,
455 .procname = "real-root-dev", 424 .procname = "real-root-dev",
456 .data = &real_root_dev, 425 .data = &real_root_dev,
457 .maxlen = sizeof(int), 426 .maxlen = sizeof(int),
458 .mode = 0644, 427 .mode = 0644,
459 .proc_handler = &proc_dointvec, 428 .proc_handler = proc_dointvec,
460 }, 429 },
461#endif 430#endif
462 { 431 {
463 .ctl_name = CTL_UNNUMBERED,
464 .procname = "print-fatal-signals", 432 .procname = "print-fatal-signals",
465 .data = &print_fatal_signals, 433 .data = &print_fatal_signals,
466 .maxlen = sizeof(int), 434 .maxlen = sizeof(int),
467 .mode = 0644, 435 .mode = 0644,
468 .proc_handler = &proc_dointvec, 436 .proc_handler = proc_dointvec,
469 }, 437 },
470#ifdef CONFIG_SPARC 438#ifdef CONFIG_SPARC
471 { 439 {
472 .ctl_name = KERN_SPARC_REBOOT,
473 .procname = "reboot-cmd", 440 .procname = "reboot-cmd",
474 .data = reboot_command, 441 .data = reboot_command,
475 .maxlen = 256, 442 .maxlen = 256,
476 .mode = 0644, 443 .mode = 0644,
477 .proc_handler = &proc_dostring, 444 .proc_handler = proc_dostring,
478 .strategy = &sysctl_string,
479 }, 445 },
480 { 446 {
481 .ctl_name = KERN_SPARC_STOP_A,
482 .procname = "stop-a", 447 .procname = "stop-a",
483 .data = &stop_a_enabled, 448 .data = &stop_a_enabled,
484 .maxlen = sizeof (int), 449 .maxlen = sizeof (int),
485 .mode = 0644, 450 .mode = 0644,
486 .proc_handler = &proc_dointvec, 451 .proc_handler = proc_dointvec,
487 }, 452 },
488 { 453 {
489 .ctl_name = KERN_SPARC_SCONS_PWROFF,
490 .procname = "scons-poweroff", 454 .procname = "scons-poweroff",
491 .data = &scons_pwroff, 455 .data = &scons_pwroff,
492 .maxlen = sizeof (int), 456 .maxlen = sizeof (int),
493 .mode = 0644, 457 .mode = 0644,
494 .proc_handler = &proc_dointvec, 458 .proc_handler = proc_dointvec,
495 }, 459 },
496#endif 460#endif
497#ifdef CONFIG_SPARC64 461#ifdef CONFIG_SPARC64
498 { 462 {
499 .ctl_name = CTL_UNNUMBERED,
500 .procname = "tsb-ratio", 463 .procname = "tsb-ratio",
501 .data = &sysctl_tsb_ratio, 464 .data = &sysctl_tsb_ratio,
502 .maxlen = sizeof (int), 465 .maxlen = sizeof (int),
503 .mode = 0644, 466 .mode = 0644,
504 .proc_handler = &proc_dointvec, 467 .proc_handler = proc_dointvec,
505 }, 468 },
506#endif 469#endif
507#ifdef __hppa__ 470#ifdef __hppa__
508 { 471 {
509 .ctl_name = KERN_HPPA_PWRSW,
510 .procname = "soft-power", 472 .procname = "soft-power",
511 .data = &pwrsw_enabled, 473 .data = &pwrsw_enabled,
512 .maxlen = sizeof (int), 474 .maxlen = sizeof (int),
513 .mode = 0644, 475 .mode = 0644,
514 .proc_handler = &proc_dointvec, 476 .proc_handler = proc_dointvec,
515 }, 477 },
516 { 478 {
517 .ctl_name = KERN_HPPA_UNALIGNED,
518 .procname = "unaligned-trap", 479 .procname = "unaligned-trap",
519 .data = &unaligned_enabled, 480 .data = &unaligned_enabled,
520 .maxlen = sizeof (int), 481 .maxlen = sizeof (int),
521 .mode = 0644, 482 .mode = 0644,
522 .proc_handler = &proc_dointvec, 483 .proc_handler = proc_dointvec,
523 }, 484 },
524#endif 485#endif
525 { 486 {
526 .ctl_name = KERN_CTLALTDEL,
527 .procname = "ctrl-alt-del", 487 .procname = "ctrl-alt-del",
528 .data = &C_A_D, 488 .data = &C_A_D,
529 .maxlen = sizeof(int), 489 .maxlen = sizeof(int),
530 .mode = 0644, 490 .mode = 0644,
531 .proc_handler = &proc_dointvec, 491 .proc_handler = proc_dointvec,
532 }, 492 },
533#ifdef CONFIG_FUNCTION_TRACER 493#ifdef CONFIG_FUNCTION_TRACER
534 { 494 {
535 .ctl_name = CTL_UNNUMBERED,
536 .procname = "ftrace_enabled", 495 .procname = "ftrace_enabled",
537 .data = &ftrace_enabled, 496 .data = &ftrace_enabled,
538 .maxlen = sizeof(int), 497 .maxlen = sizeof(int),
539 .mode = 0644, 498 .mode = 0644,
540 .proc_handler = &ftrace_enable_sysctl, 499 .proc_handler = ftrace_enable_sysctl,
541 }, 500 },
542#endif 501#endif
543#ifdef CONFIG_STACK_TRACER 502#ifdef CONFIG_STACK_TRACER
544 { 503 {
545 .ctl_name = CTL_UNNUMBERED,
546 .procname = "stack_tracer_enabled", 504 .procname = "stack_tracer_enabled",
547 .data = &stack_tracer_enabled, 505 .data = &stack_tracer_enabled,
548 .maxlen = sizeof(int), 506 .maxlen = sizeof(int),
549 .mode = 0644, 507 .mode = 0644,
550 .proc_handler = &stack_trace_sysctl, 508 .proc_handler = stack_trace_sysctl,
551 }, 509 },
552#endif 510#endif
553#ifdef CONFIG_TRACING 511#ifdef CONFIG_TRACING
554 { 512 {
555 .ctl_name = CTL_UNNUMBERED,
556 .procname = "ftrace_dump_on_oops", 513 .procname = "ftrace_dump_on_oops",
557 .data = &ftrace_dump_on_oops, 514 .data = &ftrace_dump_on_oops,
558 .maxlen = sizeof(int), 515 .maxlen = sizeof(int),
559 .mode = 0644, 516 .mode = 0644,
560 .proc_handler = &proc_dointvec, 517 .proc_handler = proc_dointvec,
561 }, 518 },
562#endif 519#endif
563#ifdef CONFIG_MODULES 520#ifdef CONFIG_MODULES
564 { 521 {
565 .ctl_name = KERN_MODPROBE,
566 .procname = "modprobe", 522 .procname = "modprobe",
567 .data = &modprobe_path, 523 .data = &modprobe_path,
568 .maxlen = KMOD_PATH_LEN, 524 .maxlen = KMOD_PATH_LEN,
569 .mode = 0644, 525 .mode = 0644,
570 .proc_handler = &proc_dostring, 526 .proc_handler = proc_dostring,
571 .strategy = &sysctl_string,
572 }, 527 },
573 { 528 {
574 .ctl_name = CTL_UNNUMBERED,
575 .procname = "modules_disabled", 529 .procname = "modules_disabled",
576 .data = &modules_disabled, 530 .data = &modules_disabled,
577 .maxlen = sizeof(int), 531 .maxlen = sizeof(int),
578 .mode = 0644, 532 .mode = 0644,
579 /* only handle a transition from default "0" to "1" */ 533 /* only handle a transition from default "0" to "1" */
580 .proc_handler = &proc_dointvec_minmax, 534 .proc_handler = proc_dointvec_minmax,
581 .extra1 = &one, 535 .extra1 = &one,
582 .extra2 = &one, 536 .extra2 = &one,
583 }, 537 },
584#endif 538#endif
585#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET) 539#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
586 { 540 {
587 .ctl_name = KERN_HOTPLUG,
588 .procname = "hotplug", 541 .procname = "hotplug",
589 .data = &uevent_helper, 542 .data = &uevent_helper,
590 .maxlen = UEVENT_HELPER_PATH_LEN, 543 .maxlen = UEVENT_HELPER_PATH_LEN,
591 .mode = 0644, 544 .mode = 0644,
592 .proc_handler = &proc_dostring, 545 .proc_handler = proc_dostring,
593 .strategy = &sysctl_string,
594 }, 546 },
595#endif 547#endif
596#ifdef CONFIG_CHR_DEV_SG 548#ifdef CONFIG_CHR_DEV_SG
597 { 549 {
598 .ctl_name = KERN_SG_BIG_BUFF,
599 .procname = "sg-big-buff", 550 .procname = "sg-big-buff",
600 .data = &sg_big_buff, 551 .data = &sg_big_buff,
601 .maxlen = sizeof (int), 552 .maxlen = sizeof (int),
602 .mode = 0444, 553 .mode = 0444,
603 .proc_handler = &proc_dointvec, 554 .proc_handler = proc_dointvec,
604 }, 555 },
605#endif 556#endif
606#ifdef CONFIG_BSD_PROCESS_ACCT 557#ifdef CONFIG_BSD_PROCESS_ACCT
607 { 558 {
608 .ctl_name = KERN_ACCT,
609 .procname = "acct", 559 .procname = "acct",
610 .data = &acct_parm, 560 .data = &acct_parm,
611 .maxlen = 3*sizeof(int), 561 .maxlen = 3*sizeof(int),
612 .mode = 0644, 562 .mode = 0644,
613 .proc_handler = &proc_dointvec, 563 .proc_handler = proc_dointvec,
614 }, 564 },
615#endif 565#endif
616#ifdef CONFIG_MAGIC_SYSRQ 566#ifdef CONFIG_MAGIC_SYSRQ
617 { 567 {
618 .ctl_name = KERN_SYSRQ,
619 .procname = "sysrq", 568 .procname = "sysrq",
620 .data = &__sysrq_enabled, 569 .data = &__sysrq_enabled,
621 .maxlen = sizeof (int), 570 .maxlen = sizeof (int),
622 .mode = 0644, 571 .mode = 0644,
623 .proc_handler = &proc_dointvec, 572 .proc_handler = proc_dointvec,
624 }, 573 },
625#endif 574#endif
626#ifdef CONFIG_PROC_SYSCTL 575#ifdef CONFIG_PROC_SYSCTL
@@ -629,215 +578,188 @@ static struct ctl_table kern_table[] = {
629 .data = NULL, 578 .data = NULL,
630 .maxlen = sizeof (int), 579 .maxlen = sizeof (int),
631 .mode = 0600, 580 .mode = 0600,
632 .proc_handler = &proc_do_cad_pid, 581 .proc_handler = proc_do_cad_pid,
633 }, 582 },
634#endif 583#endif
635 { 584 {
636 .ctl_name = KERN_MAX_THREADS,
637 .procname = "threads-max", 585 .procname = "threads-max",
638 .data = &max_threads, 586 .data = &max_threads,
639 .maxlen = sizeof(int), 587 .maxlen = sizeof(int),
640 .mode = 0644, 588 .mode = 0644,
641 .proc_handler = &proc_dointvec, 589 .proc_handler = proc_dointvec,
642 }, 590 },
643 { 591 {
644 .ctl_name = KERN_RANDOM,
645 .procname = "random", 592 .procname = "random",
646 .mode = 0555, 593 .mode = 0555,
647 .child = random_table, 594 .child = random_table,
648 }, 595 },
649 { 596 {
650 .ctl_name = KERN_OVERFLOWUID,
651 .procname = "overflowuid", 597 .procname = "overflowuid",
652 .data = &overflowuid, 598 .data = &overflowuid,
653 .maxlen = sizeof(int), 599 .maxlen = sizeof(int),
654 .mode = 0644, 600 .mode = 0644,
655 .proc_handler = &proc_dointvec_minmax, 601 .proc_handler = proc_dointvec_minmax,
656 .strategy = &sysctl_intvec,
657 .extra1 = &minolduid, 602 .extra1 = &minolduid,
658 .extra2 = &maxolduid, 603 .extra2 = &maxolduid,
659 }, 604 },
660 { 605 {
661 .ctl_name = KERN_OVERFLOWGID,
662 .procname = "overflowgid", 606 .procname = "overflowgid",
663 .data = &overflowgid, 607 .data = &overflowgid,
664 .maxlen = sizeof(int), 608 .maxlen = sizeof(int),
665 .mode = 0644, 609 .mode = 0644,
666 .proc_handler = &proc_dointvec_minmax, 610 .proc_handler = proc_dointvec_minmax,
667 .strategy = &sysctl_intvec,
668 .extra1 = &minolduid, 611 .extra1 = &minolduid,
669 .extra2 = &maxolduid, 612 .extra2 = &maxolduid,
670 }, 613 },
671#ifdef CONFIG_S390 614#ifdef CONFIG_S390
672#ifdef CONFIG_MATHEMU 615#ifdef CONFIG_MATHEMU
673 { 616 {
674 .ctl_name = KERN_IEEE_EMULATION_WARNINGS,
675 .procname = "ieee_emulation_warnings", 617 .procname = "ieee_emulation_warnings",
676 .data = &sysctl_ieee_emulation_warnings, 618 .data = &sysctl_ieee_emulation_warnings,
677 .maxlen = sizeof(int), 619 .maxlen = sizeof(int),
678 .mode = 0644, 620 .mode = 0644,
679 .proc_handler = &proc_dointvec, 621 .proc_handler = proc_dointvec,
680 }, 622 },
681#endif 623#endif
682 { 624 {
683 .ctl_name = KERN_S390_USER_DEBUG_LOGGING,
684 .procname = "userprocess_debug", 625 .procname = "userprocess_debug",
685 .data = &sysctl_userprocess_debug, 626 .data = &sysctl_userprocess_debug,
686 .maxlen = sizeof(int), 627 .maxlen = sizeof(int),
687 .mode = 0644, 628 .mode = 0644,
688 .proc_handler = &proc_dointvec, 629 .proc_handler = proc_dointvec,
689 }, 630 },
690#endif 631#endif
691 { 632 {
692 .ctl_name = KERN_PIDMAX,
693 .procname = "pid_max", 633 .procname = "pid_max",
694 .data = &pid_max, 634 .data = &pid_max,
695 .maxlen = sizeof (int), 635 .maxlen = sizeof (int),
696 .mode = 0644, 636 .mode = 0644,
697 .proc_handler = &proc_dointvec_minmax, 637 .proc_handler = proc_dointvec_minmax,
698 .strategy = sysctl_intvec,
699 .extra1 = &pid_max_min, 638 .extra1 = &pid_max_min,
700 .extra2 = &pid_max_max, 639 .extra2 = &pid_max_max,
701 }, 640 },
702 { 641 {
703 .ctl_name = KERN_PANIC_ON_OOPS,
704 .procname = "panic_on_oops", 642 .procname = "panic_on_oops",
705 .data = &panic_on_oops, 643 .data = &panic_on_oops,
706 .maxlen = sizeof(int), 644 .maxlen = sizeof(int),
707 .mode = 0644, 645 .mode = 0644,
708 .proc_handler = &proc_dointvec, 646 .proc_handler = proc_dointvec,
709 }, 647 },
710#if defined CONFIG_PRINTK 648#if defined CONFIG_PRINTK
711 { 649 {
712 .ctl_name = KERN_PRINTK,
713 .procname = "printk", 650 .procname = "printk",
714 .data = &console_loglevel, 651 .data = &console_loglevel,
715 .maxlen = 4*sizeof(int), 652 .maxlen = 4*sizeof(int),
716 .mode = 0644, 653 .mode = 0644,
717 .proc_handler = &proc_dointvec, 654 .proc_handler = proc_dointvec,
718 }, 655 },
719 { 656 {
720 .ctl_name = KERN_PRINTK_RATELIMIT,
721 .procname = "printk_ratelimit", 657 .procname = "printk_ratelimit",
722 .data = &printk_ratelimit_state.interval, 658 .data = &printk_ratelimit_state.interval,
723 .maxlen = sizeof(int), 659 .maxlen = sizeof(int),
724 .mode = 0644, 660 .mode = 0644,
725 .proc_handler = &proc_dointvec_jiffies, 661 .proc_handler = proc_dointvec_jiffies,
726 .strategy = &sysctl_jiffies,
727 }, 662 },
728 { 663 {
729 .ctl_name = KERN_PRINTK_RATELIMIT_BURST,
730 .procname = "printk_ratelimit_burst", 664 .procname = "printk_ratelimit_burst",
731 .data = &printk_ratelimit_state.burst, 665 .data = &printk_ratelimit_state.burst,
732 .maxlen = sizeof(int), 666 .maxlen = sizeof(int),
733 .mode = 0644, 667 .mode = 0644,
734 .proc_handler = &proc_dointvec, 668 .proc_handler = proc_dointvec,
735 }, 669 },
736 { 670 {
737 .ctl_name = CTL_UNNUMBERED,
738 .procname = "printk_delay", 671 .procname = "printk_delay",
739 .data = &printk_delay_msec, 672 .data = &printk_delay_msec,
740 .maxlen = sizeof(int), 673 .maxlen = sizeof(int),
741 .mode = 0644, 674 .mode = 0644,
742 .proc_handler = &proc_dointvec_minmax, 675 .proc_handler = proc_dointvec_minmax,
743 .strategy = &sysctl_intvec,
744 .extra1 = &zero, 676 .extra1 = &zero,
745 .extra2 = &ten_thousand, 677 .extra2 = &ten_thousand,
746 }, 678 },
747#endif 679#endif
748 { 680 {
749 .ctl_name = KERN_NGROUPS_MAX,
750 .procname = "ngroups_max", 681 .procname = "ngroups_max",
751 .data = &ngroups_max, 682 .data = &ngroups_max,
752 .maxlen = sizeof (int), 683 .maxlen = sizeof (int),
753 .mode = 0444, 684 .mode = 0444,
754 .proc_handler = &proc_dointvec, 685 .proc_handler = proc_dointvec,
755 }, 686 },
756#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) 687#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
757 { 688 {
758 .ctl_name = KERN_UNKNOWN_NMI_PANIC,
759 .procname = "unknown_nmi_panic", 689 .procname = "unknown_nmi_panic",
760 .data = &unknown_nmi_panic, 690 .data = &unknown_nmi_panic,
761 .maxlen = sizeof (int), 691 .maxlen = sizeof (int),
762 .mode = 0644, 692 .mode = 0644,
763 .proc_handler = &proc_dointvec, 693 .proc_handler = proc_dointvec,
764 }, 694 },
765 { 695 {
766 .procname = "nmi_watchdog", 696 .procname = "nmi_watchdog",
767 .data = &nmi_watchdog_enabled, 697 .data = &nmi_watchdog_enabled,
768 .maxlen = sizeof (int), 698 .maxlen = sizeof (int),
769 .mode = 0644, 699 .mode = 0644,
770 .proc_handler = &proc_nmi_enabled, 700 .proc_handler = proc_nmi_enabled,
771 }, 701 },
772#endif 702#endif
773#if defined(CONFIG_X86) 703#if defined(CONFIG_X86)
774 { 704 {
775 .ctl_name = KERN_PANIC_ON_NMI,
776 .procname = "panic_on_unrecovered_nmi", 705 .procname = "panic_on_unrecovered_nmi",
777 .data = &panic_on_unrecovered_nmi, 706 .data = &panic_on_unrecovered_nmi,
778 .maxlen = sizeof(int), 707 .maxlen = sizeof(int),
779 .mode = 0644, 708 .mode = 0644,
780 .proc_handler = &proc_dointvec, 709 .proc_handler = proc_dointvec,
781 }, 710 },
782 { 711 {
783 .ctl_name = CTL_UNNUMBERED,
784 .procname = "panic_on_io_nmi", 712 .procname = "panic_on_io_nmi",
785 .data = &panic_on_io_nmi, 713 .data = &panic_on_io_nmi,
786 .maxlen = sizeof(int), 714 .maxlen = sizeof(int),
787 .mode = 0644, 715 .mode = 0644,
788 .proc_handler = &proc_dointvec, 716 .proc_handler = proc_dointvec,
789 }, 717 },
790 { 718 {
791 .ctl_name = KERN_BOOTLOADER_TYPE,
792 .procname = "bootloader_type", 719 .procname = "bootloader_type",
793 .data = &bootloader_type, 720 .data = &bootloader_type,
794 .maxlen = sizeof (int), 721 .maxlen = sizeof (int),
795 .mode = 0444, 722 .mode = 0444,
796 .proc_handler = &proc_dointvec, 723 .proc_handler = proc_dointvec,
797 }, 724 },
798 { 725 {
799 .ctl_name = CTL_UNNUMBERED,
800 .procname = "bootloader_version", 726 .procname = "bootloader_version",
801 .data = &bootloader_version, 727 .data = &bootloader_version,
802 .maxlen = sizeof (int), 728 .maxlen = sizeof (int),
803 .mode = 0444, 729 .mode = 0444,
804 .proc_handler = &proc_dointvec, 730 .proc_handler = proc_dointvec,
805 }, 731 },
806 { 732 {
807 .ctl_name = CTL_UNNUMBERED,
808 .procname = "kstack_depth_to_print", 733 .procname = "kstack_depth_to_print",
809 .data = &kstack_depth_to_print, 734 .data = &kstack_depth_to_print,
810 .maxlen = sizeof(int), 735 .maxlen = sizeof(int),
811 .mode = 0644, 736 .mode = 0644,
812 .proc_handler = &proc_dointvec, 737 .proc_handler = proc_dointvec,
813 }, 738 },
814 { 739 {
815 .ctl_name = CTL_UNNUMBERED,
816 .procname = "io_delay_type", 740 .procname = "io_delay_type",
817 .data = &io_delay_type, 741 .data = &io_delay_type,
818 .maxlen = sizeof(int), 742 .maxlen = sizeof(int),
819 .mode = 0644, 743 .mode = 0644,
820 .proc_handler = &proc_dointvec, 744 .proc_handler = proc_dointvec,
821 }, 745 },
822#endif 746#endif
823#if defined(CONFIG_MMU) 747#if defined(CONFIG_MMU)
824 { 748 {
825 .ctl_name = KERN_RANDOMIZE,
826 .procname = "randomize_va_space", 749 .procname = "randomize_va_space",
827 .data = &randomize_va_space, 750 .data = &randomize_va_space,
828 .maxlen = sizeof(int), 751 .maxlen = sizeof(int),
829 .mode = 0644, 752 .mode = 0644,
830 .proc_handler = &proc_dointvec, 753 .proc_handler = proc_dointvec,
831 }, 754 },
832#endif 755#endif
833#if defined(CONFIG_S390) && defined(CONFIG_SMP) 756#if defined(CONFIG_S390) && defined(CONFIG_SMP)
834 { 757 {
835 .ctl_name = KERN_SPIN_RETRY,
836 .procname = "spin_retry", 758 .procname = "spin_retry",
837 .data = &spin_retry, 759 .data = &spin_retry,
838 .maxlen = sizeof (int), 760 .maxlen = sizeof (int),
839 .mode = 0644, 761 .mode = 0644,
840 .proc_handler = &proc_dointvec, 762 .proc_handler = proc_dointvec,
841 }, 763 },
842#endif 764#endif
843#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) 765#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
@@ -846,123 +768,104 @@ static struct ctl_table kern_table[] = {
846 .data = &acpi_realmode_flags, 768 .data = &acpi_realmode_flags,
847 .maxlen = sizeof (unsigned long), 769 .maxlen = sizeof (unsigned long),
848 .mode = 0644, 770 .mode = 0644,
849 .proc_handler = &proc_doulongvec_minmax, 771 .proc_handler = proc_doulongvec_minmax,
850 }, 772 },
851#endif 773#endif
852#ifdef CONFIG_IA64 774#ifdef CONFIG_IA64
853 { 775 {
854 .ctl_name = KERN_IA64_UNALIGNED,
855 .procname = "ignore-unaligned-usertrap", 776 .procname = "ignore-unaligned-usertrap",
856 .data = &no_unaligned_warning, 777 .data = &no_unaligned_warning,
857 .maxlen = sizeof (int), 778 .maxlen = sizeof (int),
858 .mode = 0644, 779 .mode = 0644,
859 .proc_handler = &proc_dointvec, 780 .proc_handler = proc_dointvec,
860 }, 781 },
861 { 782 {
862 .ctl_name = CTL_UNNUMBERED,
863 .procname = "unaligned-dump-stack", 783 .procname = "unaligned-dump-stack",
864 .data = &unaligned_dump_stack, 784 .data = &unaligned_dump_stack,
865 .maxlen = sizeof (int), 785 .maxlen = sizeof (int),
866 .mode = 0644, 786 .mode = 0644,
867 .proc_handler = &proc_dointvec, 787 .proc_handler = proc_dointvec,
868 }, 788 },
869#endif 789#endif
870#ifdef CONFIG_DETECT_SOFTLOCKUP 790#ifdef CONFIG_DETECT_SOFTLOCKUP
871 { 791 {
872 .ctl_name = CTL_UNNUMBERED,
873 .procname = "softlockup_panic", 792 .procname = "softlockup_panic",
874 .data = &softlockup_panic, 793 .data = &softlockup_panic,
875 .maxlen = sizeof(int), 794 .maxlen = sizeof(int),
876 .mode = 0644, 795 .mode = 0644,
877 .proc_handler = &proc_dointvec_minmax, 796 .proc_handler = proc_dointvec_minmax,
878 .strategy = &sysctl_intvec,
879 .extra1 = &zero, 797 .extra1 = &zero,
880 .extra2 = &one, 798 .extra2 = &one,
881 }, 799 },
882 { 800 {
883 .ctl_name = CTL_UNNUMBERED,
884 .procname = "softlockup_thresh", 801 .procname = "softlockup_thresh",
885 .data = &softlockup_thresh, 802 .data = &softlockup_thresh,
886 .maxlen = sizeof(int), 803 .maxlen = sizeof(int),
887 .mode = 0644, 804 .mode = 0644,
888 .proc_handler = &proc_dosoftlockup_thresh, 805 .proc_handler = proc_dosoftlockup_thresh,
889 .strategy = &sysctl_intvec,
890 .extra1 = &neg_one, 806 .extra1 = &neg_one,
891 .extra2 = &sixty, 807 .extra2 = &sixty,
892 }, 808 },
893#endif 809#endif
894#ifdef CONFIG_DETECT_HUNG_TASK 810#ifdef CONFIG_DETECT_HUNG_TASK
895 { 811 {
896 .ctl_name = CTL_UNNUMBERED,
897 .procname = "hung_task_panic", 812 .procname = "hung_task_panic",
898 .data = &sysctl_hung_task_panic, 813 .data = &sysctl_hung_task_panic,
899 .maxlen = sizeof(int), 814 .maxlen = sizeof(int),
900 .mode = 0644, 815 .mode = 0644,
901 .proc_handler = &proc_dointvec_minmax, 816 .proc_handler = proc_dointvec_minmax,
902 .strategy = &sysctl_intvec,
903 .extra1 = &zero, 817 .extra1 = &zero,
904 .extra2 = &one, 818 .extra2 = &one,
905 }, 819 },
906 { 820 {
907 .ctl_name = CTL_UNNUMBERED,
908 .procname = "hung_task_check_count", 821 .procname = "hung_task_check_count",
909 .data = &sysctl_hung_task_check_count, 822 .data = &sysctl_hung_task_check_count,
910 .maxlen = sizeof(unsigned long), 823 .maxlen = sizeof(unsigned long),
911 .mode = 0644, 824 .mode = 0644,
912 .proc_handler = &proc_doulongvec_minmax, 825 .proc_handler = proc_doulongvec_minmax,
913 .strategy = &sysctl_intvec,
914 }, 826 },
915 { 827 {
916 .ctl_name = CTL_UNNUMBERED,
917 .procname = "hung_task_timeout_secs", 828 .procname = "hung_task_timeout_secs",
918 .data = &sysctl_hung_task_timeout_secs, 829 .data = &sysctl_hung_task_timeout_secs,
919 .maxlen = sizeof(unsigned long), 830 .maxlen = sizeof(unsigned long),
920 .mode = 0644, 831 .mode = 0644,
921 .proc_handler = &proc_dohung_task_timeout_secs, 832 .proc_handler = proc_dohung_task_timeout_secs,
922 .strategy = &sysctl_intvec,
923 }, 833 },
924 { 834 {
925 .ctl_name = CTL_UNNUMBERED,
926 .procname = "hung_task_warnings", 835 .procname = "hung_task_warnings",
927 .data = &sysctl_hung_task_warnings, 836 .data = &sysctl_hung_task_warnings,
928 .maxlen = sizeof(unsigned long), 837 .maxlen = sizeof(unsigned long),
929 .mode = 0644, 838 .mode = 0644,
930 .proc_handler = &proc_doulongvec_minmax, 839 .proc_handler = proc_doulongvec_minmax,
931 .strategy = &sysctl_intvec,
932 }, 840 },
933#endif 841#endif
934#ifdef CONFIG_COMPAT 842#ifdef CONFIG_COMPAT
935 { 843 {
936 .ctl_name = KERN_COMPAT_LOG,
937 .procname = "compat-log", 844 .procname = "compat-log",
938 .data = &compat_log, 845 .data = &compat_log,
939 .maxlen = sizeof (int), 846 .maxlen = sizeof (int),
940 .mode = 0644, 847 .mode = 0644,
941 .proc_handler = &proc_dointvec, 848 .proc_handler = proc_dointvec,
942 }, 849 },
943#endif 850#endif
944#ifdef CONFIG_RT_MUTEXES 851#ifdef CONFIG_RT_MUTEXES
945 { 852 {
946 .ctl_name = KERN_MAX_LOCK_DEPTH,
947 .procname = "max_lock_depth", 853 .procname = "max_lock_depth",
948 .data = &max_lock_depth, 854 .data = &max_lock_depth,
949 .maxlen = sizeof(int), 855 .maxlen = sizeof(int),
950 .mode = 0644, 856 .mode = 0644,
951 .proc_handler = &proc_dointvec, 857 .proc_handler = proc_dointvec,
952 }, 858 },
953#endif 859#endif
954 { 860 {
955 .ctl_name = CTL_UNNUMBERED,
956 .procname = "poweroff_cmd", 861 .procname = "poweroff_cmd",
957 .data = &poweroff_cmd, 862 .data = &poweroff_cmd,
958 .maxlen = POWEROFF_CMD_PATH_LEN, 863 .maxlen = POWEROFF_CMD_PATH_LEN,
959 .mode = 0644, 864 .mode = 0644,
960 .proc_handler = &proc_dostring, 865 .proc_handler = proc_dostring,
961 .strategy = &sysctl_string,
962 }, 866 },
963#ifdef CONFIG_KEYS 867#ifdef CONFIG_KEYS
964 { 868 {
965 .ctl_name = CTL_UNNUMBERED,
966 .procname = "keys", 869 .procname = "keys",
967 .mode = 0555, 870 .mode = 0555,
968 .child = key_sysctls, 871 .child = key_sysctls,
@@ -970,17 +873,15 @@ static struct ctl_table kern_table[] = {
970#endif 873#endif
971#ifdef CONFIG_RCU_TORTURE_TEST 874#ifdef CONFIG_RCU_TORTURE_TEST
972 { 875 {
973 .ctl_name = CTL_UNNUMBERED,
974 .procname = "rcutorture_runnable", 876 .procname = "rcutorture_runnable",
975 .data = &rcutorture_runnable, 877 .data = &rcutorture_runnable,
976 .maxlen = sizeof(int), 878 .maxlen = sizeof(int),
977 .mode = 0644, 879 .mode = 0644,
978 .proc_handler = &proc_dointvec, 880 .proc_handler = proc_dointvec,
979 }, 881 },
980#endif 882#endif
981#ifdef CONFIG_SLOW_WORK 883#ifdef CONFIG_SLOW_WORK
982 { 884 {
983 .ctl_name = CTL_UNNUMBERED,
984 .procname = "slow-work", 885 .procname = "slow-work",
985 .mode = 0555, 886 .mode = 0555,
986 .child = slow_work_sysctls, 887 .child = slow_work_sysctls,
@@ -988,146 +889,127 @@ static struct ctl_table kern_table[] = {
988#endif 889#endif
989#ifdef CONFIG_PERF_EVENTS 890#ifdef CONFIG_PERF_EVENTS
990 { 891 {
991 .ctl_name = CTL_UNNUMBERED,
992 .procname = "perf_event_paranoid", 892 .procname = "perf_event_paranoid",
993 .data = &sysctl_perf_event_paranoid, 893 .data = &sysctl_perf_event_paranoid,
994 .maxlen = sizeof(sysctl_perf_event_paranoid), 894 .maxlen = sizeof(sysctl_perf_event_paranoid),
995 .mode = 0644, 895 .mode = 0644,
996 .proc_handler = &proc_dointvec, 896 .proc_handler = proc_dointvec,
997 }, 897 },
998 { 898 {
999 .ctl_name = CTL_UNNUMBERED,
1000 .procname = "perf_event_mlock_kb", 899 .procname = "perf_event_mlock_kb",
1001 .data = &sysctl_perf_event_mlock, 900 .data = &sysctl_perf_event_mlock,
1002 .maxlen = sizeof(sysctl_perf_event_mlock), 901 .maxlen = sizeof(sysctl_perf_event_mlock),
1003 .mode = 0644, 902 .mode = 0644,
1004 .proc_handler = &proc_dointvec, 903 .proc_handler = proc_dointvec,
1005 }, 904 },
1006 { 905 {
1007 .ctl_name = CTL_UNNUMBERED,
1008 .procname = "perf_event_max_sample_rate", 906 .procname = "perf_event_max_sample_rate",
1009 .data = &sysctl_perf_event_sample_rate, 907 .data = &sysctl_perf_event_sample_rate,
1010 .maxlen = sizeof(sysctl_perf_event_sample_rate), 908 .maxlen = sizeof(sysctl_perf_event_sample_rate),
1011 .mode = 0644, 909 .mode = 0644,
1012 .proc_handler = &proc_dointvec, 910 .proc_handler = proc_dointvec,
1013 }, 911 },
1014#endif 912#endif
1015#ifdef CONFIG_KMEMCHECK 913#ifdef CONFIG_KMEMCHECK
1016 { 914 {
1017 .ctl_name = CTL_UNNUMBERED,
1018 .procname = "kmemcheck", 915 .procname = "kmemcheck",
1019 .data = &kmemcheck_enabled, 916 .data = &kmemcheck_enabled,
1020 .maxlen = sizeof(int), 917 .maxlen = sizeof(int),
1021 .mode = 0644, 918 .mode = 0644,
1022 .proc_handler = &proc_dointvec, 919 .proc_handler = proc_dointvec,
1023 }, 920 },
1024#endif 921#endif
1025#ifdef CONFIG_BLOCK 922#ifdef CONFIG_BLOCK
1026 { 923 {
1027 .ctl_name = CTL_UNNUMBERED,
1028 .procname = "blk_iopoll", 924 .procname = "blk_iopoll",
1029 .data = &blk_iopoll_enabled, 925 .data = &blk_iopoll_enabled,
1030 .maxlen = sizeof(int), 926 .maxlen = sizeof(int),
1031 .mode = 0644, 927 .mode = 0644,
1032 .proc_handler = &proc_dointvec, 928 .proc_handler = proc_dointvec,
1033 }, 929 },
1034#endif 930#endif
1035/* 931/*
1036 * NOTE: do not add new entries to this table unless you have read 932 * NOTE: do not add new entries to this table unless you have read
1037 * Documentation/sysctl/ctl_unnumbered.txt 933 * Documentation/sysctl/ctl_unnumbered.txt
1038 */ 934 */
1039 { .ctl_name = 0 } 935 { }
1040}; 936};
1041 937
1042static struct ctl_table vm_table[] = { 938static struct ctl_table vm_table[] = {
1043 { 939 {
1044 .ctl_name = VM_OVERCOMMIT_MEMORY,
1045 .procname = "overcommit_memory", 940 .procname = "overcommit_memory",
1046 .data = &sysctl_overcommit_memory, 941 .data = &sysctl_overcommit_memory,
1047 .maxlen = sizeof(sysctl_overcommit_memory), 942 .maxlen = sizeof(sysctl_overcommit_memory),
1048 .mode = 0644, 943 .mode = 0644,
1049 .proc_handler = &proc_dointvec, 944 .proc_handler = proc_dointvec,
1050 }, 945 },
1051 { 946 {
1052 .ctl_name = VM_PANIC_ON_OOM,
1053 .procname = "panic_on_oom", 947 .procname = "panic_on_oom",
1054 .data = &sysctl_panic_on_oom, 948 .data = &sysctl_panic_on_oom,
1055 .maxlen = sizeof(sysctl_panic_on_oom), 949 .maxlen = sizeof(sysctl_panic_on_oom),
1056 .mode = 0644, 950 .mode = 0644,
1057 .proc_handler = &proc_dointvec, 951 .proc_handler = proc_dointvec,
1058 }, 952 },
1059 { 953 {
1060 .ctl_name = CTL_UNNUMBERED,
1061 .procname = "oom_kill_allocating_task", 954 .procname = "oom_kill_allocating_task",
1062 .data = &sysctl_oom_kill_allocating_task, 955 .data = &sysctl_oom_kill_allocating_task,
1063 .maxlen = sizeof(sysctl_oom_kill_allocating_task), 956 .maxlen = sizeof(sysctl_oom_kill_allocating_task),
1064 .mode = 0644, 957 .mode = 0644,
1065 .proc_handler = &proc_dointvec, 958 .proc_handler = proc_dointvec,
1066 }, 959 },
1067 { 960 {
1068 .ctl_name = CTL_UNNUMBERED,
1069 .procname = "oom_dump_tasks", 961 .procname = "oom_dump_tasks",
1070 .data = &sysctl_oom_dump_tasks, 962 .data = &sysctl_oom_dump_tasks,
1071 .maxlen = sizeof(sysctl_oom_dump_tasks), 963 .maxlen = sizeof(sysctl_oom_dump_tasks),
1072 .mode = 0644, 964 .mode = 0644,
1073 .proc_handler = &proc_dointvec, 965 .proc_handler = proc_dointvec,
1074 }, 966 },
1075 { 967 {
1076 .ctl_name = VM_OVERCOMMIT_RATIO,
1077 .procname = "overcommit_ratio", 968 .procname = "overcommit_ratio",
1078 .data = &sysctl_overcommit_ratio, 969 .data = &sysctl_overcommit_ratio,
1079 .maxlen = sizeof(sysctl_overcommit_ratio), 970 .maxlen = sizeof(sysctl_overcommit_ratio),
1080 .mode = 0644, 971 .mode = 0644,
1081 .proc_handler = &proc_dointvec, 972 .proc_handler = proc_dointvec,
1082 }, 973 },
1083 { 974 {
1084 .ctl_name = VM_PAGE_CLUSTER,
1085 .procname = "page-cluster", 975 .procname = "page-cluster",
1086 .data = &page_cluster, 976 .data = &page_cluster,
1087 .maxlen = sizeof(int), 977 .maxlen = sizeof(int),
1088 .mode = 0644, 978 .mode = 0644,
1089 .proc_handler = &proc_dointvec, 979 .proc_handler = proc_dointvec,
1090 }, 980 },
1091 { 981 {
1092 .ctl_name = VM_DIRTY_BACKGROUND,
1093 .procname = "dirty_background_ratio", 982 .procname = "dirty_background_ratio",
1094 .data = &dirty_background_ratio, 983 .data = &dirty_background_ratio,
1095 .maxlen = sizeof(dirty_background_ratio), 984 .maxlen = sizeof(dirty_background_ratio),
1096 .mode = 0644, 985 .mode = 0644,
1097 .proc_handler = &dirty_background_ratio_handler, 986 .proc_handler = dirty_background_ratio_handler,
1098 .strategy = &sysctl_intvec,
1099 .extra1 = &zero, 987 .extra1 = &zero,
1100 .extra2 = &one_hundred, 988 .extra2 = &one_hundred,
1101 }, 989 },
1102 { 990 {
1103 .ctl_name = CTL_UNNUMBERED,
1104 .procname = "dirty_background_bytes", 991 .procname = "dirty_background_bytes",
1105 .data = &dirty_background_bytes, 992 .data = &dirty_background_bytes,
1106 .maxlen = sizeof(dirty_background_bytes), 993 .maxlen = sizeof(dirty_background_bytes),
1107 .mode = 0644, 994 .mode = 0644,
1108 .proc_handler = &dirty_background_bytes_handler, 995 .proc_handler = dirty_background_bytes_handler,
1109 .strategy = &sysctl_intvec,
1110 .extra1 = &one_ul, 996 .extra1 = &one_ul,
1111 }, 997 },
1112 { 998 {
1113 .ctl_name = VM_DIRTY_RATIO,
1114 .procname = "dirty_ratio", 999 .procname = "dirty_ratio",
1115 .data = &vm_dirty_ratio, 1000 .data = &vm_dirty_ratio,
1116 .maxlen = sizeof(vm_dirty_ratio), 1001 .maxlen = sizeof(vm_dirty_ratio),
1117 .mode = 0644, 1002 .mode = 0644,
1118 .proc_handler = &dirty_ratio_handler, 1003 .proc_handler = dirty_ratio_handler,
1119 .strategy = &sysctl_intvec,
1120 .extra1 = &zero, 1004 .extra1 = &zero,
1121 .extra2 = &one_hundred, 1005 .extra2 = &one_hundred,
1122 }, 1006 },
1123 { 1007 {
1124 .ctl_name = CTL_UNNUMBERED,
1125 .procname = "dirty_bytes", 1008 .procname = "dirty_bytes",
1126 .data = &vm_dirty_bytes, 1009 .data = &vm_dirty_bytes,
1127 .maxlen = sizeof(vm_dirty_bytes), 1010 .maxlen = sizeof(vm_dirty_bytes),
1128 .mode = 0644, 1011 .mode = 0644,
1129 .proc_handler = &dirty_bytes_handler, 1012 .proc_handler = dirty_bytes_handler,
1130 .strategy = &sysctl_intvec,
1131 .extra1 = &dirty_bytes_min, 1013 .extra1 = &dirty_bytes_min,
1132 }, 1014 },
1133 { 1015 {
@@ -1135,31 +1017,28 @@ static struct ctl_table vm_table[] = {
1135 .data = &dirty_writeback_interval, 1017 .data = &dirty_writeback_interval,
1136 .maxlen = sizeof(dirty_writeback_interval), 1018 .maxlen = sizeof(dirty_writeback_interval),
1137 .mode = 0644, 1019 .mode = 0644,
1138 .proc_handler = &dirty_writeback_centisecs_handler, 1020 .proc_handler = dirty_writeback_centisecs_handler,
1139 }, 1021 },
1140 { 1022 {
1141 .procname = "dirty_expire_centisecs", 1023 .procname = "dirty_expire_centisecs",
1142 .data = &dirty_expire_interval, 1024 .data = &dirty_expire_interval,
1143 .maxlen = sizeof(dirty_expire_interval), 1025 .maxlen = sizeof(dirty_expire_interval),
1144 .mode = 0644, 1026 .mode = 0644,
1145 .proc_handler = &proc_dointvec, 1027 .proc_handler = proc_dointvec,
1146 }, 1028 },
1147 { 1029 {
1148 .ctl_name = VM_NR_PDFLUSH_THREADS,
1149 .procname = "nr_pdflush_threads", 1030 .procname = "nr_pdflush_threads",
1150 .data = &nr_pdflush_threads, 1031 .data = &nr_pdflush_threads,
1151 .maxlen = sizeof nr_pdflush_threads, 1032 .maxlen = sizeof nr_pdflush_threads,
1152 .mode = 0444 /* read-only*/, 1033 .mode = 0444 /* read-only*/,
1153 .proc_handler = &proc_dointvec, 1034 .proc_handler = proc_dointvec,
1154 }, 1035 },
1155 { 1036 {
1156 .ctl_name = VM_SWAPPINESS,
1157 .procname = "swappiness", 1037 .procname = "swappiness",
1158 .data = &vm_swappiness, 1038 .data = &vm_swappiness,
1159 .maxlen = sizeof(vm_swappiness), 1039 .maxlen = sizeof(vm_swappiness),
1160 .mode = 0644, 1040 .mode = 0644,
1161 .proc_handler = &proc_dointvec_minmax, 1041 .proc_handler = proc_dointvec_minmax,
1162 .strategy = &sysctl_intvec,
1163 .extra1 = &zero, 1042 .extra1 = &zero,
1164 .extra2 = &one_hundred, 1043 .extra2 = &one_hundred,
1165 }, 1044 },
@@ -1169,255 +1048,213 @@ static struct ctl_table vm_table[] = {
1169 .data = NULL, 1048 .data = NULL,
1170 .maxlen = sizeof(unsigned long), 1049 .maxlen = sizeof(unsigned long),
1171 .mode = 0644, 1050 .mode = 0644,
1172 .proc_handler = &hugetlb_sysctl_handler, 1051 .proc_handler = hugetlb_sysctl_handler,
1173 .extra1 = (void *)&hugetlb_zero, 1052 .extra1 = (void *)&hugetlb_zero,
1174 .extra2 = (void *)&hugetlb_infinity, 1053 .extra2 = (void *)&hugetlb_infinity,
1175 }, 1054 },
1176 { 1055 {
1177 .ctl_name = VM_HUGETLB_GROUP,
1178 .procname = "hugetlb_shm_group", 1056 .procname = "hugetlb_shm_group",
1179 .data = &sysctl_hugetlb_shm_group, 1057 .data = &sysctl_hugetlb_shm_group,
1180 .maxlen = sizeof(gid_t), 1058 .maxlen = sizeof(gid_t),
1181 .mode = 0644, 1059 .mode = 0644,
1182 .proc_handler = &proc_dointvec, 1060 .proc_handler = proc_dointvec,
1183 }, 1061 },
1184 { 1062 {
1185 .ctl_name = CTL_UNNUMBERED,
1186 .procname = "hugepages_treat_as_movable", 1063 .procname = "hugepages_treat_as_movable",
1187 .data = &hugepages_treat_as_movable, 1064 .data = &hugepages_treat_as_movable,
1188 .maxlen = sizeof(int), 1065 .maxlen = sizeof(int),
1189 .mode = 0644, 1066 .mode = 0644,
1190 .proc_handler = &hugetlb_treat_movable_handler, 1067 .proc_handler = hugetlb_treat_movable_handler,
1191 }, 1068 },
1192 { 1069 {
1193 .ctl_name = CTL_UNNUMBERED,
1194 .procname = "nr_overcommit_hugepages", 1070 .procname = "nr_overcommit_hugepages",
1195 .data = NULL, 1071 .data = NULL,
1196 .maxlen = sizeof(unsigned long), 1072 .maxlen = sizeof(unsigned long),
1197 .mode = 0644, 1073 .mode = 0644,
1198 .proc_handler = &hugetlb_overcommit_handler, 1074 .proc_handler = hugetlb_overcommit_handler,
1199 .extra1 = (void *)&hugetlb_zero, 1075 .extra1 = (void *)&hugetlb_zero,
1200 .extra2 = (void *)&hugetlb_infinity, 1076 .extra2 = (void *)&hugetlb_infinity,
1201 }, 1077 },
1202#endif 1078#endif
1203 { 1079 {
1204 .ctl_name = VM_LOWMEM_RESERVE_RATIO,
1205 .procname = "lowmem_reserve_ratio", 1080 .procname = "lowmem_reserve_ratio",
1206 .data = &sysctl_lowmem_reserve_ratio, 1081 .data = &sysctl_lowmem_reserve_ratio,
1207 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 1082 .maxlen = sizeof(sysctl_lowmem_reserve_ratio),
1208 .mode = 0644, 1083 .mode = 0644,
1209 .proc_handler = &lowmem_reserve_ratio_sysctl_handler, 1084 .proc_handler = lowmem_reserve_ratio_sysctl_handler,
1210 .strategy = &sysctl_intvec,
1211 }, 1085 },
1212 { 1086 {
1213 .ctl_name = VM_DROP_PAGECACHE,
1214 .procname = "drop_caches", 1087 .procname = "drop_caches",
1215 .data = &sysctl_drop_caches, 1088 .data = &sysctl_drop_caches,
1216 .maxlen = sizeof(int), 1089 .maxlen = sizeof(int),
1217 .mode = 0644, 1090 .mode = 0644,
1218 .proc_handler = drop_caches_sysctl_handler, 1091 .proc_handler = drop_caches_sysctl_handler,
1219 .strategy = &sysctl_intvec,
1220 }, 1092 },
1221 { 1093 {
1222 .ctl_name = VM_MIN_FREE_KBYTES,
1223 .procname = "min_free_kbytes", 1094 .procname = "min_free_kbytes",
1224 .data = &min_free_kbytes, 1095 .data = &min_free_kbytes,
1225 .maxlen = sizeof(min_free_kbytes), 1096 .maxlen = sizeof(min_free_kbytes),
1226 .mode = 0644, 1097 .mode = 0644,
1227 .proc_handler = &min_free_kbytes_sysctl_handler, 1098 .proc_handler = min_free_kbytes_sysctl_handler,
1228 .strategy = &sysctl_intvec,
1229 .extra1 = &zero, 1099 .extra1 = &zero,
1230 }, 1100 },
1231 { 1101 {
1232 .ctl_name = VM_PERCPU_PAGELIST_FRACTION,
1233 .procname = "percpu_pagelist_fraction", 1102 .procname = "percpu_pagelist_fraction",
1234 .data = &percpu_pagelist_fraction, 1103 .data = &percpu_pagelist_fraction,
1235 .maxlen = sizeof(percpu_pagelist_fraction), 1104 .maxlen = sizeof(percpu_pagelist_fraction),
1236 .mode = 0644, 1105 .mode = 0644,
1237 .proc_handler = &percpu_pagelist_fraction_sysctl_handler, 1106 .proc_handler = percpu_pagelist_fraction_sysctl_handler,
1238 .strategy = &sysctl_intvec,
1239 .extra1 = &min_percpu_pagelist_fract, 1107 .extra1 = &min_percpu_pagelist_fract,
1240 }, 1108 },
1241#ifdef CONFIG_MMU 1109#ifdef CONFIG_MMU
1242 { 1110 {
1243 .ctl_name = VM_MAX_MAP_COUNT,
1244 .procname = "max_map_count", 1111 .procname = "max_map_count",
1245 .data = &sysctl_max_map_count, 1112 .data = &sysctl_max_map_count,
1246 .maxlen = sizeof(sysctl_max_map_count), 1113 .maxlen = sizeof(sysctl_max_map_count),
1247 .mode = 0644, 1114 .mode = 0644,
1248 .proc_handler = &proc_dointvec 1115 .proc_handler = proc_dointvec
1249 }, 1116 },
1250#else 1117#else
1251 { 1118 {
1252 .ctl_name = CTL_UNNUMBERED,
1253 .procname = "nr_trim_pages", 1119 .procname = "nr_trim_pages",
1254 .data = &sysctl_nr_trim_pages, 1120 .data = &sysctl_nr_trim_pages,
1255 .maxlen = sizeof(sysctl_nr_trim_pages), 1121 .maxlen = sizeof(sysctl_nr_trim_pages),
1256 .mode = 0644, 1122 .mode = 0644,
1257 .proc_handler = &proc_dointvec_minmax, 1123 .proc_handler = proc_dointvec_minmax,
1258 .strategy = &sysctl_intvec,
1259 .extra1 = &zero, 1124 .extra1 = &zero,
1260 }, 1125 },
1261#endif 1126#endif
1262 { 1127 {
1263 .ctl_name = VM_LAPTOP_MODE,
1264 .procname = "laptop_mode", 1128 .procname = "laptop_mode",
1265 .data = &laptop_mode, 1129 .data = &laptop_mode,
1266 .maxlen = sizeof(laptop_mode), 1130 .maxlen = sizeof(laptop_mode),
1267 .mode = 0644, 1131 .mode = 0644,
1268 .proc_handler = &proc_dointvec_jiffies, 1132 .proc_handler = proc_dointvec_jiffies,
1269 .strategy = &sysctl_jiffies,
1270 }, 1133 },
1271 { 1134 {
1272 .ctl_name = VM_BLOCK_DUMP,
1273 .procname = "block_dump", 1135 .procname = "block_dump",
1274 .data = &block_dump, 1136 .data = &block_dump,
1275 .maxlen = sizeof(block_dump), 1137 .maxlen = sizeof(block_dump),
1276 .mode = 0644, 1138 .mode = 0644,
1277 .proc_handler = &proc_dointvec, 1139 .proc_handler = proc_dointvec,
1278 .strategy = &sysctl_intvec,
1279 .extra1 = &zero, 1140 .extra1 = &zero,
1280 }, 1141 },
1281 { 1142 {
1282 .ctl_name = VM_VFS_CACHE_PRESSURE,
1283 .procname = "vfs_cache_pressure", 1143 .procname = "vfs_cache_pressure",
1284 .data = &sysctl_vfs_cache_pressure, 1144 .data = &sysctl_vfs_cache_pressure,
1285 .maxlen = sizeof(sysctl_vfs_cache_pressure), 1145 .maxlen = sizeof(sysctl_vfs_cache_pressure),
1286 .mode = 0644, 1146 .mode = 0644,
1287 .proc_handler = &proc_dointvec, 1147 .proc_handler = proc_dointvec,
1288 .strategy = &sysctl_intvec,
1289 .extra1 = &zero, 1148 .extra1 = &zero,
1290 }, 1149 },
1291#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT 1150#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
1292 { 1151 {
1293 .ctl_name = VM_LEGACY_VA_LAYOUT,
1294 .procname = "legacy_va_layout", 1152 .procname = "legacy_va_layout",
1295 .data = &sysctl_legacy_va_layout, 1153 .data = &sysctl_legacy_va_layout,
1296 .maxlen = sizeof(sysctl_legacy_va_layout), 1154 .maxlen = sizeof(sysctl_legacy_va_layout),
1297 .mode = 0644, 1155 .mode = 0644,
1298 .proc_handler = &proc_dointvec, 1156 .proc_handler = proc_dointvec,
1299 .strategy = &sysctl_intvec,
1300 .extra1 = &zero, 1157 .extra1 = &zero,
1301 }, 1158 },
1302#endif 1159#endif
1303#ifdef CONFIG_NUMA 1160#ifdef CONFIG_NUMA
1304 { 1161 {
1305 .ctl_name = VM_ZONE_RECLAIM_MODE,
1306 .procname = "zone_reclaim_mode", 1162 .procname = "zone_reclaim_mode",
1307 .data = &zone_reclaim_mode, 1163 .data = &zone_reclaim_mode,
1308 .maxlen = sizeof(zone_reclaim_mode), 1164 .maxlen = sizeof(zone_reclaim_mode),
1309 .mode = 0644, 1165 .mode = 0644,
1310 .proc_handler = &proc_dointvec, 1166 .proc_handler = proc_dointvec,
1311 .strategy = &sysctl_intvec,
1312 .extra1 = &zero, 1167 .extra1 = &zero,
1313 }, 1168 },
1314 { 1169 {
1315 .ctl_name = VM_MIN_UNMAPPED,
1316 .procname = "min_unmapped_ratio", 1170 .procname = "min_unmapped_ratio",
1317 .data = &sysctl_min_unmapped_ratio, 1171 .data = &sysctl_min_unmapped_ratio,
1318 .maxlen = sizeof(sysctl_min_unmapped_ratio), 1172 .maxlen = sizeof(sysctl_min_unmapped_ratio),
1319 .mode = 0644, 1173 .mode = 0644,
1320 .proc_handler = &sysctl_min_unmapped_ratio_sysctl_handler, 1174 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler,
1321 .strategy = &sysctl_intvec,
1322 .extra1 = &zero, 1175 .extra1 = &zero,
1323 .extra2 = &one_hundred, 1176 .extra2 = &one_hundred,
1324 }, 1177 },
1325 { 1178 {
1326 .ctl_name = VM_MIN_SLAB,
1327 .procname = "min_slab_ratio", 1179 .procname = "min_slab_ratio",
1328 .data = &sysctl_min_slab_ratio, 1180 .data = &sysctl_min_slab_ratio,
1329 .maxlen = sizeof(sysctl_min_slab_ratio), 1181 .maxlen = sizeof(sysctl_min_slab_ratio),
1330 .mode = 0644, 1182 .mode = 0644,
1331 .proc_handler = &sysctl_min_slab_ratio_sysctl_handler, 1183 .proc_handler = sysctl_min_slab_ratio_sysctl_handler,
1332 .strategy = &sysctl_intvec,
1333 .extra1 = &zero, 1184 .extra1 = &zero,
1334 .extra2 = &one_hundred, 1185 .extra2 = &one_hundred,
1335 }, 1186 },
1336#endif 1187#endif
1337#ifdef CONFIG_SMP 1188#ifdef CONFIG_SMP
1338 { 1189 {
1339 .ctl_name = CTL_UNNUMBERED,
1340 .procname = "stat_interval", 1190 .procname = "stat_interval",
1341 .data = &sysctl_stat_interval, 1191 .data = &sysctl_stat_interval,
1342 .maxlen = sizeof(sysctl_stat_interval), 1192 .maxlen = sizeof(sysctl_stat_interval),
1343 .mode = 0644, 1193 .mode = 0644,
1344 .proc_handler = &proc_dointvec_jiffies, 1194 .proc_handler = proc_dointvec_jiffies,
1345 .strategy = &sysctl_jiffies,
1346 }, 1195 },
1347#endif 1196#endif
1348 { 1197 {
1349 .ctl_name = CTL_UNNUMBERED,
1350 .procname = "mmap_min_addr", 1198 .procname = "mmap_min_addr",
1351 .data = &dac_mmap_min_addr, 1199 .data = &dac_mmap_min_addr,
1352 .maxlen = sizeof(unsigned long), 1200 .maxlen = sizeof(unsigned long),
1353 .mode = 0644, 1201 .mode = 0644,
1354 .proc_handler = &mmap_min_addr_handler, 1202 .proc_handler = mmap_min_addr_handler,
1355 }, 1203 },
1356#ifdef CONFIG_NUMA 1204#ifdef CONFIG_NUMA
1357 { 1205 {
1358 .ctl_name = CTL_UNNUMBERED,
1359 .procname = "numa_zonelist_order", 1206 .procname = "numa_zonelist_order",
1360 .data = &numa_zonelist_order, 1207 .data = &numa_zonelist_order,
1361 .maxlen = NUMA_ZONELIST_ORDER_LEN, 1208 .maxlen = NUMA_ZONELIST_ORDER_LEN,
1362 .mode = 0644, 1209 .mode = 0644,
1363 .proc_handler = &numa_zonelist_order_handler, 1210 .proc_handler = numa_zonelist_order_handler,
1364 .strategy = &sysctl_string,
1365 }, 1211 },
1366#endif 1212#endif
1367#if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ 1213#if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \
1368 (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) 1214 (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL))
1369 { 1215 {
1370 .ctl_name = VM_VDSO_ENABLED,
1371 .procname = "vdso_enabled", 1216 .procname = "vdso_enabled",
1372 .data = &vdso_enabled, 1217 .data = &vdso_enabled,
1373 .maxlen = sizeof(vdso_enabled), 1218 .maxlen = sizeof(vdso_enabled),
1374 .mode = 0644, 1219 .mode = 0644,
1375 .proc_handler = &proc_dointvec, 1220 .proc_handler = proc_dointvec,
1376 .strategy = &sysctl_intvec,
1377 .extra1 = &zero, 1221 .extra1 = &zero,
1378 }, 1222 },
1379#endif 1223#endif
1380#ifdef CONFIG_HIGHMEM 1224#ifdef CONFIG_HIGHMEM
1381 { 1225 {
1382 .ctl_name = CTL_UNNUMBERED,
1383 .procname = "highmem_is_dirtyable", 1226 .procname = "highmem_is_dirtyable",
1384 .data = &vm_highmem_is_dirtyable, 1227 .data = &vm_highmem_is_dirtyable,
1385 .maxlen = sizeof(vm_highmem_is_dirtyable), 1228 .maxlen = sizeof(vm_highmem_is_dirtyable),
1386 .mode = 0644, 1229 .mode = 0644,
1387 .proc_handler = &proc_dointvec_minmax, 1230 .proc_handler = proc_dointvec_minmax,
1388 .strategy = &sysctl_intvec,
1389 .extra1 = &zero, 1231 .extra1 = &zero,
1390 .extra2 = &one, 1232 .extra2 = &one,
1391 }, 1233 },
1392#endif 1234#endif
1393 { 1235 {
1394 .ctl_name = CTL_UNNUMBERED,
1395 .procname = "scan_unevictable_pages", 1236 .procname = "scan_unevictable_pages",
1396 .data = &scan_unevictable_pages, 1237 .data = &scan_unevictable_pages,
1397 .maxlen = sizeof(scan_unevictable_pages), 1238 .maxlen = sizeof(scan_unevictable_pages),
1398 .mode = 0644, 1239 .mode = 0644,
1399 .proc_handler = &scan_unevictable_handler, 1240 .proc_handler = scan_unevictable_handler,
1400 }, 1241 },
1401#ifdef CONFIG_MEMORY_FAILURE 1242#ifdef CONFIG_MEMORY_FAILURE
1402 { 1243 {
1403 .ctl_name = CTL_UNNUMBERED,
1404 .procname = "memory_failure_early_kill", 1244 .procname = "memory_failure_early_kill",
1405 .data = &sysctl_memory_failure_early_kill, 1245 .data = &sysctl_memory_failure_early_kill,
1406 .maxlen = sizeof(sysctl_memory_failure_early_kill), 1246 .maxlen = sizeof(sysctl_memory_failure_early_kill),
1407 .mode = 0644, 1247 .mode = 0644,
1408 .proc_handler = &proc_dointvec_minmax, 1248 .proc_handler = proc_dointvec_minmax,
1409 .strategy = &sysctl_intvec,
1410 .extra1 = &zero, 1249 .extra1 = &zero,
1411 .extra2 = &one, 1250 .extra2 = &one,
1412 }, 1251 },
1413 { 1252 {
1414 .ctl_name = CTL_UNNUMBERED,
1415 .procname = "memory_failure_recovery", 1253 .procname = "memory_failure_recovery",
1416 .data = &sysctl_memory_failure_recovery, 1254 .data = &sysctl_memory_failure_recovery,
1417 .maxlen = sizeof(sysctl_memory_failure_recovery), 1255 .maxlen = sizeof(sysctl_memory_failure_recovery),
1418 .mode = 0644, 1256 .mode = 0644,
1419 .proc_handler = &proc_dointvec_minmax, 1257 .proc_handler = proc_dointvec_minmax,
1420 .strategy = &sysctl_intvec,
1421 .extra1 = &zero, 1258 .extra1 = &zero,
1422 .extra2 = &one, 1259 .extra2 = &one,
1423 }, 1260 },
@@ -1427,116 +1264,104 @@ static struct ctl_table vm_table[] = {
1427 * NOTE: do not add new entries to this table unless you have read 1264 * NOTE: do not add new entries to this table unless you have read
1428 * Documentation/sysctl/ctl_unnumbered.txt 1265 * Documentation/sysctl/ctl_unnumbered.txt
1429 */ 1266 */
1430 { .ctl_name = 0 } 1267 { }
1431}; 1268};
1432 1269
1433#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) 1270#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
1434static struct ctl_table binfmt_misc_table[] = { 1271static struct ctl_table binfmt_misc_table[] = {
1435 { .ctl_name = 0 } 1272 { }
1436}; 1273};
1437#endif 1274#endif
1438 1275
1439static struct ctl_table fs_table[] = { 1276static struct ctl_table fs_table[] = {
1440 { 1277 {
1441 .ctl_name = FS_NRINODE,
1442 .procname = "inode-nr", 1278 .procname = "inode-nr",
1443 .data = &inodes_stat, 1279 .data = &inodes_stat,
1444 .maxlen = 2*sizeof(int), 1280 .maxlen = 2*sizeof(int),
1445 .mode = 0444, 1281 .mode = 0444,
1446 .proc_handler = &proc_dointvec, 1282 .proc_handler = proc_dointvec,
1447 }, 1283 },
1448 { 1284 {
1449 .ctl_name = FS_STATINODE,
1450 .procname = "inode-state", 1285 .procname = "inode-state",
1451 .data = &inodes_stat, 1286 .data = &inodes_stat,
1452 .maxlen = 7*sizeof(int), 1287 .maxlen = 7*sizeof(int),
1453 .mode = 0444, 1288 .mode = 0444,
1454 .proc_handler = &proc_dointvec, 1289 .proc_handler = proc_dointvec,
1455 }, 1290 },
1456 { 1291 {
1457 .procname = "file-nr", 1292 .procname = "file-nr",
1458 .data = &files_stat, 1293 .data = &files_stat,
1459 .maxlen = 3*sizeof(int), 1294 .maxlen = 3*sizeof(int),
1460 .mode = 0444, 1295 .mode = 0444,
1461 .proc_handler = &proc_nr_files, 1296 .proc_handler = proc_nr_files,
1462 }, 1297 },
1463 { 1298 {
1464 .ctl_name = FS_MAXFILE,
1465 .procname = "file-max", 1299 .procname = "file-max",
1466 .data = &files_stat.max_files, 1300 .data = &files_stat.max_files,
1467 .maxlen = sizeof(int), 1301 .maxlen = sizeof(int),
1468 .mode = 0644, 1302 .mode = 0644,
1469 .proc_handler = &proc_dointvec, 1303 .proc_handler = proc_dointvec,
1470 }, 1304 },
1471 { 1305 {
1472 .ctl_name = CTL_UNNUMBERED,
1473 .procname = "nr_open", 1306 .procname = "nr_open",
1474 .data = &sysctl_nr_open, 1307 .data = &sysctl_nr_open,
1475 .maxlen = sizeof(int), 1308 .maxlen = sizeof(int),
1476 .mode = 0644, 1309 .mode = 0644,
1477 .proc_handler = &proc_dointvec_minmax, 1310 .proc_handler = proc_dointvec_minmax,
1478 .extra1 = &sysctl_nr_open_min, 1311 .extra1 = &sysctl_nr_open_min,
1479 .extra2 = &sysctl_nr_open_max, 1312 .extra2 = &sysctl_nr_open_max,
1480 }, 1313 },
1481 { 1314 {
1482 .ctl_name = FS_DENTRY,
1483 .procname = "dentry-state", 1315 .procname = "dentry-state",
1484 .data = &dentry_stat, 1316 .data = &dentry_stat,
1485 .maxlen = 6*sizeof(int), 1317 .maxlen = 6*sizeof(int),
1486 .mode = 0444, 1318 .mode = 0444,
1487 .proc_handler = &proc_dointvec, 1319 .proc_handler = proc_dointvec,
1488 }, 1320 },
1489 { 1321 {
1490 .ctl_name = FS_OVERFLOWUID,
1491 .procname = "overflowuid", 1322 .procname = "overflowuid",
1492 .data = &fs_overflowuid, 1323 .data = &fs_overflowuid,
1493 .maxlen = sizeof(int), 1324 .maxlen = sizeof(int),
1494 .mode = 0644, 1325 .mode = 0644,
1495 .proc_handler = &proc_dointvec_minmax, 1326 .proc_handler = proc_dointvec_minmax,
1496 .strategy = &sysctl_intvec,
1497 .extra1 = &minolduid, 1327 .extra1 = &minolduid,
1498 .extra2 = &maxolduid, 1328 .extra2 = &maxolduid,
1499 }, 1329 },
1500 { 1330 {
1501 .ctl_name = FS_OVERFLOWGID,
1502 .procname = "overflowgid", 1331 .procname = "overflowgid",
1503 .data = &fs_overflowgid, 1332 .data = &fs_overflowgid,
1504 .maxlen = sizeof(int), 1333 .maxlen = sizeof(int),
1505 .mode = 0644, 1334 .mode = 0644,
1506 .proc_handler = &proc_dointvec_minmax, 1335 .proc_handler = proc_dointvec_minmax,
1507 .strategy = &sysctl_intvec,
1508 .extra1 = &minolduid, 1336 .extra1 = &minolduid,
1509 .extra2 = &maxolduid, 1337 .extra2 = &maxolduid,
1510 }, 1338 },
1511#ifdef CONFIG_FILE_LOCKING 1339#ifdef CONFIG_FILE_LOCKING
1512 { 1340 {
1513 .ctl_name = FS_LEASES,
1514 .procname = "leases-enable", 1341 .procname = "leases-enable",
1515 .data = &leases_enable, 1342 .data = &leases_enable,
1516 .maxlen = sizeof(int), 1343 .maxlen = sizeof(int),
1517 .mode = 0644, 1344 .mode = 0644,
1518 .proc_handler = &proc_dointvec, 1345 .proc_handler = proc_dointvec,
1519 }, 1346 },
1520#endif 1347#endif
1521#ifdef CONFIG_DNOTIFY 1348#ifdef CONFIG_DNOTIFY
1522 { 1349 {
1523 .ctl_name = FS_DIR_NOTIFY,
1524 .procname = "dir-notify-enable", 1350 .procname = "dir-notify-enable",
1525 .data = &dir_notify_enable, 1351 .data = &dir_notify_enable,
1526 .maxlen = sizeof(int), 1352 .maxlen = sizeof(int),
1527 .mode = 0644, 1353 .mode = 0644,
1528 .proc_handler = &proc_dointvec, 1354 .proc_handler = proc_dointvec,
1529 }, 1355 },
1530#endif 1356#endif
1531#ifdef CONFIG_MMU 1357#ifdef CONFIG_MMU
1532#ifdef CONFIG_FILE_LOCKING 1358#ifdef CONFIG_FILE_LOCKING
1533 { 1359 {
1534 .ctl_name = FS_LEASE_TIME,
1535 .procname = "lease-break-time", 1360 .procname = "lease-break-time",
1536 .data = &lease_break_time, 1361 .data = &lease_break_time,
1537 .maxlen = sizeof(int), 1362 .maxlen = sizeof(int),
1538 .mode = 0644, 1363 .mode = 0644,
1539 .proc_handler = &proc_dointvec, 1364 .proc_handler = proc_dointvec,
1540 }, 1365 },
1541#endif 1366#endif
1542#ifdef CONFIG_AIO 1367#ifdef CONFIG_AIO
@@ -1545,19 +1370,18 @@ static struct ctl_table fs_table[] = {
1545 .data = &aio_nr, 1370 .data = &aio_nr,
1546 .maxlen = sizeof(aio_nr), 1371 .maxlen = sizeof(aio_nr),
1547 .mode = 0444, 1372 .mode = 0444,
1548 .proc_handler = &proc_doulongvec_minmax, 1373 .proc_handler = proc_doulongvec_minmax,
1549 }, 1374 },
1550 { 1375 {
1551 .procname = "aio-max-nr", 1376 .procname = "aio-max-nr",
1552 .data = &aio_max_nr, 1377 .data = &aio_max_nr,
1553 .maxlen = sizeof(aio_max_nr), 1378 .maxlen = sizeof(aio_max_nr),
1554 .mode = 0644, 1379 .mode = 0644,
1555 .proc_handler = &proc_doulongvec_minmax, 1380 .proc_handler = proc_doulongvec_minmax,
1556 }, 1381 },
1557#endif /* CONFIG_AIO */ 1382#endif /* CONFIG_AIO */
1558#ifdef CONFIG_INOTIFY_USER 1383#ifdef CONFIG_INOTIFY_USER
1559 { 1384 {
1560 .ctl_name = FS_INOTIFY,
1561 .procname = "inotify", 1385 .procname = "inotify",
1562 .mode = 0555, 1386 .mode = 0555,
1563 .child = inotify_table, 1387 .child = inotify_table,
@@ -1572,19 +1396,16 @@ static struct ctl_table fs_table[] = {
1572#endif 1396#endif
1573#endif 1397#endif
1574 { 1398 {
1575 .ctl_name = KERN_SETUID_DUMPABLE,
1576 .procname = "suid_dumpable", 1399 .procname = "suid_dumpable",
1577 .data = &suid_dumpable, 1400 .data = &suid_dumpable,
1578 .maxlen = sizeof(int), 1401 .maxlen = sizeof(int),
1579 .mode = 0644, 1402 .mode = 0644,
1580 .proc_handler = &proc_dointvec_minmax, 1403 .proc_handler = proc_dointvec_minmax,
1581 .strategy = &sysctl_intvec,
1582 .extra1 = &zero, 1404 .extra1 = &zero,
1583 .extra2 = &two, 1405 .extra2 = &two,
1584 }, 1406 },
1585#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) 1407#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
1586 { 1408 {
1587 .ctl_name = CTL_UNNUMBERED,
1588 .procname = "binfmt_misc", 1409 .procname = "binfmt_misc",
1589 .mode = 0555, 1410 .mode = 0555,
1590 .child = binfmt_misc_table, 1411 .child = binfmt_misc_table,
@@ -1594,13 +1415,12 @@ static struct ctl_table fs_table[] = {
1594 * NOTE: do not add new entries to this table unless you have read 1415 * NOTE: do not add new entries to this table unless you have read
1595 * Documentation/sysctl/ctl_unnumbered.txt 1416 * Documentation/sysctl/ctl_unnumbered.txt
1596 */ 1417 */
1597 { .ctl_name = 0 } 1418 { }
1598}; 1419};
1599 1420
1600static struct ctl_table debug_table[] = { 1421static struct ctl_table debug_table[] = {
1601#if defined(CONFIG_X86) || defined(CONFIG_PPC) 1422#if defined(CONFIG_X86) || defined(CONFIG_PPC)
1602 { 1423 {
1603 .ctl_name = CTL_UNNUMBERED,
1604 .procname = "exception-trace", 1424 .procname = "exception-trace",
1605 .data = &show_unhandled_signals, 1425 .data = &show_unhandled_signals,
1606 .maxlen = sizeof(int), 1426 .maxlen = sizeof(int),
@@ -1608,11 +1428,11 @@ static struct ctl_table debug_table[] = {
1608 .proc_handler = proc_dointvec 1428 .proc_handler = proc_dointvec
1609 }, 1429 },
1610#endif 1430#endif
1611 { .ctl_name = 0 } 1431 { }
1612}; 1432};
1613 1433
1614static struct ctl_table dev_table[] = { 1434static struct ctl_table dev_table[] = {
1615 { .ctl_name = 0 } 1435 { }
1616}; 1436};
1617 1437
1618static DEFINE_SPINLOCK(sysctl_lock); 1438static DEFINE_SPINLOCK(sysctl_lock);
@@ -1766,122 +1586,6 @@ void register_sysctl_root(struct ctl_table_root *root)
1766 spin_unlock(&sysctl_lock); 1586 spin_unlock(&sysctl_lock);
1767} 1587}
1768 1588
1769#ifdef CONFIG_SYSCTL_SYSCALL
1770/* Perform the actual read/write of a sysctl table entry. */
1771static int do_sysctl_strategy(struct ctl_table_root *root,
1772 struct ctl_table *table,
1773 void __user *oldval, size_t __user *oldlenp,
1774 void __user *newval, size_t newlen)
1775{
1776 int op = 0, rc;
1777
1778 if (oldval)
1779 op |= MAY_READ;
1780 if (newval)
1781 op |= MAY_WRITE;
1782 if (sysctl_perm(root, table, op))
1783 return -EPERM;
1784
1785 if (table->strategy) {
1786 rc = table->strategy(table, oldval, oldlenp, newval, newlen);
1787 if (rc < 0)
1788 return rc;
1789 if (rc > 0)
1790 return 0;
1791 }
1792
1793 /* If there is no strategy routine, or if the strategy returns
1794 * zero, proceed with automatic r/w */
1795 if (table->data && table->maxlen) {
1796 rc = sysctl_data(table, oldval, oldlenp, newval, newlen);
1797 if (rc < 0)
1798 return rc;
1799 }
1800 return 0;
1801}
1802
1803static int parse_table(int __user *name, int nlen,
1804 void __user *oldval, size_t __user *oldlenp,
1805 void __user *newval, size_t newlen,
1806 struct ctl_table_root *root,
1807 struct ctl_table *table)
1808{
1809 int n;
1810repeat:
1811 if (!nlen)
1812 return -ENOTDIR;
1813 if (get_user(n, name))
1814 return -EFAULT;
1815 for ( ; table->ctl_name || table->procname; table++) {
1816 if (!table->ctl_name)
1817 continue;
1818 if (n == table->ctl_name) {
1819 int error;
1820 if (table->child) {
1821 if (sysctl_perm(root, table, MAY_EXEC))
1822 return -EPERM;
1823 name++;
1824 nlen--;
1825 table = table->child;
1826 goto repeat;
1827 }
1828 error = do_sysctl_strategy(root, table,
1829 oldval, oldlenp,
1830 newval, newlen);
1831 return error;
1832 }
1833 }
1834 return -ENOTDIR;
1835}
1836
1837int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
1838 void __user *newval, size_t newlen)
1839{
1840 struct ctl_table_header *head;
1841 int error = -ENOTDIR;
1842
1843 if (nlen <= 0 || nlen >= CTL_MAXNAME)
1844 return -ENOTDIR;
1845 if (oldval) {
1846 int old_len;
1847 if (!oldlenp || get_user(old_len, oldlenp))
1848 return -EFAULT;
1849 }
1850
1851 for (head = sysctl_head_next(NULL); head;
1852 head = sysctl_head_next(head)) {
1853 error = parse_table(name, nlen, oldval, oldlenp,
1854 newval, newlen,
1855 head->root, head->ctl_table);
1856 if (error != -ENOTDIR) {
1857 sysctl_head_finish(head);
1858 break;
1859 }
1860 }
1861 return error;
1862}
1863
1864SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
1865{
1866 struct __sysctl_args tmp;
1867 int error;
1868
1869 if (copy_from_user(&tmp, args, sizeof(tmp)))
1870 return -EFAULT;
1871
1872 error = deprecated_sysctl_warning(&tmp);
1873 if (error)
1874 goto out;
1875
1876 lock_kernel();
1877 error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp,
1878 tmp.newval, tmp.newlen);
1879 unlock_kernel();
1880out:
1881 return error;
1882}
1883#endif /* CONFIG_SYSCTL_SYSCALL */
1884
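The CONFIG_SYSCTL_SYSCALL block removed above implemented the in-kernel side of the legacy binary sysctl(2) interface, which identifies entries by numeric name vectors rather than /proc/sys paths. A hedged illustration of such a caller (userspace-style; the struct __sysctl_args layout follows include/linux/sysctl.h, everything else here is assumed for the example and is not part of this patch):

        /* Illustration only -- not part of this patch. */
        int name[] = { CTL_KERN, KERN_OSTYPE };   /* numeric path: kernel.ostype */
        char ostype[65];
        size_t len = sizeof(ostype);
        struct __sysctl_args args = {
                .name    = name,
                .nlen    = 2,
                .oldval  = ostype,
                .oldlenp = &len,
        };
        /* syscall(__NR__sysctl, &args) reads the same value that
         * /proc/sys/kernel/ostype exposes as text; after this patch the
         * numeric-to-pathname translation is expected to live in
         * kernel/sysctl_binary.c rather than here. */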
1885/* 1589/*
1886 * sysctl_perm does NOT grant the superuser all rights automatically, because 1590 * sysctl_perm does NOT grant the superuser all rights automatically, because
1887 * some sysctl variables are readonly even to root. 1591 * some sysctl variables are readonly even to root.
@@ -1917,7 +1621,7 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
1917 1621
1918static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) 1622static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
1919{ 1623{
1920 for (; table->ctl_name || table->procname; table++) { 1624 for (; table->procname; table++) {
1921 table->parent = parent; 1625 table->parent = parent;
1922 if (table->child) 1626 if (table->child)
1923 sysctl_set_parent(table, table->child); 1627 sysctl_set_parent(table, table->child);
@@ -1949,11 +1653,11 @@ static struct ctl_table *is_branch_in(struct ctl_table *branch,
1949 return NULL; 1653 return NULL;
1950 1654
1951 /* ... and nothing else */ 1655 /* ... and nothing else */
1952 if (branch[1].procname || branch[1].ctl_name) 1656 if (branch[1].procname)
1953 return NULL; 1657 return NULL;
1954 1658
1955 /* table should contain subdirectory with the same name */ 1659 /* table should contain subdirectory with the same name */
1956 for (p = table; p->procname || p->ctl_name; p++) { 1660 for (p = table; p->procname; p++) {
1957 if (!p->child) 1661 if (!p->child)
1958 continue; 1662 continue;
1959 if (p->procname && strcmp(p->procname, s) == 0) 1663 if (p->procname && strcmp(p->procname, s) == 0)
@@ -1998,9 +1702,6 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
1998 * 1702 *
1999 * The members of the &struct ctl_table structure are used as follows: 1703 * The members of the &struct ctl_table structure are used as follows:
2000 * 1704 *
2001 * ctl_name - This is the numeric sysctl value used by sysctl(2). The number
2002 * must be unique within that level of sysctl
2003 *
2004 * procname - the name of the sysctl file under /proc/sys. Set to %NULL to not 1705 * procname - the name of the sysctl file under /proc/sys. Set to %NULL to not
2005 * enter a sysctl file 1706 * enter a sysctl file
2006 * 1707 *
@@ -2015,8 +1716,6 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
2015 * 1716 *
2016 * proc_handler - the text handler routine (described below) 1717 * proc_handler - the text handler routine (described below)
2017 * 1718 *
2018 * strategy - the strategy routine (described below)
2019 *
2020 * de - for internal use by the sysctl routines 1719 * de - for internal use by the sysctl routines
2021 * 1720 *
2022 * extra1, extra2 - extra pointers usable by the proc handler routines 1721 * extra1, extra2 - extra pointers usable by the proc handler routines
@@ -2029,19 +1728,6 @@ static void try_attach(struct ctl_table_header *p, struct ctl_table_header *q)
2029 * struct enable minimal validation of the values being written to be 1728 * struct enable minimal validation of the values being written to be
2030 * performed, and the mode field allows minimal authentication. 1729 * performed, and the mode field allows minimal authentication.
2031 * 1730 *
2032 * More sophisticated management can be enabled by the provision of a
2033 * strategy routine with the table entry. This will be called before
2034 * any automatic read or write of the data is performed.
2035 *
2036 * The strategy routine may return
2037 *
2038 * < 0 - Error occurred (error is passed to user process)
2039 *
2040 * 0 - OK - proceed with automatic read or write.
2041 *
2042 * > 0 - OK - read or write has been done by the strategy routine, so
2043 * return immediately.
2044 *
2045 * There must be a proc_handler routine for any terminal nodes 1731 * There must be a proc_handler routine for any terminal nodes
2046 * mirrored under /proc/sys (non-terminals are handled by a built-in 1732 * mirrored under /proc/sys (non-terminals are handled by a built-in
2047 * directory handler). Several default handlers are available to 1733 * directory handler). Several default handlers are available to
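To make the documentation change above concrete, a minimal sketch of a terminal table entry in the post-patch style used throughout this diff; example_value and the zero/one bounds are stand-ins for whatever a real table uses and are not part of the patch:

        static int zero, one = 1;
        static int example_value;

        static struct ctl_table example_table[] = {
                {
                        .procname     = "example_value",        /* no .ctl_name field any more */
                        .data         = &example_value,
                        .maxlen       = sizeof(int),
                        .mode         = 0644,
                        .proc_handler = proc_dointvec_minmax,   /* plain function name, no leading & */
                        .extra1       = &zero,                  /* bounds checked by the handler, */
                        .extra2       = &one,                   /* not by a .strategy routine */
                },
                { }     /* empty sentinel replaces { .ctl_name = 0 } */
        };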
@@ -2068,13 +1754,13 @@ struct ctl_table_header *__register_sysctl_paths(
2068 struct ctl_table_set *set; 1754 struct ctl_table_set *set;
2069 1755
2070 /* Count the path components */ 1756 /* Count the path components */
2071 for (npath = 0; path[npath].ctl_name || path[npath].procname; ++npath) 1757 for (npath = 0; path[npath].procname; ++npath)
2072 ; 1758 ;
2073 1759
2074 /* 1760 /*
2075 * For each path component, allocate a 2-element ctl_table array. 1761 * For each path component, allocate a 2-element ctl_table array.
2076 * The first array element will be filled with the sysctl entry 1762 * The first array element will be filled with the sysctl entry
2077 * for this, the second will be the sentinel (ctl_name == 0). 1763 * for this, the second will be the sentinel (procname == 0).
2078 * 1764 *
2079 * We allocate everything in one go so that we don't have to 1765 * We allocate everything in one go so that we don't have to
2080 * worry about freeing additional memory in unregister_sysctl_table. 1766 * worry about freeing additional memory in unregister_sysctl_table.
@@ -2091,7 +1777,6 @@ struct ctl_table_header *__register_sysctl_paths(
2091 for (n = 0; n < npath; ++n, ++path) { 1777 for (n = 0; n < npath; ++n, ++path) {
2092 /* Copy the procname */ 1778 /* Copy the procname */
2093 new->procname = path->procname; 1779 new->procname = path->procname;
2094 new->ctl_name = path->ctl_name;
2095 new->mode = 0555; 1780 new->mode = 0555;
2096 1781
2097 *prevp = new; 1782 *prevp = new;
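For the path-registration side touched above, a sketch of how a caller builds its ctl_path array once .ctl_name is gone (the directory names are hypothetical; register_sysctl_paths itself is exported at the end of this file, and example_table refers to the earlier sketch):

        static struct ctl_path example_path[] = {
                { .procname = "kernel" },
                { .procname = "example" },      /* hypothetical subdirectory */
                { }                             /* NULL procname terminates the path */
        };

        static struct ctl_table_header *example_header;

        /* somewhere in init code: */
        example_header = register_sysctl_paths(example_path, example_table);
        if (!example_header)
                return -ENOMEM;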
@@ -2953,286 +2638,6 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2953 2638
2954#endif /* CONFIG_PROC_FS */ 2639#endif /* CONFIG_PROC_FS */
2955 2640
2956
2957#ifdef CONFIG_SYSCTL_SYSCALL
2958/*
2959 * General sysctl support routines
2960 */
2961
2962/* The generic sysctl data routine (used if no strategy routine supplied) */
2963int sysctl_data(struct ctl_table *table,
2964 void __user *oldval, size_t __user *oldlenp,
2965 void __user *newval, size_t newlen)
2966{
2967 size_t len;
2968
2969 /* Get out of I don't have a variable */
2970 if (!table->data || !table->maxlen)
2971 return -ENOTDIR;
2972
2973 if (oldval && oldlenp) {
2974 if (get_user(len, oldlenp))
2975 return -EFAULT;
2976 if (len) {
2977 if (len > table->maxlen)
2978 len = table->maxlen;
2979 if (copy_to_user(oldval, table->data, len))
2980 return -EFAULT;
2981 if (put_user(len, oldlenp))
2982 return -EFAULT;
2983 }
2984 }
2985
2986 if (newval && newlen) {
2987 if (newlen > table->maxlen)
2988 newlen = table->maxlen;
2989
2990 if (copy_from_user(table->data, newval, newlen))
2991 return -EFAULT;
2992 }
2993 return 1;
2994}
2995
2996/* The generic string strategy routine: */
2997int sysctl_string(struct ctl_table *table,
2998 void __user *oldval, size_t __user *oldlenp,
2999 void __user *newval, size_t newlen)
3000{
3001 if (!table->data || !table->maxlen)
3002 return -ENOTDIR;
3003
3004 if (oldval && oldlenp) {
3005 size_t bufsize;
3006 if (get_user(bufsize, oldlenp))
3007 return -EFAULT;
3008 if (bufsize) {
3009 size_t len = strlen(table->data), copied;
3010
3011 /* This shouldn't trigger for a well-formed sysctl */
3012 if (len > table->maxlen)
3013 len = table->maxlen;
3014
3015 /* Copy up to a max of bufsize-1 bytes of the string */
3016 copied = (len >= bufsize) ? bufsize - 1 : len;
3017
3018 if (copy_to_user(oldval, table->data, copied) ||
3019 put_user(0, (char __user *)(oldval + copied)))
3020 return -EFAULT;
3021 if (put_user(len, oldlenp))
3022 return -EFAULT;
3023 }
3024 }
3025 if (newval && newlen) {
3026 size_t len = newlen;
3027 if (len > table->maxlen)
3028 len = table->maxlen;
3029 if(copy_from_user(table->data, newval, len))
3030 return -EFAULT;
3031 if (len == table->maxlen)
3032 len--;
3033 ((char *) table->data)[len] = 0;
3034 }
3035 return 1;
3036}
3037
3038/*
3039 * This function makes sure that all of the integers in the vector
3040 * are between the minimum and maximum values given in the arrays
3041 * table->extra1 and table->extra2, respectively.
3042 */
3043int sysctl_intvec(struct ctl_table *table,
3044 void __user *oldval, size_t __user *oldlenp,
3045 void __user *newval, size_t newlen)
3046{
3047
3048 if (newval && newlen) {
3049 int __user *vec = (int __user *) newval;
3050 int *min = (int *) table->extra1;
3051 int *max = (int *) table->extra2;
3052 size_t length;
3053 int i;
3054
3055 if (newlen % sizeof(int) != 0)
3056 return -EINVAL;
3057
3058 if (!table->extra1 && !table->extra2)
3059 return 0;
3060
3061 if (newlen > table->maxlen)
3062 newlen = table->maxlen;
3063 length = newlen / sizeof(int);
3064
3065 for (i = 0; i < length; i++) {
3066 int value;
3067 if (get_user(value, vec + i))
3068 return -EFAULT;
3069 if (min && value < min[i])
3070 return -EINVAL;
3071 if (max && value > max[i])
3072 return -EINVAL;
3073 }
3074 }
3075 return 0;
3076}
3077
3078/* Strategy function to convert jiffies to seconds */
3079int sysctl_jiffies(struct ctl_table *table,
3080 void __user *oldval, size_t __user *oldlenp,
3081 void __user *newval, size_t newlen)
3082{
3083 if (oldval && oldlenp) {
3084 size_t olen;
3085
3086 if (get_user(olen, oldlenp))
3087 return -EFAULT;
3088 if (olen) {
3089 int val;
3090
3091 if (olen < sizeof(int))
3092 return -EINVAL;
3093
3094 val = *(int *)(table->data) / HZ;
3095 if (put_user(val, (int __user *)oldval))
3096 return -EFAULT;
3097 if (put_user(sizeof(int), oldlenp))
3098 return -EFAULT;
3099 }
3100 }
3101 if (newval && newlen) {
3102 int new;
3103 if (newlen != sizeof(int))
3104 return -EINVAL;
3105 if (get_user(new, (int __user *)newval))
3106 return -EFAULT;
3107 *(int *)(table->data) = new*HZ;
3108 }
3109 return 1;
3110}
3111
3112/* Strategy function to convert jiffies to seconds */
3113int sysctl_ms_jiffies(struct ctl_table *table,
3114 void __user *oldval, size_t __user *oldlenp,
3115 void __user *newval, size_t newlen)
3116{
3117 if (oldval && oldlenp) {
3118 size_t olen;
3119
3120 if (get_user(olen, oldlenp))
3121 return -EFAULT;
3122 if (olen) {
3123 int val;
3124
3125 if (olen < sizeof(int))
3126 return -EINVAL;
3127
3128 val = jiffies_to_msecs(*(int *)(table->data));
3129 if (put_user(val, (int __user *)oldval))
3130 return -EFAULT;
3131 if (put_user(sizeof(int), oldlenp))
3132 return -EFAULT;
3133 }
3134 }
3135 if (newval && newlen) {
3136 int new;
3137 if (newlen != sizeof(int))
3138 return -EINVAL;
3139 if (get_user(new, (int __user *)newval))
3140 return -EFAULT;
3141 *(int *)(table->data) = msecs_to_jiffies(new);
3142 }
3143 return 1;
3144}
3145
3146
3147
3148#else /* CONFIG_SYSCTL_SYSCALL */
3149
3150
3151SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
3152{
3153 struct __sysctl_args tmp;
3154 int error;
3155
3156 if (copy_from_user(&tmp, args, sizeof(tmp)))
3157 return -EFAULT;
3158
3159 error = deprecated_sysctl_warning(&tmp);
3160
3161 /* If no error reading the parameters then just -ENOSYS ... */
3162 if (!error)
3163 error = -ENOSYS;
3164
3165 return error;
3166}
3167
3168int sysctl_data(struct ctl_table *table,
3169 void __user *oldval, size_t __user *oldlenp,
3170 void __user *newval, size_t newlen)
3171{
3172 return -ENOSYS;
3173}
3174
3175int sysctl_string(struct ctl_table *table,
3176 void __user *oldval, size_t __user *oldlenp,
3177 void __user *newval, size_t newlen)
3178{
3179 return -ENOSYS;
3180}
3181
3182int sysctl_intvec(struct ctl_table *table,
3183 void __user *oldval, size_t __user *oldlenp,
3184 void __user *newval, size_t newlen)
3185{
3186 return -ENOSYS;
3187}
3188
3189int sysctl_jiffies(struct ctl_table *table,
3190 void __user *oldval, size_t __user *oldlenp,
3191 void __user *newval, size_t newlen)
3192{
3193 return -ENOSYS;
3194}
3195
3196int sysctl_ms_jiffies(struct ctl_table *table,
3197 void __user *oldval, size_t __user *oldlenp,
3198 void __user *newval, size_t newlen)
3199{
3200 return -ENOSYS;
3201}
3202
3203#endif /* CONFIG_SYSCTL_SYSCALL */
3204
3205static int deprecated_sysctl_warning(struct __sysctl_args *args)
3206{
3207 static int msg_count;
3208 int name[CTL_MAXNAME];
3209 int i;
3210
3211 /* Check args->nlen. */
3212 if (args->nlen < 0 || args->nlen > CTL_MAXNAME)
3213 return -ENOTDIR;
3214
3215 /* Read in the sysctl name for better debug message logging */
3216 for (i = 0; i < args->nlen; i++)
3217 if (get_user(name[i], args->name + i))
3218 return -EFAULT;
3219
3220 /* Ignore accesses to kernel.version */
3221 if ((args->nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
3222 return 0;
3223
3224 if (msg_count < 5) {
3225 msg_count++;
3226 printk(KERN_INFO
3227 "warning: process `%s' used the deprecated sysctl "
3228 "system call with ", current->comm);
3229 for (i = 0; i < args->nlen; i++)
3230 printk("%d.", name[i]);
3231 printk("\n");
3232 }
3233 return 0;
3234}
3235
3236/* 2641/*
3237 * No sense putting this after each symbol definition, twice, 2642 * No sense putting this after each symbol definition, twice,
3238 * exception granted :-) 2643 * exception granted :-)
@@ -3247,9 +2652,4 @@ EXPORT_SYMBOL(proc_doulongvec_minmax);
3247EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax); 2652EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
3248EXPORT_SYMBOL(register_sysctl_table); 2653EXPORT_SYMBOL(register_sysctl_table);
3249EXPORT_SYMBOL(register_sysctl_paths); 2654EXPORT_SYMBOL(register_sysctl_paths);
3250EXPORT_SYMBOL(sysctl_intvec);
3251EXPORT_SYMBOL(sysctl_jiffies);
3252EXPORT_SYMBOL(sysctl_ms_jiffies);
3253EXPORT_SYMBOL(sysctl_string);
3254EXPORT_SYMBOL(sysctl_data);
3255EXPORT_SYMBOL(unregister_sysctl_table); 2655EXPORT_SYMBOL(unregister_sysctl_table);
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
new file mode 100644
index 000000000000..b75dbf40f573
--- /dev/null
+++ b/kernel/sysctl_binary.c
@@ -0,0 +1,1507 @@
1#include <linux/stat.h>
2#include <linux/sysctl.h>
3#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
4#include <linux/sunrpc/debug.h>
5#include <linux/string.h>
6#include <net/ip_vs.h>
7#include <linux/syscalls.h>
8#include <linux/namei.h>
9#include <linux/mount.h>
10#include <linux/fs.h>
11#include <linux/nsproxy.h>
12#include <linux/pid_namespace.h>
13#include <linux/file.h>
14#include <linux/ctype.h>
15#include <linux/netdevice.h>
16
17#ifdef CONFIG_SYSCTL_SYSCALL
18
19struct bin_table;
20typedef ssize_t bin_convert_t(struct file *file,
21 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen);
22
23static bin_convert_t bin_dir;
24static bin_convert_t bin_string;
25static bin_convert_t bin_intvec;
26static bin_convert_t bin_ulongvec;
27static bin_convert_t bin_uuid;
28static bin_convert_t bin_dn_node_address;
29
30#define CTL_DIR bin_dir
31#define CTL_STR bin_string
32#define CTL_INT bin_intvec
33#define CTL_ULONG bin_ulongvec
34#define CTL_UUID bin_uuid
35#define CTL_DNADR bin_dn_node_address
36
37#define BUFSZ 256
38
39struct bin_table {
40 bin_convert_t *convert;
41 int ctl_name;
42 const char *procname;
43 const struct bin_table *child;
44};
45
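Before the tables themselves, a hedged sketch of how such a bin_table hierarchy can be walked: a numeric name vector such as { CTL_KERN, KERN_OSTYPE } is matched level by level on ctl_name and accumulates the procname components (here "kernel/ostype"). The helper below is illustrative only, assumes a top-level table links the per-subsystem tables together, and is not the lookup routine this file actually adds:

        static int example_resolve(const struct bin_table *table,
                                   const int *name, int nlen,
                                   char *path, size_t pathlen)
        {
                path[0] = '\0';
                while (nlen) {
                        const struct bin_table *entry;

                        /* sentinel entries have a NULL ->convert */
                        for (entry = table; entry->convert; entry++)
                                if (entry->ctl_name == *name)
                                        break;
                        if (!entry->convert || !entry->procname)
                                return -ENOTDIR;

                        strlcat(path, entry->procname, pathlen);
                        name++;
                        nlen--;

                        if (entry->convert == bin_dir) {
                                /* descend into the subdirectory table */
                                strlcat(path, "/", pathlen);
                                table = entry->child;
                                continue;
                        }
                        /* terminal entry: the name vector must be used up */
                        return nlen ? -ENOTDIR : 0;
                }
                return -ENOTDIR;
        }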
46static const struct bin_table bin_random_table[] = {
47 { CTL_INT, RANDOM_POOLSIZE, "poolsize" },
48 { CTL_INT, RANDOM_ENTROPY_COUNT, "entropy_avail" },
49 { CTL_INT, RANDOM_READ_THRESH, "read_wakeup_threshold" },
50 { CTL_INT, RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
51 { CTL_UUID, RANDOM_BOOT_ID, "boot_id" },
52 { CTL_UUID, RANDOM_UUID, "uuid" },
53 {}
54};
55
56static const struct bin_table bin_pty_table[] = {
57 { CTL_INT, PTY_MAX, "max" },
58 { CTL_INT, PTY_NR, "nr" },
59 {}
60};
61
62static const struct bin_table bin_kern_table[] = {
63 { CTL_STR, KERN_OSTYPE, "ostype" },
64 { CTL_STR, KERN_OSRELEASE, "osrelease" },
65 /* KERN_OSREV not used */
66 { CTL_STR, KERN_VERSION, "version" },
67 /* KERN_SECUREMASK not used */
68 /* KERN_PROF not used */
69 { CTL_STR, KERN_NODENAME, "hostname" },
70 { CTL_STR, KERN_DOMAINNAME, "domainname" },
71
72 { CTL_INT, KERN_PANIC, "panic" },
73 { CTL_INT, KERN_REALROOTDEV, "real-root-dev" },
74
75 { CTL_STR, KERN_SPARC_REBOOT, "reboot-cmd" },
76 { CTL_INT, KERN_CTLALTDEL, "ctrl-alt-del" },
77 { CTL_INT, KERN_PRINTK, "printk" },
78
79 /* KERN_NAMETRANS not used */
80 /* KERN_PPC_HTABRECLAIM not used */
81 /* KERN_PPC_ZEROPAGED not used */
82 { CTL_INT, KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
83
84 { CTL_STR, KERN_MODPROBE, "modprobe" },
85 { CTL_INT, KERN_SG_BIG_BUFF, "sg-big-buff" },
86 { CTL_INT, KERN_ACCT, "acct" },
87 /* KERN_PPC_L2CR "l2cr" no longer used */
88
89 /* KERN_RTSIGNR not used */
90 /* KERN_RTSIGMAX not used */
91
92 { CTL_ULONG, KERN_SHMMAX, "shmmax" },
93 { CTL_INT, KERN_MSGMAX, "msgmax" },
94 { CTL_INT, KERN_MSGMNB, "msgmnb" },
95 /* KERN_MSGPOOL not used*/
96 { CTL_INT, KERN_SYSRQ, "sysrq" },
97 { CTL_INT, KERN_MAX_THREADS, "threads-max" },
98 { CTL_DIR, KERN_RANDOM, "random", bin_random_table },
99 { CTL_ULONG, KERN_SHMALL, "shmall" },
100 { CTL_INT, KERN_MSGMNI, "msgmni" },
101 { CTL_INT, KERN_SEM, "sem" },
102 { CTL_INT, KERN_SPARC_STOP_A, "stop-a" },
103 { CTL_INT, KERN_SHMMNI, "shmmni" },
104
105 { CTL_INT, KERN_OVERFLOWUID, "overflowuid" },
106 { CTL_INT, KERN_OVERFLOWGID, "overflowgid" },
107
108 { CTL_STR, KERN_HOTPLUG, "hotplug", },
109 { CTL_INT, KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
110
111 { CTL_INT, KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
112 { CTL_INT, KERN_CORE_USES_PID, "core_uses_pid" },
113 /* KERN_TAINTED "tainted" no longer used */
114 { CTL_INT, KERN_CADPID, "cad_pid" },
115 { CTL_INT, KERN_PIDMAX, "pid_max" },
116 { CTL_STR, KERN_CORE_PATTERN, "core_pattern" },
117 { CTL_INT, KERN_PANIC_ON_OOPS, "panic_on_oops" },
118 { CTL_INT, KERN_HPPA_PWRSW, "soft-power" },
119 { CTL_INT, KERN_HPPA_UNALIGNED, "unaligned-trap" },
120
121 { CTL_INT, KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
122 { CTL_INT, KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
123
124 { CTL_DIR, KERN_PTY, "pty", bin_pty_table },
125 { CTL_INT, KERN_NGROUPS_MAX, "ngroups_max" },
126 { CTL_INT, KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
127 /* KERN_HZ_TIMER "hz_timer" no longer used */
128 { CTL_INT, KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
129 { CTL_INT, KERN_BOOTLOADER_TYPE, "bootloader_type" },
130 { CTL_INT, KERN_RANDOMIZE, "randomize_va_space" },
131
132 { CTL_INT, KERN_SPIN_RETRY, "spin_retry" },
133 /* KERN_ACPI_VIDEO_FLAGS "acpi_video_flags" no longer used */
134 { CTL_INT, KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
135 { CTL_INT, KERN_COMPAT_LOG, "compat-log" },
136 { CTL_INT, KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
137 { CTL_INT, KERN_NMI_WATCHDOG, "nmi_watchdog" },
138 { CTL_INT, KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
139 {}
140};
141
142static const struct bin_table bin_vm_table[] = {
143 { CTL_INT, VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
144 { CTL_INT, VM_PAGE_CLUSTER, "page-cluster" },
145 { CTL_INT, VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
146 { CTL_INT, VM_DIRTY_RATIO, "dirty_ratio" },
147 /* VM_DIRTY_WB_CS "dirty_writeback_centisecs" no longer used */
148 /* VM_DIRTY_EXPIRE_CS "dirty_expire_centisecs" no longer used */
149 { CTL_INT, VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
150 { CTL_INT, VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
151 /* VM_PAGEBUF unused */
152 /* VM_HUGETLB_PAGES "nr_hugepages" no longer used */
153 { CTL_INT, VM_SWAPPINESS, "swappiness" },
154 { CTL_INT, VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
155 { CTL_INT, VM_MIN_FREE_KBYTES, "min_free_kbytes" },
156 { CTL_INT, VM_MAX_MAP_COUNT, "max_map_count" },
157 { CTL_INT, VM_LAPTOP_MODE, "laptop_mode" },
158 { CTL_INT, VM_BLOCK_DUMP, "block_dump" },
159 { CTL_INT, VM_HUGETLB_GROUP, "hugetlb_shm_group" },
160 { CTL_INT, VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
161 { CTL_INT, VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
162 /* VM_SWAP_TOKEN_TIMEOUT unused */
163 { CTL_INT, VM_DROP_PAGECACHE, "drop_caches" },
164 { CTL_INT, VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
165 { CTL_INT, VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
166 { CTL_INT, VM_MIN_UNMAPPED, "min_unmapped_ratio" },
167 { CTL_INT, VM_PANIC_ON_OOM, "panic_on_oom" },
168 { CTL_INT, VM_VDSO_ENABLED, "vdso_enabled" },
169 { CTL_INT, VM_MIN_SLAB, "min_slab_ratio" },
170
171 {}
172};
173
174static const struct bin_table bin_net_core_table[] = {
175 { CTL_INT, NET_CORE_WMEM_MAX, "wmem_max" },
176 { CTL_INT, NET_CORE_RMEM_MAX, "rmem_max" },
177 { CTL_INT, NET_CORE_WMEM_DEFAULT, "wmem_default" },
178 { CTL_INT, NET_CORE_RMEM_DEFAULT, "rmem_default" },
179 /* NET_CORE_DESTROY_DELAY unused */
180 { CTL_INT, NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
181 /* NET_CORE_FASTROUTE unused */
182 { CTL_INT, NET_CORE_MSG_COST, "message_cost" },
183 { CTL_INT, NET_CORE_MSG_BURST, "message_burst" },
184 { CTL_INT, NET_CORE_OPTMEM_MAX, "optmem_max" },
185 /* NET_CORE_HOT_LIST_LENGTH unused */
186 /* NET_CORE_DIVERT_VERSION unused */
187 /* NET_CORE_NO_CONG_THRESH unused */
188 /* NET_CORE_NO_CONG unused */
189 /* NET_CORE_LO_CONG unused */
190 /* NET_CORE_MOD_CONG unused */
191 { CTL_INT, NET_CORE_DEV_WEIGHT, "dev_weight" },
192 { CTL_INT, NET_CORE_SOMAXCONN, "somaxconn" },
193 { CTL_INT, NET_CORE_BUDGET, "netdev_budget" },
194 { CTL_INT, NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
195 { CTL_INT, NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
196 { CTL_INT, NET_CORE_WARNINGS, "warnings" },
197 {},
198};
199
200static const struct bin_table bin_net_unix_table[] = {
201 /* NET_UNIX_DESTROY_DELAY unused */
202 /* NET_UNIX_DELETE_DELAY unused */
203 { CTL_INT, NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
204 {}
205};
206
207static const struct bin_table bin_net_ipv4_route_table[] = {
208 { CTL_INT, NET_IPV4_ROUTE_FLUSH, "flush" },
209 /* NET_IPV4_ROUTE_MIN_DELAY "min_delay" no longer used */
210 /* NET_IPV4_ROUTE_MAX_DELAY "max_delay" no longer used */
211 { CTL_INT, NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
212 { CTL_INT, NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
213 { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
214 { CTL_INT, NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
215 { CTL_INT, NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
216 { CTL_INT, NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
217 { CTL_INT, NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
218 { CTL_INT, NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
219 { CTL_INT, NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
220 { CTL_INT, NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
221 { CTL_INT, NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
222 { CTL_INT, NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
223 { CTL_INT, NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
224 { CTL_INT, NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
225 { CTL_INT, NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
226 { CTL_INT, NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
227 {}
228};
229
230static const struct bin_table bin_net_ipv4_conf_vars_table[] = {
231 { CTL_INT, NET_IPV4_CONF_FORWARDING, "forwarding" },
232 { CTL_INT, NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
233
234 { CTL_INT, NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
235 { CTL_INT, NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
236 { CTL_INT, NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
237 { CTL_INT, NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
238 { CTL_INT, NET_IPV4_CONF_RP_FILTER, "rp_filter" },
239 { CTL_INT, NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
240 { CTL_INT, NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
241 { CTL_INT, NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
242 { CTL_INT, NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
243 { CTL_INT, NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
244 { CTL_INT, NET_IPV4_CONF_TAG, "tag" },
245 { CTL_INT, NET_IPV4_CONF_ARPFILTER, "arp_filter" },
246 { CTL_INT, NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
247 { CTL_INT, NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
248 { CTL_INT, NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
249 { CTL_INT, NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
250
251 { CTL_INT, NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
252 { CTL_INT, NET_IPV4_CONF_NOPOLICY, "disable_policy" },
253 { CTL_INT, NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
254 { CTL_INT, NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
255 {}
256};
257
258static const struct bin_table bin_net_ipv4_conf_table[] = {
259 { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv4_conf_vars_table },
260 { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv4_conf_vars_table },
261 { CTL_DIR, 0, NULL, bin_net_ipv4_conf_vars_table },
262 {}
263};
264
265static const struct bin_table bin_net_neigh_vars_table[] = {
266 { CTL_INT, NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
267 { CTL_INT, NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
268 { CTL_INT, NET_NEIGH_APP_SOLICIT, "app_solicit" },
269 /* NET_NEIGH_RETRANS_TIME "retrans_time" no longer used */
270 { CTL_INT, NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
271 { CTL_INT, NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
272 { CTL_INT, NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
273 { CTL_INT, NET_NEIGH_UNRES_QLEN, "unres_qlen" },
274 { CTL_INT, NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
275 /* NET_NEIGH_ANYCAST_DELAY "anycast_delay" no longer used */
276 /* NET_NEIGH_PROXY_DELAY "proxy_delay" no longer used */
277 /* NET_NEIGH_LOCKTIME "locktime" no longer used */
278 { CTL_INT, NET_NEIGH_GC_INTERVAL, "gc_interval" },
279 { CTL_INT, NET_NEIGH_GC_THRESH1, "gc_thresh1" },
280 { CTL_INT, NET_NEIGH_GC_THRESH2, "gc_thresh2" },
281 { CTL_INT, NET_NEIGH_GC_THRESH3, "gc_thresh3" },
282 { CTL_INT, NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
283 { CTL_INT, NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
284 {}
285};
286
287static const struct bin_table bin_net_neigh_table[] = {
288 { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_neigh_vars_table },
289 { CTL_DIR, 0, NULL, bin_net_neigh_vars_table },
290 {}
291};
292
293static const struct bin_table bin_net_ipv4_netfilter_table[] = {
294 { CTL_INT, NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
295
296 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "ip_conntrack_tcp_timeout_syn_sent" no longer used */
297 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "ip_conntrack_tcp_timeout_syn_recv" no longer used */
298 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "ip_conntrack_tcp_timeout_established" no longer used */
299 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "ip_conntrack_tcp_timeout_fin_wait" no longer used */
300 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "ip_conntrack_tcp_timeout_close_wait" no longer used */
301 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "ip_conntrack_tcp_timeout_last_ack" no longer used */
302 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "ip_conntrack_tcp_timeout_time_wait" no longer used */
303 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "ip_conntrack_tcp_timeout_close" no longer used */
304
305 /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT "ip_conntrack_udp_timeout" no longer used */
306 /* NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM "ip_conntrack_udp_timeout_stream" no longer used */
307 /* NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT "ip_conntrack_icmp_timeout" no longer used */
308 /* NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT "ip_conntrack_generic_timeout" no longer used */
309
310 { CTL_INT, NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
311 { CTL_INT, NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
312 /* NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "ip_conntrack_tcp_timeout_max_retrans" no longer used */
313 { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
314 { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
315 { CTL_INT, NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
316
317 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "ip_conntrack_sctp_timeout_closed" no longer used */
318 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "ip_conntrack_sctp_timeout_cookie_wait" no longer used */
319 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "ip_conntrack_sctp_timeout_cookie_echoed" no longer used */
320 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "ip_conntrack_sctp_timeout_established" no longer used */
321 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "ip_conntrack_sctp_timeout_shutdown_sent" no longer used */
322 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "ip_conntrack_sctp_timeout_shutdown_recd" no longer used */
323 /* NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "ip_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
324
325 { CTL_INT, NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
326 { CTL_INT, NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
327 {}
328};
329
330static const struct bin_table bin_net_ipv4_table[] = {
331 {CTL_INT, NET_IPV4_FORWARD, "ip_forward" },
332
333 { CTL_DIR, NET_IPV4_CONF, "conf", bin_net_ipv4_conf_table },
334 { CTL_DIR, NET_IPV4_NEIGH, "neigh", bin_net_neigh_table },
335 { CTL_DIR, NET_IPV4_ROUTE, "route", bin_net_ipv4_route_table },
336 /* NET_IPV4_FIB_HASH unused */
337 { CTL_DIR, NET_IPV4_NETFILTER, "netfilter", bin_net_ipv4_netfilter_table },
338
339 { CTL_INT, NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
340 { CTL_INT, NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
341 { CTL_INT, NET_IPV4_TCP_SACK, "tcp_sack" },
342 { CTL_INT, NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
343 { CTL_INT, NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
344 /* NET_IPV4_AUTOCONFIG unused */
345 { CTL_INT, NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
346 { CTL_INT, NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
347 { CTL_INT, NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
348 { CTL_INT, NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
349 { CTL_INT, NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
350 { CTL_INT, NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
351 { CTL_INT, NET_IPV4_DYNADDR, "ip_dynaddr" },
352 { CTL_INT, NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
353 { CTL_INT, NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
354 { CTL_INT, NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
355 { CTL_INT, NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
356 { CTL_INT, NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
357 { CTL_INT, NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
358 { CTL_INT, NET_TCP_SYNCOOKIES, "tcp_syncookies" },
359 { CTL_INT, NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
360 { CTL_INT, NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
361 { CTL_INT, NET_TCP_STDURG, "tcp_stdurg" },
362 { CTL_INT, NET_TCP_RFC1337, "tcp_rfc1337" },
363 { CTL_INT, NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
364 { CTL_INT, NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
365 { CTL_INT, NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
366 { CTL_INT, NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
367 { CTL_INT, NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
368 { CTL_INT, NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
369 { CTL_INT, NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
370 { CTL_INT, NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
371 { CTL_INT, NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
372 { CTL_INT, NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
373 { CTL_INT, NET_TCP_FACK, "tcp_fack" },
374 { CTL_INT, NET_TCP_REORDERING, "tcp_reordering" },
375 { CTL_INT, NET_TCP_ECN, "tcp_ecn" },
376 { CTL_INT, NET_TCP_DSACK, "tcp_dsack" },
377 { CTL_INT, NET_TCP_MEM, "tcp_mem" },
378 { CTL_INT, NET_TCP_WMEM, "tcp_wmem" },
379 { CTL_INT, NET_TCP_RMEM, "tcp_rmem" },
380 { CTL_INT, NET_TCP_APP_WIN, "tcp_app_win" },
381 { CTL_INT, NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
382 { CTL_INT, NET_TCP_TW_REUSE, "tcp_tw_reuse" },
383 { CTL_INT, NET_TCP_FRTO, "tcp_frto" },
384 { CTL_INT, NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
385 { CTL_INT, NET_TCP_LOW_LATENCY, "tcp_low_latency" },
386 { CTL_INT, NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
387 { CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
388 { CTL_INT, NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
389 { CTL_STR, NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
390 { CTL_INT, NET_TCP_ABC, "tcp_abc" },
391 { CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
392 { CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" },
393 { CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
394 { CTL_INT, NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
395 { CTL_INT, NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
396 { CTL_INT, NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
397 { CTL_INT, NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
398 { CTL_INT, NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
399 { CTL_INT, NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
400 /* NET_TCP_AVAIL_CONG_CONTROL "tcp_available_congestion_control" no longer used */
401 { CTL_STR, NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
402 { CTL_INT, NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
403
404 { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
405 { CTL_INT, NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
406 { CTL_INT, NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
407 { CTL_INT, NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
408 { CTL_INT, NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
409 { CTL_INT, NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
410
411 { CTL_INT, NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
412 { CTL_INT, NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
413 { CTL_INT, NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
414
415 { CTL_INT, NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
416 /* NET_IPV4_IPFRAG_MAX_DIST "ipfrag_max_dist" no longer used */
417
418 { CTL_INT, 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
419
420 /* NET_TCP_DEFAULT_WIN_SCALE unused */
421 /* NET_TCP_BIC_BETA unused */
422 /* NET_IPV4_TCP_MAX_KA_PROBES unused */
423 /* NET_IPV4_IP_MASQ_DEBUG unused */
424 /* NET_TCP_SYN_TAILDROP unused */
425 /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
426 /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
427 /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
428 /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
429 /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
430 /* NET_IPV4_ALWAYS_DEFRAG unused */
431 {}
432};
433
434static const struct bin_table bin_net_ipx_table[] = {
435 { CTL_INT, NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
436 /* NET_IPX_FORWARDING unused */
437 {}
438};
439
440static const struct bin_table bin_net_atalk_table[] = {
441 { CTL_INT, NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
442 { CTL_INT, NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
443 { CTL_INT, NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
444 { CTL_INT, NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
445 {},
446};
447
448static const struct bin_table bin_net_netrom_table[] = {
449 { CTL_INT, NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
450 { CTL_INT, NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
451 { CTL_INT, NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
452 { CTL_INT, NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
453 { CTL_INT, NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
454 { CTL_INT, NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
455 { CTL_INT, NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
456 { CTL_INT, NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
457 { CTL_INT, NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
458 { CTL_INT, NET_NETROM_ROUTING_CONTROL, "routing_control" },
459 { CTL_INT, NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
460 { CTL_INT, NET_NETROM_RESET, "reset" },
461 {}
462};
463
464static const struct bin_table bin_net_ax25_param_table[] = {
465 { CTL_INT, NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
466 { CTL_INT, NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
467 { CTL_INT, NET_AX25_BACKOFF_TYPE, "backoff_type" },
468 { CTL_INT, NET_AX25_CONNECT_MODE, "connect_mode" },
469 { CTL_INT, NET_AX25_STANDARD_WINDOW, "standard_window_size" },
470 { CTL_INT, NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
471 { CTL_INT, NET_AX25_T1_TIMEOUT, "t1_timeout" },
472 { CTL_INT, NET_AX25_T2_TIMEOUT, "t2_timeout" },
473 { CTL_INT, NET_AX25_T3_TIMEOUT, "t3_timeout" },
474 { CTL_INT, NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
475 { CTL_INT, NET_AX25_N2, "maximum_retry_count" },
476 { CTL_INT, NET_AX25_PACLEN, "maximum_packet_length" },
477 { CTL_INT, NET_AX25_PROTOCOL, "protocol" },
478 { CTL_INT, NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
479 {}
480};
481
482static const struct bin_table bin_net_ax25_table[] = {
483 { CTL_DIR, 0, NULL, bin_net_ax25_param_table },
484 {}
485};
486
487static const struct bin_table bin_net_rose_table[] = {
488 { CTL_INT, NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
489 { CTL_INT, NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
490 { CTL_INT, NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
491 { CTL_INT, NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
492 { CTL_INT, NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
493 { CTL_INT, NET_ROSE_ROUTING_CONTROL, "routing_control" },
494 { CTL_INT, NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
495 { CTL_INT, NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
496 { CTL_INT, NET_ROSE_WINDOW_SIZE, "window_size" },
497 { CTL_INT, NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
498 {}
499};
500
501static const struct bin_table bin_net_ipv6_conf_var_table[] = {
502 { CTL_INT, NET_IPV6_FORWARDING, "forwarding" },
503 { CTL_INT, NET_IPV6_HOP_LIMIT, "hop_limit" },
504 { CTL_INT, NET_IPV6_MTU, "mtu" },
505 { CTL_INT, NET_IPV6_ACCEPT_RA, "accept_ra" },
506 { CTL_INT, NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
507 { CTL_INT, NET_IPV6_AUTOCONF, "autoconf" },
508 { CTL_INT, NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
509 { CTL_INT, NET_IPV6_RTR_SOLICITS, "router_solicitations" },
510 { CTL_INT, NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
511 { CTL_INT, NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
512 { CTL_INT, NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
513 { CTL_INT, NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
514 { CTL_INT, NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
515 { CTL_INT, NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
516 { CTL_INT, NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
517 { CTL_INT, NET_IPV6_MAX_ADDRESSES, "max_addresses" },
518 { CTL_INT, NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
519 { CTL_INT, NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
520 { CTL_INT, NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
521 { CTL_INT, NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
522 { CTL_INT, NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
523 { CTL_INT, NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
524 { CTL_INT, NET_IPV6_PROXY_NDP, "proxy_ndp" },
525 { CTL_INT, NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
526 {}
527};
528
529static const struct bin_table bin_net_ipv6_conf_table[] = {
530 { CTL_DIR, NET_PROTO_CONF_ALL, "all", bin_net_ipv6_conf_var_table },
531 { CTL_DIR, NET_PROTO_CONF_DEFAULT, "default", bin_net_ipv6_conf_var_table },
532 { CTL_DIR, 0, NULL, bin_net_ipv6_conf_var_table },
533 {}
534};
535
536static const struct bin_table bin_net_ipv6_route_table[] = {
537 /* NET_IPV6_ROUTE_FLUSH "flush" no longer used */
538 { CTL_INT, NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
539 { CTL_INT, NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
540 { CTL_INT, NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
541 { CTL_INT, NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
542 { CTL_INT, NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
543 { CTL_INT, NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
544 { CTL_INT, NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
545 { CTL_INT, NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
546 { CTL_INT, NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
547 {}
548};
549
550static const struct bin_table bin_net_ipv6_icmp_table[] = {
551 { CTL_INT, NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
552 {}
553};
554
555static const struct bin_table bin_net_ipv6_table[] = {
556 { CTL_DIR, NET_IPV6_CONF, "conf", bin_net_ipv6_conf_table },
557 { CTL_DIR, NET_IPV6_NEIGH, "neigh", bin_net_neigh_table },
558 { CTL_DIR, NET_IPV6_ROUTE, "route", bin_net_ipv6_route_table },
559 { CTL_DIR, NET_IPV6_ICMP, "icmp", bin_net_ipv6_icmp_table },
560 { CTL_INT, NET_IPV6_BINDV6ONLY, "bindv6only" },
561 { CTL_INT, NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
562 { CTL_INT, NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
563 { CTL_INT, NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
564 { CTL_INT, NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
565 { CTL_INT, NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
566 { CTL_INT, 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
567 {}
568};
569
570static const struct bin_table bin_net_x25_table[] = {
571 { CTL_INT, NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
572 { CTL_INT, NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
573 { CTL_INT, NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
574 { CTL_INT, NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
575 { CTL_INT, NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
576 { CTL_INT, NET_X25_FORWARD, "x25_forward" },
577 {}
578};
579
580static const struct bin_table bin_net_tr_table[] = {
581 { CTL_INT, NET_TR_RIF_TIMEOUT, "rif_timeout" },
582 {}
583};
584
585
586static const struct bin_table bin_net_decnet_conf_vars[] = {
587 { CTL_INT, NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
588 { CTL_INT, NET_DECNET_CONF_DEV_PRIORITY, "priority" },
589 { CTL_INT, NET_DECNET_CONF_DEV_T2, "t2" },
590 { CTL_INT, NET_DECNET_CONF_DEV_T3, "t3" },
591 {}
592};
593
594static const struct bin_table bin_net_decnet_conf[] = {
595 { CTL_DIR, NET_DECNET_CONF_ETHER, "ethernet", bin_net_decnet_conf_vars },
596 { CTL_DIR, NET_DECNET_CONF_GRE, "ipgre", bin_net_decnet_conf_vars },
597 { CTL_DIR, NET_DECNET_CONF_X25, "x25", bin_net_decnet_conf_vars },
598 { CTL_DIR, NET_DECNET_CONF_PPP, "ppp", bin_net_decnet_conf_vars },
599 { CTL_DIR, NET_DECNET_CONF_DDCMP, "ddcmp", bin_net_decnet_conf_vars },
600 { CTL_DIR, NET_DECNET_CONF_LOOPBACK, "loopback", bin_net_decnet_conf_vars },
601 { CTL_DIR, 0, NULL, bin_net_decnet_conf_vars },
602 {}
603};
604
605static const struct bin_table bin_net_decnet_table[] = {
606 { CTL_DIR, NET_DECNET_CONF, "conf", bin_net_decnet_conf },
607 { CTL_DNADR, NET_DECNET_NODE_ADDRESS, "node_address" },
608 { CTL_STR, NET_DECNET_NODE_NAME, "node_name" },
609 { CTL_STR, NET_DECNET_DEFAULT_DEVICE, "default_device" },
610 { CTL_INT, NET_DECNET_TIME_WAIT, "time_wait" },
611 { CTL_INT, NET_DECNET_DN_COUNT, "dn_count" },
612 { CTL_INT, NET_DECNET_DI_COUNT, "di_count" },
613 { CTL_INT, NET_DECNET_DR_COUNT, "dr_count" },
614 { CTL_INT, NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
615 { CTL_INT, NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
616 { CTL_INT, NET_DECNET_MEM, "decnet_mem" },
617 { CTL_INT, NET_DECNET_RMEM, "decnet_rmem" },
618 { CTL_INT, NET_DECNET_WMEM, "decnet_wmem" },
619 { CTL_INT, NET_DECNET_DEBUG_LEVEL, "debug" },
620 {}
621};
622
623static const struct bin_table bin_net_sctp_table[] = {
624 { CTL_INT, NET_SCTP_RTO_INITIAL, "rto_initial" },
625 { CTL_INT, NET_SCTP_RTO_MIN, "rto_min" },
626 { CTL_INT, NET_SCTP_RTO_MAX, "rto_max" },
627 { CTL_INT, NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
628 { CTL_INT, NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
629 { CTL_INT, NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
630 { CTL_INT, NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
631 { CTL_INT, NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
632 { CTL_INT, NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
633 { CTL_INT, NET_SCTP_HB_INTERVAL, "hb_interval" },
634 { CTL_INT, NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
635 { CTL_INT, NET_SCTP_MAX_BURST, "max_burst" },
636 { CTL_INT, NET_SCTP_ADDIP_ENABLE, "addip_enable" },
637 { CTL_INT, NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
638 { CTL_INT, NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
639 { CTL_INT, NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
640 { CTL_INT, NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
641 {}
642};
643
644static const struct bin_table bin_net_llc_llc2_timeout_table[] = {
645 { CTL_INT, NET_LLC2_ACK_TIMEOUT, "ack" },
646 { CTL_INT, NET_LLC2_P_TIMEOUT, "p" },
647 { CTL_INT, NET_LLC2_REJ_TIMEOUT, "rej" },
648 { CTL_INT, NET_LLC2_BUSY_TIMEOUT, "busy" },
649 {}
650};
651
652static const struct bin_table bin_net_llc_station_table[] = {
653 { CTL_INT, NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
654 {}
655};
656
657static const struct bin_table bin_net_llc_llc2_table[] = {
658 { CTL_DIR, NET_LLC2, "timeout", bin_net_llc_llc2_timeout_table },
659 {}
660};
661
662static const struct bin_table bin_net_llc_table[] = {
663 { CTL_DIR, NET_LLC2, "llc2", bin_net_llc_llc2_table },
664 { CTL_DIR, NET_LLC_STATION, "station", bin_net_llc_station_table },
665 {}
666};
667
668static const struct bin_table bin_net_netfilter_table[] = {
669 { CTL_INT, NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
670 /* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT "nf_conntrack_tcp_timeout_syn_sent" no longer used */
671 /* NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV "nf_conntrack_tcp_timeout_syn_recv" no longer used */
672 /* NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED "nf_conntrack_tcp_timeout_established" no longer used */
673 /* NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT "nf_conntrack_tcp_timeout_fin_wait" no longer used */
674 /* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT "nf_conntrack_tcp_timeout_close_wait" no longer used */
675 /* NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK "nf_conntrack_tcp_timeout_last_ack" no longer used */
676 /* NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT "nf_conntrack_tcp_timeout_time_wait" no longer used */
677 /* NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE "nf_conntrack_tcp_timeout_close" no longer used */
678 /* NET_NF_CONNTRACK_UDP_TIMEOUT "nf_conntrack_udp_timeout" no longer used */
679 /* NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM "nf_conntrack_udp_timeout_stream" no longer used */
680 /* NET_NF_CONNTRACK_ICMP_TIMEOUT "nf_conntrack_icmp_timeout" no longer used */
681 /* NET_NF_CONNTRACK_GENERIC_TIMEOUT "nf_conntrack_generic_timeout" no longer used */
682 { CTL_INT, NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
683 { CTL_INT, NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
684 /* NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS "nf_conntrack_tcp_timeout_max_retrans" no longer used */
685 { CTL_INT, NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
686 { CTL_INT, NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
687 { CTL_INT, NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
688 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED "nf_conntrack_sctp_timeout_closed" no longer used */
689 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT "nf_conntrack_sctp_timeout_cookie_wait" no longer used */
690 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED "nf_conntrack_sctp_timeout_cookie_echoed" no longer used */
691 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED "nf_conntrack_sctp_timeout_established" no longer used */
692 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT "nf_conntrack_sctp_timeout_shutdown_sent" no longer used */
693 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD "nf_conntrack_sctp_timeout_shutdown_recd" no longer used */
694 /* NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT "nf_conntrack_sctp_timeout_shutdown_ack_sent" no longer used */
695 { CTL_INT, NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
696 /* NET_NF_CONNTRACK_ICMPV6_TIMEOUT "nf_conntrack_icmpv6_timeout" no longer used */
697 /* NET_NF_CONNTRACK_FRAG6_TIMEOUT "nf_conntrack_frag6_timeout" no longer used */
698 { CTL_INT, NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
699 { CTL_INT, NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
700 { CTL_INT, NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
701
702 {}
703};
704
705static const struct bin_table bin_net_irda_table[] = {
706 { CTL_INT, NET_IRDA_DISCOVERY, "discovery" },
707 { CTL_STR, NET_IRDA_DEVNAME, "devname" },
708 { CTL_INT, NET_IRDA_DEBUG, "debug" },
709 { CTL_INT, NET_IRDA_FAST_POLL, "fast_poll_increase" },
710 { CTL_INT, NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
711 { CTL_INT, NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
712 { CTL_INT, NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
713 { CTL_INT, NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
714 { CTL_INT, NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
715 { CTL_INT, NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
716 { CTL_INT, NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
717 { CTL_INT, NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
718 { CTL_INT, NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
719 { CTL_INT, NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
720 {}
721};
722
723static const struct bin_table bin_net_table[] = {
724 { CTL_DIR, NET_CORE, "core", bin_net_core_table },
725 /* NET_ETHER not used */
726 /* NET_802 not used */
727 { CTL_DIR, NET_UNIX, "unix", bin_net_unix_table },
728 { CTL_DIR, NET_IPV4, "ipv4", bin_net_ipv4_table },
729 { CTL_DIR, NET_IPX, "ipx", bin_net_ipx_table },
730 { CTL_DIR, NET_ATALK, "appletalk", bin_net_atalk_table },
731 { CTL_DIR, NET_NETROM, "netrom", bin_net_netrom_table },
732 { CTL_DIR, NET_AX25, "ax25", bin_net_ax25_table },
733 /* NET_BRIDGE "bridge" no longer used */
734 { CTL_DIR, NET_ROSE, "rose", bin_net_rose_table },
735 { CTL_DIR, NET_IPV6, "ipv6", bin_net_ipv6_table },
736 { CTL_DIR, NET_X25, "x25", bin_net_x25_table },
737 { CTL_DIR, NET_TR, "token-ring", bin_net_tr_table },
738 { CTL_DIR, NET_DECNET, "decnet", bin_net_decnet_table },
739 /* NET_ECONET not used */
740 { CTL_DIR, NET_SCTP, "sctp", bin_net_sctp_table },
741 { CTL_DIR, NET_LLC, "llc", bin_net_llc_table },
742 { CTL_DIR, NET_NETFILTER, "netfilter", bin_net_netfilter_table },
743 /* NET_DCCP "dccp" no longer used */
744 { CTL_DIR, NET_IRDA, "irda", bin_net_irda_table },
745 { CTL_INT, 2089, "nf_conntrack_max" },
746 {}
747};
748
749static const struct bin_table bin_fs_quota_table[] = {
750 { CTL_INT, FS_DQ_LOOKUPS, "lookups" },
751 { CTL_INT, FS_DQ_DROPS, "drops" },
752 { CTL_INT, FS_DQ_READS, "reads" },
753 { CTL_INT, FS_DQ_WRITES, "writes" },
754 { CTL_INT, FS_DQ_CACHE_HITS, "cache_hits" },
755 { CTL_INT, FS_DQ_ALLOCATED, "allocated_dquots" },
756 { CTL_INT, FS_DQ_FREE, "free_dquots" },
757 { CTL_INT, FS_DQ_SYNCS, "syncs" },
758 { CTL_INT, FS_DQ_WARNINGS, "warnings" },
759 {}
760};
761
762static const struct bin_table bin_fs_xfs_table[] = {
763 { CTL_INT, XFS_SGID_INHERIT, "irix_sgid_inherit" },
764 { CTL_INT, XFS_SYMLINK_MODE, "irix_symlink_mode" },
765 { CTL_INT, XFS_PANIC_MASK, "panic_mask" },
766
767 { CTL_INT, XFS_ERRLEVEL, "error_level" },
768 { CTL_INT, XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
769 { CTL_INT, XFS_INHERIT_SYNC, "inherit_sync" },
770 { CTL_INT, XFS_INHERIT_NODUMP, "inherit_nodump" },
771 { CTL_INT, XFS_INHERIT_NOATIME, "inherit_noatime" },
772 { CTL_INT, XFS_BUF_TIMER, "xfsbufd_centisecs" },
773 { CTL_INT, XFS_BUF_AGE, "age_buffer_centisecs" },
774 { CTL_INT, XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
775 { CTL_INT, XFS_ROTORSTEP, "rotorstep" },
776 { CTL_INT, XFS_INHERIT_NODFRG, "inherit_nodefrag" },
777 { CTL_INT, XFS_FILESTREAM_TIMER, "filestream_centisecs" },
778 { CTL_INT, XFS_STATS_CLEAR, "stats_clear" },
779 {}
780};
781
782static const struct bin_table bin_fs_ocfs2_nm_table[] = {
783 { CTL_STR, 1, "hb_ctl_path" },
784 {}
785};
786
787static const struct bin_table bin_fs_ocfs2_table[] = {
788 { CTL_DIR, 1, "nm", bin_fs_ocfs2_nm_table },
789 {}
790};
791
792static const struct bin_table bin_inotify_table[] = {
793 { CTL_INT, INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
794 { CTL_INT, INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
795 { CTL_INT, INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
796 {}
797};
798
799static const struct bin_table bin_fs_table[] = {
800 { CTL_INT, FS_NRINODE, "inode-nr" },
801 { CTL_INT, FS_STATINODE, "inode-state" },
802 /* FS_MAXINODE unused */
803 /* FS_NRDQUOT unused */
804 /* FS_MAXDQUOT unused */
805 /* FS_NRFILE "file-nr" no longer used */
806 { CTL_INT, FS_MAXFILE, "file-max" },
807 { CTL_INT, FS_DENTRY, "dentry-state" },
808 /* FS_NRSUPER unused */
809 /* FS_MAXSUPER unused */
810 { CTL_INT, FS_OVERFLOWUID, "overflowuid" },
811 { CTL_INT, FS_OVERFLOWGID, "overflowgid" },
812 { CTL_INT, FS_LEASES, "leases-enable" },
813 { CTL_INT, FS_DIR_NOTIFY, "dir-notify-enable" },
814 { CTL_INT, FS_LEASE_TIME, "lease-break-time" },
815 { CTL_DIR, FS_DQSTATS, "quota", bin_fs_quota_table },
816 { CTL_DIR, FS_XFS, "xfs", bin_fs_xfs_table },
817 { CTL_ULONG, FS_AIO_NR, "aio-nr" },
818 { CTL_ULONG, FS_AIO_MAX_NR, "aio-max-nr" },
819 { CTL_DIR, FS_INOTIFY, "inotify", bin_inotify_table },
820 { CTL_DIR, FS_OCFS2, "ocfs2", bin_fs_ocfs2_table },
821 { CTL_INT, KERN_SETUID_DUMPABLE, "suid_dumpable" },
822 {}
823};
824
825static const struct bin_table bin_ipmi_table[] = {
826 { CTL_INT, DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
827 {}
828};
829
830static const struct bin_table bin_mac_hid_files[] = {
831 /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
832 /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
833 { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
834 { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
835 { CTL_INT, DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
836 /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
837 {}
838};
839
840static const struct bin_table bin_raid_table[] = {
841 { CTL_INT, DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
842 { CTL_INT, DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
843 {}
844};
845
846static const struct bin_table bin_scsi_table[] = {
847 { CTL_INT, DEV_SCSI_LOGGING_LEVEL, "logging_level" },
848 {}
849};
850
851static const struct bin_table bin_dev_table[] = {
852 /* DEV_CDROM "cdrom" no longer used */
853 /* DEV_HWMON unused */
854 /* DEV_PARPORT "parport" no longer used */
855 { CTL_DIR, DEV_RAID, "raid", bin_raid_table },
856 { CTL_DIR, DEV_MAC_HID, "mac_hid", bin_mac_hid_files },
857 { CTL_DIR, DEV_SCSI, "scsi", bin_scsi_table },
858 { CTL_DIR, DEV_IPMI, "ipmi", bin_ipmi_table },
859 {}
860};
861
862static const struct bin_table bin_bus_isa_table[] = {
863 { CTL_INT, BUS_ISA_MEM_BASE, "membase" },
864 { CTL_INT, BUS_ISA_PORT_BASE, "portbase" },
865 { CTL_INT, BUS_ISA_PORT_SHIFT, "portshift" },
866 {}
867};
868
869static const struct bin_table bin_bus_table[] = {
870 { CTL_DIR, CTL_BUS_ISA, "isa", bin_bus_isa_table },
871 {}
872};
873
874
875static const struct bin_table bin_s390dbf_table[] = {
876 { CTL_INT, 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
877 { CTL_INT, 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
878 {}
879};
880
881static const struct bin_table bin_sunrpc_table[] = {
882 /* CTL_RPCDEBUG "rpc_debug" no longer used */
883 /* CTL_NFSDEBUG "nfs_debug" no longer used */
884 /* CTL_NFSDDEBUG "nfsd_debug" no longer used */
885 /* CTL_NLMDEBUG "nlm_debug" no longer used */
886
887 { CTL_INT, CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
888 { CTL_INT, CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
889 { CTL_INT, CTL_MIN_RESVPORT, "min_resvport" },
890 { CTL_INT, CTL_MAX_RESVPORT, "max_resvport" },
891 {}
892};
893
894static const struct bin_table bin_pm_table[] = {
895 /* frv specific */
896 /* 1 == CTL_PM_SUSPEND "suspend" no longer used */
897 { CTL_INT, 2 /* CTL_PM_CMODE */, "cmode" },
898 { CTL_INT, 3 /* CTL_PM_P0 */, "p0" },
899 { CTL_INT, 4 /* CTL_PM_CM */, "cm" },
900 {}
901};
902
903static const struct bin_table bin_root_table[] = {
904 { CTL_DIR, CTL_KERN, "kernel", bin_kern_table },
905 { CTL_DIR, CTL_VM, "vm", bin_vm_table },
906 { CTL_DIR, CTL_NET, "net", bin_net_table },
907 /* CTL_PROC not used */
908 { CTL_DIR, CTL_FS, "fs", bin_fs_table },
909 /* CTL_DEBUG "debug" no longer used */
910 { CTL_DIR, CTL_DEV, "dev", bin_dev_table },
911 { CTL_DIR, CTL_BUS, "bus", bin_bus_table },
912 { CTL_DIR, CTL_ABI, "abi" },
913 /* CTL_CPU not used */
914 /* CTL_ARLAN "arlan" no longer used */
915 { CTL_DIR, CTL_S390DBF, "s390dbf", bin_s390dbf_table },
916 { CTL_DIR, CTL_SUNRPC, "sunrpc", bin_sunrpc_table },
917 { CTL_DIR, CTL_PM, "pm", bin_pm_table },
918 {}
919};
920
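/*
 * Directories in the binary sysctl tree have no data of their own;
 * reading or writing one simply fails with -ENOTDIR.
 */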
921static ssize_t bin_dir(struct file *file,
922 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
923{
924 return -ENOTDIR;
925}
926
927
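/*
 * Pass a string sysctl through unchanged: read the /proc/sys file into
 * the caller's buffer (replacing a trailing newline with a NUL and not
 * counting it), and write any new value back to the file verbatim.
 */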
928static ssize_t bin_string(struct file *file,
929 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
930{
931 ssize_t result, copied = 0;
932
933 if (oldval && oldlen) {
934 char __user *lastp;
935 loff_t pos = 0;
936 int ch;
937
938 result = vfs_read(file, oldval, oldlen, &pos);
939 if (result < 0)
940 goto out;
941
942 copied = result;
943 lastp = oldval + copied - 1;
944
945 result = -EFAULT;
946 if (get_user(ch, lastp))
947 goto out;
948
949 /* Trim off the trailing newline */
950 if (ch == '\n') {
951 result = -EFAULT;
952 if (put_user('\0', lastp))
953 goto out;
954 copied -= 1;
955 }
956 }
957
958 if (newval && newlen) {
959 loff_t pos = 0;
960
961 result = vfs_write(file, newval, newlen, &pos);
962 if (result < 0)
963 goto out;
964 }
965
966 result = copied;
967out:
968 return result;
969}
970
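/*
 * Convert a vector of integers between the binary ABI and the textual
 * /proc/sys form: reads parse the whitespace-separated decimal values
 * into the caller's array, writes format the caller's array as
 * tab-separated decimals and feed them to the proc handler.
 */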
971static ssize_t bin_intvec(struct file *file,
972 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
973{
974 mm_segment_t old_fs = get_fs();
975 ssize_t copied = 0;
976 char *buffer;
977 ssize_t result;
978
979 result = -ENOMEM;
980 buffer = kmalloc(BUFSZ, GFP_KERNEL);
981 if (!buffer)
982 goto out;
983
984 if (oldval && oldlen) {
985 unsigned __user *vec = oldval;
986 size_t length = oldlen / sizeof(*vec);
987 loff_t pos = 0;
988 char *str, *end;
989 int i;
990
991 set_fs(KERNEL_DS);
992 result = vfs_read(file, buffer, BUFSZ - 1, &pos);
993 set_fs(old_fs);
994 if (result < 0)
995 goto out_kfree;
996
997 str = buffer;
998 end = str + result;
999 *end++ = '\0';
1000 for (i = 0; i < length; i++) {
1001 unsigned long value;
1002
1003 value = simple_strtoul(str, &str, 10);
1004 while (isspace(*str))
1005 str++;
1006
1007 result = -EFAULT;
1008 if (put_user(value, vec + i))
1009 goto out_kfree;
1010
1011 copied += sizeof(*vec);
1012 if (!isdigit(*str))
1013 break;
1014 }
1015 }
1016
1017 if (newval && newlen) {
1018 unsigned __user *vec = newval;
1019 size_t length = newlen / sizeof(*vec);
1020 loff_t pos = 0;
1021 char *str, *end;
1022 int i;
1023
1024 str = buffer;
1025 end = str + BUFSZ;
1026 for (i = 0; i < length; i++) {
1027 unsigned long value;
1028
1029 result = -EFAULT;
1030 if (get_user(value, vec + i))
1031 goto out_kfree;
1032
1033 str += snprintf(str, end - str, "%lu\t", value);
1034 }
1035
1036 set_fs(KERNEL_DS);
1037 result = vfs_write(file, buffer, str - buffer, &pos);
1038 set_fs(old_fs);
1039 if (result < 0)
1040 goto out_kfree;
1041 }
1042 result = copied;
1043out_kfree:
1044 kfree(buffer);
1045out:
1046 return result;
1047}
1048
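/* Identical to bin_intvec(), but for vectors of unsigned long. */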
1049static ssize_t bin_ulongvec(struct file *file,
1050 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
1051{
1052 mm_segment_t old_fs = get_fs();
1053 ssize_t copied = 0;
1054 char *buffer;
1055 ssize_t result;
1056
1057 result = -ENOMEM;
1058 buffer = kmalloc(BUFSZ, GFP_KERNEL);
1059 if (!buffer)
1060 goto out;
1061
1062 if (oldval && oldlen) {
1063 unsigned long __user *vec = oldval;
1064 size_t length = oldlen / sizeof(*vec);
1065 loff_t pos = 0;
1066 char *str, *end;
1067 int i;
1068
1069 set_fs(KERNEL_DS);
1070 result = vfs_read(file, buffer, BUFSZ - 1, &pos);
1071 set_fs(old_fs);
1072 if (result < 0)
1073 goto out_kfree;
1074
1075 str = buffer;
1076 end = str + result;
1077 *end++ = '\0';
1078 for (i = 0; i < length; i++) {
1079 unsigned long value;
1080
1081 value = simple_strtoul(str, &str, 10);
1082 while (isspace(*str))
1083 str++;
1084
1085 result = -EFAULT;
1086 if (put_user(value, vec + i))
1087 goto out_kfree;
1088
1089 copied += sizeof(*vec);
1090 if (!isdigit(*str))
1091 break;
1092 }
1093 }
1094
1095 if (newval && newlen) {
1096 unsigned long __user *vec = newval;
1097 size_t length = newlen / sizeof(*vec);
1098 loff_t pos = 0;
1099 char *str, *end;
1100 int i;
1101
1102 str = buffer;
1103 end = str + BUFSZ;
1104 for (i = 0; i < length; i++) {
1105 unsigned long value;
1106
1107 result = -EFAULT;
1108 if (get_user(value, vec + i))
1109 goto out_kfree;
1110
1111 str += snprintf(str, end - str, "%lu\t", value);
1112 }
1113
1114 set_fs(KERNEL_DS);
1115 result = vfs_write(file, buffer, str - buffer, &pos);
1116 set_fs(old_fs);
1117 if (result < 0)
1118 goto out_kfree;
1119 }
1120 result = copied;
1121out_kfree:
1122 kfree(buffer);
1123out:
1124 return result;
1125}
1126
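/* Numeric value of one hex digit; callers check isxdigit() first. */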
1127static unsigned hex_value(int ch)
1128{
1129 return isdigit(ch) ? ch - '0' : ((ch | 0x20) - 'a') + 10;
1130}
1131
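/*
 * Read-only conversion: parse the textual uuid exported through /proc
 * into its 16 raw bytes and copy them to the caller's buffer.
 */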
1132static ssize_t bin_uuid(struct file *file,
1133 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
1134{
1135 mm_segment_t old_fs = get_fs();
1136 ssize_t result, copied = 0;
1137
1138 /* Only supports reads */
1139 if (oldval && oldlen) {
1140 loff_t pos = 0;
1141 char buf[40], *str = buf;
1142 unsigned char uuid[16];
1143 int i;
1144
1145 set_fs(KERNEL_DS);
1146 result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
1147 set_fs(old_fs);
1148 if (result < 0)
1149 goto out;
1150
1151 buf[result] = '\0';
1152
1153 /* Convert the uuid from a string to binary */
1154 for (i = 0; i < 16; i++) {
1155 result = -EIO;
1156 if (!isxdigit(str[0]) || !isxdigit(str[1]))
1157 goto out;
1158
1159 uuid[i] = (hex_value(str[0]) << 4) | hex_value(str[1]);
1160 str += 2;
1161 if (*str == '-')
1162 str++;
1163 }
1164
1165 if (oldlen > 16)
1166 oldlen = 16;
1167
1168 result = -EFAULT;
1169 if (copy_to_user(oldval, uuid, oldlen))
1170 goto out;
1171
1172 copied = oldlen;
1173 }
1174 result = copied;
1175out:
1176 return result;
1177}
1178
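/*
 * Translate a DECnet node address between its textual "area.node" form
 * in /proc and the packed little-endian value (area << 10 | node) used
 * by the binary interface, for both reads and writes.
 */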
1179static ssize_t bin_dn_node_address(struct file *file,
1180 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
1181{
1182 mm_segment_t old_fs = get_fs();
1183 ssize_t result, copied = 0;
1184
1185 if (oldval && oldlen) {
1186 loff_t pos = 0;
1187 char buf[15], *nodep;
1188 unsigned long area, node;
1189 __le16 dnaddr;
1190
1191 set_fs(KERNEL_DS);
1192 result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
1193 set_fs(old_fs);
1194 if (result < 0)
1195 goto out;
1196
1197 buf[result] = '\0';
1198
1199 /* Convert the decnet address to binary */
1200 result = -EIO;
1201 nodep = strchr(buf, '.');
1202 if (!nodep)
1203 goto out;
 nodep++;
1204
1205 area = simple_strtoul(buf, NULL, 10);
1206 node = simple_strtoul(nodep, NULL, 10);
1207
1208 result = -EIO;
1209 if ((area > 63)||(node > 1023))
1210 goto out;
1211
1212 dnaddr = cpu_to_le16((area << 10) | node);
1213
1214 result = -EFAULT;
1215 if (put_user(dnaddr, (__le16 __user *)oldval))
1216 goto out;
1217
1218 copied = sizeof(dnaddr);
1219 }
1220
1221 if (newval && newlen) {
1222 loff_t pos = 0;
1223 __le16 dnaddr;
1224 char buf[15];
1225 int len;
1226
1227 result = -EINVAL;
1228 if (newlen != sizeof(dnaddr))
1229 goto out;
1230
1231 result = -EFAULT;
1232 if (get_user(dnaddr, (__le16 __user *)newval))
1233 goto out;
1234
1235 len = snprintf(buf, sizeof(buf), "%hu.%hu",
1236 le16_to_cpu(dnaddr) >> 10,
1237 le16_to_cpu(dnaddr) & 0x3ff);
1238
1239 set_fs(KERNEL_DS);
1240 result = vfs_write(file, buf, len, &pos);
1241 set_fs(old_fs);
1242 if (result < 0)
1243 goto out;
1244 }
1245
1246 result = copied;
1247out:
1248 return result;
1249}
1250
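/*
 * Walk the bin_table tree, translating the numeric name vector from the
 * old sysctl(2) interface into the matching /proc/sys pathname.  Entries
 * with a zero ctl_name are wildcards that map an interface index to its
 * network device name.
 */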
1251static const struct bin_table *get_sysctl(const int *name, int nlen, char *path)
1252{
1253 const struct bin_table *table = &bin_root_table[0];
1254 int ctl_name;
1255
1256 /* The binary sysctl tables have a small maximum depth so
1257 * there is no danger of overflowing our path, as it is PATH_MAX
1258 * bytes long.
1259 */
1260 memcpy(path, "sys/", 4);
1261 path += 4;
1262
1263repeat:
1264 if (!nlen)
1265 return ERR_PTR(-ENOTDIR);
1266 ctl_name = *name;
1267 name++;
1268 nlen--;
1269 for ( ; table->convert; table++) {
1270 int len = 0;
1271
1272 /*
1273 * For a wild card entry map from ifindex to network
1274 * device name.
1275 */
1276 if (!table->ctl_name) {
1277#ifdef CONFIG_NET
1278 struct net *net = current->nsproxy->net_ns;
1279 struct net_device *dev;
1280 dev = dev_get_by_index(net, ctl_name);
1281 if (dev) {
1282 len = strlen(dev->name);
1283 memcpy(path, dev->name, len);
1284 dev_put(dev);
1285 }
1286#endif
1287 /* Use the well known sysctl number to proc name mapping */
1288 } else if (ctl_name == table->ctl_name) {
1289 len = strlen(table->procname);
1290 memcpy(path, table->procname, len);
1291 }
1292 if (len) {
1293 path += len;
1294 if (table->child) {
1295 *path++ = '/';
1296 table = table->child;
1297 goto repeat;
1298 }
1299 *path = '\0';
1300 return table;
1301 }
1302 }
1303 return ERR_PTR(-ENOTDIR);
1304}
1305
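/*
 * Allocate a pathname buffer and resolve the binary name into it via
 * get_sysctl(); on lookup failure the buffer is freed and the error is
 * propagated instead.
 */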
1306static char *sysctl_getname(const int *name, int nlen, const struct bin_table **tablep)
1307{
1308 char *tmp, *result;
1309
1310 result = ERR_PTR(-ENOMEM);
1311 tmp = __getname();
1312 if (tmp) {
1313 const struct bin_table *table = get_sysctl(name, nlen, tmp);
1314 result = tmp;
1315 *tablep = table;
1316 if (IS_ERR(table)) {
1317 __putname(tmp);
1318 result = ERR_CAST(table);
1319 }
1320 }
1321 return result;
1322}
1323
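/*
 * Core of the emulation: resolve the binary name to a /proc/sys path,
 * open that file in the proc mount of the caller's pid namespace with
 * the access implied by the old/new buffers, and let the matched
 * entry's ->convert handler perform the transfer.
 */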
1324static ssize_t binary_sysctl(const int *name, int nlen,
1325 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
1326{
1327 const struct bin_table *table = NULL;
1328 struct nameidata nd;
1329 struct vfsmount *mnt;
1330 struct file *file;
1331 ssize_t result;
1332 char *pathname;
1333 int flags;
1334 int acc_mode, fmode;
1335
1336 pathname = sysctl_getname(name, nlen, &table);
1337 result = PTR_ERR(pathname);
1338 if (IS_ERR(pathname))
1339 goto out;
1340
1341 /* How should the sysctl be accessed? */
1342 if (oldval && oldlen && newval && newlen) {
1343 flags = O_RDWR;
1344 acc_mode = MAY_READ | MAY_WRITE;
1345 fmode = FMODE_READ | FMODE_WRITE;
1346 } else if (newval && newlen) {
1347 flags = O_WRONLY;
1348 acc_mode = MAY_WRITE;
1349 fmode = FMODE_WRITE;
1350 } else if (oldval && oldlen) {
1351 flags = O_RDONLY;
1352 acc_mode = MAY_READ;
1353 fmode = FMODE_READ;
1354 } else {
1355 result = 0;
1356 goto out_putname;
1357 }
1358
1359 mnt = current->nsproxy->pid_ns->proc_mnt;
1360 result = vfs_path_lookup(mnt->mnt_root, mnt, pathname, 0, &nd);
1361 if (result)
1362 goto out_putname;
1363
1364 result = may_open(&nd.path, acc_mode, fmode);
1365 if (result)
1366 goto out_putpath;
1367
1368 file = dentry_open(nd.path.dentry, nd.path.mnt, flags, current_cred());
1369 result = PTR_ERR(file);
1370 if (IS_ERR(file))
1371 goto out_putname;
1372
1373 result = table->convert(file, oldval, oldlen, newval, newlen);
1374
1375 fput(file);
1376out_putname:
1377 putname(pathname);
1378out:
1379 return result;
1380
1381out_putpath:
1382 path_put(&nd.path);
1383 goto out_putname;
1384}
1385
1386
1387#else /* CONFIG_SYSCTL_SYSCALL */
1388
1389static ssize_t binary_sysctl(const int *name, int nlen,
1390 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
1391{
1392 return -ENOSYS;
1393}
1394
1395#endif /* CONFIG_SYSCTL_SYSCALL */
1396
1397
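/*
 * Rate-limited notice naming the process and the numeric path it passed
 * to the obsolete syscall.
 */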
1398static void deprecated_sysctl_warning(const int *name, int nlen)
1399{
1400 int i;
1401
1402 if (printk_ratelimit()) {
1403 printk(KERN_INFO
1404 "warning: process `%s' used the deprecated sysctl "
1405 "system call with ", current->comm);
1406 for (i = 0; i < nlen; i++)
1407 printk("%d.", name[i]);
1408 printk("\n");
1409 }
1410 return;
1411}
1412
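/*
 * Common entry point for the native and compat syscalls: copy the name
 * vector in from userspace, log the deprecation warning, then hand the
 * request to binary_sysctl().
 */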
1413static ssize_t do_sysctl(int __user *args_name, int nlen,
1414 void __user *oldval, size_t oldlen, void __user *newval, size_t newlen)
1415{
1416 int name[CTL_MAXNAME];
1417 int i;
1418
1419 /* Check args->nlen. */
1420 if (nlen < 0 || nlen > CTL_MAXNAME)
1421 return -ENOTDIR;
1422 /* Read in the sysctl name for simplicity */
1423 for (i = 0; i < nlen; i++)
1424 if (get_user(name[i], args_name + i))
1425 return -EFAULT;
1426
1427 deprecated_sysctl_warning(name, nlen);
1428
1429 return binary_sysctl(name, nlen, oldval, oldlen, newval, newlen);
1430}
1431
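/*
 * The sys_sysctl() entry point: fetch the argument block, run the
 * request through do_sysctl(), and report the number of bytes copied
 * out back through oldlenp.
 */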
1432SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
1433{
1434 struct __sysctl_args tmp;
1435 size_t oldlen = 0;
1436 ssize_t result;
1437
1438 if (copy_from_user(&tmp, args, sizeof(tmp)))
1439 return -EFAULT;
1440
1441 if (tmp.oldval && !tmp.oldlenp)
1442 return -EFAULT;
1443
1444 if (tmp.oldlenp && get_user(oldlen, tmp.oldlenp))
1445 return -EFAULT;
1446
1447 result = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, oldlen,
1448 tmp.newval, tmp.newlen);
1449
1450 if (result >= 0) {
1451 oldlen = result;
1452 result = 0;
1453 }
1454
1455 if (tmp.oldlenp && put_user(oldlen, tmp.oldlenp))
1456 return -EFAULT;
1457
1458 return result;
1459}
1460
1461
1462#ifdef CONFIG_COMPAT
1463#include <asm/compat.h>
1464
1465struct compat_sysctl_args {
1466 compat_uptr_t name;
1467 int nlen;
1468 compat_uptr_t oldval;
1469 compat_uptr_t oldlenp;
1470 compat_uptr_t newval;
1471 compat_size_t newlen;
1472 compat_ulong_t __unused[4];
1473};
1474
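/*
 * 32-bit compat wrapper: convert the compat pointers in the argument
 * block and forward to do_sysctl(), mirroring the native syscall above.
 */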
1475asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args)
1476{
1477 struct compat_sysctl_args tmp;
1478 compat_size_t __user *compat_oldlenp;
1479 size_t oldlen = 0;
1480 ssize_t result;
1481
1482 if (copy_from_user(&tmp, args, sizeof(tmp)))
1483 return -EFAULT;
1484
1485 if (tmp.oldval && !tmp.oldlenp)
1486 return -EFAULT;
1487
1488 compat_oldlenp = compat_ptr(tmp.oldlenp);
1489 if (compat_oldlenp && get_user(oldlen, compat_oldlenp))
1490 return -EFAULT;
1491
1492 result = do_sysctl(compat_ptr(tmp.name), tmp.nlen,
1493 compat_ptr(tmp.oldval), oldlen,
1494 compat_ptr(tmp.newval), tmp.newlen);
1495
1496 if (result >= 0) {
1497 oldlen = result;
1498 result = 0;
1499 }
1500
1501 if (compat_oldlenp && put_user(oldlen, compat_oldlenp))
1502 return -EFAULT;
1503
1504 return result;
1505}
1506
1507#endif /* CONFIG_COMPAT */
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index b38423ca711a..04cdcf72c827 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -5,1239 +5,6 @@
5#include <linux/string.h>
6#include <net/ip_vs.h>
7
8struct trans_ctl_table {
9 int ctl_name;
10 const char *procname;
11 const struct trans_ctl_table *child;
12};
13
14static const struct trans_ctl_table trans_random_table[] = {
15 { RANDOM_POOLSIZE, "poolsize" },
16 { RANDOM_ENTROPY_COUNT, "entropy_avail" },
17 { RANDOM_READ_THRESH, "read_wakeup_threshold" },
18 { RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
19 { RANDOM_BOOT_ID, "boot_id" },
20 { RANDOM_UUID, "uuid" },
21 {}
22};
23
24static const struct trans_ctl_table trans_pty_table[] = {
25 { PTY_MAX, "max" },
26 { PTY_NR, "nr" },
27 {}
28};
29
30static const struct trans_ctl_table trans_kern_table[] = {
31 { KERN_OSTYPE, "ostype" },
32 { KERN_OSRELEASE, "osrelease" },
33 /* KERN_OSREV not used */
34 { KERN_VERSION, "version" },
35 /* KERN_SECUREMASK not used */
36 /* KERN_PROF not used */
37 { KERN_NODENAME, "hostname" },
38 { KERN_DOMAINNAME, "domainname" },
39
40 { KERN_PANIC, "panic" },
41 { KERN_REALROOTDEV, "real-root-dev" },
42
43 { KERN_SPARC_REBOOT, "reboot-cmd" },
44 { KERN_CTLALTDEL, "ctrl-alt-del" },
45 { KERN_PRINTK, "printk" },
46
47 /* KERN_NAMETRANS not used */
48 /* KERN_PPC_HTABRECLAIM not used */
49 /* KERN_PPC_ZEROPAGED not used */
50 { KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
51
52 { KERN_MODPROBE, "modprobe" },
53 { KERN_SG_BIG_BUFF, "sg-big-buff" },
54 { KERN_ACCT, "acct" },
55 { KERN_PPC_L2CR, "l2cr" },
56
57 /* KERN_RTSIGNR not used */
58 /* KERN_RTSIGMAX not used */
59
60 { KERN_SHMMAX, "shmmax" },
61 { KERN_MSGMAX, "msgmax" },
62 { KERN_MSGMNB, "msgmnb" },
63 /* KERN_MSGPOOL not used*/
64 { KERN_SYSRQ, "sysrq" },
65 { KERN_MAX_THREADS, "threads-max" },
66 { KERN_RANDOM, "random", trans_random_table },
67 { KERN_SHMALL, "shmall" },
68 { KERN_MSGMNI, "msgmni" },
69 { KERN_SEM, "sem" },
70 { KERN_SPARC_STOP_A, "stop-a" },
71 { KERN_SHMMNI, "shmmni" },
72
73 { KERN_OVERFLOWUID, "overflowuid" },
74 { KERN_OVERFLOWGID, "overflowgid" },
75
76 { KERN_HOTPLUG, "hotplug", },
77 { KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
78
79 { KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
80 { KERN_CORE_USES_PID, "core_uses_pid" },
81 { KERN_TAINTED, "tainted" },
82 { KERN_CADPID, "cad_pid" },
83 { KERN_PIDMAX, "pid_max" },
84 { KERN_CORE_PATTERN, "core_pattern" },
85 { KERN_PANIC_ON_OOPS, "panic_on_oops" },
86 { KERN_HPPA_PWRSW, "soft-power" },
87 { KERN_HPPA_UNALIGNED, "unaligned-trap" },
88
89 { KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
90 { KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
91
92 { KERN_PTY, "pty", trans_pty_table },
93 { KERN_NGROUPS_MAX, "ngroups_max" },
94 { KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
95 { KERN_HZ_TIMER, "hz_timer" },
96 { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
97 { KERN_BOOTLOADER_TYPE, "bootloader_type" },
98 { KERN_RANDOMIZE, "randomize_va_space" },
99
100 { KERN_SPIN_RETRY, "spin_retry" },
101 { KERN_ACPI_VIDEO_FLAGS, "acpi_video_flags" },
102 { KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
103 { KERN_COMPAT_LOG, "compat-log" },
104 { KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
105 { KERN_NMI_WATCHDOG, "nmi_watchdog" },
106 { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
107 {}
108};
109
110static const struct trans_ctl_table trans_vm_table[] = {
111 { VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
112 { VM_PAGE_CLUSTER, "page-cluster" },
113 { VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
114 { VM_DIRTY_RATIO, "dirty_ratio" },
115 { VM_DIRTY_WB_CS, "dirty_writeback_centisecs" },
116 { VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs" },
117 { VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
118 { VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
119 /* VM_PAGEBUF unused */
120 { VM_HUGETLB_PAGES, "nr_hugepages" },
121 { VM_SWAPPINESS, "swappiness" },
122 { VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
123 { VM_MIN_FREE_KBYTES, "min_free_kbytes" },
124 { VM_MAX_MAP_COUNT, "max_map_count" },
125 { VM_LAPTOP_MODE, "laptop_mode" },
126 { VM_BLOCK_DUMP, "block_dump" },
127 { VM_HUGETLB_GROUP, "hugetlb_shm_group" },
128 { VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
129 { VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
130 /* VM_SWAP_TOKEN_TIMEOUT unused */
131 { VM_DROP_PAGECACHE, "drop_caches" },
132 { VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
133 { VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
134 { VM_MIN_UNMAPPED, "min_unmapped_ratio" },
135 { VM_PANIC_ON_OOM, "panic_on_oom" },
136 { VM_VDSO_ENABLED, "vdso_enabled" },
137 { VM_MIN_SLAB, "min_slab_ratio" },
138
139 {}
140};
141
142static const struct trans_ctl_table trans_net_core_table[] = {
143 { NET_CORE_WMEM_MAX, "wmem_max" },
144 { NET_CORE_RMEM_MAX, "rmem_max" },
145 { NET_CORE_WMEM_DEFAULT, "wmem_default" },
146 { NET_CORE_RMEM_DEFAULT, "rmem_default" },
147 /* NET_CORE_DESTROY_DELAY unused */
148 { NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
149 /* NET_CORE_FASTROUTE unused */
150 { NET_CORE_MSG_COST, "message_cost" },
151 { NET_CORE_MSG_BURST, "message_burst" },
152 { NET_CORE_OPTMEM_MAX, "optmem_max" },
153 /* NET_CORE_HOT_LIST_LENGTH unused */
154 /* NET_CORE_DIVERT_VERSION unused */
155 /* NET_CORE_NO_CONG_THRESH unused */
156 /* NET_CORE_NO_CONG unused */
157 /* NET_CORE_LO_CONG unused */
158 /* NET_CORE_MOD_CONG unused */
159 { NET_CORE_DEV_WEIGHT, "dev_weight" },
160 { NET_CORE_SOMAXCONN, "somaxconn" },
161 { NET_CORE_BUDGET, "netdev_budget" },
162 { NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
163 { NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
164 { NET_CORE_WARNINGS, "warnings" },
165 {},
166};
167
168static const struct trans_ctl_table trans_net_unix_table[] = {
169 /* NET_UNIX_DESTROY_DELAY unused */
170 /* NET_UNIX_DELETE_DELAY unused */
171 { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
172 {}
173};
174
175static const struct trans_ctl_table trans_net_ipv4_route_table[] = {
176 { NET_IPV4_ROUTE_FLUSH, "flush" },
177 { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" },
178 { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" },
179 { NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
180 { NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
181 { NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
182 { NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
183 { NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
184 { NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
185 { NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
186 { NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
187 { NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
188 { NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
189 { NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
190 { NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
191 { NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
192 { NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
193 { NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
194 { NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
195 {}
196};
197
198static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
199 { NET_IPV4_CONF_FORWARDING, "forwarding" },
200 { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
201
202 { NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
203 { NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
204 { NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
205 { NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
206 { NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
207 { NET_IPV4_CONF_RP_FILTER, "rp_filter" },
208 { NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
209 { NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
210 { NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
211 { NET_IPV4_CONF_TAG, "tag" },
212 { NET_IPV4_CONF_ARPFILTER, "arp_filter" },
213 { NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
214 { NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
215 { NET_IPV4_CONF_NOPOLICY, "disable_policy" },
216 { NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
217
218 { NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
222 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
223 {}
224};
225
226static const struct trans_ctl_table trans_net_ipv4_conf_table[] = {
227 { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table },
228 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table },
229 { 0, NULL, trans_net_ipv4_conf_vars_table },
230 {}
231};
232
233static const struct trans_ctl_table trans_net_neigh_vars_table[] = {
234 { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
235 { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
236 { NET_NEIGH_APP_SOLICIT, "app_solicit" },
237 { NET_NEIGH_RETRANS_TIME, "retrans_time" },
238 { NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
239 { NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
240 { NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
241 { NET_NEIGH_UNRES_QLEN, "unres_qlen" },
242 { NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
243 { NET_NEIGH_ANYCAST_DELAY, "anycast_delay" },
244 { NET_NEIGH_PROXY_DELAY, "proxy_delay" },
245 { NET_NEIGH_LOCKTIME, "locktime" },
246 { NET_NEIGH_GC_INTERVAL, "gc_interval" },
247 { NET_NEIGH_GC_THRESH1, "gc_thresh1" },
248 { NET_NEIGH_GC_THRESH2, "gc_thresh2" },
249 { NET_NEIGH_GC_THRESH3, "gc_thresh3" },
250 { NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
251 { NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
252 {}
253};
254
255static const struct trans_ctl_table trans_net_neigh_table[] = {
256 { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table },
257 { 0, NULL, trans_net_neigh_vars_table },
258 {}
259};
260
261static const struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
262 { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
263
264 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" },
265 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv" },
266 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established" },
267 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait" },
268 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait" },
269 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack" },
270 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait" },
271 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close" },
272
273 { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout" },
274 { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream" },
275 { NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout" },
276 { NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, "ip_conntrack_generic_timeout" },
277
278 { NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
279 { NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
280 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "ip_conntrack_tcp_timeout_max_retrans" },
281 { NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
282 { NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
283 { NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
284
285 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "ip_conntrack_sctp_timeout_closed" },
286 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "ip_conntrack_sctp_timeout_cookie_wait" },
287 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "ip_conntrack_sctp_timeout_cookie_echoed" },
288 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "ip_conntrack_sctp_timeout_established" },
289 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "ip_conntrack_sctp_timeout_shutdown_sent" },
290 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "ip_conntrack_sctp_timeout_shutdown_recd" },
291 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "ip_conntrack_sctp_timeout_shutdown_ack_sent" },
292
293 { NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
294 { NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
295 {}
296};
297
298static const struct trans_ctl_table trans_net_ipv4_table[] = {
299 { NET_IPV4_FORWARD, "ip_forward" },
300 { NET_IPV4_DYNADDR, "ip_dynaddr" },
301
302 { NET_IPV4_CONF, "conf", trans_net_ipv4_conf_table },
303 { NET_IPV4_NEIGH, "neigh", trans_net_neigh_table },
304 { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
305 /* NET_IPV4_FIB_HASH unused */
306 { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
307
308 { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
309 { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
310 { NET_IPV4_TCP_SACK, "tcp_sack" },
311 { NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
312 { NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
313 /* NET_IPV4_AUTOCONFIG unused */
314 { NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
315 { NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
316 { NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
317 { NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
318 { NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
319 /* NET_IPV4_TCP_MAX_KA_PROBES unused */
320 { NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
321 { NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
322 { NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
323 { NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
324 { NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
325 /* NET_IPV4_IP_MASQ_DEBUG unused */
326 { NET_TCP_SYNCOOKIES, "tcp_syncookies" },
327 { NET_TCP_STDURG, "tcp_stdurg" },
328 { NET_TCP_RFC1337, "tcp_rfc1337" },
329 /* NET_TCP_SYN_TAILDROP unused */
330 { NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
331 { NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
332 { NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
333 { NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
334 /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
335 /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
336 /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
337 /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
338 /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
339 { NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
340 { NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
341 { NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
342 /* NET_IPV4_ALWAYS_DEFRAG unused */
343 { NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
344 { NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
345 { NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
346 { NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
347 { NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
348 { NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
349 { NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
350 { NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
351 { NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
352 { NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
353 { NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
354 { NET_TCP_FACK, "tcp_fack" },
355 { NET_TCP_REORDERING, "tcp_reordering" },
356 { NET_TCP_ECN, "tcp_ecn" },
357 { NET_TCP_DSACK, "tcp_dsack" },
358 { NET_TCP_MEM, "tcp_mem" },
359 { NET_TCP_WMEM, "tcp_wmem" },
360 { NET_TCP_RMEM, "tcp_rmem" },
361 { NET_TCP_APP_WIN, "tcp_app_win" },
362 { NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
363 { NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
364 { NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
365 { NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
366 { NET_TCP_TW_REUSE, "tcp_tw_reuse" },
367 { NET_TCP_FRTO, "tcp_frto" },
368 { NET_TCP_LOW_LATENCY, "tcp_low_latency" },
369 { NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
370 { NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
371 { NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
372 /* NET_TCP_DEFAULT_WIN_SCALE unused */
373 { NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
374 { NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
375 /* NET_TCP_BIC_BETA unused */
376 { NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
377 { NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
378 { NET_TCP_ABC, "tcp_abc" },
379 { NET_IPV4_IPFRAG_MAX_DIST, "ipfrag_max_dist" },
380 { NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
381 { NET_TCP_BASE_MSS, "tcp_base_mss" },
382 { NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
383 { NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
384 { NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
385 { NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
386 { NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
387 { NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
388 { NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
389 { NET_TCP_AVAIL_CONG_CONTROL, "tcp_available_congestion_control" },
390 { NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
391 { NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
392 { NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
393 { 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
394 {}
395};
396
397static const struct trans_ctl_table trans_net_ipx_table[] = {
398 { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
399 /* NET_IPX_FORWARDING unused */
400 {}
401};
402
403static const struct trans_ctl_table trans_net_atalk_table[] = {
404 { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
405 { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
406 { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
407 { NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
408 {},
409};
410
411static const struct trans_ctl_table trans_net_netrom_table[] = {
412 { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
413 { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
414 { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
415 { NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
416 { NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
417 { NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
418 { NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
419 { NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
420 { NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
421 { NET_NETROM_ROUTING_CONTROL, "routing_control" },
422 { NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
423 { NET_NETROM_RESET, "reset" },
424 {}
425};
426
427static const struct trans_ctl_table trans_net_ax25_param_table[] = {
428 { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
429 { NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
430 { NET_AX25_BACKOFF_TYPE, "backoff_type" },
431 { NET_AX25_CONNECT_MODE, "connect_mode" },
432 { NET_AX25_STANDARD_WINDOW, "standard_window_size" },
433 { NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
434 { NET_AX25_T1_TIMEOUT, "t1_timeout" },
435 { NET_AX25_T2_TIMEOUT, "t2_timeout" },
436 { NET_AX25_T3_TIMEOUT, "t3_timeout" },
437 { NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
438 { NET_AX25_N2, "maximum_retry_count" },
439 { NET_AX25_PACLEN, "maximum_packet_length" },
440 { NET_AX25_PROTOCOL, "protocol" },
441 { NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
442 {}
443};
444
445static const struct trans_ctl_table trans_net_ax25_table[] = {
446 { 0, NULL, trans_net_ax25_param_table },
447 {}
448};
449
450static const struct trans_ctl_table trans_net_bridge_table[] = {
451 { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
452 { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
453 { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" },
454 { NET_BRIDGE_NF_FILTER_VLAN_TAGGED, "bridge-nf-filter-vlan-tagged" },
455 { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, "bridge-nf-filter-pppoe-tagged" },
456 {}
457};
458
459static const struct trans_ctl_table trans_net_rose_table[] = {
460 { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
461 { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
462 { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
463 { NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
464 { NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
465 { NET_ROSE_ROUTING_CONTROL, "routing_control" },
466 { NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
467 { NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
468 { NET_ROSE_WINDOW_SIZE, "window_size" },
469 { NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
470 {}
471};
472
473static const struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
474 { NET_IPV6_FORWARDING, "forwarding" },
475 { NET_IPV6_HOP_LIMIT, "hop_limit" },
476 { NET_IPV6_MTU, "mtu" },
477 { NET_IPV6_ACCEPT_RA, "accept_ra" },
478 { NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
479 { NET_IPV6_AUTOCONF, "autoconf" },
480 { NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
481 { NET_IPV6_RTR_SOLICITS, "router_solicitations" },
482 { NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
483 { NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
484 { NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
485 { NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
486 { NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
487 { NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
488 { NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
489 { NET_IPV6_MAX_ADDRESSES, "max_addresses" },
490 { NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
491 { NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
492 { NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
493 { NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
494 { NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
495 { NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
496 { NET_IPV6_PROXY_NDP, "proxy_ndp" },
497 { NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
498 {}
499};
500
501static const struct trans_ctl_table trans_net_ipv6_conf_table[] = {
502 { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table },
503 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table },
504 { 0, NULL, trans_net_ipv6_conf_var_table },
505 {}
506};
507
508static const struct trans_ctl_table trans_net_ipv6_route_table[] = {
509 { NET_IPV6_ROUTE_FLUSH, "flush" },
510 { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
511 { NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
512 { NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
513 { NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
514 { NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
515 { NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
516 { NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
517 { NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
518 { NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
519 {}
520};
521
522static const struct trans_ctl_table trans_net_ipv6_icmp_table[] = {
523 { NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
524 {}
525};
526
527static const struct trans_ctl_table trans_net_ipv6_table[] = {
528 { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table },
529 { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table },
530 { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table },
531 { NET_IPV6_ICMP, "icmp", trans_net_ipv6_icmp_table },
532 { NET_IPV6_BINDV6ONLY, "bindv6only" },
533 { NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
534 { NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
535 { NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
536 { NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
537 { NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
538 { 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
539 {}
540};
541
542static const struct trans_ctl_table trans_net_x25_table[] = {
543 { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
544 { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
545 { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
546 { NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
547 { NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
548 { NET_X25_FORWARD, "x25_forward" },
549 {}
550};
551
552static const struct trans_ctl_table trans_net_tr_table[] = {
553 { NET_TR_RIF_TIMEOUT, "rif_timeout" },
554 {}
555};
556
557
558static const struct trans_ctl_table trans_net_decnet_conf_vars[] = {
559 { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
560 { NET_DECNET_CONF_DEV_PRIORITY, "priority" },
561 { NET_DECNET_CONF_DEV_T2, "t2" },
562 { NET_DECNET_CONF_DEV_T3, "t3" },
563 {}
564};
565
566static const struct trans_ctl_table trans_net_decnet_conf[] = {
567 { 0, NULL, trans_net_decnet_conf_vars },
568 {}
569};
570
571static const struct trans_ctl_table trans_net_decnet_table[] = {
572 { NET_DECNET_CONF, "conf", trans_net_decnet_conf },
573 { NET_DECNET_NODE_ADDRESS, "node_address" },
574 { NET_DECNET_NODE_NAME, "node_name" },
575 { NET_DECNET_DEFAULT_DEVICE, "default_device" },
576 { NET_DECNET_TIME_WAIT, "time_wait" },
577 { NET_DECNET_DN_COUNT, "dn_count" },
578 { NET_DECNET_DI_COUNT, "di_count" },
579 { NET_DECNET_DR_COUNT, "dr_count" },
580 { NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
581 { NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
582 { NET_DECNET_MEM, "decnet_mem" },
583 { NET_DECNET_RMEM, "decnet_rmem" },
584 { NET_DECNET_WMEM, "decnet_wmem" },
585 { NET_DECNET_DEBUG_LEVEL, "debug" },
586 {}
587};
588
589static const struct trans_ctl_table trans_net_sctp_table[] = {
590 { NET_SCTP_RTO_INITIAL, "rto_initial" },
591 { NET_SCTP_RTO_MIN, "rto_min" },
592 { NET_SCTP_RTO_MAX, "rto_max" },
593 { NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
594 { NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
595 { NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
596 { NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
597 { NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
598 { NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
599 { NET_SCTP_HB_INTERVAL, "hb_interval" },
600 { NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
601 { NET_SCTP_MAX_BURST, "max_burst" },
602 { NET_SCTP_ADDIP_ENABLE, "addip_enable" },
603 { NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
604 { NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
605 { NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
606 { NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
607 {}
608};
609
610static const struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
611 { NET_LLC2_ACK_TIMEOUT, "ack" },
612 { NET_LLC2_P_TIMEOUT, "p" },
613 { NET_LLC2_REJ_TIMEOUT, "rej" },
614 { NET_LLC2_BUSY_TIMEOUT, "busy" },
615 {}
616};
617
618static const struct trans_ctl_table trans_net_llc_station_table[] = {
619 { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
620 {}
621};
622
623static const struct trans_ctl_table trans_net_llc_llc2_table[] = {
624 { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table },
625 {}
626};
627
628static const struct trans_ctl_table trans_net_llc_table[] = {
629 { NET_LLC2, "llc2", trans_net_llc_llc2_table },
630 { NET_LLC_STATION, "station", trans_net_llc_station_table },
631 {}
632};
633
634static const struct trans_ctl_table trans_net_netfilter_table[] = {
635 { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
636 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" },
637 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" },
638 { NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "nf_conntrack_tcp_timeout_established" },
639 { NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "nf_conntrack_tcp_timeout_fin_wait" },
640 { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "nf_conntrack_tcp_timeout_close_wait" },
641 { NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "nf_conntrack_tcp_timeout_last_ack" },
642 { NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "nf_conntrack_tcp_timeout_time_wait" },
643 { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "nf_conntrack_tcp_timeout_close" },
644 { NET_NF_CONNTRACK_UDP_TIMEOUT, "nf_conntrack_udp_timeout" },
645 { NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "nf_conntrack_udp_timeout_stream" },
646 { NET_NF_CONNTRACK_ICMP_TIMEOUT, "nf_conntrack_icmp_timeout" },
647 { NET_NF_CONNTRACK_GENERIC_TIMEOUT, "nf_conntrack_generic_timeout" },
648 { NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
649 { NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
650 { NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "nf_conntrack_tcp_timeout_max_retrans" },
651 { NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
652 { NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
653 { NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
654 { NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "nf_conntrack_sctp_timeout_closed" },
655 { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "nf_conntrack_sctp_timeout_cookie_wait" },
656 { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "nf_conntrack_sctp_timeout_cookie_echoed" },
657 { NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "nf_conntrack_sctp_timeout_established" },
658 { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "nf_conntrack_sctp_timeout_shutdown_sent" },
659 { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "nf_conntrack_sctp_timeout_shutdown_recd" },
660 { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "nf_conntrack_sctp_timeout_shutdown_ack_sent" },
661 { NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
662 { NET_NF_CONNTRACK_ICMPV6_TIMEOUT, "nf_conntrack_icmpv6_timeout" },
663 { NET_NF_CONNTRACK_FRAG6_TIMEOUT, "nf_conntrack_frag6_timeout" },
664 { NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
665 { NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
666 { NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
667
668 {}
669};
670
671static const struct trans_ctl_table trans_net_dccp_table[] = {
672 { NET_DCCP_DEFAULT, "default" },
673 {}
674};
675
676static const struct trans_ctl_table trans_net_irda_table[] = {
677 { NET_IRDA_DISCOVERY, "discovery" },
678 { NET_IRDA_DEVNAME, "devname" },
679 { NET_IRDA_DEBUG, "debug" },
680 { NET_IRDA_FAST_POLL, "fast_poll_increase" },
681 { NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
682 { NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
683 { NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
684 { NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
685 { NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
686 { NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
687 { NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
688 { NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
689 { NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
690 { NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
691 {}
692};
693
694static const struct trans_ctl_table trans_net_table[] = {
695 { NET_CORE, "core", trans_net_core_table },
696 /* NET_ETHER not used */
697 /* NET_802 not used */
698 { NET_UNIX, "unix", trans_net_unix_table },
699 { NET_IPV4, "ipv4", trans_net_ipv4_table },
700 { NET_IPX, "ipx", trans_net_ipx_table },
701 { NET_ATALK, "appletalk", trans_net_atalk_table },
702 { NET_NETROM, "netrom", trans_net_netrom_table },
703 { NET_AX25, "ax25", trans_net_ax25_table },
704 { NET_BRIDGE, "bridge", trans_net_bridge_table },
705 { NET_ROSE, "rose", trans_net_rose_table },
706 { NET_IPV6, "ipv6", trans_net_ipv6_table },
707 { NET_X25, "x25", trans_net_x25_table },
708 { NET_TR, "token-ring", trans_net_tr_table },
709 { NET_DECNET, "decnet", trans_net_decnet_table },
710 /* NET_ECONET not used */
711 { NET_SCTP, "sctp", trans_net_sctp_table },
712 { NET_LLC, "llc", trans_net_llc_table },
713 { NET_NETFILTER, "netfilter", trans_net_netfilter_table },
714 { NET_DCCP, "dccp", trans_net_dccp_table },
715 { NET_IRDA, "irda", trans_net_irda_table },
716 { 2089, "nf_conntrack_max" },
717 {}
718};
719
720static const struct trans_ctl_table trans_fs_quota_table[] = {
721 { FS_DQ_LOOKUPS, "lookups" },
722 { FS_DQ_DROPS, "drops" },
723 { FS_DQ_READS, "reads" },
724 { FS_DQ_WRITES, "writes" },
725 { FS_DQ_CACHE_HITS, "cache_hits" },
726 { FS_DQ_ALLOCATED, "allocated_dquots" },
727 { FS_DQ_FREE, "free_dquots" },
728 { FS_DQ_SYNCS, "syncs" },
729 { FS_DQ_WARNINGS, "warnings" },
730 {}
731};
732
733static const struct trans_ctl_table trans_fs_xfs_table[] = {
734 { XFS_SGID_INHERIT, "irix_sgid_inherit" },
735 { XFS_SYMLINK_MODE, "irix_symlink_mode" },
736 { XFS_PANIC_MASK, "panic_mask" },
737
738 { XFS_ERRLEVEL, "error_level" },
739 { XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
740 { XFS_INHERIT_SYNC, "inherit_sync" },
741 { XFS_INHERIT_NODUMP, "inherit_nodump" },
742 { XFS_INHERIT_NOATIME, "inherit_noatime" },
743 { XFS_BUF_TIMER, "xfsbufd_centisecs" },
744 { XFS_BUF_AGE, "age_buffer_centisecs" },
745 { XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
746 { XFS_ROTORSTEP, "rotorstep" },
747 { XFS_INHERIT_NODFRG, "inherit_nodefrag" },
748 { XFS_FILESTREAM_TIMER, "filestream_centisecs" },
749 { XFS_STATS_CLEAR, "stats_clear" },
750 {}
751};
752
753static const struct trans_ctl_table trans_fs_ocfs2_nm_table[] = {
754 { 1, "hb_ctl_path" },
755 {}
756};
757
758static const struct trans_ctl_table trans_fs_ocfs2_table[] = {
759 { 1, "nm", trans_fs_ocfs2_nm_table },
760 {}
761};
762
763static const struct trans_ctl_table trans_inotify_table[] = {
764 { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
765 { INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
766 { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
767 {}
768};
769
770static const struct trans_ctl_table trans_fs_table[] = {
771 { FS_NRINODE, "inode-nr" },
772 { FS_STATINODE, "inode-state" },
773 /* FS_MAXINODE unused */
774 /* FS_NRDQUOT unused */
775 /* FS_MAXDQUOT unused */
776 { FS_NRFILE, "file-nr" },
777 { FS_MAXFILE, "file-max" },
778 { FS_DENTRY, "dentry-state" },
779 /* FS_NRSUPER unused */
780 /* FS_MAXUPSER unused */
781 { FS_OVERFLOWUID, "overflowuid" },
782 { FS_OVERFLOWGID, "overflowgid" },
783 { FS_LEASES, "leases-enable" },
784 { FS_DIR_NOTIFY, "dir-notify-enable" },
785 { FS_LEASE_TIME, "lease-break-time" },
786 { FS_DQSTATS, "quota", trans_fs_quota_table },
787 { FS_XFS, "xfs", trans_fs_xfs_table },
788 { FS_AIO_NR, "aio-nr" },
789 { FS_AIO_MAX_NR, "aio-max-nr" },
790 { FS_INOTIFY, "inotify", trans_inotify_table },
791 { FS_OCFS2, "ocfs2", trans_fs_ocfs2_table },
792 { KERN_SETUID_DUMPABLE, "suid_dumpable" },
793 {}
794};
795
796static const struct trans_ctl_table trans_debug_table[] = {
797 {}
798};
799
800static const struct trans_ctl_table trans_cdrom_table[] = {
801 { DEV_CDROM_INFO, "info" },
802 { DEV_CDROM_AUTOCLOSE, "autoclose" },
803 { DEV_CDROM_AUTOEJECT, "autoeject" },
804 { DEV_CDROM_DEBUG, "debug" },
805 { DEV_CDROM_LOCK, "lock" },
806 { DEV_CDROM_CHECK_MEDIA, "check_media" },
807 {}
808};
809
810static const struct trans_ctl_table trans_ipmi_table[] = {
811 { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
812 {}
813};
814
815static const struct trans_ctl_table trans_mac_hid_files[] = {
816 /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
817 /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
818 { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
819 { DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
820 { DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
821 /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
822 {}
823};
824
825static const struct trans_ctl_table trans_raid_table[] = {
826 { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
827 { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
828 {}
829};
830
831static const struct trans_ctl_table trans_scsi_table[] = {
832 { DEV_SCSI_LOGGING_LEVEL, "logging_level" },
833 {}
834};
835
836static const struct trans_ctl_table trans_parport_default_table[] = {
837 { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" },
838 { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" },
839 {}
840};
841
842static const struct trans_ctl_table trans_parport_device_table[] = {
843 { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" },
844 {}
845};
846
847static const struct trans_ctl_table trans_parport_devices_table[] = {
848 { DEV_PARPORT_DEVICES_ACTIVE, "active" },
849 { 0, NULL, trans_parport_device_table },
850 {}
851};
852
853static const struct trans_ctl_table trans_parport_parport_table[] = {
854 { DEV_PARPORT_SPINTIME, "spintime" },
855 { DEV_PARPORT_BASE_ADDR, "base-addr" },
856 { DEV_PARPORT_IRQ, "irq" },
857 { DEV_PARPORT_DMA, "dma" },
858 { DEV_PARPORT_MODES, "modes" },
859 { DEV_PARPORT_DEVICES, "devices", trans_parport_devices_table },
860 { DEV_PARPORT_AUTOPROBE, "autoprobe" },
861 { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0" },
862 { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1" },
863 { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2" },
864 { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" },
865 {}
866};
867static const struct trans_ctl_table trans_parport_table[] = {
868 { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table },
869 { 0, NULL, trans_parport_parport_table },
870 {}
871};
872
873static const struct trans_ctl_table trans_dev_table[] = {
874 { DEV_CDROM, "cdrom", trans_cdrom_table },
875 /* DEV_HWMON unused */
876 { DEV_PARPORT, "parport", trans_parport_table },
877 { DEV_RAID, "raid", trans_raid_table },
878 { DEV_MAC_HID, "mac_hid", trans_mac_hid_files },
879 { DEV_SCSI, "scsi", trans_scsi_table },
880 { DEV_IPMI, "ipmi", trans_ipmi_table },
881 {}
882};
883
884static const struct trans_ctl_table trans_bus_isa_table[] = {
885 { BUS_ISA_MEM_BASE, "membase" },
886 { BUS_ISA_PORT_BASE, "portbase" },
887 { BUS_ISA_PORT_SHIFT, "portshift" },
888 {}
889};
890
891static const struct trans_ctl_table trans_bus_table[] = {
892 { CTL_BUS_ISA, "isa", trans_bus_isa_table },
893 {}
894};
895
896static const struct trans_ctl_table trans_arlan_conf_table0[] = {
897 { 1, "spreadingCode" },
898 { 2, "channelNumber" },
899 { 3, "scramblingDisable" },
900 { 4, "txAttenuation" },
901 { 5, "systemId" },
902 { 6, "maxDatagramSize" },
903 { 7, "maxFrameSize" },
904 { 8, "maxRetries" },
905 { 9, "receiveMode" },
906 { 10, "priority" },
907 { 11, "rootOrRepeater" },
908 { 12, "SID" },
909 { 13, "registrationMode" },
910 { 14, "registrationFill" },
911 { 15, "localTalkAddress" },
912 { 16, "codeFormat" },
913 { 17, "numChannels" },
914 { 18, "channel1" },
915 { 19, "channel2" },
916 { 20, "channel3" },
917 { 21, "channel4" },
918 { 22, "txClear" },
919 { 23, "txRetries" },
920 { 24, "txRouting" },
921 { 25, "txScrambled" },
922 { 26, "rxParameter" },
923 { 27, "txTimeoutMs" },
924 { 28, "waitCardTimeout" },
925 { 29, "channelSet" },
926 { 30, "name" },
927 { 31, "waitTime" },
928 { 32, "lParameter" },
929 { 33, "_15" },
930 { 34, "headerSize" },
931 { 36, "tx_delay_ms" },
932 { 37, "retries" },
933 { 38, "ReTransmitPacketMaxSize" },
934 { 39, "waitReTransmitPacketMaxSize" },
935 { 40, "fastReTransCount" },
936 { 41, "driverRetransmissions" },
937 { 42, "txAckTimeoutMs" },
938 { 43, "registrationInterrupts" },
939 { 44, "hardwareType" },
940 { 45, "radioType" },
941 { 46, "writeEEPROM" },
942 { 47, "writeRadioType" },
943 { 48, "entry_exit_debug" },
944 { 49, "debug" },
945 { 50, "in_speed" },
946 { 51, "out_speed" },
947 { 52, "in_speed10" },
948 { 53, "out_speed10" },
949 { 54, "in_speed_max" },
950 { 55, "out_speed_max" },
951 { 56, "measure_rate" },
952 { 57, "pre_Command_Wait" },
953 { 58, "rx_tweak1" },
954 { 59, "rx_tweak2" },
955 { 60, "tx_queue_len" },
956
957 { 150, "arlan0-txRing" },
958 { 151, "arlan0-rxRing" },
959 { 152, "arlan0-18" },
960 { 153, "arlan0-ring" },
961 { 154, "arlan0-shm-cpy" },
962 { 155, "config0" },
963 { 156, "reset0" },
964 {}
965};
966
967static const struct trans_ctl_table trans_arlan_conf_table1[] = {
968 { 1, "spreadingCode" },
969 { 2, "channelNumber" },
970 { 3, "scramblingDisable" },
971 { 4, "txAttenuation" },
972 { 5, "systemId" },
973 { 6, "maxDatagramSize" },
974 { 7, "maxFrameSize" },
975 { 8, "maxRetries" },
976 { 9, "receiveMode" },
977 { 10, "priority" },
978 { 11, "rootOrRepeater" },
979 { 12, "SID" },
980 { 13, "registrationMode" },
981 { 14, "registrationFill" },
982 { 15, "localTalkAddress" },
983 { 16, "codeFormat" },
984 { 17, "numChannels" },
985 { 18, "channel1" },
986 { 19, "channel2" },
987 { 20, "channel3" },
988 { 21, "channel4" },
989 { 22, "txClear" },
990 { 23, "txRetries" },
991 { 24, "txRouting" },
992 { 25, "txScrambled" },
993 { 26, "rxParameter" },
994 { 27, "txTimeoutMs" },
995 { 28, "waitCardTimeout" },
996 { 29, "channelSet" },
997 { 30, "name" },
998 { 31, "waitTime" },
999 { 32, "lParameter" },
1000 { 33, "_15" },
1001 { 34, "headerSize" },
1002 { 36, "tx_delay_ms" },
1003 { 37, "retries" },
1004 { 38, "ReTransmitPacketMaxSize" },
1005 { 39, "waitReTransmitPacketMaxSize" },
1006 { 40, "fastReTransCount" },
1007 { 41, "driverRetransmissions" },
1008 { 42, "txAckTimeoutMs" },
1009 { 43, "registrationInterrupts" },
1010 { 44, "hardwareType" },
1011 { 45, "radioType" },
1012 { 46, "writeEEPROM" },
1013 { 47, "writeRadioType" },
1014 { 48, "entry_exit_debug" },
1015 { 49, "debug" },
1016 { 50, "in_speed" },
1017 { 51, "out_speed" },
1018 { 52, "in_speed10" },
1019 { 53, "out_speed10" },
1020 { 54, "in_speed_max" },
1021 { 55, "out_speed_max" },
1022 { 56, "measure_rate" },
1023 { 57, "pre_Command_Wait" },
1024 { 58, "rx_tweak1" },
1025 { 59, "rx_tweak2" },
1026 { 60, "tx_queue_len" },
1027
1028 { 150, "arlan1-txRing" },
1029 { 151, "arlan1-rxRing" },
1030 { 152, "arlan1-18" },
1031 { 153, "arlan1-ring" },
1032 { 154, "arlan1-shm-cpy" },
1033 { 155, "config1" },
1034 { 156, "reset1" },
1035 {}
1036};
1037
1038static const struct trans_ctl_table trans_arlan_conf_table2[] = {
1039 { 1, "spreadingCode" },
1040 { 2, "channelNumber" },
1041 { 3, "scramblingDisable" },
1042 { 4, "txAttenuation" },
1043 { 5, "systemId" },
1044 { 6, "maxDatagramSize" },
1045 { 7, "maxFrameSize" },
1046 { 8, "maxRetries" },
1047 { 9, "receiveMode" },
1048 { 10, "priority" },
1049 { 11, "rootOrRepeater" },
1050 { 12, "SID" },
1051 { 13, "registrationMode" },
1052 { 14, "registrationFill" },
1053 { 15, "localTalkAddress" },
1054 { 16, "codeFormat" },
1055 { 17, "numChannels" },
1056 { 18, "channel1" },
1057 { 19, "channel2" },
1058 { 20, "channel3" },
1059 { 21, "channel4" },
1060 { 22, "txClear" },
1061 { 23, "txRetries" },
1062 { 24, "txRouting" },
1063 { 25, "txScrambled" },
1064 { 26, "rxParameter" },
1065 { 27, "txTimeoutMs" },
1066 { 28, "waitCardTimeout" },
1067 { 29, "channelSet" },
1068 { 30, "name" },
1069 { 31, "waitTime" },
1070 { 32, "lParameter" },
1071 { 33, "_15" },
1072 { 34, "headerSize" },
1073 { 36, "tx_delay_ms" },
1074 { 37, "retries" },
1075 { 38, "ReTransmitPacketMaxSize" },
1076 { 39, "waitReTransmitPacketMaxSize" },
1077 { 40, "fastReTransCount" },
1078 { 41, "driverRetransmissions" },
1079 { 42, "txAckTimeoutMs" },
1080 { 43, "registrationInterrupts" },
1081 { 44, "hardwareType" },
1082 { 45, "radioType" },
1083 { 46, "writeEEPROM" },
1084 { 47, "writeRadioType" },
1085 { 48, "entry_exit_debug" },
1086 { 49, "debug" },
1087 { 50, "in_speed" },
1088 { 51, "out_speed" },
1089 { 52, "in_speed10" },
1090 { 53, "out_speed10" },
1091 { 54, "in_speed_max" },
1092 { 55, "out_speed_max" },
1093 { 56, "measure_rate" },
1094 { 57, "pre_Command_Wait" },
1095 { 58, "rx_tweak1" },
1096 { 59, "rx_tweak2" },
1097 { 60, "tx_queue_len" },
1098
1099 { 150, "arlan2-txRing" },
1100 { 151, "arlan2-rxRing" },
1101 { 152, "arlan2-18" },
1102 { 153, "arlan2-ring" },
1103 { 154, "arlan2-shm-cpy" },
1104 { 155, "config2" },
1105 { 156, "reset2" },
1106 {}
1107};
1108
1109static const struct trans_ctl_table trans_arlan_conf_table3[] = {
1110 { 1, "spreadingCode" },
1111 { 2, "channelNumber" },
1112 { 3, "scramblingDisable" },
1113 { 4, "txAttenuation" },
1114 { 5, "systemId" },
1115 { 6, "maxDatagramSize" },
1116 { 7, "maxFrameSize" },
1117 { 8, "maxRetries" },
1118 { 9, "receiveMode" },
1119 { 10, "priority" },
1120 { 11, "rootOrRepeater" },
1121 { 12, "SID" },
1122 { 13, "registrationMode" },
1123 { 14, "registrationFill" },
1124 { 15, "localTalkAddress" },
1125 { 16, "codeFormat" },
1126 { 17, "numChannels" },
1127 { 18, "channel1" },
1128 { 19, "channel2" },
1129 { 20, "channel3" },
1130 { 21, "channel4" },
1131 { 22, "txClear" },
1132 { 23, "txRetries" },
1133 { 24, "txRouting" },
1134 { 25, "txScrambled" },
1135 { 26, "rxParameter" },
1136 { 27, "txTimeoutMs" },
1137 { 28, "waitCardTimeout" },
1138 { 29, "channelSet" },
1139 { 30, "name" },
1140 { 31, "waitTime" },
1141 { 32, "lParameter" },
1142 { 33, "_15" },
1143 { 34, "headerSize" },
1144 { 36, "tx_delay_ms" },
1145 { 37, "retries" },
1146 { 38, "ReTransmitPacketMaxSize" },
1147 { 39, "waitReTransmitPacketMaxSize" },
1148 { 40, "fastReTransCount" },
1149 { 41, "driverRetransmissions" },
1150 { 42, "txAckTimeoutMs" },
1151 { 43, "registrationInterrupts" },
1152 { 44, "hardwareType" },
1153 { 45, "radioType" },
1154 { 46, "writeEEPROM" },
1155 { 47, "writeRadioType" },
1156 { 48, "entry_exit_debug" },
1157 { 49, "debug" },
1158 { 50, "in_speed" },
1159 { 51, "out_speed" },
1160 { 52, "in_speed10" },
1161 { 53, "out_speed10" },
1162 { 54, "in_speed_max" },
1163 { 55, "out_speed_max" },
1164 { 56, "measure_rate" },
1165 { 57, "pre_Command_Wait" },
1166 { 58, "rx_tweak1" },
1167 { 59, "rx_tweak2" },
1168 { 60, "tx_queue_len" },
1169
1170 { 150, "arlan3-txRing" },
1171 { 151, "arlan3-rxRing" },
1172 { 152, "arlan3-18" },
1173 { 153, "arlan3-ring" },
1174 { 154, "arlan3-shm-cpy" },
1175 { 155, "config3" },
1176 { 156, "reset3" },
1177 {}
1178};
1179
1180static const struct trans_ctl_table trans_arlan_table[] = {
1181 { 1, "arlan0", trans_arlan_conf_table0 },
1182 { 2, "arlan1", trans_arlan_conf_table1 },
1183 { 3, "arlan2", trans_arlan_conf_table2 },
1184 { 4, "arlan3", trans_arlan_conf_table3 },
1185 {}
1186};
1187
1188static const struct trans_ctl_table trans_s390dbf_table[] = {
1189 { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
1190 { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
1191 {}
1192};
1193
1194static const struct trans_ctl_table trans_sunrpc_table[] = {
1195 { CTL_RPCDEBUG, "rpc_debug" },
1196 { CTL_NFSDEBUG, "nfs_debug" },
1197 { CTL_NFSDDEBUG, "nfsd_debug" },
1198 { CTL_NLMDEBUG, "nlm_debug" },
1199 { CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
1200 { CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
1201 { CTL_MIN_RESVPORT, "min_resvport" },
1202 { CTL_MAX_RESVPORT, "max_resvport" },
1203 {}
1204};
1205
1206static const struct trans_ctl_table trans_pm_table[] = {
1207 { 1 /* CTL_PM_SUSPEND */, "suspend" },
1208 { 2 /* CTL_PM_CMODE */, "cmode" },
1209 { 3 /* CTL_PM_P0 */, "p0" },
1210 { 4 /* CTL_PM_CM */, "cm" },
1211 {}
1212};
1213
1214static const struct trans_ctl_table trans_frv_table[] = {
1215 { 1, "cache-mode" },
1216 { 2, "pin-cxnr" },
1217 {}
1218};
1219
1220static const struct trans_ctl_table trans_root_table[] = {
1221 { CTL_KERN, "kernel", trans_kern_table },
1222 { CTL_VM, "vm", trans_vm_table },
1223 { CTL_NET, "net", trans_net_table },
1224 /* CTL_PROC not used */
1225 { CTL_FS, "fs", trans_fs_table },
1226 { CTL_DEBUG, "debug", trans_debug_table },
1227 { CTL_DEV, "dev", trans_dev_table },
1228 { CTL_BUS, "bus", trans_bus_table },
1229 { CTL_ABI, "abi" },
1230 /* CTL_CPU not used */
1231 { CTL_ARLAN, "arlan", trans_arlan_table },
1232 { CTL_S390DBF, "s390dbf", trans_s390dbf_table },
1233 { CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
1234 { CTL_PM, "pm", trans_pm_table },
1235 { CTL_FRV, "frv", trans_frv_table },
1236 {}
1237};
1238
1239
1240
1241 8
1242static int sysctl_depth(struct ctl_table *table) 9static int sysctl_depth(struct ctl_table *table)
1243{ 10{
@@ -1261,47 +28,6 @@ static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
1261 return table; 28 return table;
1262} 29}
1263 30
1264static const struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table)
1265{
1266 struct ctl_table *test;
1267 const struct trans_ctl_table *ref;
1268 int cur_depth;
1269
1270 cur_depth = sysctl_depth(table);
1271
1272 ref = trans_root_table;
1273repeat:
1274 test = sysctl_parent(table, cur_depth);
1275 for (; ref->ctl_name || ref->procname || ref->child; ref++) {
1276 int match = 0;
1277
1278 if (cur_depth && !ref->child)
1279 continue;
1280
1281 if (test->procname && ref->procname &&
1282 (strcmp(test->procname, ref->procname) == 0))
1283 match++;
1284
1285 if (test->ctl_name && ref->ctl_name &&
1286 (test->ctl_name == ref->ctl_name))
1287 match++;
1288
1289 if (!ref->ctl_name && !ref->procname)
1290 match++;
1291
1292 if (match) {
1293 if (cur_depth != 0) {
1294 cur_depth--;
1295 ref = ref->child;
1296 goto repeat;
1297 }
1298 goto out;
1299 }
1300 }
1301 ref = NULL;
1302out:
1303 return ref;
1304}
1305 31
1306static void sysctl_print_path(struct ctl_table *table) 32static void sysctl_print_path(struct ctl_table *table)
1307{ 33{
@@ -1315,26 +41,6 @@ static void sysctl_print_path(struct ctl_table *table)
1315 } 41 }
1316 } 42 }
1317 printk(" "); 43 printk(" ");
1318 if (table->ctl_name) {
1319 for (i = depth; i >= 0; i--) {
1320 tmp = sysctl_parent(table, i);
1321 printk(".%d", tmp->ctl_name);
1322 }
1323 }
1324}
1325
1326static void sysctl_repair_table(struct ctl_table *table)
1327{
1328 /* Don't complain about the classic default
1329 * sysctl strategy routine. Maybe later we
1330 * can get the tables fixed and complain about
1331 * this.
1332 */
1333 if (table->ctl_name && table->procname &&
1334 (table->proc_handler == proc_dointvec) &&
1335 (!table->strategy)) {
1336 table->strategy = sysctl_data;
1337 }
1338} 44}
1339 45
1340static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces, 46static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces,
@@ -1352,7 +58,7 @@ static struct ctl_table *sysctl_check_lookup(struct nsproxy *namespaces,
1352 ref = head->ctl_table; 58 ref = head->ctl_table;
1353repeat: 59repeat:
1354 test = sysctl_parent(table, cur_depth); 60 test = sysctl_parent(table, cur_depth);
1355 for (; ref->ctl_name || ref->procname; ref++) { 61 for (; ref->procname; ref++) {
1356 int match = 0; 62 int match = 0;
1357 if (cur_depth && !ref->child) 63 if (cur_depth && !ref->child)
1358 continue; 64 continue;
@@ -1361,10 +67,6 @@ repeat:
1361 (strcmp(test->procname, ref->procname) == 0)) 67 (strcmp(test->procname, ref->procname) == 0))
1362 match++; 68 match++;
1363 69
1364 if (test->ctl_name && ref->ctl_name &&
1365 (test->ctl_name == ref->ctl_name))
1366 match++;
1367
1368 if (match) { 70 if (match) {
1369 if (cur_depth != 0) { 71 if (cur_depth != 0) {
1370 cur_depth--; 72 cur_depth--;
@@ -1392,38 +94,6 @@ static void set_fail(const char **fail, struct ctl_table *table, const char *str
1392 *fail = str; 94 *fail = str;
1393} 95}
1394 96
1395static int sysctl_check_dir(struct nsproxy *namespaces,
1396 struct ctl_table *table)
1397{
1398 struct ctl_table *ref;
1399 int error;
1400
1401 error = 0;
1402 ref = sysctl_check_lookup(namespaces, table);
1403 if (ref) {
1404 int match = 0;
1405 if ((!table->procname && !ref->procname) ||
1406 (table->procname && ref->procname &&
1407 (strcmp(table->procname, ref->procname) == 0)))
1408 match++;
1409
1410 if ((!table->ctl_name && !ref->ctl_name) ||
1411 (table->ctl_name && ref->ctl_name &&
1412 (table->ctl_name == ref->ctl_name)))
1413 match++;
1414
1415 if (match != 2) {
1416 printk(KERN_ERR "%s: failed: ", __func__);
1417 sysctl_print_path(table);
1418 printk(" ref: ");
1419 sysctl_print_path(ref);
1420 printk("\n");
1421 error = -EINVAL;
1422 }
1423 }
1424 return error;
1425}
1426
1427static void sysctl_check_leaf(struct nsproxy *namespaces, 97static void sysctl_check_leaf(struct nsproxy *namespaces,
1428 struct ctl_table *table, const char **fail) 98 struct ctl_table *table, const char **fail)
1429{ 99{
@@ -1434,37 +104,15 @@ static void sysctl_check_leaf(struct nsproxy *namespaces,
1434 set_fail(fail, table, "Sysctl already exists"); 104 set_fail(fail, table, "Sysctl already exists");
1435} 105}
1436 106
1437static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
1438{
1439 const struct trans_ctl_table *ref;
1440
1441 ref = sysctl_binary_lookup(table);
1442 if (table->ctl_name && !ref)
1443 set_fail(fail, table, "Unknown sysctl binary path");
1444 if (ref) {
1445 if (ref->procname &&
1446 (!table->procname ||
1447 (strcmp(table->procname, ref->procname) != 0)))
1448 set_fail(fail, table, "procname does not match binary path procname");
1449
1450 if (ref->ctl_name && table->ctl_name &&
1451 (table->ctl_name != ref->ctl_name))
1452 set_fail(fail, table, "ctl_name does not match binary path ctl_name");
1453 }
1454}
1455
1456int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table) 107int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1457{ 108{
1458 int error = 0; 109 int error = 0;
1459 for (; table->ctl_name || table->procname; table++) { 110 for (; table->procname; table++) {
1460 const char *fail = NULL; 111 const char *fail = NULL;
1461 112
1462 sysctl_repair_table(table);
1463 if (table->parent) { 113 if (table->parent) {
1464 if (table->procname && !table->parent->procname) 114 if (table->procname && !table->parent->procname)
1465 set_fail(&fail, table, "Parent without procname"); 115 set_fail(&fail, table, "Parent without procname");
1466 if (table->ctl_name && !table->parent->ctl_name)
1467 set_fail(&fail, table, "Parent without ctl_name");
1468 } 116 }
1469 if (!table->procname) 117 if (!table->procname)
1470 set_fail(&fail, table, "No procname"); 118 set_fail(&fail, table, "No procname");
@@ -1477,21 +125,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1477 set_fail(&fail, table, "Writable sysctl directory"); 125 set_fail(&fail, table, "Writable sysctl directory");
1478 if (table->proc_handler) 126 if (table->proc_handler)
1479 set_fail(&fail, table, "Directory with proc_handler"); 127 set_fail(&fail, table, "Directory with proc_handler");
1480 if (table->strategy)
1481 set_fail(&fail, table, "Directory with strategy");
1482 if (table->extra1) 128 if (table->extra1)
1483 set_fail(&fail, table, "Directory with extra1"); 129 set_fail(&fail, table, "Directory with extra1");
1484 if (table->extra2) 130 if (table->extra2)
1485 set_fail(&fail, table, "Directory with extra2"); 131 set_fail(&fail, table, "Directory with extra2");
1486 if (sysctl_check_dir(namespaces, table))
1487 set_fail(&fail, table, "Inconsistent directory names");
1488 } else { 132 } else {
1489 if ((table->strategy == sysctl_data) || 133 if ((table->proc_handler == proc_dostring) ||
1490 (table->strategy == sysctl_string) ||
1491 (table->strategy == sysctl_intvec) ||
1492 (table->strategy == sysctl_jiffies) ||
1493 (table->strategy == sysctl_ms_jiffies) ||
1494 (table->proc_handler == proc_dostring) ||
1495 (table->proc_handler == proc_dointvec) || 134 (table->proc_handler == proc_dointvec) ||
1496 (table->proc_handler == proc_dointvec_minmax) || 135 (table->proc_handler == proc_dointvec_minmax) ||
1497 (table->proc_handler == proc_dointvec_jiffies) || 136 (table->proc_handler == proc_dointvec_jiffies) ||
@@ -1513,15 +152,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1513 set_fail(&fail, table, "No max"); 152 set_fail(&fail, table, "No max");
1514 } 153 }
1515 } 154 }
1516#ifdef CONFIG_SYSCTL_SYSCALL 155#ifdef CONFIG_PROC_SYSCTL
1517 if (table->ctl_name && !table->strategy)
1518 set_fail(&fail, table, "Missing strategy");
1519#endif
1520#if 0
1521 if (!table->ctl_name && table->strategy)
1522 set_fail(&fail, table, "Strategy without ctl_name");
1523#endif
1524#ifdef CONFIG_PROC_FS
1525 if (table->procname && !table->proc_handler) 156 if (table->procname && !table->proc_handler)
1526 set_fail(&fail, table, "No proc_handler"); 157 set_fail(&fail, table, "No proc_handler");
1527#endif 158#endif
@@ -1531,7 +162,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
1531#endif 162#endif
1532 sysctl_check_leaf(namespaces, table, &fail); 163 sysctl_check_leaf(namespaces, table, &fail);
1533 } 164 }
1534 sysctl_check_bin_path(table, &fail);
1535 if (table->mode > 0777) 165 if (table->mode > 0777)
1536 set_fail(&fail, table, "bogus .mode"); 166 set_fail(&fail, table, "bogus .mode");
1537 if (fail) { 167 if (fail) {
diff --git a/kernel/time.c b/kernel/time.c
index 2ef4fe2079b6..c6324d96009e 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -661,6 +661,36 @@ u64 nsec_to_clock_t(u64 x)
661#endif 661#endif
662} 662}
663 663
664/**
665 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
666 *
667 * @n: nsecs in u64
668 *
669 * Unlike {m,u}secs_to_jiffies, the input type is u64 rather than unsigned int.
670 * It also does not return MAX_JIFFY_OFFSET, since this function is designed
671 * for the scheduler, not for device drivers calculating timeout values.
672 *
673 * note:
674 * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
675 * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
676 */
677unsigned long nsecs_to_jiffies(u64 n)
678{
679#if (NSEC_PER_SEC % HZ) == 0
680 /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
681 return div_u64(n, NSEC_PER_SEC / HZ);
682#elif (HZ % 512) == 0
683 /* overflow after 292 years if HZ = 1024 */
684 return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
685#else
686 /*
687 * Generic case - optimized for cases where HZ is a multiple of 3.
688 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
689 */
690 return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
691#endif
692}
693
664#if (BITS_PER_LONG < 64) 694#if (BITS_PER_LONG < 64)
665u64 get_jiffies_64(void) 695u64 get_jiffies_64(void)
666{ 696{
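For reference, when NSEC_PER_SEC is evenly divisible by HZ the new nsecs_to_jiffies() above reduces to a single division by the nanoseconds-per-tick; the other branches only rearrange the arithmetic to limit 64-bit overflow. Below is a minimal userspace sketch (not part of this patch) of that common-case branch; HZ = 1000 is an assumed configuration value used purely for illustration.

```c
/* Userspace sketch of the common-case branch of nsecs_to_jiffies(). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 1000UL	/* assumed config value, for illustration only */

static unsigned long nsecs_to_jiffies_sketch(uint64_t n)
{
	/* common case: NSEC_PER_SEC % HZ == 0, so one division suffices */
	return (unsigned long)(n / (NSEC_PER_SEC / HZ));
}

int main(void)
{
	/* 1.5 ms rounds down to 1 jiffy at HZ=1000; 2 s is 2000 jiffies */
	printf("%lu\n", nsecs_to_jiffies_sketch(1500000ULL));
	printf("%lu\n", nsecs_to_jiffies_sketch(2ULL * NSEC_PER_SEC));
	return 0;
}
```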
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 72a2dcbd8b48..d422c7b2236b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -39,7 +39,7 @@ void timecounter_init(struct timecounter *tc,
39 tc->cycle_last = cc->read(cc); 39 tc->cycle_last = cc->read(cc);
40 tc->nsec = start_tstamp; 40 tc->nsec = start_tstamp;
41} 41}
42EXPORT_SYMBOL(timecounter_init); 42EXPORT_SYMBOL_GPL(timecounter_init);
43 43
44/** 44/**
45 * timecounter_read_delta - get nanoseconds since last call of this function 45 * timecounter_read_delta - get nanoseconds since last call of this function
@@ -83,7 +83,7 @@ u64 timecounter_read(struct timecounter *tc)
83 83
84 return nsec; 84 return nsec;
85} 85}
86EXPORT_SYMBOL(timecounter_read); 86EXPORT_SYMBOL_GPL(timecounter_read);
87 87
88u64 timecounter_cyc2time(struct timecounter *tc, 88u64 timecounter_cyc2time(struct timecounter *tc,
89 cycle_t cycle_tstamp) 89 cycle_t cycle_tstamp)
@@ -105,7 +105,7 @@ u64 timecounter_cyc2time(struct timecounter *tc,
105 105
106 return nsec; 106 return nsec;
107} 107}
108EXPORT_SYMBOL(timecounter_cyc2time); 108EXPORT_SYMBOL_GPL(timecounter_cyc2time);
109 109
110/** 110/**
111 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks 111 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index df133bc29f89..f992762d7f51 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -224,6 +224,13 @@ void tick_nohz_stop_sched_tick(int inidle)
224 if (!inidle && !ts->inidle) 224 if (!inidle && !ts->inidle)
225 goto end; 225 goto end;
226 226
227 /*
228 * Set ts->inidle unconditionally. Even if the system did not
229 * switch to NOHZ mode, the cpu frequency governors rely on the
230 * update of the idle time accounting in tick_nohz_start_idle().
231 */
232 ts->inidle = 1;
233
227 now = tick_nohz_start_idle(ts); 234 now = tick_nohz_start_idle(ts);
228 235
229 /* 236 /*
@@ -241,8 +248,6 @@ void tick_nohz_stop_sched_tick(int inidle)
241 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 248 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
242 goto end; 249 goto end;
243 250
244 ts->inidle = 1;
245
246 if (need_resched()) 251 if (need_resched())
247 goto end; 252 goto end;
248 253
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 71e7f1a19156..96ff643a5a59 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -40,7 +40,7 @@ ktime_t timecompare_transform(struct timecompare *sync,
40 40
41 return ns_to_ktime(nsec); 41 return ns_to_ktime(nsec);
42} 42}
43EXPORT_SYMBOL(timecompare_transform); 43EXPORT_SYMBOL_GPL(timecompare_transform);
44 44
45int timecompare_offset(struct timecompare *sync, 45int timecompare_offset(struct timecompare *sync,
46 s64 *offset, 46 s64 *offset,
@@ -131,7 +131,7 @@ int timecompare_offset(struct timecompare *sync,
131 131
132 return used; 132 return used;
133} 133}
134EXPORT_SYMBOL(timecompare_offset); 134EXPORT_SYMBOL_GPL(timecompare_offset);
135 135
136void __timecompare_update(struct timecompare *sync, 136void __timecompare_update(struct timecompare *sync,
137 u64 source_tstamp) 137 u64 source_tstamp)
@@ -188,4 +188,4 @@ void __timecompare_update(struct timecompare *sync,
188 } 188 }
189 } 189 }
190} 190}
191EXPORT_SYMBOL(__timecompare_update); 191EXPORT_SYMBOL_GPL(__timecompare_update);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5d4d4239a0aa..d1aebd73b191 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -13,6 +13,7 @@
13#include <linux/percpu.h> 13#include <linux/percpu.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/sched.h>
16#include <linux/sysdev.h> 17#include <linux/sysdev.h>
17#include <linux/clocksource.h> 18#include <linux/clocksource.h>
18#include <linux/jiffies.h> 19#include <linux/jiffies.h>
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index b416512ad17f..d006554888dc 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -339,6 +339,27 @@ config POWER_TRACER
339 power management decisions, specifically the C-state and P-state 339 power management decisions, specifically the C-state and P-state
340 behavior. 340 behavior.
341 341
342config KSYM_TRACER
343 bool "Trace read and write access on kernel memory locations"
344 depends on HAVE_HW_BREAKPOINT
345 select TRACING
346 help
347 This tracer helps find read and write operations on any given kernel
348 symbol, i.e. any symbol listed in /proc/kallsyms.
349
350config PROFILE_KSYM_TRACER
351 bool "Profile all kernel memory accesses on 'watched' variables"
352 depends on KSYM_TRACER
353 help
354 This tracer profiles kernel accesses on variables watched through the
355 ksym tracer ftrace plugin. Depending upon the hardware, all read
356 and write operations on the watched kernel variables can be
357 monitored.
358
359 The results will be displayed in:
360 /debugfs/tracing/profile_ksym
361
362 Say N if unsure.
342 363
343config STACK_TRACER 364config STACK_TRACER
344 bool "Trace max stack" 365 bool "Trace max stack"
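To make the KSYM_TRACER help above concrete: the tracer is driven from user space by writing watch expressions into a filter file under the tracing debugfs directory. The file name "ksym_trace_filter", the "pid_max:rw-" expression syntax, and the pid_max symbol below are assumptions based on recollection of the ksym tracer documentation of this period, not anything guaranteed by this diff; a rough sketch:

```c
/*
 * Rough sketch: arm the ksym tracer on a kernel variable from user space.
 * The filter file name and the "<symbol>:<modes>" syntax are assumptions
 * (see lead-in); adjust to whatever the tracer in this tree actually expects.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/ksym_trace_filter", "w");

	if (!f) {
		perror("ksym_trace_filter");
		return 1;
	}
	/* watch both reads and writes of the (assumed) symbol pid_max */
	fputs("pid_max:rw-\n", f);
	fclose(f);
	return 0;
}
```

With PROFILE_KSYM_TRACER enabled, hit counts for the watched symbols are then read back from the profile_ksym file mentioned in the help text.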
@@ -428,6 +449,23 @@ config BLK_DEV_IO_TRACE
428 449
429 If unsure, say N. 450 If unsure, say N.
430 451
452config KPROBE_EVENT
453 depends on KPROBES
454 depends on X86
455 bool "Enable kprobes-based dynamic events"
456 select TRACING
457 default y
458 help
459 This allows the user to add tracing events (similar to tracepoints) on the fly
460 via the ftrace interface. See Documentation/trace/kprobetrace.txt
461 for more details.
462
463 Those events can be inserted wherever kprobes can probe, and record
464 various register and memory values.
465
466 This option is also required by the perf-probe subcommand of perf tools. If
467 you want to use perf tools, this option is strongly recommended.
468
431config DYNAMIC_FTRACE 469config DYNAMIC_FTRACE
432 bool "enable/disable ftrace tracepoints dynamically" 470 bool "enable/disable ftrace tracepoints dynamically"
433 depends on FUNCTION_TRACER 471 depends on FUNCTION_TRACER
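As the KPROBE_EVENT help text above says, probes are added on the fly through the ftrace interface described in Documentation/trace/kprobetrace.txt. A hedged sketch of that flow from user space follows; the probe name "myprobe" and the probed symbol do_sys_open are illustrative choices, and the debugfs mount point is assumed to be /sys/kernel/debug.

```c
/*
 * Sketch: define and enable a kprobe-based dynamic event via the ftrace
 * interface enabled by KPROBE_EVENT. Names and paths are illustrative.
 */
#include <stdio.h>

static int write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(s, f);
	return fclose(f);
}

int main(void)
{
	/* add a probe event at the entry of do_sys_open */
	if (write_str("/sys/kernel/debug/tracing/kprobe_events",
		      "p:myprobe do_sys_open\n"))
		return 1;
	/* switch the new event on; hits then appear in the trace buffer */
	if (write_str("/sys/kernel/debug/tracing/events/kprobes/myprobe/enable",
		      "1\n"))
		return 1;
	return 0;
}
```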
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 26f03ac07c2b..cd9ecd89ec77 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,6 +53,8 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o 53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
54obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o 54obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
55obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o 55obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
56obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
57obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
56obj-$(CONFIG_EVENT_TRACING) += power-traces.o 58obj-$(CONFIG_EVENT_TRACING) += power-traces.o
57 59
58libftrace-y := ftrace.o 60libftrace-y := ftrace.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3eb159c277c8..d9d6206e0b14 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
856} 856}
857 857
858/** 858/**
859 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
860 * @q: queue the io is for
861 * @rq: the source request
862 * @dev: target device
863 * @from: source sector
864 *
865 * Description:
866 * Device mapper remaps requests to other devices.
867 * Add a trace for that action.
868 *
869 **/
870static void blk_add_trace_rq_remap(struct request_queue *q,
871 struct request *rq, dev_t dev,
872 sector_t from)
873{
874 struct blk_trace *bt = q->blk_trace;
875 struct blk_io_trace_remap r;
876
877 if (likely(!bt))
878 return;
879
880 r.device_from = cpu_to_be32(dev);
881 r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
882 r.sector_from = cpu_to_be64(from);
883
884 __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
885 rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors,
886 sizeof(r), &r);
887}
888
889/**
859 * blk_add_driver_data - Add binary message with driver-specific data 890 * blk_add_driver_data - Add binary message with driver-specific data
860 * @q: queue the io is for 891 * @q: queue the io is for
861 * @rq: io request 892 * @rq: io request
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void)
922 WARN_ON(ret); 953 WARN_ON(ret);
923 ret = register_trace_block_remap(blk_add_trace_remap); 954 ret = register_trace_block_remap(blk_add_trace_remap);
924 WARN_ON(ret); 955 WARN_ON(ret);
956 ret = register_trace_block_rq_remap(blk_add_trace_rq_remap);
957 WARN_ON(ret);
925} 958}
926 959
927static void blk_unregister_tracepoints(void) 960static void blk_unregister_tracepoints(void)
928{ 961{
962 unregister_trace_block_rq_remap(blk_add_trace_rq_remap);
929 unregister_trace_block_remap(blk_add_trace_remap); 963 unregister_trace_block_remap(blk_add_trace_remap);
930 unregister_trace_block_split(blk_add_trace_split); 964 unregister_trace_block_split(blk_add_trace_split);
931 unregister_trace_block_unplug_io(blk_add_trace_unplug_io); 965 unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev)
1657 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1691 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
1658} 1692}
1659 1693
1694void blk_trace_remove_sysfs(struct device *dev)
1695{
1696 sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
1697}
1698
1660#endif /* CONFIG_BLK_DEV_IO_TRACE */ 1699#endif /* CONFIG_BLK_DEV_IO_TRACE */
1661 1700
1662#ifdef CONFIG_EVENT_TRACING 1701#ifdef CONFIG_EVENT_TRACING
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 46592feab5a6..e51a1bcb7bed 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -60,6 +60,13 @@ static int last_ftrace_enabled;
60/* Quick disabling of function tracer. */ 60/* Quick disabling of function tracer. */
61int function_trace_stop; 61int function_trace_stop;
62 62
63/* List for set_ftrace_pid's pids. */
64LIST_HEAD(ftrace_pids);
65struct ftrace_pid {
66 struct list_head list;
67 struct pid *pid;
68};
69
63/* 70/*
64 * ftrace_disabled is set when an anomaly is discovered. 71 * ftrace_disabled is set when an anomaly is discovered.
65 * ftrace_disabled is much stronger than ftrace_enabled. 72 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -78,6 +85,10 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
78ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; 85ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
79ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 86ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
80 87
88#ifdef CONFIG_FUNCTION_GRAPH_TRACER
89static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
90#endif
91
81static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) 92static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
82{ 93{
83 struct ftrace_ops *op = ftrace_list; 94 struct ftrace_ops *op = ftrace_list;
@@ -155,7 +166,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
155 else 166 else
156 func = ftrace_list_func; 167 func = ftrace_list_func;
157 168
158 if (ftrace_pid_trace) { 169 if (!list_empty(&ftrace_pids)) {
159 set_ftrace_pid_function(func); 170 set_ftrace_pid_function(func);
160 func = ftrace_pid_func; 171 func = ftrace_pid_func;
161 } 172 }
@@ -203,7 +214,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
203 if (ftrace_list->next == &ftrace_list_end) { 214 if (ftrace_list->next == &ftrace_list_end) {
204 ftrace_func_t func = ftrace_list->func; 215 ftrace_func_t func = ftrace_list->func;
205 216
206 if (ftrace_pid_trace) { 217 if (!list_empty(&ftrace_pids)) {
207 set_ftrace_pid_function(func); 218 set_ftrace_pid_function(func);
208 func = ftrace_pid_func; 219 func = ftrace_pid_func;
209 } 220 }
@@ -225,9 +236,13 @@ static void ftrace_update_pid_func(void)
225 if (ftrace_trace_function == ftrace_stub) 236 if (ftrace_trace_function == ftrace_stub)
226 return; 237 return;
227 238
239#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
228 func = ftrace_trace_function; 240 func = ftrace_trace_function;
241#else
242 func = __ftrace_trace_function;
243#endif
229 244
230 if (ftrace_pid_trace) { 245 if (!list_empty(&ftrace_pids)) {
231 set_ftrace_pid_function(func); 246 set_ftrace_pid_function(func);
232 func = ftrace_pid_func; 247 func = ftrace_pid_func;
233 } else { 248 } else {
@@ -736,7 +751,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
736 out: 751 out:
737 mutex_unlock(&ftrace_profile_lock); 752 mutex_unlock(&ftrace_profile_lock);
738 753
739 filp->f_pos += cnt; 754 *ppos += cnt;
740 755
741 return cnt; 756 return cnt;
742} 757}
@@ -817,8 +832,6 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
817} 832}
818#endif /* CONFIG_FUNCTION_PROFILER */ 833#endif /* CONFIG_FUNCTION_PROFILER */
819 834
820/* set when tracing only a pid */
821struct pid *ftrace_pid_trace;
822static struct pid * const ftrace_swapper_pid = &init_struct_pid; 835static struct pid * const ftrace_swapper_pid = &init_struct_pid;
823 836
824#ifdef CONFIG_DYNAMIC_FTRACE 837#ifdef CONFIG_DYNAMIC_FTRACE
@@ -1074,14 +1087,9 @@ static void ftrace_replace_code(int enable)
1074 failed = __ftrace_replace_code(rec, enable); 1087 failed = __ftrace_replace_code(rec, enable);
1075 if (failed) { 1088 if (failed) {
1076 rec->flags |= FTRACE_FL_FAILED; 1089 rec->flags |= FTRACE_FL_FAILED;
1077 if ((system_state == SYSTEM_BOOTING) || 1090 ftrace_bug(failed, rec->ip);
1078 !core_kernel_text(rec->ip)) { 1091 /* Stop processing */
1079 ftrace_free_rec(rec); 1092 return;
1080 } else {
1081 ftrace_bug(failed, rec->ip);
1082 /* Stop processing */
1083 return;
1084 }
1085 } 1093 }
1086 } while_for_each_ftrace_rec(); 1094 } while_for_each_ftrace_rec();
1087} 1095}
@@ -1262,12 +1270,34 @@ static int ftrace_update_code(struct module *mod)
1262 ftrace_new_addrs = p->newlist; 1270 ftrace_new_addrs = p->newlist;
1263 p->flags = 0L; 1271 p->flags = 0L;
1264 1272
1265 /* convert record (i.e, patch mcount-call with NOP) */ 1273 /*
1266 if (ftrace_code_disable(mod, p)) { 1274 * Do the initial record convertion from mcount jump
1267 p->flags |= FTRACE_FL_CONVERTED; 1275 * to the NOP instructions.
1268 ftrace_update_cnt++; 1276 */
1269 } else 1277 if (!ftrace_code_disable(mod, p)) {
1270 ftrace_free_rec(p); 1278 ftrace_free_rec(p);
1279 continue;
1280 }
1281
1282 p->flags |= FTRACE_FL_CONVERTED;
1283 ftrace_update_cnt++;
1284
1285 /*
1286 * If the tracing is enabled, go ahead and enable the record.
1287 *
1288 * The reason not to enable the record immediately is the
1289 * inherent check of ftrace_make_nop/ftrace_make_call for
1290 * correct previous instructions. Doing the NOP
1291 * conversion first puts the module into the correct state, thus
1292 * passing the ftrace_make_call check.
1293 */
1294 if (ftrace_start_up) {
1295 int failed = __ftrace_replace_code(p, 1);
1296 if (failed) {
1297 ftrace_bug(failed, p->ip);
1298 ftrace_free_rec(p);
1299 }
1300 }
1271 } 1301 }
1272 1302
1273 stop = ftrace_now(raw_smp_processor_id()); 1303 stop = ftrace_now(raw_smp_processor_id());
@@ -1657,60 +1687,6 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
1657 return ret; 1687 return ret;
1658} 1688}
1659 1689
1660enum {
1661 MATCH_FULL,
1662 MATCH_FRONT_ONLY,
1663 MATCH_MIDDLE_ONLY,
1664 MATCH_END_ONLY,
1665};
1666
1667/*
1668 * (static function - no need for kernel doc)
1669 *
1670 * Pass in a buffer containing a glob and this function will
1671 * set search to point to the search part of the buffer and
1672 * return the type of search it is (see enum above).
1673 * This does modify buff.
1674 *
1675 * Returns enum type.
1676 * search returns the pointer to use for comparison.
1677 * not returns 1 if buff started with a '!'
1678 * 0 otherwise.
1679 */
1680static int
1681ftrace_setup_glob(char *buff, int len, char **search, int *not)
1682{
1683 int type = MATCH_FULL;
1684 int i;
1685
1686 if (buff[0] == '!') {
1687 *not = 1;
1688 buff++;
1689 len--;
1690 } else
1691 *not = 0;
1692
1693 *search = buff;
1694
1695 for (i = 0; i < len; i++) {
1696 if (buff[i] == '*') {
1697 if (!i) {
1698 *search = buff + 1;
1699 type = MATCH_END_ONLY;
1700 } else {
1701 if (type == MATCH_END_ONLY)
1702 type = MATCH_MIDDLE_ONLY;
1703 else
1704 type = MATCH_FRONT_ONLY;
1705 buff[i] = 0;
1706 break;
1707 }
1708 }
1709 }
1710
1711 return type;
1712}
1713
1714static int ftrace_match(char *str, char *regex, int len, int type) 1690static int ftrace_match(char *str, char *regex, int len, int type)
1715{ 1691{
1716 int matched = 0; 1692 int matched = 0;
@@ -1759,7 +1735,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
1759 int not; 1735 int not;
1760 1736
1761 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1737 flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1762 type = ftrace_setup_glob(buff, len, &search, &not); 1738 type = filter_parse_regex(buff, len, &search, &not);
1763 1739
1764 search_len = strlen(search); 1740 search_len = strlen(search);
1765 1741
@@ -1827,7 +1803,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
1827 } 1803 }
1828 1804
1829 if (strlen(buff)) { 1805 if (strlen(buff)) {
1830 type = ftrace_setup_glob(buff, strlen(buff), &search, &not); 1806 type = filter_parse_regex(buff, strlen(buff), &search, &not);
1831 search_len = strlen(search); 1807 search_len = strlen(search);
1832 } 1808 }
1833 1809
@@ -1992,7 +1968,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
1992 int count = 0; 1968 int count = 0;
1993 char *search; 1969 char *search;
1994 1970
1995 type = ftrace_setup_glob(glob, strlen(glob), &search, &not); 1971 type = filter_parse_regex(glob, strlen(glob), &search, &not);
1996 len = strlen(search); 1972 len = strlen(search);
1997 1973
1998 /* we do not support '!' for function probes */ 1974 /* we do not support '!' for function probes */
@@ -2069,7 +2045,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2069 else if (glob) { 2045 else if (glob) {
2070 int not; 2046 int not;
2071 2047
2072 type = ftrace_setup_glob(glob, strlen(glob), &search, &not); 2048 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2073 len = strlen(search); 2049 len = strlen(search);
2074 2050
2075 /* we do not support '!' for function probes */ 2051 /* we do not support '!' for function probes */
@@ -2223,15 +2199,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
2223 ret = ftrace_process_regex(parser->buffer, 2199 ret = ftrace_process_regex(parser->buffer,
2224 parser->idx, enable); 2200 parser->idx, enable);
2225 if (ret) 2201 if (ret)
2226 goto out; 2202 goto out_unlock;
2227 2203
2228 trace_parser_clear(parser); 2204 trace_parser_clear(parser);
2229 } 2205 }
2230 2206
2231 ret = read; 2207 ret = read;
2232 2208out_unlock:
2233 mutex_unlock(&ftrace_regex_lock); 2209 mutex_unlock(&ftrace_regex_lock);
2234out: 2210
2235 return ret; 2211 return ret;
2236} 2212}
2237 2213
@@ -2313,6 +2289,32 @@ static int __init set_ftrace_filter(char *str)
2313} 2289}
2314__setup("ftrace_filter=", set_ftrace_filter); 2290__setup("ftrace_filter=", set_ftrace_filter);
2315 2291
2292#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2293static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2294static int __init set_graph_function(char *str)
2295{
2296 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2297 return 1;
2298}
2299__setup("ftrace_graph_filter=", set_graph_function);
2300
2301static void __init set_ftrace_early_graph(char *buf)
2302{
2303 int ret;
2304 char *func;
2305
2306 while (buf) {
2307 func = strsep(&buf, ",");
2308 /* we allow only one expression at a time */
2309 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2310 func);
2311 if (ret)
2312 printk(KERN_DEBUG "ftrace: function %s not "
2313 "traceable\n", func);
2314 }
2315}
2316#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2317
2316static void __init set_ftrace_early_filter(char *buf, int enable) 2318static void __init set_ftrace_early_filter(char *buf, int enable)
2317{ 2319{
2318 char *func; 2320 char *func;
@@ -2329,6 +2331,10 @@ static void __init set_ftrace_early_filters(void)
2329 set_ftrace_early_filter(ftrace_filter_buf, 1); 2331 set_ftrace_early_filter(ftrace_filter_buf, 1);
2330 if (ftrace_notrace_buf[0]) 2332 if (ftrace_notrace_buf[0])
2331 set_ftrace_early_filter(ftrace_notrace_buf, 0); 2333 set_ftrace_early_filter(ftrace_notrace_buf, 0);
2334#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2335 if (ftrace_graph_buf[0])
2336 set_ftrace_early_graph(ftrace_graph_buf);
2337#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2332} 2338}
2333 2339
2334static int 2340static int
@@ -2514,7 +2520,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2514 return -ENODEV; 2520 return -ENODEV;
2515 2521
2516 /* decode regex */ 2522 /* decode regex */
2517 type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not); 2523 type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
2518 if (not) 2524 if (not)
2519 return -EINVAL; 2525 return -EINVAL;
2520 2526
@@ -2625,7 +2631,7 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
2625 return 0; 2631 return 0;
2626} 2632}
2627 2633
2628static int ftrace_convert_nops(struct module *mod, 2634static int ftrace_process_locs(struct module *mod,
2629 unsigned long *start, 2635 unsigned long *start,
2630 unsigned long *end) 2636 unsigned long *end)
2631{ 2637{
@@ -2658,19 +2664,17 @@ static int ftrace_convert_nops(struct module *mod,
2658} 2664}
2659 2665
2660#ifdef CONFIG_MODULES 2666#ifdef CONFIG_MODULES
2661void ftrace_release(void *start, void *end) 2667void ftrace_release_mod(struct module *mod)
2662{ 2668{
2663 struct dyn_ftrace *rec; 2669 struct dyn_ftrace *rec;
2664 struct ftrace_page *pg; 2670 struct ftrace_page *pg;
2665 unsigned long s = (unsigned long)start;
2666 unsigned long e = (unsigned long)end;
2667 2671
2668 if (ftrace_disabled || !start || start == end) 2672 if (ftrace_disabled)
2669 return; 2673 return;
2670 2674
2671 mutex_lock(&ftrace_lock); 2675 mutex_lock(&ftrace_lock);
2672 do_for_each_ftrace_rec(pg, rec) { 2676 do_for_each_ftrace_rec(pg, rec) {
2673 if ((rec->ip >= s) && (rec->ip < e)) { 2677 if (within_module_core(rec->ip, mod)) {
2674 /* 2678 /*
2675 * rec->ip is changed in ftrace_free_rec() 2679 * rec->ip is changed in ftrace_free_rec()
2676 * It should not between s and e if record was freed. 2680 * It should not between s and e if record was freed.
@@ -2687,7 +2691,7 @@ static void ftrace_init_module(struct module *mod,
2687{ 2691{
2688 if (ftrace_disabled || start == end) 2692 if (ftrace_disabled || start == end)
2689 return; 2693 return;
2690 ftrace_convert_nops(mod, start, end); 2694 ftrace_process_locs(mod, start, end);
2691} 2695}
2692 2696
2693static int ftrace_module_notify(struct notifier_block *self, 2697static int ftrace_module_notify(struct notifier_block *self,
@@ -2702,9 +2706,7 @@ static int ftrace_module_notify(struct notifier_block *self,
2702 mod->num_ftrace_callsites); 2706 mod->num_ftrace_callsites);
2703 break; 2707 break;
2704 case MODULE_STATE_GOING: 2708 case MODULE_STATE_GOING:
2705 ftrace_release(mod->ftrace_callsites, 2709 ftrace_release_mod(mod);
2706 mod->ftrace_callsites +
2707 mod->num_ftrace_callsites);
2708 break; 2710 break;
2709 } 2711 }
2710 2712
@@ -2750,7 +2752,7 @@ void __init ftrace_init(void)
2750 2752
2751 last_ftrace_enabled = ftrace_enabled = 1; 2753 last_ftrace_enabled = ftrace_enabled = 1;
2752 2754
2753 ret = ftrace_convert_nops(NULL, 2755 ret = ftrace_process_locs(NULL,
2754 __start_mcount_loc, 2756 __start_mcount_loc,
2755 __stop_mcount_loc); 2757 __stop_mcount_loc);
2756 2758
@@ -2783,23 +2785,6 @@ static inline void ftrace_startup_enable(int command) { }
2783# define ftrace_shutdown_sysctl() do { } while (0) 2785# define ftrace_shutdown_sysctl() do { } while (0)
2784#endif /* CONFIG_DYNAMIC_FTRACE */ 2786#endif /* CONFIG_DYNAMIC_FTRACE */
2785 2787
2786static ssize_t
2787ftrace_pid_read(struct file *file, char __user *ubuf,
2788 size_t cnt, loff_t *ppos)
2789{
2790 char buf[64];
2791 int r;
2792
2793 if (ftrace_pid_trace == ftrace_swapper_pid)
2794 r = sprintf(buf, "swapper tasks\n");
2795 else if (ftrace_pid_trace)
2796 r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
2797 else
2798 r = sprintf(buf, "no pid\n");
2799
2800 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2801}
2802
2803static void clear_ftrace_swapper(void) 2788static void clear_ftrace_swapper(void)
2804{ 2789{
2805 struct task_struct *p; 2790 struct task_struct *p;
@@ -2850,14 +2835,12 @@ static void set_ftrace_pid(struct pid *pid)
2850 rcu_read_unlock(); 2835 rcu_read_unlock();
2851} 2836}
2852 2837
2853static void clear_ftrace_pid_task(struct pid **pid) 2838static void clear_ftrace_pid_task(struct pid *pid)
2854{ 2839{
2855 if (*pid == ftrace_swapper_pid) 2840 if (pid == ftrace_swapper_pid)
2856 clear_ftrace_swapper(); 2841 clear_ftrace_swapper();
2857 else 2842 else
2858 clear_ftrace_pid(*pid); 2843 clear_ftrace_pid(pid);
2859
2860 *pid = NULL;
2861} 2844}
2862 2845
2863static void set_ftrace_pid_task(struct pid *pid) 2846static void set_ftrace_pid_task(struct pid *pid)
@@ -2868,74 +2851,184 @@ static void set_ftrace_pid_task(struct pid *pid)
2868 set_ftrace_pid(pid); 2851 set_ftrace_pid(pid);
2869} 2852}
2870 2853
2871static ssize_t 2854static int ftrace_pid_add(int p)
2872ftrace_pid_write(struct file *filp, const char __user *ubuf,
2873 size_t cnt, loff_t *ppos)
2874{ 2855{
2875 struct pid *pid; 2856 struct pid *pid;
2876 char buf[64]; 2857 struct ftrace_pid *fpid;
2877 long val; 2858 int ret = -EINVAL;
2878 int ret;
2879 2859
2880 if (cnt >= sizeof(buf)) 2860 mutex_lock(&ftrace_lock);
2881 return -EINVAL;
2882 2861
2883 if (copy_from_user(&buf, ubuf, cnt)) 2862 if (!p)
2884 return -EFAULT; 2863 pid = ftrace_swapper_pid;
2864 else
2865 pid = find_get_pid(p);
2885 2866
2886 buf[cnt] = 0; 2867 if (!pid)
2868 goto out;
2887 2869
2888 ret = strict_strtol(buf, 10, &val); 2870 ret = 0;
2889 if (ret < 0)
2890 return ret;
2891 2871
2892 mutex_lock(&ftrace_lock); 2872 list_for_each_entry(fpid, &ftrace_pids, list)
2893 if (val < 0) { 2873 if (fpid->pid == pid)
2894 /* disable pid tracing */ 2874 goto out_put;
2895 if (!ftrace_pid_trace)
2896 goto out;
2897 2875
2898 clear_ftrace_pid_task(&ftrace_pid_trace); 2876 ret = -ENOMEM;
2899 2877
2900 } else { 2878 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
2901 /* swapper task is special */ 2879 if (!fpid)
2902 if (!val) { 2880 goto out_put;
2903 pid = ftrace_swapper_pid;
2904 if (pid == ftrace_pid_trace)
2905 goto out;
2906 } else {
2907 pid = find_get_pid(val);
2908 2881
2909 if (pid == ftrace_pid_trace) { 2882 list_add(&fpid->list, &ftrace_pids);
2910 put_pid(pid); 2883 fpid->pid = pid;
2911 goto out;
2912 }
2913 }
2914 2884
2915 if (ftrace_pid_trace) 2885 set_ftrace_pid_task(pid);
2916 clear_ftrace_pid_task(&ftrace_pid_trace);
2917 2886
2918 if (!pid) 2887 ftrace_update_pid_func();
2919 goto out; 2888 ftrace_startup_enable(0);
2889
2890 mutex_unlock(&ftrace_lock);
2891 return 0;
2892
2893out_put:
2894 if (pid != ftrace_swapper_pid)
2895 put_pid(pid);
2920 2896
2921 ftrace_pid_trace = pid; 2897out:
2898 mutex_unlock(&ftrace_lock);
2899 return ret;
2900}
2901
2902static void ftrace_pid_reset(void)
2903{
2904 struct ftrace_pid *fpid, *safe;
2922 2905
2923 set_ftrace_pid_task(ftrace_pid_trace); 2906 mutex_lock(&ftrace_lock);
2907 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
2908 struct pid *pid = fpid->pid;
2909
2910 clear_ftrace_pid_task(pid);
2911
2912 list_del(&fpid->list);
2913 kfree(fpid);
2924 } 2914 }
2925 2915
2926 /* update the function call */
2927 ftrace_update_pid_func(); 2916 ftrace_update_pid_func();
2928 ftrace_startup_enable(0); 2917 ftrace_startup_enable(0);
2929 2918
2930 out:
2931 mutex_unlock(&ftrace_lock); 2919 mutex_unlock(&ftrace_lock);
2920}
2932 2921
2933 return cnt; 2922static void *fpid_start(struct seq_file *m, loff_t *pos)
2923{
2924 mutex_lock(&ftrace_lock);
2925
2926 if (list_empty(&ftrace_pids) && (!*pos))
2927 return (void *) 1;
2928
2929 return seq_list_start(&ftrace_pids, *pos);
2930}
2931
2932static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
2933{
2934 if (v == (void *)1)
2935 return NULL;
2936
2937 return seq_list_next(v, &ftrace_pids, pos);
2938}
2939
2940static void fpid_stop(struct seq_file *m, void *p)
2941{
2942 mutex_unlock(&ftrace_lock);
2943}
2944
2945static int fpid_show(struct seq_file *m, void *v)
2946{
2947 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
2948
2949 if (v == (void *)1) {
2950 seq_printf(m, "no pid\n");
2951 return 0;
2952 }
2953
2954 if (fpid->pid == ftrace_swapper_pid)
2955 seq_printf(m, "swapper tasks\n");
2956 else
2957 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
2958
2959 return 0;
2960}
2961
2962static const struct seq_operations ftrace_pid_sops = {
2963 .start = fpid_start,
2964 .next = fpid_next,
2965 .stop = fpid_stop,
2966 .show = fpid_show,
2967};
2968
2969static int
2970ftrace_pid_open(struct inode *inode, struct file *file)
2971{
2972 int ret = 0;
2973
2974 if ((file->f_mode & FMODE_WRITE) &&
2975 (file->f_flags & O_TRUNC))
2976 ftrace_pid_reset();
2977
2978 if (file->f_mode & FMODE_READ)
2979 ret = seq_open(file, &ftrace_pid_sops);
2980
2981 return ret;
2982}
2983
2984static ssize_t
2985ftrace_pid_write(struct file *filp, const char __user *ubuf,
2986 size_t cnt, loff_t *ppos)
2987{
2988 char buf[64], *tmp;
2989 long val;
2990 int ret;
2991
2992 if (cnt >= sizeof(buf))
2993 return -EINVAL;
2994
2995 if (copy_from_user(&buf, ubuf, cnt))
2996 return -EFAULT;
2997
2998 buf[cnt] = 0;
2999
3000 /*
3001 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3002 * to clean the filter quietly.
3003 */
3004 tmp = strstrip(buf);
3005 if (strlen(tmp) == 0)
3006 return 1;
3007
3008 ret = strict_strtol(tmp, 10, &val);
3009 if (ret < 0)
3010 return ret;
3011
3012 ret = ftrace_pid_add(val);
3013
3014 return ret ? ret : cnt;
3015}
3016
3017static int
3018ftrace_pid_release(struct inode *inode, struct file *file)
3019{
3020 if (file->f_mode & FMODE_READ)
3021 seq_release(inode, file);
3022
3023 return 0;
2934} 3024}
2935 3025
2936static const struct file_operations ftrace_pid_fops = { 3026static const struct file_operations ftrace_pid_fops = {
2937 .read = ftrace_pid_read, 3027 .open = ftrace_pid_open,
2938 .write = ftrace_pid_write, 3028 .write = ftrace_pid_write,
3029 .read = seq_read,
3030 .llseek = seq_lseek,
3031 .release = ftrace_pid_release,
2939}; 3032};
2940 3033
2941static __init int ftrace_init_debugfs(void) 3034static __init int ftrace_init_debugfs(void)
@@ -3298,4 +3391,3 @@ void ftrace_graph_stop(void)
3298 ftrace_stop(); 3391 ftrace_stop();
3299} 3392}
3300#endif 3393#endif
3301
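
The ftrace.c hunks above replace the single ftrace_pid_trace pointer with a list, ftrace_pids, of struct ftrace_pid entries exposed through a seq_file; the two-field layout (list linkage plus a struct pid pointer) is inferred from the usage shown in the diff. A minimal userspace sketch of just that list bookkeeping, with purely illustrative names, looks like this:

#include <stdio.h>
#include <stdlib.h>

struct demo_pid {
	struct demo_pid *next;		/* stands in for the list_head linkage */
	int pid;			/* stands in for the struct pid pointer */
};

static struct demo_pid *demo_pids;	/* stands in for the ftrace_pids list */

static int demo_pid_add(int p)
{
	struct demo_pid *fpid;

	/* refuse duplicates, as ftrace_pid_add() does */
	for (fpid = demo_pids; fpid; fpid = fpid->next)
		if (fpid->pid == p)
			return 0;

	fpid = malloc(sizeof(*fpid));
	if (!fpid)
		return -1;

	fpid->pid = p;
	fpid->next = demo_pids;
	demo_pids = fpid;
	return 0;
}

static void demo_pid_reset(void)
{
	/* tear the whole list down, as ftrace_pid_reset() does */
	while (demo_pids) {
		struct demo_pid *fpid = demo_pids;

		demo_pids = fpid->next;
		free(fpid);
	}
}

int main(void)
{
	demo_pid_add(100);
	demo_pid_add(200);
	demo_pid_add(100);	/* duplicate, ignored */

	for (struct demo_pid *fpid = demo_pids; fpid; fpid = fpid->next)
		printf("traced pid: %d\n", fpid->pid);

	demo_pid_reset();
	return 0;
}
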
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
index 81b1645c8549..a91da69f153a 100644
--- a/kernel/trace/kmemtrace.c
+++ b/kernel/trace/kmemtrace.c
@@ -501,7 +501,7 @@ static int __init init_kmem_tracer(void)
501 return 1; 501 return 1;
502 } 502 }
503 503
504 if (!register_tracer(&kmem_tracer)) { 504 if (register_tracer(&kmem_tracer) != 0) {
505 pr_warning("Warning: could not register the kmem tracer\n"); 505 pr_warning("Warning: could not register the kmem tracer\n");
506 return 1; 506 return 1;
507 } 507 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d4ff01970547..a1ca4956ab5e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
397 int ret; 397 int ret;
398 398
399 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" 399 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
400 "offset:0;\tsize:%u;\n", 400 "offset:0;\tsize:%u;\tsigned:%u;\n",
401 (unsigned int)sizeof(field.time_stamp)); 401 (unsigned int)sizeof(field.time_stamp),
402 (unsigned int)is_signed_type(u64));
402 403
403 ret = trace_seq_printf(s, "\tfield: local_t commit;\t" 404 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
404 "offset:%u;\tsize:%u;\n", 405 "offset:%u;\tsize:%u;\tsigned:%u;\n",
405 (unsigned int)offsetof(typeof(field), commit), 406 (unsigned int)offsetof(typeof(field), commit),
406 (unsigned int)sizeof(field.commit)); 407 (unsigned int)sizeof(field.commit),
408 (unsigned int)is_signed_type(long));
407 409
408 ret = trace_seq_printf(s, "\tfield: char data;\t" 410 ret = trace_seq_printf(s, "\tfield: char data;\t"
409 "offset:%u;\tsize:%u;\n", 411 "offset:%u;\tsize:%u;\tsigned:%u;\n",
410 (unsigned int)offsetof(typeof(field), data), 412 (unsigned int)offsetof(typeof(field), data),
411 (unsigned int)BUF_PAGE_SIZE); 413 (unsigned int)BUF_PAGE_SIZE,
414 (unsigned int)is_signed_type(char));
412 415
413 return ret; 416 return ret;
414} 417}
@@ -483,7 +486,7 @@ struct ring_buffer_iter {
483/* Up this if you want to test the TIME_EXTENTS and normalization */ 486/* Up this if you want to test the TIME_EXTENTS and normalization */
484#define DEBUG_SHIFT 0 487#define DEBUG_SHIFT 0
485 488
486static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) 489static inline u64 rb_time_stamp(struct ring_buffer *buffer)
487{ 490{
488 /* shift to debug/test normalization and TIME_EXTENTS */ 491 /* shift to debug/test normalization and TIME_EXTENTS */
489 return buffer->clock() << DEBUG_SHIFT; 492 return buffer->clock() << DEBUG_SHIFT;
@@ -494,7 +497,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
494 u64 time; 497 u64 time;
495 498
496 preempt_disable_notrace(); 499 preempt_disable_notrace();
497 time = rb_time_stamp(buffer, cpu); 500 time = rb_time_stamp(buffer);
498 preempt_enable_no_resched_notrace(); 501 preempt_enable_no_resched_notrace();
499 502
500 return time; 503 return time;
@@ -599,7 +602,7 @@ static struct list_head *rb_list_head(struct list_head *list)
599} 602}
600 603
601/* 604/*
602 * rb_is_head_page - test if the give page is the head page 605 * rb_is_head_page - test if the given page is the head page
603 * 606 *
604 * Because the reader may move the head_page pointer, we can 607 * Because the reader may move the head_page pointer, we can
605 * not trust what the head page is (it may be pointing to 608 * not trust what the head page is (it may be pointing to
@@ -1193,6 +1196,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1193 atomic_inc(&cpu_buffer->record_disabled); 1196 atomic_inc(&cpu_buffer->record_disabled);
1194 synchronize_sched(); 1197 synchronize_sched();
1195 1198
1199 spin_lock_irq(&cpu_buffer->reader_lock);
1196 rb_head_page_deactivate(cpu_buffer); 1200 rb_head_page_deactivate(cpu_buffer);
1197 1201
1198 for (i = 0; i < nr_pages; i++) { 1202 for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1211,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1207 return; 1211 return;
1208 1212
1209 rb_reset_cpu(cpu_buffer); 1213 rb_reset_cpu(cpu_buffer);
1214 spin_unlock_irq(&cpu_buffer->reader_lock);
1210 1215
1211 rb_check_pages(cpu_buffer); 1216 rb_check_pages(cpu_buffer);
1212 1217
@@ -1785,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1785static struct ring_buffer_event * 1790static struct ring_buffer_event *
1786rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, 1791rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1787 unsigned long length, unsigned long tail, 1792 unsigned long length, unsigned long tail,
1788 struct buffer_page *commit_page,
1789 struct buffer_page *tail_page, u64 *ts) 1793 struct buffer_page *tail_page, u64 *ts)
1790{ 1794{
1795 struct buffer_page *commit_page = cpu_buffer->commit_page;
1791 struct ring_buffer *buffer = cpu_buffer->buffer; 1796 struct ring_buffer *buffer = cpu_buffer->buffer;
1792 struct buffer_page *next_page; 1797 struct buffer_page *next_page;
1793 int ret; 1798 int ret;
@@ -1868,7 +1873,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1868 * Nested commits always have zero deltas, so 1873 * Nested commits always have zero deltas, so
1869 * just reread the time stamp 1874 * just reread the time stamp
1870 */ 1875 */
1871 *ts = rb_time_stamp(buffer, cpu_buffer->cpu); 1876 *ts = rb_time_stamp(buffer);
1872 next_page->page->time_stamp = *ts; 1877 next_page->page->time_stamp = *ts;
1873 } 1878 }
1874 1879
@@ -1890,13 +1895,10 @@ static struct ring_buffer_event *
1890__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, 1895__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1891 unsigned type, unsigned long length, u64 *ts) 1896 unsigned type, unsigned long length, u64 *ts)
1892{ 1897{
1893 struct buffer_page *tail_page, *commit_page; 1898 struct buffer_page *tail_page;
1894 struct ring_buffer_event *event; 1899 struct ring_buffer_event *event;
1895 unsigned long tail, write; 1900 unsigned long tail, write;
1896 1901
1897 commit_page = cpu_buffer->commit_page;
1898 /* we just need to protect against interrupts */
1899 barrier();
1900 tail_page = cpu_buffer->tail_page; 1902 tail_page = cpu_buffer->tail_page;
1901 write = local_add_return(length, &tail_page->write); 1903 write = local_add_return(length, &tail_page->write);
1902 1904
@@ -1907,7 +1909,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1907 /* See if we shot past the end of this buffer page */ 1909 /* See if we shot past the end of this buffer page */
1908 if (write > BUF_PAGE_SIZE) 1910 if (write > BUF_PAGE_SIZE)
1909 return rb_move_tail(cpu_buffer, length, tail, 1911 return rb_move_tail(cpu_buffer, length, tail,
1910 commit_page, tail_page, ts); 1912 tail_page, ts);
1911 1913
1912 /* We reserved something on the buffer */ 1914 /* We reserved something on the buffer */
1913 1915
@@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
2111 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) 2113 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2112 goto out_fail; 2114 goto out_fail;
2113 2115
2114 ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); 2116 ts = rb_time_stamp(cpu_buffer->buffer);
2115 2117
2116 /* 2118 /*
2117 * Only the first commit can update the timestamp. 2119 * Only the first commit can update the timestamp.
@@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2681EXPORT_SYMBOL_GPL(ring_buffer_entries); 2683EXPORT_SYMBOL_GPL(ring_buffer_entries);
2682 2684
2683/** 2685/**
2684 * ring_buffer_overrun_cpu - get the number of overruns in buffer 2686 * ring_buffer_overruns - get the number of overruns in buffer
2685 * @buffer: The ring buffer 2687 * @buffer: The ring buffer
2686 * 2688 *
2687 * Returns the total number of overruns in the ring buffer 2689 * Returns the total number of overruns in the ring buffer
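
The page-header format strings above gain a signed: field whose value comes from is_signed_type(). That helper is defined outside this diff; a userspace sketch of a macro with the same behavior, shown only as a model, is:

#include <stdio.h>

#define is_signed_type(type)	(((type)(-1)) < 0)

int main(void)
{
	printf("u64:  %d\n", is_signed_type(unsigned long long));	/* 0 */
	printf("long: %d\n", is_signed_type(long));			/* 1 */
	printf("char: %d\n", is_signed_type(char));			/* depends on the ABI */
	return 0;
}
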
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index 573d3cc762c3..b2477caf09c2 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -35,6 +35,28 @@ static int disable_reader;
35module_param(disable_reader, uint, 0644); 35module_param(disable_reader, uint, 0644);
36MODULE_PARM_DESC(disable_reader, "only run producer"); 36MODULE_PARM_DESC(disable_reader, "only run producer");
37 37
38static int write_iteration = 50;
39module_param(write_iteration, uint, 0644);
40MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");
41
42static int producer_nice = 19;
43static int consumer_nice = 19;
44
45static int producer_fifo = -1;
46static int consumer_fifo = -1;
47
48module_param(producer_nice, uint, 0644);
49MODULE_PARM_DESC(producer_nice, "nice prio for producer");
50
51module_param(consumer_nice, uint, 0644);
52MODULE_PARM_DESC(consumer_nice, "nice prio for consumer");
53
54module_param(producer_fifo, uint, 0644);
55MODULE_PARM_DESC(producer_fifo, "fifo prio for producer");
56
57module_param(consumer_fifo, uint, 0644);
58MODULE_PARM_DESC(consumer_fifo, "fifo prio for consumer");
59
38static int read_events; 60static int read_events;
39 61
40static int kill_test; 62static int kill_test;
@@ -208,15 +230,18 @@ static void ring_buffer_producer(void)
208 do { 230 do {
209 struct ring_buffer_event *event; 231 struct ring_buffer_event *event;
210 int *entry; 232 int *entry;
211 233 int i;
212 event = ring_buffer_lock_reserve(buffer, 10); 234
213 if (!event) { 235 for (i = 0; i < write_iteration; i++) {
214 missed++; 236 event = ring_buffer_lock_reserve(buffer, 10);
215 } else { 237 if (!event) {
216 hit++; 238 missed++;
217 entry = ring_buffer_event_data(event); 239 } else {
218 *entry = smp_processor_id(); 240 hit++;
219 ring_buffer_unlock_commit(buffer, event); 241 entry = ring_buffer_event_data(event);
242 *entry = smp_processor_id();
243 ring_buffer_unlock_commit(buffer, event);
244 }
220 } 245 }
221 do_gettimeofday(&end_tv); 246 do_gettimeofday(&end_tv);
222 247
@@ -263,6 +288,27 @@ static void ring_buffer_producer(void)
263 288
264 if (kill_test) 289 if (kill_test)
265 trace_printk("ERROR!\n"); 290 trace_printk("ERROR!\n");
291
292 if (!disable_reader) {
293 if (consumer_fifo < 0)
294 trace_printk("Running Consumer at nice: %d\n",
295 consumer_nice);
296 else
297 trace_printk("Running Consumer at SCHED_FIFO %d\n",
298 consumer_fifo);
299 }
300 if (producer_fifo < 0)
301 trace_printk("Running Producer at nice: %d\n",
302 producer_nice);
303 else
304 trace_printk("Running Producer at SCHED_FIFO %d\n",
305 producer_fifo);
306
307 /* Let the user know that the test is running at low priority */
308 if (producer_fifo < 0 && consumer_fifo < 0 &&
309 producer_nice == 19 && consumer_nice == 19)
310 trace_printk("WARNING!!! This test is running at lowest priority.\n");
311
266 trace_printk("Time: %lld (usecs)\n", time); 312 trace_printk("Time: %lld (usecs)\n", time);
267 trace_printk("Overruns: %lld\n", overruns); 313 trace_printk("Overruns: %lld\n", overruns);
268 if (disable_reader) 314 if (disable_reader)
@@ -392,6 +438,27 @@ static int __init ring_buffer_benchmark_init(void)
392 if (IS_ERR(producer)) 438 if (IS_ERR(producer))
393 goto out_kill; 439 goto out_kill;
394 440
441 /*
442 * Run them as low-prio background tasks by default:
443 */
444 if (!disable_reader) {
445 if (consumer_fifo >= 0) {
446 struct sched_param param = {
447 .sched_priority = consumer_fifo
448 };
449 sched_setscheduler(consumer, SCHED_FIFO, &param);
450 } else
451 set_user_nice(consumer, consumer_nice);
452 }
453
454 if (producer_fifo >= 0) {
455 struct sched_param param = {
456 .sched_priority = producer_fifo
457 };
458 sched_setscheduler(producer, SCHED_FIFO, &param);
459 } else
460 set_user_nice(producer, producer_nice);
461
395 return 0; 462 return 0;
396 463
397 out_kill: 464 out_kill:
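
The write_iteration parameter added above batches several reserve/commit pairs between the do_gettimeofday() calls, so the cost of reading the clock no longer dominates the per-event figures. A rough userspace sketch of that measurement structure, with purely illustrative names:

#include <stdio.h>
#include <sys/time.h>

#define WRITE_ITERATION 50	/* stands in for the module parameter */

static volatile long slot;

int main(void)
{
	struct timeval start, end;
	long hit = 0;

	gettimeofday(&start, NULL);
	do {
		/* one clock read per WRITE_ITERATION "events", as in the patch */
		for (int i = 0; i < WRITE_ITERATION; i++) {
			slot++;		/* stands in for reserve + commit */
			hit++;
		}
		gettimeofday(&end, NULL);
	} while (end.tv_sec - start.tv_sec < 1);

	printf("%ld writes in ~1 second\n", hit);
	return 0;
}
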
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 45068269ebb1..874f2893cff0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -129,7 +129,7 @@ static int tracing_set_tracer(const char *buf);
129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; 129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
130static char *default_bootup_tracer; 130static char *default_bootup_tracer;
131 131
132static int __init set_ftrace(char *str) 132static int __init set_cmdline_ftrace(char *str)
133{ 133{
134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); 134 strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
135 default_bootup_tracer = bootup_tracer_buf; 135 default_bootup_tracer = bootup_tracer_buf;
@@ -137,7 +137,7 @@ static int __init set_ftrace(char *str)
137 ring_buffer_expanded = 1; 137 ring_buffer_expanded = 1;
138 return 1; 138 return 1;
139} 139}
140__setup("ftrace=", set_ftrace); 140__setup("ftrace=", set_cmdline_ftrace);
141 141
142static int __init set_ftrace_dump_on_oops(char *str) 142static int __init set_ftrace_dump_on_oops(char *str)
143{ 143{
@@ -1361,10 +1361,11 @@ int trace_array_vprintk(struct trace_array *tr,
1361 pause_graph_tracing(); 1361 pause_graph_tracing();
1362 raw_local_irq_save(irq_flags); 1362 raw_local_irq_save(irq_flags);
1363 __raw_spin_lock(&trace_buf_lock); 1363 __raw_spin_lock(&trace_buf_lock);
1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1364 if (args == NULL) {
1365 1365 strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
1366 len = min(len, TRACE_BUF_SIZE-1); 1366 len = strlen(trace_buf);
1367 trace_buf[len] = 0; 1367 } else
1368 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
1368 1369
1369 size = sizeof(*entry) + len + 1; 1370 size = sizeof(*entry) + len + 1;
1370 buffer = tr->buffer; 1371 buffer = tr->buffer;
@@ -1373,10 +1374,10 @@ int trace_array_vprintk(struct trace_array *tr,
1373 if (!event) 1374 if (!event)
1374 goto out_unlock; 1375 goto out_unlock;
1375 entry = ring_buffer_event_data(event); 1376 entry = ring_buffer_event_data(event);
1376 entry->ip = ip; 1377 entry->ip = ip;
1377 1378
1378 memcpy(&entry->buf, trace_buf, len); 1379 memcpy(&entry->buf, trace_buf, len);
1379 entry->buf[len] = 0; 1380 entry->buf[len] = '\0';
1380 if (!filter_check_discard(call, entry, buffer, event)) 1381 if (!filter_check_discard(call, entry, buffer, event))
1381 ring_buffer_unlock_commit(buffer, event); 1382 ring_buffer_unlock_commit(buffer, event);
1382 1383
@@ -1393,7 +1394,7 @@ int trace_array_vprintk(struct trace_array *tr,
1393 1394
1394int trace_vprintk(unsigned long ip, const char *fmt, va_list args) 1395int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1395{ 1396{
1396 return trace_array_printk(&global_trace, ip, fmt, args); 1397 return trace_array_vprintk(&global_trace, ip, fmt, args);
1397} 1398}
1398EXPORT_SYMBOL_GPL(trace_vprintk); 1399EXPORT_SYMBOL_GPL(trace_vprintk);
1399 1400
@@ -2440,7 +2441,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2440 return ret; 2441 return ret;
2441 } 2442 }
2442 2443
2443 filp->f_pos += cnt; 2444 *ppos += cnt;
2444 2445
2445 return cnt; 2446 return cnt;
2446} 2447}
@@ -2582,7 +2583,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf,
2582 } 2583 }
2583 mutex_unlock(&trace_types_lock); 2584 mutex_unlock(&trace_types_lock);
2584 2585
2585 filp->f_pos += cnt; 2586 *ppos += cnt;
2586 2587
2587 return cnt; 2588 return cnt;
2588} 2589}
@@ -2764,7 +2765,7 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2764 if (err) 2765 if (err)
2765 return err; 2766 return err;
2766 2767
2767 filp->f_pos += ret; 2768 *ppos += ret;
2768 2769
2769 return ret; 2770 return ret;
2770} 2771}
@@ -3299,7 +3300,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3299 } 3300 }
3300 } 3301 }
3301 3302
3302 filp->f_pos += cnt; 3303 *ppos += cnt;
3303 3304
3304 /* If check pages failed, return ENOMEM */ 3305 /* If check pages failed, return ENOMEM */
3305 if (tracing_disabled) 3306 if (tracing_disabled)
@@ -3319,22 +3320,11 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
3319 return cnt; 3320 return cnt;
3320} 3321}
3321 3322
3322static int mark_printk(const char *fmt, ...)
3323{
3324 int ret;
3325 va_list args;
3326 va_start(args, fmt);
3327 ret = trace_vprintk(0, fmt, args);
3328 va_end(args);
3329 return ret;
3330}
3331
3332static ssize_t 3323static ssize_t
3333tracing_mark_write(struct file *filp, const char __user *ubuf, 3324tracing_mark_write(struct file *filp, const char __user *ubuf,
3334 size_t cnt, loff_t *fpos) 3325 size_t cnt, loff_t *fpos)
3335{ 3326{
3336 char *buf; 3327 char *buf;
3337 char *end;
3338 3328
3339 if (tracing_disabled) 3329 if (tracing_disabled)
3340 return -EINVAL; 3330 return -EINVAL;
@@ -3342,7 +3332,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3342 if (cnt > TRACE_BUF_SIZE) 3332 if (cnt > TRACE_BUF_SIZE)
3343 cnt = TRACE_BUF_SIZE; 3333 cnt = TRACE_BUF_SIZE;
3344 3334
3345 buf = kmalloc(cnt + 1, GFP_KERNEL); 3335 buf = kmalloc(cnt + 2, GFP_KERNEL);
3346 if (buf == NULL) 3336 if (buf == NULL)
3347 return -ENOMEM; 3337 return -ENOMEM;
3348 3338
@@ -3350,14 +3340,13 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
3350 kfree(buf); 3340 kfree(buf);
3351 return -EFAULT; 3341 return -EFAULT;
3352 } 3342 }
3343 if (buf[cnt-1] != '\n') {
3344 buf[cnt] = '\n';
3345 buf[cnt+1] = '\0';
3346 } else
3347 buf[cnt] = '\0';
3353 3348
3354 /* Cut from the first nil or newline. */ 3349 cnt = trace_vprintk(0, buf, NULL);
3355 buf[cnt] = '\0';
3356 end = strchr(buf, '\n');
3357 if (end)
3358 *end = '\0';
3359
3360 cnt = mark_printk("%s\n", buf);
3361 kfree(buf); 3350 kfree(buf);
3362 *fpos += cnt; 3351 *fpos += cnt;
3363 3352
@@ -3730,7 +3719,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
3730 3719
3731 s = kmalloc(sizeof(*s), GFP_KERNEL); 3720 s = kmalloc(sizeof(*s), GFP_KERNEL);
3732 if (!s) 3721 if (!s)
3733 return ENOMEM; 3722 return -ENOMEM;
3734 3723
3735 trace_seq_init(s); 3724 trace_seq_init(s);
3736 3725
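
Several hunks above switch write handlers from bumping filp->f_pos to advancing the loff_t they were handed, which is the offset that pread()/pwrite() and similar callers actually pass down. A kernel-style sketch of the pattern, with a hypothetical handler name that is not part of this patch:

#include <linux/fs.h>

static ssize_t demo_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/* ... consume the user buffer here ... */

	*ppos += cnt;		/* right: advance the position the caller gave us */
	/* filp->f_pos += cnt;    wrong: ignores the offset pwrite() passed in */

	return cnt;
}
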
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 405cb850b75d..1d7f4830a80d 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -11,6 +11,7 @@
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <trace/boot.h> 12#include <trace/boot.h>
13#include <linux/kmemtrace.h> 13#include <linux/kmemtrace.h>
14#include <linux/hw_breakpoint.h>
14 15
15#include <linux/trace_seq.h> 16#include <linux/trace_seq.h>
16#include <linux/ftrace_event.h> 17#include <linux/ftrace_event.h>
@@ -37,6 +38,7 @@ enum trace_type {
37 TRACE_KMEM_ALLOC, 38 TRACE_KMEM_ALLOC,
38 TRACE_KMEM_FREE, 39 TRACE_KMEM_FREE,
39 TRACE_BLK, 40 TRACE_BLK,
41 TRACE_KSYM,
40 42
41 __TRACE_LAST_TYPE, 43 __TRACE_LAST_TYPE,
42}; 44};
@@ -98,9 +100,32 @@ struct syscall_trace_enter {
98struct syscall_trace_exit { 100struct syscall_trace_exit {
99 struct trace_entry ent; 101 struct trace_entry ent;
100 int nr; 102 int nr;
101 unsigned long ret; 103 long ret;
102}; 104};
103 105
106struct kprobe_trace_entry {
107 struct trace_entry ent;
108 unsigned long ip;
109 int nargs;
110 unsigned long args[];
111};
112
113#define SIZEOF_KPROBE_TRACE_ENTRY(n) \
114 (offsetof(struct kprobe_trace_entry, args) + \
115 (sizeof(unsigned long) * (n)))
116
117struct kretprobe_trace_entry {
118 struct trace_entry ent;
119 unsigned long func;
120 unsigned long ret_ip;
121 int nargs;
122 unsigned long args[];
123};
124
125#define SIZEOF_KRETPROBE_TRACE_ENTRY(n) \
126 (offsetof(struct kretprobe_trace_entry, args) + \
127 (sizeof(unsigned long) * (n)))
128
104/* 129/*
105 * trace_flag_type is an enumeration that holds different 130 * trace_flag_type is an enumeration that holds different
106 * states when a trace occurs. These are: 131 * states when a trace occurs. These are:
@@ -209,6 +234,7 @@ extern void __ftrace_bad_type(void);
209 TRACE_KMEM_ALLOC); \ 234 TRACE_KMEM_ALLOC); \
210 IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ 235 IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \
211 TRACE_KMEM_FREE); \ 236 TRACE_KMEM_FREE); \
237 IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\
212 __ftrace_bad_type(); \ 238 __ftrace_bad_type(); \
213 } while (0) 239 } while (0)
214 240
@@ -364,6 +390,8 @@ int register_tracer(struct tracer *type);
364void unregister_tracer(struct tracer *type); 390void unregister_tracer(struct tracer *type);
365int is_tracing_stopped(void); 391int is_tracing_stopped(void);
366 392
393extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
394
367extern unsigned long nsecs_to_usecs(unsigned long nsecs); 395extern unsigned long nsecs_to_usecs(unsigned long nsecs);
368 396
369#ifdef CONFIG_TRACER_MAX_TRACE 397#ifdef CONFIG_TRACER_MAX_TRACE
@@ -438,6 +466,8 @@ extern int trace_selftest_startup_branch(struct tracer *trace,
438 struct trace_array *tr); 466 struct trace_array *tr);
439extern int trace_selftest_startup_hw_branches(struct tracer *trace, 467extern int trace_selftest_startup_hw_branches(struct tracer *trace,
440 struct trace_array *tr); 468 struct trace_array *tr);
469extern int trace_selftest_startup_ksym(struct tracer *trace,
470 struct trace_array *tr);
441#endif /* CONFIG_FTRACE_STARTUP_TEST */ 471#endif /* CONFIG_FTRACE_STARTUP_TEST */
442 472
443extern void *head_page(struct trace_array_cpu *data); 473extern void *head_page(struct trace_array_cpu *data);
@@ -483,10 +513,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
483 return 0; 513 return 0;
484} 514}
485#else 515#else
486static inline int ftrace_trace_addr(unsigned long addr)
487{
488 return 1;
489}
490static inline int ftrace_graph_addr(unsigned long addr) 516static inline int ftrace_graph_addr(unsigned long addr)
491{ 517{
492 return 1; 518 return 1;
@@ -500,12 +526,12 @@ print_graph_function(struct trace_iterator *iter)
500} 526}
501#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 527#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
502 528
503extern struct pid *ftrace_pid_trace; 529extern struct list_head ftrace_pids;
504 530
505#ifdef CONFIG_FUNCTION_TRACER 531#ifdef CONFIG_FUNCTION_TRACER
506static inline int ftrace_trace_task(struct task_struct *task) 532static inline int ftrace_trace_task(struct task_struct *task)
507{ 533{
508 if (!ftrace_pid_trace) 534 if (list_empty(&ftrace_pids))
509 return 1; 535 return 1;
510 536
511 return test_tsk_trace_trace(task); 537 return test_tsk_trace_trace(task);
@@ -687,7 +713,6 @@ struct event_filter {
687 int n_preds; 713 int n_preds;
688 struct filter_pred **preds; 714 struct filter_pred **preds;
689 char *filter_string; 715 char *filter_string;
690 bool no_reset;
691}; 716};
692 717
693struct event_subsystem { 718struct event_subsystem {
@@ -699,22 +724,40 @@ struct event_subsystem {
699}; 724};
700 725
701struct filter_pred; 726struct filter_pred;
727struct regex;
702 728
703typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event, 729typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
704 int val1, int val2); 730 int val1, int val2);
705 731
732typedef int (*regex_match_func)(char *str, struct regex *r, int len);
733
734enum regex_type {
735 MATCH_FULL = 0,
736 MATCH_FRONT_ONLY,
737 MATCH_MIDDLE_ONLY,
738 MATCH_END_ONLY,
739};
740
741struct regex {
742 char pattern[MAX_FILTER_STR_VAL];
743 int len;
744 int field_len;
745 regex_match_func match;
746};
747
706struct filter_pred { 748struct filter_pred {
707 filter_pred_fn_t fn; 749 filter_pred_fn_t fn;
708 u64 val; 750 u64 val;
709 char str_val[MAX_FILTER_STR_VAL]; 751 struct regex regex;
710 int str_len; 752 char *field_name;
711 char *field_name; 753 int offset;
712 int offset; 754 int not;
713 int not; 755 int op;
714 int op; 756 int pop_n;
715 int pop_n;
716}; 757};
717 758
759extern enum regex_type
760filter_parse_regex(char *buff, int len, char **search, int *not);
718extern void print_event_filter(struct ftrace_event_call *call, 761extern void print_event_filter(struct ftrace_event_call *call,
719 struct trace_seq *s); 762 struct trace_seq *s);
720extern int apply_event_filter(struct ftrace_event_call *call, 763extern int apply_event_filter(struct ftrace_event_call *call,
@@ -730,7 +773,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
730 struct ring_buffer *buffer, 773 struct ring_buffer *buffer,
731 struct ring_buffer_event *event) 774 struct ring_buffer_event *event)
732{ 775{
733 if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) { 776 if (unlikely(call->filter_active) &&
777 !filter_match_preds(call->filter, rec)) {
734 ring_buffer_discard_commit(buffer, event); 778 ring_buffer_discard_commit(buffer, event);
735 return 1; 779 return 1;
736 } 780 }
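
The SIZEOF_KPROBE_TRACE_ENTRY() and SIZEOF_KRETPROBE_TRACE_ENTRY() macros added to trace.h above size a record that ends in a flexible array member: take the offset of the array and add one word per argument. A userspace sketch with a stand-in struct (not the kernel's definition):

#include <stdio.h>
#include <stddef.h>

struct demo_entry {
	unsigned long ip;
	int nargs;
	unsigned long args[];	/* flexible array member, sized per record */
};

#define SIZEOF_DEMO_ENTRY(n) \
	(offsetof(struct demo_entry, args) + (sizeof(unsigned long) * (n)))

int main(void)
{
	/* a record carrying 3 probe arguments */
	printf("header: %zu bytes\n", offsetof(struct demo_entry, args));
	printf("3 args: %zu bytes total\n", SIZEOF_DEMO_ENTRY(3));
	return 0;
}
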
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index 7a7a9fd249a9..4a194f08f88c 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -34,6 +34,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
34 struct trace_array *tr = branch_tracer; 34 struct trace_array *tr = branch_tracer;
35 struct ring_buffer_event *event; 35 struct ring_buffer_event *event;
36 struct trace_branch *entry; 36 struct trace_branch *entry;
37 struct ring_buffer *buffer;
37 unsigned long flags; 38 unsigned long flags;
38 int cpu, pc; 39 int cpu, pc;
39 const char *p; 40 const char *p;
@@ -54,7 +55,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
54 goto out; 55 goto out;
55 56
56 pc = preempt_count(); 57 pc = preempt_count();
57 event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, 58 buffer = tr->buffer;
59 event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
58 sizeof(*entry), flags, pc); 60 sizeof(*entry), flags, pc);
59 if (!event) 61 if (!event)
60 goto out; 62 goto out;
@@ -74,8 +76,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
74 entry->line = f->line; 76 entry->line = f->line;
75 entry->correct = val == expect; 77 entry->correct = val == expect;
76 78
77 if (!filter_check_discard(call, entry, tr->buffer, event)) 79 if (!filter_check_discard(call, entry, buffer, event))
78 ring_buffer_unlock_commit(tr->buffer, event); 80 ring_buffer_unlock_commit(buffer, event);
79 81
80 out: 82 out:
81 atomic_dec(&tr->data[cpu]->disabled); 83 atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 20c5f92e28a8..878c03f386ba 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -20,6 +20,8 @@
20#include <linux/ktime.h> 20#include <linux/ktime.h>
21#include <linux/trace_clock.h> 21#include <linux/trace_clock.h>
22 22
23#include "trace.h"
24
23/* 25/*
24 * trace_clock_local(): the simplest and least coherent tracing clock. 26 * trace_clock_local(): the simplest and least coherent tracing clock.
25 * 27 *
@@ -28,17 +30,17 @@
28 */ 30 */
29u64 notrace trace_clock_local(void) 31u64 notrace trace_clock_local(void)
30{ 32{
31 unsigned long flags;
32 u64 clock; 33 u64 clock;
34 int resched;
33 35
34 /* 36 /*
35 * sched_clock() is an architecture implemented, fast, scalable, 37 * sched_clock() is an architecture implemented, fast, scalable,
36 * lockless clock. It is not guaranteed to be coherent across 38 * lockless clock. It is not guaranteed to be coherent across
37 * CPUs, nor across CPU idle events. 39 * CPUs, nor across CPU idle events.
38 */ 40 */
39 raw_local_irq_save(flags); 41 resched = ftrace_preempt_disable();
40 clock = sched_clock(); 42 clock = sched_clock();
41 raw_local_irq_restore(flags); 43 ftrace_preempt_enable(resched);
42 44
43 return clock; 45 return clock;
44} 46}
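
The trace_clock_local() change above only needs to keep the caller on one CPU while sched_clock() is read, so disabling preemption is enough and much cheaper than saving and restoring interrupts; the ftrace_preempt_*() helpers it uses are defined in kernel/trace/trace.h (not in this diff) and also remember whether a reschedule was already pending. A kernel-style sketch of the underlying pattern, with a hypothetical function name:

#include <linux/preempt.h>
#include <linux/sched.h>

u64 notrace demo_clock_local(void)
{
	u64 clock;

	preempt_disable_notrace();	/* stay on this CPU, cheaper than irq off */
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
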
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index ead3d724599d..c16a08f399df 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -364,3 +364,19 @@ FTRACE_ENTRY(kmem_free, kmemtrace_free_entry,
364 F_printk("type:%u call_site:%lx ptr:%p", 364 F_printk("type:%u call_site:%lx ptr:%p",
365 __entry->type_id, __entry->call_site, __entry->ptr) 365 __entry->type_id, __entry->call_site, __entry->ptr)
366); 366);
367
368FTRACE_ENTRY(ksym_trace, ksym_trace_entry,
369
370 TRACE_KSYM,
371
372 F_STRUCT(
373 __field( unsigned long, ip )
374 __field( unsigned char, type )
375 __array( char , cmd, TASK_COMM_LEN )
376 __field( unsigned long, addr )
377 ),
378
379 F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s",
380 (void *)__entry->ip, (unsigned int)__entry->type,
381 (void *)__entry->addr, __entry->cmd)
382);
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index dd44b8768867..d9c60f80aa0d 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -8,17 +8,14 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include "trace.h" 9#include "trace.h"
10 10
11/*
12 * We can't use a size but a type in alloc_percpu()
13 * So let's create a dummy type that matches the desired size
14 */
15typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
16 11
17char *trace_profile_buf; 12char *perf_trace_buf;
18EXPORT_SYMBOL_GPL(trace_profile_buf); 13EXPORT_SYMBOL_GPL(perf_trace_buf);
14
15char *perf_trace_buf_nmi;
16EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
19 17
20char *trace_profile_buf_nmi; 18typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
21EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
22 19
23/* Count the events in use (per event id, not per instance) */ 20/* Count the events in use (per event id, not per instance) */
24static int total_profile_count; 21static int total_profile_count;
@@ -31,29 +28,34 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
31 if (atomic_inc_return(&event->profile_count)) 28 if (atomic_inc_return(&event->profile_count))
32 return 0; 29 return 0;
33 30
34 if (!total_profile_count++) { 31 if (!total_profile_count) {
35 buf = (char *)alloc_percpu(profile_buf_t); 32 buf = (char *)alloc_percpu(perf_trace_t);
36 if (!buf) 33 if (!buf)
37 goto fail_buf; 34 goto fail_buf;
38 35
39 rcu_assign_pointer(trace_profile_buf, buf); 36 rcu_assign_pointer(perf_trace_buf, buf);
40 37
41 buf = (char *)alloc_percpu(profile_buf_t); 38 buf = (char *)alloc_percpu(perf_trace_t);
42 if (!buf) 39 if (!buf)
43 goto fail_buf_nmi; 40 goto fail_buf_nmi;
44 41
45 rcu_assign_pointer(trace_profile_buf_nmi, buf); 42 rcu_assign_pointer(perf_trace_buf_nmi, buf);
46 } 43 }
47 44
48 ret = event->profile_enable(); 45 ret = event->profile_enable(event);
49 if (!ret) 46 if (!ret) {
47 total_profile_count++;
50 return 0; 48 return 0;
49 }
51 50
52 kfree(trace_profile_buf_nmi);
53fail_buf_nmi: 51fail_buf_nmi:
54 kfree(trace_profile_buf); 52 if (!total_profile_count) {
53 free_percpu(perf_trace_buf_nmi);
54 free_percpu(perf_trace_buf);
55 perf_trace_buf_nmi = NULL;
56 perf_trace_buf = NULL;
57 }
55fail_buf: 58fail_buf:
56 total_profile_count--;
57 atomic_dec(&event->profile_count); 59 atomic_dec(&event->profile_count);
58 60
59 return ret; 61 return ret;
@@ -84,14 +86,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
84 if (!atomic_add_negative(-1, &event->profile_count)) 86 if (!atomic_add_negative(-1, &event->profile_count))
85 return; 87 return;
86 88
87 event->profile_disable(); 89 event->profile_disable(event);
88 90
89 if (!--total_profile_count) { 91 if (!--total_profile_count) {
90 buf = trace_profile_buf; 92 buf = perf_trace_buf;
91 rcu_assign_pointer(trace_profile_buf, NULL); 93 rcu_assign_pointer(perf_trace_buf, NULL);
92 94
93 nmi_buf = trace_profile_buf_nmi; 95 nmi_buf = perf_trace_buf_nmi;
94 rcu_assign_pointer(trace_profile_buf_nmi, NULL); 96 rcu_assign_pointer(perf_trace_buf_nmi, NULL);
95 97
96 /* 98 /*
97 * Ensure all events in profiling have finished before 99 * Ensure all events in profiling have finished before
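
The trace_event_profile.c hunks above also fix the buffer refcounting: the per-cpu buffers are allocated before the first user is counted, and total_profile_count is only incremented once profile_enable() has actually succeeded, so a failed enable can free the buffers again without underflowing the count. A userspace sketch of that rule, with illustrative names only:

#include <stdio.h>
#include <stdlib.h>

static char *shared_buf;
static int total_users;

static int fake_enable(int should_fail)
{
	return should_fail ? -1 : 0;
}

static int user_enable(int should_fail)
{
	if (!total_users) {
		shared_buf = malloc(4096);
		if (!shared_buf)
			return -1;
	}

	if (fake_enable(should_fail)) {
		/* enable failed: drop the buffer only if nobody else uses it */
		if (!total_users) {
			free(shared_buf);
			shared_buf = NULL;
		}
		return -1;
	}

	total_users++;	/* counted only on success */
	return 0;
}

static void user_disable(void)
{
	if (--total_users == 0) {
		free(shared_buf);
		shared_buf = NULL;
	}
}

int main(void)
{
	user_enable(1);			/* fails, buffer freed, count stays 0 */
	user_enable(0);			/* succeeds, count == 1 */
	printf("users: %d\n", total_users);
	user_disable();			/* last user gone, buffer freed */
	return 0;
}
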
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index d128f65778e6..1d18315dc836 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -93,9 +93,7 @@ int trace_define_common_fields(struct ftrace_event_call *call)
93} 93}
94EXPORT_SYMBOL_GPL(trace_define_common_fields); 94EXPORT_SYMBOL_GPL(trace_define_common_fields);
95 95
96#ifdef CONFIG_MODULES 96void trace_destroy_fields(struct ftrace_event_call *call)
97
98static void trace_destroy_fields(struct ftrace_event_call *call)
99{ 97{
100 struct ftrace_event_field *field, *next; 98 struct ftrace_event_field *field, *next;
101 99
@@ -107,8 +105,6 @@ static void trace_destroy_fields(struct ftrace_event_call *call)
107 } 105 }
108} 106}
109 107
110#endif /* CONFIG_MODULES */
111
112static void ftrace_event_enable_disable(struct ftrace_event_call *call, 108static void ftrace_event_enable_disable(struct ftrace_event_call *call,
113 int enable) 109 int enable)
114{ 110{
@@ -117,14 +113,14 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
117 if (call->enabled) { 113 if (call->enabled) {
118 call->enabled = 0; 114 call->enabled = 0;
119 tracing_stop_cmdline_record(); 115 tracing_stop_cmdline_record();
120 call->unregfunc(call->data); 116 call->unregfunc(call);
121 } 117 }
122 break; 118 break;
123 case 1: 119 case 1:
124 if (!call->enabled) { 120 if (!call->enabled) {
125 call->enabled = 1; 121 call->enabled = 1;
126 tracing_start_cmdline_record(); 122 tracing_start_cmdline_record();
127 call->regfunc(call->data); 123 call->regfunc(call);
128 } 124 }
129 break; 125 break;
130 } 126 }
@@ -507,7 +503,7 @@ extern char *__bad_type_size(void);
507#define FIELD(type, name) \ 503#define FIELD(type, name) \
508 sizeof(type) != sizeof(field.name) ? __bad_type_size() : \ 504 sizeof(type) != sizeof(field.name) ? __bad_type_size() : \
509 #type, "common_" #name, offsetof(typeof(field), name), \ 505 #type, "common_" #name, offsetof(typeof(field), name), \
510 sizeof(field.name) 506 sizeof(field.name), is_signed_type(type)
511 507
512static int trace_write_header(struct trace_seq *s) 508static int trace_write_header(struct trace_seq *s)
513{ 509{
@@ -515,17 +511,17 @@ static int trace_write_header(struct trace_seq *s)
515 511
516 /* struct trace_entry */ 512 /* struct trace_entry */
517 return trace_seq_printf(s, 513 return trace_seq_printf(s,
518 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 514 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
519 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 515 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
520 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 516 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
521 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 517 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
522 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 518 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
523 "\n", 519 "\n",
524 FIELD(unsigned short, type), 520 FIELD(unsigned short, type),
525 FIELD(unsigned char, flags), 521 FIELD(unsigned char, flags),
526 FIELD(unsigned char, preempt_count), 522 FIELD(unsigned char, preempt_count),
527 FIELD(int, pid), 523 FIELD(int, pid),
528 FIELD(int, lock_depth)); 524 FIELD(int, lock_depth));
529} 525}
530 526
531static ssize_t 527static ssize_t
@@ -878,9 +874,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
878 "'%s/filter' entry\n", name); 874 "'%s/filter' entry\n", name);
879 } 875 }
880 876
881 entry = trace_create_file("enable", 0644, system->entry, 877 trace_create_file("enable", 0644, system->entry,
882 (void *)system->name, 878 (void *)system->name,
883 &ftrace_system_enable_fops); 879 &ftrace_system_enable_fops);
884 880
885 return system->entry; 881 return system->entry;
886} 882}
@@ -892,7 +888,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
892 const struct file_operations *filter, 888 const struct file_operations *filter,
893 const struct file_operations *format) 889 const struct file_operations *format)
894{ 890{
895 struct dentry *entry;
896 int ret; 891 int ret;
897 892
898 /* 893 /*
@@ -910,12 +905,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
910 } 905 }
911 906
912 if (call->regfunc) 907 if (call->regfunc)
913 entry = trace_create_file("enable", 0644, call->dir, call, 908 trace_create_file("enable", 0644, call->dir, call,
914 enable); 909 enable);
915 910
916 if (call->id && call->profile_enable) 911 if (call->id && call->profile_enable)
917 entry = trace_create_file("id", 0444, call->dir, call, 912 trace_create_file("id", 0444, call->dir, call,
918 id); 913 id);
919 914
920 if (call->define_fields) { 915 if (call->define_fields) {
921 ret = call->define_fields(call); 916 ret = call->define_fields(call);
@@ -924,41 +919,60 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
924 " events/%s\n", call->name); 919 " events/%s\n", call->name);
925 return ret; 920 return ret;
926 } 921 }
927 entry = trace_create_file("filter", 0644, call->dir, call, 922 trace_create_file("filter", 0644, call->dir, call,
928 filter); 923 filter);
929 } 924 }
930 925
931 /* A trace may not want to export its format */ 926 /* A trace may not want to export its format */
932 if (!call->show_format) 927 if (!call->show_format)
933 return 0; 928 return 0;
934 929
935 entry = trace_create_file("format", 0444, call->dir, call, 930 trace_create_file("format", 0444, call->dir, call,
936 format); 931 format);
937 932
938 return 0; 933 return 0;
939} 934}
940 935
941#define for_each_event(event, start, end) \ 936static int __trace_add_event_call(struct ftrace_event_call *call)
942 for (event = start; \ 937{
943 (unsigned long)event < (unsigned long)end; \ 938 struct dentry *d_events;
944 event++) 939 int ret;
945 940
946#ifdef CONFIG_MODULES 941 if (!call->name)
942 return -EINVAL;
947 943
948static LIST_HEAD(ftrace_module_file_list); 944 if (call->raw_init) {
945 ret = call->raw_init(call);
946 if (ret < 0) {
947 if (ret != -ENOSYS)
948 pr_warning("Could not initialize trace "
949 "events/%s\n", call->name);
950 return ret;
951 }
952 }
949 953
950/* 954 d_events = event_trace_events_dir();
951 * Modules must own their file_operations to keep up with 955 if (!d_events)
952 * reference counting. 956 return -ENOENT;
953 */ 957
954struct ftrace_module_file_ops { 958 ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
955 struct list_head list; 959 &ftrace_enable_fops, &ftrace_event_filter_fops,
956 struct module *mod; 960 &ftrace_event_format_fops);
957 struct file_operations id; 961 if (!ret)
958 struct file_operations enable; 962 list_add(&call->list, &ftrace_events);
959 struct file_operations format; 963
960 struct file_operations filter; 964 return ret;
961}; 965}
966
967/* Add an additional event_call dynamically */
968int trace_add_event_call(struct ftrace_event_call *call)
969{
970 int ret;
971 mutex_lock(&event_mutex);
972 ret = __trace_add_event_call(call);
973 mutex_unlock(&event_mutex);
974 return ret;
975}
962 976
963static void remove_subsystem_dir(const char *name) 977static void remove_subsystem_dir(const char *name)
964{ 978{
@@ -986,6 +1000,53 @@ static void remove_subsystem_dir(const char *name)
986 } 1000 }
987} 1001}
988 1002
1003/*
1004 * Must be called with both event_mutex and trace_event_mutex held.
1005 */
1006static void __trace_remove_event_call(struct ftrace_event_call *call)
1007{
1008 ftrace_event_enable_disable(call, 0);
1009 if (call->event)
1010 __unregister_ftrace_event(call->event);
1011 debugfs_remove_recursive(call->dir);
1012 list_del(&call->list);
1013 trace_destroy_fields(call);
1014 destroy_preds(call);
1015 remove_subsystem_dir(call->system);
1016}
1017
1018/* Remove an event_call */
1019void trace_remove_event_call(struct ftrace_event_call *call)
1020{
1021 mutex_lock(&event_mutex);
1022 down_write(&trace_event_mutex);
1023 __trace_remove_event_call(call);
1024 up_write(&trace_event_mutex);
1025 mutex_unlock(&event_mutex);
1026}
1027
1028#define for_each_event(event, start, end) \
1029 for (event = start; \
1030 (unsigned long)event < (unsigned long)end; \
1031 event++)
1032
1033#ifdef CONFIG_MODULES
1034
1035static LIST_HEAD(ftrace_module_file_list);
1036
1037/*
1038 * Modules must own their file_operations to keep up with
1039 * reference counting.
1040 */
1041struct ftrace_module_file_ops {
1042 struct list_head list;
1043 struct module *mod;
1044 struct file_operations id;
1045 struct file_operations enable;
1046 struct file_operations format;
1047 struct file_operations filter;
1048};
1049
989static struct ftrace_module_file_ops * 1050static struct ftrace_module_file_ops *
990trace_create_file_ops(struct module *mod) 1051trace_create_file_ops(struct module *mod)
991{ 1052{
@@ -1043,7 +1104,7 @@ static void trace_module_add_events(struct module *mod)
1043 if (!call->name) 1104 if (!call->name)
1044 continue; 1105 continue;
1045 if (call->raw_init) { 1106 if (call->raw_init) {
1046 ret = call->raw_init(); 1107 ret = call->raw_init(call);
1047 if (ret < 0) { 1108 if (ret < 0) {
1048 if (ret != -ENOSYS) 1109 if (ret != -ENOSYS)
1049 pr_warning("Could not initialize trace " 1110 pr_warning("Could not initialize trace "
@@ -1061,10 +1122,11 @@ static void trace_module_add_events(struct module *mod)
1061 return; 1122 return;
1062 } 1123 }
1063 call->mod = mod; 1124 call->mod = mod;
1064 list_add(&call->list, &ftrace_events); 1125 ret = event_create_dir(call, d_events,
1065 event_create_dir(call, d_events, 1126 &file_ops->id, &file_ops->enable,
1066 &file_ops->id, &file_ops->enable, 1127 &file_ops->filter, &file_ops->format);
1067 &file_ops->filter, &file_ops->format); 1128 if (!ret)
1129 list_add(&call->list, &ftrace_events);
1068 } 1130 }
1069} 1131}
1070 1132
@@ -1078,14 +1140,7 @@ static void trace_module_remove_events(struct module *mod)
1078 list_for_each_entry_safe(call, p, &ftrace_events, list) { 1140 list_for_each_entry_safe(call, p, &ftrace_events, list) {
1079 if (call->mod == mod) { 1141 if (call->mod == mod) {
1080 found = true; 1142 found = true;
1081 ftrace_event_enable_disable(call, 0); 1143 __trace_remove_event_call(call);
1082 if (call->event)
1083 __unregister_ftrace_event(call->event);
1084 debugfs_remove_recursive(call->dir);
1085 list_del(&call->list);
1086 trace_destroy_fields(call);
1087 destroy_preds(call);
1088 remove_subsystem_dir(call->system);
1089 } 1144 }
1090 } 1145 }
1091 1146
@@ -1203,7 +1258,7 @@ static __init int event_trace_init(void)
1203 if (!call->name) 1258 if (!call->name)
1204 continue; 1259 continue;
1205 if (call->raw_init) { 1260 if (call->raw_init) {
1206 ret = call->raw_init(); 1261 ret = call->raw_init(call);
1207 if (ret < 0) { 1262 if (ret < 0) {
1208 if (ret != -ENOSYS) 1263 if (ret != -ENOSYS)
1209 pr_warning("Could not initialize trace " 1264 pr_warning("Could not initialize trace "
@@ -1211,10 +1266,12 @@ static __init int event_trace_init(void)
1211 continue; 1266 continue;
1212 } 1267 }
1213 } 1268 }
1214 list_add(&call->list, &ftrace_events); 1269 ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
1215 event_create_dir(call, d_events, &ftrace_event_id_fops, 1270 &ftrace_enable_fops,
1216 &ftrace_enable_fops, &ftrace_event_filter_fops, 1271 &ftrace_event_filter_fops,
1217 &ftrace_event_format_fops); 1272 &ftrace_event_format_fops);
1273 if (!ret)
1274 list_add(&call->list, &ftrace_events);
1218 } 1275 }
1219 1276
1220 while (true) { 1277 while (true) {
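
The trace_add_event_call()/trace_remove_event_call() pair added above gives code outside the built-in event tables (the kprobe tracer introduced in this series, for instance) a way to register and unregister events at run time. A kernel-style sketch of the call pattern only; "my_event" is a hypothetical, fully initialized struct ftrace_event_call, and the declarations are assumed to come from linux/ftrace_event.h:

#include <linux/ftrace_event.h>

static struct ftrace_event_call my_event;	/* hypothetical event definition */

static int demo_register(void)
{
	/* creates events/<system>/<name>/ under debugfs on success */
	return trace_add_event_call(&my_event);
}

static void demo_unregister(void)
{
	/* disables the event and tears its directory down again */
	trace_remove_event_call(&my_event);
}
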
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 23245785927f..50504cb228de 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -18,11 +18,10 @@
18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> 18 * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com>
19 */ 19 */
20 20
21#include <linux/debugfs.h>
22#include <linux/uaccess.h>
23#include <linux/module.h> 21#include <linux/module.h>
24#include <linux/ctype.h> 22#include <linux/ctype.h>
25#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/perf_event.h>
26 25
27#include "trace.h" 26#include "trace.h"
28#include "trace_output.h" 27#include "trace_output.h"
@@ -31,6 +30,7 @@ enum filter_op_ids
31{ 30{
32 OP_OR, 31 OP_OR,
33 OP_AND, 32 OP_AND,
33 OP_GLOB,
34 OP_NE, 34 OP_NE,
35 OP_EQ, 35 OP_EQ,
36 OP_LT, 36 OP_LT,
@@ -48,16 +48,17 @@ struct filter_op {
48}; 48};
49 49
50static struct filter_op filter_ops[] = { 50static struct filter_op filter_ops[] = {
51 { OP_OR, "||", 1 }, 51 { OP_OR, "||", 1 },
52 { OP_AND, "&&", 2 }, 52 { OP_AND, "&&", 2 },
53 { OP_NE, "!=", 4 }, 53 { OP_GLOB, "~", 4 },
54 { OP_EQ, "==", 4 }, 54 { OP_NE, "!=", 4 },
55 { OP_LT, "<", 5 }, 55 { OP_EQ, "==", 4 },
56 { OP_LE, "<=", 5 }, 56 { OP_LT, "<", 5 },
57 { OP_GT, ">", 5 }, 57 { OP_LE, "<=", 5 },
58 { OP_GE, ">=", 5 }, 58 { OP_GT, ">", 5 },
59 { OP_NONE, "OP_NONE", 0 }, 59 { OP_GE, ">=", 5 },
60 { OP_OPEN_PAREN, "(", 0 }, 60 { OP_NONE, "OP_NONE", 0 },
61 { OP_OPEN_PAREN, "(", 0 },
61}; 62};
62 63
63enum { 64enum {
@@ -197,9 +198,9 @@ static int filter_pred_string(struct filter_pred *pred, void *event,
197 char *addr = (char *)(event + pred->offset); 198 char *addr = (char *)(event + pred->offset);
198 int cmp, match; 199 int cmp, match;
199 200
200 cmp = strncmp(addr, pred->str_val, pred->str_len); 201 cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
201 202
202 match = (!cmp) ^ pred->not; 203 match = cmp ^ pred->not;
203 204
204 return match; 205 return match;
205} 206}
@@ -211,9 +212,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event,
211 char **addr = (char **)(event + pred->offset); 212 char **addr = (char **)(event + pred->offset);
212 int cmp, match; 213 int cmp, match;
213 214
214 cmp = strncmp(*addr, pred->str_val, pred->str_len); 215 cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len);
215 216
216 match = (!cmp) ^ pred->not; 217 match = cmp ^ pred->not;
217 218
218 return match; 219 return match;
219} 220}
@@ -237,9 +238,9 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event,
237 char *addr = (char *)(event + str_loc); 238 char *addr = (char *)(event + str_loc);
238 int cmp, match; 239 int cmp, match;
239 240
240 cmp = strncmp(addr, pred->str_val, str_len); 241 cmp = pred->regex.match(addr, &pred->regex, str_len);
241 242
242 match = (!cmp) ^ pred->not; 243 match = cmp ^ pred->not;
243 244
244 return match; 245 return match;
245} 246}
@@ -250,10 +251,121 @@ static int filter_pred_none(struct filter_pred *pred, void *event,
250 return 0; 251 return 0;
251} 252}
252 253
254/* Basic regex callbacks */
255static int regex_match_full(char *str, struct regex *r, int len)
256{
257 if (strncmp(str, r->pattern, len) == 0)
258 return 1;
259 return 0;
260}
261
262static int regex_match_front(char *str, struct regex *r, int len)
263{
264 if (strncmp(str, r->pattern, len) == 0)
265 return 1;
266 return 0;
267}
268
269static int regex_match_middle(char *str, struct regex *r, int len)
270{
271 if (strstr(str, r->pattern))
272 return 1;
273 return 0;
274}
275
276static int regex_match_end(char *str, struct regex *r, int len)
277{
278 char *ptr = strstr(str, r->pattern);
279
280 if (ptr && (ptr[r->len] == 0))
281 return 1;
282 return 0;
283}
284
285/**
286 * filter_parse_regex - parse a basic regex
287 * @buff: the raw regex
288 * @len: length of the regex
289 * @search: will point to the beginning of the string to compare
290 * @not: tell whether the match will have to be inverted
291 *
292 * This passes in a buffer containing a regex and this function will
293 * set search to point to the search part of the buffer and
294 * return the type of search it is (see enum above).
295 * This does modify buff.
296 *
297 * Returns enum type.
298 * search returns the pointer to use for comparison.
299 * not returns 1 if buff started with a '!'
300 * 0 otherwise.
301 */
302enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
303{
304 int type = MATCH_FULL;
305 int i;
306
307 if (buff[0] == '!') {
308 *not = 1;
309 buff++;
310 len--;
311 } else
312 *not = 0;
313
314 *search = buff;
315
316 for (i = 0; i < len; i++) {
317 if (buff[i] == '*') {
318 if (!i) {
319 *search = buff + 1;
320 type = MATCH_END_ONLY;
321 } else {
322 if (type == MATCH_END_ONLY)
323 type = MATCH_MIDDLE_ONLY;
324 else
325 type = MATCH_FRONT_ONLY;
326 buff[i] = 0;
327 break;
328 }
329 }
330 }
331
332 return type;
333}
334
335static void filter_build_regex(struct filter_pred *pred)
336{
337 struct regex *r = &pred->regex;
338 char *search;
339 enum regex_type type = MATCH_FULL;
340 int not = 0;
341
342 if (pred->op == OP_GLOB) {
343 type = filter_parse_regex(r->pattern, r->len, &search, &not);
344 r->len = strlen(search);
345 memmove(r->pattern, search, r->len+1);
346 }
347
348 switch (type) {
349 case MATCH_FULL:
350 r->match = regex_match_full;
351 break;
352 case MATCH_FRONT_ONLY:
353 r->match = regex_match_front;
354 break;
355 case MATCH_MIDDLE_ONLY:
356 r->match = regex_match_middle;
357 break;
358 case MATCH_END_ONLY:
359 r->match = regex_match_end;
360 break;
361 }
362
363 pred->not ^= not;
364}
365
253/* return 1 if event matches, 0 otherwise (discard) */ 366/* return 1 if event matches, 0 otherwise (discard) */
254int filter_match_preds(struct ftrace_event_call *call, void *rec) 367int filter_match_preds(struct event_filter *filter, void *rec)
255{ 368{
256 struct event_filter *filter = call->filter;
257 int match, top = 0, val1 = 0, val2 = 0; 369 int match, top = 0, val1 = 0, val2 = 0;
258 int stack[MAX_FILTER_PRED]; 370 int stack[MAX_FILTER_PRED];
259 struct filter_pred *pred; 371 struct filter_pred *pred;
@@ -396,7 +508,7 @@ static void filter_clear_pred(struct filter_pred *pred)
396{ 508{
397 kfree(pred->field_name); 509 kfree(pred->field_name);
398 pred->field_name = NULL; 510 pred->field_name = NULL;
399 pred->str_len = 0; 511 pred->regex.len = 0;
400} 512}
401 513
402static int filter_set_pred(struct filter_pred *dest, 514static int filter_set_pred(struct filter_pred *dest,
@@ -426,9 +538,8 @@ static void filter_disable_preds(struct ftrace_event_call *call)
426 filter->preds[i]->fn = filter_pred_none; 538 filter->preds[i]->fn = filter_pred_none;
427} 539}
428 540
429void destroy_preds(struct ftrace_event_call *call) 541static void __free_preds(struct event_filter *filter)
430{ 542{
431 struct event_filter *filter = call->filter;
432 int i; 543 int i;
433 544
434 if (!filter) 545 if (!filter)
@@ -441,21 +552,24 @@ void destroy_preds(struct ftrace_event_call *call)
441 kfree(filter->preds); 552 kfree(filter->preds);
442 kfree(filter->filter_string); 553 kfree(filter->filter_string);
443 kfree(filter); 554 kfree(filter);
555}
556
557void destroy_preds(struct ftrace_event_call *call)
558{
559 __free_preds(call->filter);
444 call->filter = NULL; 560 call->filter = NULL;
561 call->filter_active = 0;
445} 562}
446 563
447static int init_preds(struct ftrace_event_call *call) 564static struct event_filter *__alloc_preds(void)
448{ 565{
449 struct event_filter *filter; 566 struct event_filter *filter;
450 struct filter_pred *pred; 567 struct filter_pred *pred;
451 int i; 568 int i;
452 569
453 if (call->filter) 570 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
454 return 0; 571 if (!filter)
455 572 return ERR_PTR(-ENOMEM);
456 filter = call->filter = kzalloc(sizeof(*filter), GFP_KERNEL);
457 if (!call->filter)
458 return -ENOMEM;
459 573
460 filter->n_preds = 0; 574 filter->n_preds = 0;
461 575
@@ -471,12 +585,24 @@ static int init_preds(struct ftrace_event_call *call)
471 filter->preds[i] = pred; 585 filter->preds[i] = pred;
472 } 586 }
473 587
474 return 0; 588 return filter;
475 589
476oom: 590oom:
477 destroy_preds(call); 591 __free_preds(filter);
592 return ERR_PTR(-ENOMEM);
593}
594
595static int init_preds(struct ftrace_event_call *call)
596{
597 if (call->filter)
598 return 0;
599
600 call->filter_active = 0;
601 call->filter = __alloc_preds();
602 if (IS_ERR(call->filter))
603 return PTR_ERR(call->filter);
478 604
479 return -ENOMEM; 605 return 0;
480} 606}
481 607
482static int init_subsystem_preds(struct event_subsystem *system) 608static int init_subsystem_preds(struct event_subsystem *system)
@@ -499,14 +625,7 @@ static int init_subsystem_preds(struct event_subsystem *system)
499 return 0; 625 return 0;
500} 626}
501 627
502enum { 628static void filter_free_subsystem_preds(struct event_subsystem *system)
503 FILTER_DISABLE_ALL,
504 FILTER_INIT_NO_RESET,
505 FILTER_SKIP_NO_RESET,
506};
507
508static void filter_free_subsystem_preds(struct event_subsystem *system,
509 int flag)
510{ 629{
511 struct ftrace_event_call *call; 630 struct ftrace_event_call *call;
512 631
@@ -517,14 +636,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system,
517 if (strcmp(call->system, system->name) != 0) 636 if (strcmp(call->system, system->name) != 0)
518 continue; 637 continue;
519 638
520 if (flag == FILTER_INIT_NO_RESET) {
521 call->filter->no_reset = false;
522 continue;
523 }
524
525 if (flag == FILTER_SKIP_NO_RESET && call->filter->no_reset)
526 continue;
527
528 filter_disable_preds(call); 639 filter_disable_preds(call);
529 remove_filter_string(call->filter); 640 remove_filter_string(call->filter);
530 } 641 }
@@ -532,10 +643,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system,
532 643
533static int filter_add_pred_fn(struct filter_parse_state *ps, 644static int filter_add_pred_fn(struct filter_parse_state *ps,
534 struct ftrace_event_call *call, 645 struct ftrace_event_call *call,
646 struct event_filter *filter,
535 struct filter_pred *pred, 647 struct filter_pred *pred,
536 filter_pred_fn_t fn) 648 filter_pred_fn_t fn)
537{ 649{
538 struct event_filter *filter = call->filter;
539 int idx, err; 650 int idx, err;
540 651
541 if (filter->n_preds == MAX_FILTER_PRED) { 652 if (filter->n_preds == MAX_FILTER_PRED) {
@@ -550,7 +661,6 @@ static int filter_add_pred_fn(struct filter_parse_state *ps,
550 return err; 661 return err;
551 662
552 filter->n_preds++; 663 filter->n_preds++;
553 call->filter_active = 1;
554 664
555 return 0; 665 return 0;
556} 666}
@@ -575,7 +685,10 @@ static bool is_string_field(struct ftrace_event_field *field)
575 685
576static int is_legal_op(struct ftrace_event_field *field, int op) 686static int is_legal_op(struct ftrace_event_field *field, int op)
577{ 687{
578 if (is_string_field(field) && (op != OP_EQ && op != OP_NE)) 688 if (is_string_field(field) &&
689 (op != OP_EQ && op != OP_NE && op != OP_GLOB))
690 return 0;
691 if (!is_string_field(field) && op == OP_GLOB)
579 return 0; 692 return 0;
580 693
581 return 1; 694 return 1;
@@ -626,6 +739,7 @@ static filter_pred_fn_t select_comparison_fn(int op, int field_size,
626 739
627static int filter_add_pred(struct filter_parse_state *ps, 740static int filter_add_pred(struct filter_parse_state *ps,
628 struct ftrace_event_call *call, 741 struct ftrace_event_call *call,
742 struct event_filter *filter,
629 struct filter_pred *pred, 743 struct filter_pred *pred,
630 bool dry_run) 744 bool dry_run)
631{ 745{
@@ -660,21 +774,22 @@ static int filter_add_pred(struct filter_parse_state *ps,
660 } 774 }
661 775
662 if (is_string_field(field)) { 776 if (is_string_field(field)) {
663 pred->str_len = field->size; 777 filter_build_regex(pred);
664 778
665 if (field->filter_type == FILTER_STATIC_STRING) 779 if (field->filter_type == FILTER_STATIC_STRING) {
666 fn = filter_pred_string; 780 fn = filter_pred_string;
667 else if (field->filter_type == FILTER_DYN_STRING) 781 pred->regex.field_len = field->size;
782 } else if (field->filter_type == FILTER_DYN_STRING)
668 fn = filter_pred_strloc; 783 fn = filter_pred_strloc;
669 else { 784 else {
670 fn = filter_pred_pchar; 785 fn = filter_pred_pchar;
671 pred->str_len = strlen(pred->str_val); 786 pred->regex.field_len = strlen(pred->regex.pattern);
672 } 787 }
673 } else { 788 } else {
674 if (field->is_signed) 789 if (field->is_signed)
675 ret = strict_strtoll(pred->str_val, 0, &val); 790 ret = strict_strtoll(pred->regex.pattern, 0, &val);
676 else 791 else
677 ret = strict_strtoull(pred->str_val, 0, &val); 792 ret = strict_strtoull(pred->regex.pattern, 0, &val);
678 if (ret) { 793 if (ret) {
679 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0); 794 parse_error(ps, FILT_ERR_ILLEGAL_INTVAL, 0);
680 return -EINVAL; 795 return -EINVAL;
@@ -694,45 +809,7 @@ static int filter_add_pred(struct filter_parse_state *ps,
694 809
695add_pred_fn: 810add_pred_fn:
696 if (!dry_run) 811 if (!dry_run)
697 return filter_add_pred_fn(ps, call, pred, fn); 812 return filter_add_pred_fn(ps, call, filter, pred, fn);
698 return 0;
699}
700
701static int filter_add_subsystem_pred(struct filter_parse_state *ps,
702 struct event_subsystem *system,
703 struct filter_pred *pred,
704 char *filter_string,
705 bool dry_run)
706{
707 struct ftrace_event_call *call;
708 int err = 0;
709 bool fail = true;
710
711 list_for_each_entry(call, &ftrace_events, list) {
712
713 if (!call->define_fields)
714 continue;
715
716 if (strcmp(call->system, system->name))
717 continue;
718
719 if (call->filter->no_reset)
720 continue;
721
722 err = filter_add_pred(ps, call, pred, dry_run);
723 if (err)
724 call->filter->no_reset = true;
725 else
726 fail = false;
727
728 if (!dry_run)
729 replace_filter_string(call->filter, filter_string);
730 }
731
732 if (fail) {
733 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
734 return err;
735 }
736 return 0; 813 return 0;
737} 814}
738 815
@@ -933,8 +1010,9 @@ static void postfix_clear(struct filter_parse_state *ps)
933 1010
934 while (!list_empty(&ps->postfix)) { 1011 while (!list_empty(&ps->postfix)) {
935 elt = list_first_entry(&ps->postfix, struct postfix_elt, list); 1012 elt = list_first_entry(&ps->postfix, struct postfix_elt, list);
936 kfree(elt->operand);
937 list_del(&elt->list); 1013 list_del(&elt->list);
1014 kfree(elt->operand);
1015 kfree(elt);
938 } 1016 }
939} 1017}
940 1018
@@ -1044,8 +1122,8 @@ static struct filter_pred *create_pred(int op, char *operand1, char *operand2)
1044 return NULL; 1122 return NULL;
1045 } 1123 }
1046 1124
1047 strcpy(pred->str_val, operand2); 1125 strcpy(pred->regex.pattern, operand2);
1048 pred->str_len = strlen(operand2); 1126 pred->regex.len = strlen(pred->regex.pattern);
1049 1127
1050 pred->op = op; 1128 pred->op = op;
1051 1129
@@ -1089,8 +1167,8 @@ static int check_preds(struct filter_parse_state *ps)
1089 return 0; 1167 return 0;
1090} 1168}
1091 1169
1092static int replace_preds(struct event_subsystem *system, 1170static int replace_preds(struct ftrace_event_call *call,
1093 struct ftrace_event_call *call, 1171 struct event_filter *filter,
1094 struct filter_parse_state *ps, 1172 struct filter_parse_state *ps,
1095 char *filter_string, 1173 char *filter_string,
1096 bool dry_run) 1174 bool dry_run)
@@ -1137,11 +1215,7 @@ static int replace_preds(struct event_subsystem *system,
1137add_pred: 1215add_pred:
1138 if (!pred) 1216 if (!pred)
1139 return -ENOMEM; 1217 return -ENOMEM;
1140 if (call) 1218 err = filter_add_pred(ps, call, filter, pred, dry_run);
1141 err = filter_add_pred(ps, call, pred, false);
1142 else
1143 err = filter_add_subsystem_pred(ps, system, pred,
1144 filter_string, dry_run);
1145 filter_free_pred(pred); 1219 filter_free_pred(pred);
1146 if (err) 1220 if (err)
1147 return err; 1221 return err;
@@ -1152,10 +1226,50 @@ add_pred:
1152 return 0; 1226 return 0;
1153} 1227}
1154 1228
1155int apply_event_filter(struct ftrace_event_call *call, char *filter_string) 1229static int replace_system_preds(struct event_subsystem *system,
1230 struct filter_parse_state *ps,
1231 char *filter_string)
1156{ 1232{
1233 struct ftrace_event_call *call;
1234 bool fail = true;
1157 int err; 1235 int err;
1158 1236
1237 list_for_each_entry(call, &ftrace_events, list) {
1238 struct event_filter *filter = call->filter;
1239
1240 if (!call->define_fields)
1241 continue;
1242
1243 if (strcmp(call->system, system->name) != 0)
1244 continue;
1245
1246 /* try to see if the filter can be applied */
1247 err = replace_preds(call, filter, ps, filter_string, true);
1248 if (err)
1249 continue;
1250
1251 /* really apply the filter */
1252 filter_disable_preds(call);
1253 err = replace_preds(call, filter, ps, filter_string, false);
1254 if (err)
1255 filter_disable_preds(call);
1256 else {
1257 call->filter_active = 1;
1258 replace_filter_string(filter, filter_string);
1259 }
1260 fail = false;
1261 }
1262
1263 if (fail) {
1264 parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
1265 return -EINVAL;
1266 }
1267 return 0;
1268}
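
The two passes over replace_preds() above follow a validate-then-commit pattern:
an event's existing predicates are only torn down once a dry run has shown the
new filter applies to it, so events in the subsystem that cannot take the filter
keep their previous state. A tiny standalone sketch of the same pattern
(illustrative only, not kernel code; the event names and filter string are made
up for the example):

#include <stdio.h>
#include <string.h>

struct event { const char *name; const char *filter; };

/* Stand-in for the dry-run pass: here only events whose name starts with
 * the given prefix can accept the new filter. */
static int dry_run(const struct event *ev, const char *prefix)
{
	return strncmp(ev->name, prefix, strlen(prefix)) == 0;
}

static void apply(struct event *ev, const char *filter)
{
	ev->filter = filter;		/* stand-in for the real pass */
}

int main(void)
{
	struct event events[] = {
		{ "sched_switch",      "none" },
		{ "irq_handler_entry", "none" },
	};
	const char *new_filter = "prev_pid == 0";
	int i, applied = 0;

	for (i = 0; i < 2; i++) {
		if (!dry_run(&events[i], "sched_"))
			continue;	/* leave this event's old filter intact */
		apply(&events[i], new_filter);
		applied++;
	}

	for (i = 0; i < 2; i++)
		printf("%-18s filter: %s\n", events[i].name, events[i].filter);

	return applied ? 0 : 1;		/* error only if nothing accepted it */
}
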
1269
1270int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1271{
1272 int err;
1159 struct filter_parse_state *ps; 1273 struct filter_parse_state *ps;
1160 1274
1161 mutex_lock(&event_mutex); 1275 mutex_lock(&event_mutex);
@@ -1167,8 +1281,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1167 if (!strcmp(strstrip(filter_string), "0")) { 1281 if (!strcmp(strstrip(filter_string), "0")) {
1168 filter_disable_preds(call); 1282 filter_disable_preds(call);
1169 remove_filter_string(call->filter); 1283 remove_filter_string(call->filter);
1170 mutex_unlock(&event_mutex); 1284 goto out_unlock;
1171 return 0;
1172 } 1285 }
1173 1286
1174 err = -ENOMEM; 1287 err = -ENOMEM;
@@ -1186,10 +1299,11 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
1186 goto out; 1299 goto out;
1187 } 1300 }
1188 1301
1189 err = replace_preds(NULL, call, ps, filter_string, false); 1302 err = replace_preds(call, call->filter, ps, filter_string, false);
1190 if (err) 1303 if (err)
1191 append_filter_err(ps, call->filter); 1304 append_filter_err(ps, call->filter);
1192 1305 else
1306 call->filter_active = 1;
1193out: 1307out:
1194 filter_opstack_clear(ps); 1308 filter_opstack_clear(ps);
1195 postfix_clear(ps); 1309 postfix_clear(ps);
@@ -1204,7 +1318,6 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
1204 char *filter_string) 1318 char *filter_string)
1205{ 1319{
1206 int err; 1320 int err;
1207
1208 struct filter_parse_state *ps; 1321 struct filter_parse_state *ps;
1209 1322
1210 mutex_lock(&event_mutex); 1323 mutex_lock(&event_mutex);
@@ -1214,10 +1327,9 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
1214 goto out_unlock; 1327 goto out_unlock;
1215 1328
1216 if (!strcmp(strstrip(filter_string), "0")) { 1329 if (!strcmp(strstrip(filter_string), "0")) {
1217 filter_free_subsystem_preds(system, FILTER_DISABLE_ALL); 1330 filter_free_subsystem_preds(system);
1218 remove_filter_string(system->filter); 1331 remove_filter_string(system->filter);
1219 mutex_unlock(&event_mutex); 1332 goto out_unlock;
1220 return 0;
1221 } 1333 }
1222 1334
1223 err = -ENOMEM; 1335 err = -ENOMEM;
@@ -1234,31 +1346,87 @@ int apply_subsystem_event_filter(struct event_subsystem *system,
1234 goto out; 1346 goto out;
1235 } 1347 }
1236 1348
1237 filter_free_subsystem_preds(system, FILTER_INIT_NO_RESET); 1349 err = replace_system_preds(system, ps, filter_string);
1238 1350 if (err)
1239 /* try to see the filter can be applied to which events */
1240 err = replace_preds(system, NULL, ps, filter_string, true);
1241 if (err) {
1242 append_filter_err(ps, system->filter); 1351 append_filter_err(ps, system->filter);
1243 goto out; 1352
1353out:
1354 filter_opstack_clear(ps);
1355 postfix_clear(ps);
1356 kfree(ps);
1357out_unlock:
1358 mutex_unlock(&event_mutex);
1359
1360 return err;
1361}
1362
1363#ifdef CONFIG_EVENT_PROFILE
1364
1365void ftrace_profile_free_filter(struct perf_event *event)
1366{
1367 struct event_filter *filter = event->filter;
1368
1369 event->filter = NULL;
1370 __free_preds(filter);
1371}
1372
1373int ftrace_profile_set_filter(struct perf_event *event, int event_id,
1374 char *filter_str)
1375{
1376 int err;
1377 struct event_filter *filter;
1378 struct filter_parse_state *ps;
1379 struct ftrace_event_call *call = NULL;
1380
1381 mutex_lock(&event_mutex);
1382
1383 list_for_each_entry(call, &ftrace_events, list) {
1384 if (call->id == event_id)
1385 break;
1244 } 1386 }
1245 1387
1246 filter_free_subsystem_preds(system, FILTER_SKIP_NO_RESET); 1388 err = -EINVAL;
1389 if (!call)
1390 goto out_unlock;
1247 1391
1248 /* really apply the filter to the events */ 1392 err = -EEXIST;
1249 err = replace_preds(system, NULL, ps, filter_string, false); 1393 if (event->filter)
1250 if (err) { 1394 goto out_unlock;
1251 append_filter_err(ps, system->filter); 1395
1252 filter_free_subsystem_preds(system, 2); 1396 filter = __alloc_preds();
1397 if (IS_ERR(filter)) {
1398 err = PTR_ERR(filter);
1399 goto out_unlock;
1253 } 1400 }
1254 1401
1255out: 1402 err = -ENOMEM;
1403 ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1404 if (!ps)
1405 goto free_preds;
1406
1407 parse_init(ps, filter_ops, filter_str);
1408 err = filter_parse(ps);
1409 if (err)
1410 goto free_ps;
1411
1412 err = replace_preds(call, filter, ps, filter_str, false);
1413 if (!err)
1414 event->filter = filter;
1415
1416free_ps:
1256 filter_opstack_clear(ps); 1417 filter_opstack_clear(ps);
1257 postfix_clear(ps); 1418 postfix_clear(ps);
1258 kfree(ps); 1419 kfree(ps);
1420
1421free_preds:
1422 if (err)
1423 __free_preds(filter);
1424
1259out_unlock: 1425out_unlock:
1260 mutex_unlock(&event_mutex); 1426 mutex_unlock(&event_mutex);
1261 1427
1262 return err; 1428 return err;
1263} 1429}
1264 1430
1431#endif /* CONFIG_EVENT_PROFILE */
1432
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 9753fcc61bc5..dff8c84ddf17 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -48,11 +48,11 @@
48struct ____ftrace_##name { \ 48struct ____ftrace_##name { \
49 tstruct \ 49 tstruct \
50}; \ 50}; \
51static void __used ____ftrace_check_##name(void) \ 51static void __always_unused ____ftrace_check_##name(void) \
52{ \ 52{ \
53 struct ____ftrace_##name *__entry = NULL; \ 53 struct ____ftrace_##name *__entry = NULL; \
54 \ 54 \
55 /* force cmpile-time check on F_printk() */ \ 55 /* force compile-time check on F_printk() */ \
56 printk(print); \ 56 printk(print); \
57} 57}
58 58
@@ -66,44 +66,47 @@ static void __used ____ftrace_check_##name(void) \
66#undef __field 66#undef __field
67#define __field(type, item) \ 67#define __field(type, item) \
68 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ 68 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
69 "offset:%zu;\tsize:%zu;\n", \ 69 "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
70 offsetof(typeof(field), item), \ 70 offsetof(typeof(field), item), \
71 sizeof(field.item)); \ 71 sizeof(field.item), is_signed_type(type)); \
72 if (!ret) \ 72 if (!ret) \
73 return 0; 73 return 0;
74 74
75#undef __field_desc 75#undef __field_desc
76#define __field_desc(type, container, item) \ 76#define __field_desc(type, container, item) \
77 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ 77 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
78 "offset:%zu;\tsize:%zu;\n", \ 78 "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
79 offsetof(typeof(field), container.item), \ 79 offsetof(typeof(field), container.item), \
80 sizeof(field.container.item)); \ 80 sizeof(field.container.item), \
81 is_signed_type(type)); \
81 if (!ret) \ 82 if (!ret) \
82 return 0; 83 return 0;
83 84
84#undef __array 85#undef __array
85#define __array(type, item, len) \ 86#define __array(type, item, len) \
86 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ 87 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
87 "offset:%zu;\tsize:%zu;\n", \ 88 "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
88 offsetof(typeof(field), item), \ 89 offsetof(typeof(field), item), \
89 sizeof(field.item)); \ 90 sizeof(field.item), is_signed_type(type)); \
90 if (!ret) \ 91 if (!ret) \
91 return 0; 92 return 0;
92 93
93#undef __array_desc 94#undef __array_desc
94#define __array_desc(type, container, item, len) \ 95#define __array_desc(type, container, item, len) \
95 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ 96 ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
96 "offset:%zu;\tsize:%zu;\n", \ 97 "offset:%zu;\tsize:%zu;\tsigned:%u;\n", \
97 offsetof(typeof(field), container.item), \ 98 offsetof(typeof(field), container.item), \
98 sizeof(field.container.item)); \ 99 sizeof(field.container.item), \
100 is_signed_type(type)); \
99 if (!ret) \ 101 if (!ret) \
100 return 0; 102 return 0;
101 103
102#undef __dynamic_array 104#undef __dynamic_array
103#define __dynamic_array(type, item) \ 105#define __dynamic_array(type, item) \
104 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ 106 ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
105 "offset:%zu;\tsize:0;\n", \ 107 "offset:%zu;\tsize:0;\tsigned:%u;\n", \
106 offsetof(typeof(field), item)); \ 108 offsetof(typeof(field), item), \
109 is_signed_type(type)); \
107 if (!ret) \ 110 if (!ret) \
108 return 0; 111 return 0;
109 112
@@ -131,7 +134,6 @@ ftrace_format_##name(struct ftrace_event_call *unused, \
131 134
132#include "trace_entries.h" 135#include "trace_entries.h"
133 136
134
135#undef __field 137#undef __field
136#define __field(type, item) \ 138#define __field(type, item) \
137 ret = trace_define_field(event_call, #type, #item, \ 139 ret = trace_define_field(event_call, #type, #item, \
@@ -193,6 +195,11 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
193 195
194#include "trace_entries.h" 196#include "trace_entries.h"
195 197
198static int ftrace_raw_init_event(struct ftrace_event_call *call)
199{
200 INIT_LIST_HEAD(&call->fields);
201 return 0;
202}
196 203
197#undef __field 204#undef __field
198#define __field(type, item) 205#define __field(type, item)
@@ -211,7 +218,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
211 218
212#undef FTRACE_ENTRY 219#undef FTRACE_ENTRY
213#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \ 220#define FTRACE_ENTRY(call, struct_name, type, tstruct, print) \
214static int ftrace_raw_init_event_##call(void); \
215 \ 221 \
216struct ftrace_event_call __used \ 222struct ftrace_event_call __used \
217__attribute__((__aligned__(4))) \ 223__attribute__((__aligned__(4))) \
@@ -219,14 +225,9 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
219 .name = #call, \ 225 .name = #call, \
220 .id = type, \ 226 .id = type, \
221 .system = __stringify(TRACE_SYSTEM), \ 227 .system = __stringify(TRACE_SYSTEM), \
222 .raw_init = ftrace_raw_init_event_##call, \ 228 .raw_init = ftrace_raw_init_event, \
223 .show_format = ftrace_format_##call, \ 229 .show_format = ftrace_format_##call, \
224 .define_fields = ftrace_define_fields_##call, \ 230 .define_fields = ftrace_define_fields_##call, \
225}; \ 231}; \
226static int ftrace_raw_init_event_##call(void) \
227{ \
228 INIT_LIST_HEAD(&event_##call.fields); \
229 return 0; \
230} \
231 232
232#include "trace_entries.h" 233#include "trace_entries.h"
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 23b63859130e..69543a905cd5 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -165,6 +165,7 @@ void trace_hw_branch(u64 from, u64 to)
165 struct ftrace_event_call *call = &event_hw_branch; 165 struct ftrace_event_call *call = &event_hw_branch;
166 struct trace_array *tr = hw_branch_trace; 166 struct trace_array *tr = hw_branch_trace;
167 struct ring_buffer_event *event; 167 struct ring_buffer_event *event;
168 struct ring_buffer *buf;
168 struct hw_branch_entry *entry; 169 struct hw_branch_entry *entry;
169 unsigned long irq1; 170 unsigned long irq1;
170 int cpu; 171 int cpu;
@@ -180,7 +181,8 @@ void trace_hw_branch(u64 from, u64 to)
180 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) 181 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
181 goto out; 182 goto out;
182 183
183 event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, 184 buf = tr->buffer;
185 event = trace_buffer_lock_reserve(buf, TRACE_HW_BRANCHES,
184 sizeof(*entry), 0, 0); 186 sizeof(*entry), 0, 0);
185 if (!event) 187 if (!event)
186 goto out; 188 goto out;
@@ -189,8 +191,8 @@ void trace_hw_branch(u64 from, u64 to)
189 entry->ent.type = TRACE_HW_BRANCHES; 191 entry->ent.type = TRACE_HW_BRANCHES;
190 entry->from = from; 192 entry->from = from;
191 entry->to = to; 193 entry->to = to;
192 if (!filter_check_discard(call, entry, tr->buffer, event)) 194 if (!filter_check_discard(call, entry, buf, event))
193 trace_buffer_unlock_commit(tr, event, 0, 0); 195 trace_buffer_unlock_commit(buf, event, 0, 0);
194 196
195 out: 197 out:
196 atomic_dec(&tr->data[cpu]->disabled); 198 atomic_dec(&tr->data[cpu]->disabled);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
new file mode 100644
index 000000000000..aff5f80b59b8
--- /dev/null
+++ b/kernel/trace/trace_kprobe.c
@@ -0,0 +1,1523 @@
1/*
2 * Kprobes-based tracing events
3 *
4 * Created by Masami Hiramatsu <mhiramat@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <linux/uaccess.h>
22#include <linux/kprobes.h>
23#include <linux/seq_file.h>
24#include <linux/slab.h>
25#include <linux/smp.h>
26#include <linux/debugfs.h>
27#include <linux/types.h>
28#include <linux/string.h>
29#include <linux/ctype.h>
30#include <linux/ptrace.h>
31#include <linux/perf_event.h>
32
33#include "trace.h"
34#include "trace_output.h"
35
36#define MAX_TRACE_ARGS 128
37#define MAX_ARGSTR_LEN 63
38#define MAX_EVENT_NAME_LEN 64
39#define KPROBE_EVENT_SYSTEM "kprobes"
40
41/* Reserved field names */
42#define FIELD_STRING_IP "__probe_ip"
43#define FIELD_STRING_NARGS "__probe_nargs"
44#define FIELD_STRING_RETIP "__probe_ret_ip"
45#define FIELD_STRING_FUNC "__probe_func"
46
47const char *reserved_field_names[] = {
48 "common_type",
49 "common_flags",
50 "common_preempt_count",
51 "common_pid",
52 "common_tgid",
53 "common_lock_depth",
54 FIELD_STRING_IP,
55 FIELD_STRING_NARGS,
56 FIELD_STRING_RETIP,
57 FIELD_STRING_FUNC,
58};
59
60struct fetch_func {
61 unsigned long (*func)(struct pt_regs *, void *);
62 void *data;
63};
64
65static __kprobes unsigned long call_fetch(struct fetch_func *f,
66 struct pt_regs *regs)
67{
68 return f->func(regs, f->data);
69}
70
71/* fetch handlers */
72static __kprobes unsigned long fetch_register(struct pt_regs *regs,
73 void *offset)
74{
75 return regs_get_register(regs, (unsigned int)((unsigned long)offset));
76}
77
78static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
79 void *num)
80{
81 return regs_get_kernel_stack_nth(regs,
82 (unsigned int)((unsigned long)num));
83}
84
85static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
86{
87 unsigned long retval;
88
89 if (probe_kernel_address(addr, retval))
90 return 0;
91 return retval;
92}
93
94static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
95{
96 return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
97}
98
99static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
100 void *dummy)
101{
102 return regs_return_value(regs);
103}
104
105static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
106 void *dummy)
107{
108 return kernel_stack_pointer(regs);
109}
110
111/* Memory fetching by symbol */
112struct symbol_cache {
113 char *symbol;
114 long offset;
115 unsigned long addr;
116};
117
118static unsigned long update_symbol_cache(struct symbol_cache *sc)
119{
120 sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
121 if (sc->addr)
122 sc->addr += sc->offset;
123 return sc->addr;
124}
125
126static void free_symbol_cache(struct symbol_cache *sc)
127{
128 kfree(sc->symbol);
129 kfree(sc);
130}
131
132static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
133{
134 struct symbol_cache *sc;
135
136 if (!sym || strlen(sym) == 0)
137 return NULL;
138 sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
139 if (!sc)
140 return NULL;
141
142 sc->symbol = kstrdup(sym, GFP_KERNEL);
143 if (!sc->symbol) {
144 kfree(sc);
145 return NULL;
146 }
147 sc->offset = offset;
148
149 update_symbol_cache(sc);
150 return sc;
151}
152
153static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
154{
155 struct symbol_cache *sc = data;
156
157 if (sc->addr)
158 return fetch_memory(regs, (void *)sc->addr);
159 else
160 return 0;
161}
162
163/* Special indirect memory access interface */
164struct indirect_fetch_data {
165 struct fetch_func orig;
166 long offset;
167};
168
169static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
170{
171 struct indirect_fetch_data *ind = data;
172 unsigned long addr;
173
174 addr = call_fetch(&ind->orig, regs);
175 if (addr) {
176 addr += ind->offset;
177 return fetch_memory(regs, (void *)addr);
178 } else
179 return 0;
180}
181
182static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
183{
184 if (data->orig.func == fetch_indirect)
185 free_indirect_fetch_data(data->orig.data);
186 else if (data->orig.func == fetch_symbol)
187 free_symbol_cache(data->orig.data);
188 kfree(data);
189}
190
191/**
192 * Kprobe event core functions
193 */
194
195struct probe_arg {
196 struct fetch_func fetch;
197 const char *name;
198};
199
200/* Flags for trace_probe */
201#define TP_FLAG_TRACE 1
202#define TP_FLAG_PROFILE 2
203
204struct trace_probe {
205 struct list_head list;
206 struct kretprobe rp; /* Use rp.kp for kprobe use */
207 unsigned long nhit;
208 unsigned int flags; /* For TP_FLAG_* */
209 const char *symbol; /* symbol name */
210 struct ftrace_event_call call;
211 struct trace_event event;
212 unsigned int nr_args;
213 struct probe_arg args[];
214};
215
216#define SIZEOF_TRACE_PROBE(n) \
217 (offsetof(struct trace_probe, args) + \
218 (sizeof(struct probe_arg) * (n)))
219
220static __kprobes int probe_is_return(struct trace_probe *tp)
221{
222 return tp->rp.handler != NULL;
223}
224
225static __kprobes const char *probe_symbol(struct trace_probe *tp)
226{
227 return tp->symbol ? tp->symbol : "unknown";
228}
229
230static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
231{
232 int ret = -EINVAL;
233
234 if (ff->func == fetch_argument)
235 ret = snprintf(buf, n, "$arg%lu", (unsigned long)ff->data);
236 else if (ff->func == fetch_register) {
237 const char *name;
238 name = regs_query_register_name((unsigned int)((long)ff->data));
239 ret = snprintf(buf, n, "%%%s", name);
240 } else if (ff->func == fetch_stack)
241 ret = snprintf(buf, n, "$stack%lu", (unsigned long)ff->data);
242 else if (ff->func == fetch_memory)
243 ret = snprintf(buf, n, "@0x%p", ff->data);
244 else if (ff->func == fetch_symbol) {
245 struct symbol_cache *sc = ff->data;
246 if (sc->offset)
247 ret = snprintf(buf, n, "@%s%+ld", sc->symbol,
248 sc->offset);
249 else
250 ret = snprintf(buf, n, "@%s", sc->symbol);
251 } else if (ff->func == fetch_retvalue)
252 ret = snprintf(buf, n, "$retval");
253 else if (ff->func == fetch_stack_address)
254 ret = snprintf(buf, n, "$stack");
255 else if (ff->func == fetch_indirect) {
256 struct indirect_fetch_data *id = ff->data;
257 size_t l = 0;
258 ret = snprintf(buf, n, "%+ld(", id->offset);
259 if (ret >= n)
260 goto end;
261 l += ret;
262 ret = probe_arg_string(buf + l, n - l, &id->orig);
263 if (ret < 0)
264 goto end;
265 l += ret;
266 ret = snprintf(buf + l, n - l, ")");
267 ret += l;
268 }
269end:
270 if (ret >= n)
271 return -ENOSPC;
272 return ret;
273}
274
275static int register_probe_event(struct trace_probe *tp);
276static void unregister_probe_event(struct trace_probe *tp);
277
278static DEFINE_MUTEX(probe_lock);
279static LIST_HEAD(probe_list);
280
281static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
282static int kretprobe_dispatcher(struct kretprobe_instance *ri,
283 struct pt_regs *regs);
284
285/*
286 * Allocate new trace_probe and initialize it (including kprobes).
287 */
288static struct trace_probe *alloc_trace_probe(const char *group,
289 const char *event,
290 void *addr,
291 const char *symbol,
292 unsigned long offs,
293 int nargs, int is_return)
294{
295 struct trace_probe *tp;
296
297 tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
298 if (!tp)
299 return ERR_PTR(-ENOMEM);
300
301 if (symbol) {
302 tp->symbol = kstrdup(symbol, GFP_KERNEL);
303 if (!tp->symbol)
304 goto error;
305 tp->rp.kp.symbol_name = tp->symbol;
306 tp->rp.kp.offset = offs;
307 } else
308 tp->rp.kp.addr = addr;
309
310 if (is_return)
311 tp->rp.handler = kretprobe_dispatcher;
312 else
313 tp->rp.kp.pre_handler = kprobe_dispatcher;
314
315 if (!event)
316 goto error;
317 tp->call.name = kstrdup(event, GFP_KERNEL);
318 if (!tp->call.name)
319 goto error;
320
321 if (!group)
322 goto error;
323 tp->call.system = kstrdup(group, GFP_KERNEL);
324 if (!tp->call.system)
325 goto error;
326
327 INIT_LIST_HEAD(&tp->list);
328 return tp;
329error:
330 kfree(tp->call.name);
331 kfree(tp->symbol);
332 kfree(tp);
333 return ERR_PTR(-ENOMEM);
334}
335
336static void free_probe_arg(struct probe_arg *arg)
337{
338 if (arg->fetch.func == fetch_symbol)
339 free_symbol_cache(arg->fetch.data);
340 else if (arg->fetch.func == fetch_indirect)
341 free_indirect_fetch_data(arg->fetch.data);
342 kfree(arg->name);
343}
344
345static void free_trace_probe(struct trace_probe *tp)
346{
347 int i;
348
349 for (i = 0; i < tp->nr_args; i++)
350 free_probe_arg(&tp->args[i]);
351
352 kfree(tp->call.system);
353 kfree(tp->call.name);
354 kfree(tp->symbol);
355 kfree(tp);
356}
357
358static struct trace_probe *find_probe_event(const char *event,
359 const char *group)
360{
361 struct trace_probe *tp;
362
363 list_for_each_entry(tp, &probe_list, list)
364 if (strcmp(tp->call.name, event) == 0 &&
365 strcmp(tp->call.system, group) == 0)
366 return tp;
367 return NULL;
368}
369
370/* Unregister a trace_probe and probe_event: call with locking probe_lock */
371static void unregister_trace_probe(struct trace_probe *tp)
372{
373 if (probe_is_return(tp))
374 unregister_kretprobe(&tp->rp);
375 else
376 unregister_kprobe(&tp->rp.kp);
377 list_del(&tp->list);
378 unregister_probe_event(tp);
379}
380
381/* Register a trace_probe and probe_event */
382static int register_trace_probe(struct trace_probe *tp)
383{
384 struct trace_probe *old_tp;
385 int ret;
386
387 mutex_lock(&probe_lock);
388
389 /* register as an event */
390 old_tp = find_probe_event(tp->call.name, tp->call.system);
391 if (old_tp) {
392 /* delete old event */
393 unregister_trace_probe(old_tp);
394 free_trace_probe(old_tp);
395 }
396 ret = register_probe_event(tp);
397 if (ret) {
398 pr_warning("Faild to register probe event(%d)\n", ret);
399 goto end;
400 }
401
402 tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
403 if (probe_is_return(tp))
404 ret = register_kretprobe(&tp->rp);
405 else
406 ret = register_kprobe(&tp->rp.kp);
407
408 if (ret) {
409 pr_warning("Could not insert probe(%d)\n", ret);
410 if (ret == -EILSEQ) {
411 pr_warning("Probing address(0x%p) is not an "
412 "instruction boundary.\n",
413 tp->rp.kp.addr);
414 ret = -EINVAL;
415 }
416 unregister_probe_event(tp);
417 } else
418 list_add_tail(&tp->list, &probe_list);
419end:
420 mutex_unlock(&probe_lock);
421 return ret;
422}
423
424/* Split symbol and offset. */
425static int split_symbol_offset(char *symbol, unsigned long *offset)
426{
427 char *tmp;
428 int ret;
429
430 if (!offset)
431 return -EINVAL;
432
433 tmp = strchr(symbol, '+');
434 if (tmp) {
435 /* skip sign because strict_strtol doesn't accept '+' */
436 ret = strict_strtoul(tmp + 1, 0, offset);
437 if (ret)
438 return ret;
439 *tmp = '\0';
440 } else
441 *offset = 0;
442 return 0;
443}
444
445#define PARAM_MAX_ARGS 16
446#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
447
448static int parse_probe_vars(char *arg, struct fetch_func *ff, int is_return)
449{
450 int ret = 0;
451 unsigned long param;
452
453 if (strcmp(arg, "retval") == 0) {
454 if (is_return) {
455 ff->func = fetch_retvalue;
456 ff->data = NULL;
457 } else
458 ret = -EINVAL;
459 } else if (strncmp(arg, "stack", 5) == 0) {
460 if (arg[5] == '\0') {
461 ff->func = fetch_stack_address;
462 ff->data = NULL;
463 } else if (isdigit(arg[5])) {
464 ret = strict_strtoul(arg + 5, 10, &param);
465 if (ret || param > PARAM_MAX_STACK)
466 ret = -EINVAL;
467 else {
468 ff->func = fetch_stack;
469 ff->data = (void *)param;
470 }
471 } else
472 ret = -EINVAL;
473 } else if (strncmp(arg, "arg", 3) == 0 && isdigit(arg[3])) {
474 ret = strict_strtoul(arg + 3, 10, &param);
475 if (ret || param > PARAM_MAX_ARGS)
476 ret = -EINVAL;
477 else {
478 ff->func = fetch_argument;
479 ff->data = (void *)param;
480 }
481 } else
482 ret = -EINVAL;
483 return ret;
484}
485
486/* Recursive argument parser */
487static int __parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
488{
489 int ret = 0;
490 unsigned long param;
491 long offset;
492 char *tmp;
493
494 switch (arg[0]) {
495 case '$':
496 ret = parse_probe_vars(arg + 1, ff, is_return);
497 break;
498 case '%': /* named register */
499 ret = regs_query_register_offset(arg + 1);
500 if (ret >= 0) {
501 ff->func = fetch_register;
502 ff->data = (void *)(unsigned long)ret;
503 ret = 0;
504 }
505 break;
506 case '@': /* memory or symbol */
507 if (isdigit(arg[1])) {
508 ret = strict_strtoul(arg + 1, 0, &param);
509 if (ret)
510 break;
511 ff->func = fetch_memory;
512 ff->data = (void *)param;
513 } else {
514 ret = split_symbol_offset(arg + 1, &offset);
515 if (ret)
516 break;
517 ff->data = alloc_symbol_cache(arg + 1, offset);
518 if (ff->data)
519 ff->func = fetch_symbol;
520 else
521 ret = -EINVAL;
522 }
523 break;
524 case '+': /* indirect memory */
525 case '-':
526 tmp = strchr(arg, '(');
527 if (!tmp) {
528 ret = -EINVAL;
529 break;
530 }
531 *tmp = '\0';
532 ret = strict_strtol(arg + 1, 0, &offset);
533 if (ret)
534 break;
535 if (arg[0] == '-')
536 offset = -offset;
537 arg = tmp + 1;
538 tmp = strrchr(arg, ')');
539 if (tmp) {
540 struct indirect_fetch_data *id;
541 *tmp = '\0';
542 id = kzalloc(sizeof(struct indirect_fetch_data),
543 GFP_KERNEL);
544 if (!id)
545 return -ENOMEM;
546 id->offset = offset;
547 ret = __parse_probe_arg(arg, &id->orig, is_return);
548 if (ret)
549 kfree(id);
550 else {
551 ff->func = fetch_indirect;
552 ff->data = (void *)id;
553 }
554 } else
555 ret = -EINVAL;
556 break;
557 default:
558 /* TODO: support custom handler */
559 ret = -EINVAL;
560 }
561 return ret;
562}
563
564/* String length checking wrapper */
565static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
566{
567 if (strlen(arg) > MAX_ARGSTR_LEN) {
568 pr_info("Argument is too long.: %s\n", arg);
569 return -ENOSPC;
570 }
571 return __parse_probe_arg(arg, ff, is_return);
572}
573
574/* Return 1 if name is reserved or already used by another argument */
575static int conflict_field_name(const char *name,
576 struct probe_arg *args, int narg)
577{
578 int i;
579 for (i = 0; i < ARRAY_SIZE(reserved_field_names); i++)
580 if (strcmp(reserved_field_names[i], name) == 0)
581 return 1;
582 for (i = 0; i < narg; i++)
583 if (strcmp(args[i].name, name) == 0)
584 return 1;
585 return 0;
586}
587
588static int create_trace_probe(int argc, char **argv)
589{
590 /*
591 * Argument syntax:
592 * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
593 * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
594 * Fetch args:
595 * $argN : fetch Nth of function argument. (N:0-)
596 * $retval : fetch return value
597 * $stack : fetch stack address
598 * $stackN : fetch Nth of stack (N:0-)
599 * @ADDR : fetch memory at ADDR (ADDR should be in kernel)
600 * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
601 * %REG : fetch register REG
602 * Indirect memory fetch:
603 * +|-offs(ARG) : fetch memory at ARG +|- offs address.
604 * Alias name of args:
605 * NAME=FETCHARG : set NAME as alias of FETCHARG.
606 */
607 struct trace_probe *tp;
608 int i, ret = 0;
609 int is_return = 0;
610 char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
611 unsigned long offset = 0;
612 void *addr = NULL;
613 char buf[MAX_EVENT_NAME_LEN];
614
615 if (argc < 2) {
616 pr_info("Probe point is not specified.\n");
617 return -EINVAL;
618 }
619
620 if (argv[0][0] == 'p')
621 is_return = 0;
622 else if (argv[0][0] == 'r')
623 is_return = 1;
624 else {
625 pr_info("Probe definition must be started with 'p' or 'r'.\n");
626 return -EINVAL;
627 }
628
629 if (argv[0][1] == ':') {
630 event = &argv[0][2];
631 if (strchr(event, '/')) {
632 group = event;
633 event = strchr(group, '/') + 1;
634 event[-1] = '\0';
635 if (strlen(group) == 0) {
636 pr_info("Group name is not specifiled\n");
637 return -EINVAL;
638 }
639 }
640 if (strlen(event) == 0) {
641 pr_info("Event name is not specifiled\n");
642 return -EINVAL;
643 }
644 }
645
646 if (isdigit(argv[1][0])) {
647 if (is_return) {
648 pr_info("Return probe point must be a symbol.\n");
649 return -EINVAL;
650 }
651 /* an address specified */
652 ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
653 if (ret) {
654 pr_info("Failed to parse address.\n");
655 return ret;
656 }
657 } else {
658 /* a symbol specified */
659 symbol = argv[1];
660 /* TODO: support .init module functions */
661 ret = split_symbol_offset(symbol, &offset);
662 if (ret) {
663 pr_info("Failed to parse symbol.\n");
664 return ret;
665 }
666 if (offset && is_return) {
667 pr_info("Return probe must be used without offset.\n");
668 return -EINVAL;
669 }
670 }
671 argc -= 2; argv += 2;
672
673 /* setup a probe */
674 if (!group)
675 group = KPROBE_EVENT_SYSTEM;
676 if (!event) {
677 /* Make a new event name */
678 if (symbol)
679 snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
680 is_return ? 'r' : 'p', symbol, offset);
681 else
682 snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
683 is_return ? 'r' : 'p', addr);
684 event = buf;
685 }
686 tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
687 is_return);
688 if (IS_ERR(tp)) {
689 pr_info("Failed to allocate trace_probe.(%d)\n",
690 (int)PTR_ERR(tp));
691 return PTR_ERR(tp);
692 }
693
694 /* parse arguments */
695 ret = 0;
696 for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
697 /* Parse argument name */
698 arg = strchr(argv[i], '=');
699 if (arg)
700 *arg++ = '\0';
701 else
702 arg = argv[i];
703
704 if (conflict_field_name(argv[i], tp->args, i)) {
705 pr_info("Argument%d name '%s' conflicts with "
706 "another field.\n", i, argv[i]);
707 ret = -EINVAL;
708 goto error;
709 }
710
711 tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
712 if (!tp->args[i].name) {
713 pr_info("Failed to allocate argument%d name '%s'.\n",
714 i, argv[i]);
715 ret = -ENOMEM;
716 goto error;
717 }
718
719 /* Parse fetch argument */
720 ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
721 if (ret) {
722 pr_info("Parse error at argument%d. (%d)\n", i, ret);
723 kfree(tp->args[i].name);
724 goto error;
725 }
726
727 tp->nr_args++;
728 }
729
730 ret = register_trace_probe(tp);
731 if (ret)
732 goto error;
733 return 0;
734
735error:
736 free_trace_probe(tp);
737 return ret;
738}
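
The argument syntax documented at the top of create_trace_probe() is driven
entirely through the kprobe_events text file. As a rough userspace sketch (the
debugfs mount point, the file name under the tracing directory, and the
do_sys_open argument layout are assumptions of the example, not guarantees of
this patch), defining a probe is just a matter of writing one line in that
syntax:

#include <stdio.h>

int main(void)
{
	/* Assumes debugfs is mounted at /sys/kernel/debug and the kernel
	 * was built with kprobe-based event tracing enabled. */
	const char *path = "/sys/kernel/debug/tracing/kprobe_events";
	/* p:EVENT SYMBOL FETCHARGS - fetch do_sys_open's first four
	 * arguments under readable names (argument layout assumed). */
	const char *cmd = "p:myprobe do_sys_open "
			  "dfd=$arg0 filename=$arg1 flags=$arg2 mode=$arg3\n";
	FILE *f = fopen(path, "a");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fputs(cmd, f) == EOF || fclose(f) != 0) {
		perror(path);
		return 1;
	}
	return 0;
}

Reading the same file back then lists the probe in the format produced by
probes_seq_show(), and the new event should appear under the "kprobes" system
alongside the other trace events.
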
739
740static void cleanup_all_probes(void)
741{
742 struct trace_probe *tp;
743
744 mutex_lock(&probe_lock);
745 /* TODO: Use batch unregistration */
746 while (!list_empty(&probe_list)) {
747 tp = list_entry(probe_list.next, struct trace_probe, list);
748 unregister_trace_probe(tp);
749 free_trace_probe(tp);
750 }
751 mutex_unlock(&probe_lock);
752}
753
754
755/* Probes listing interfaces */
756static void *probes_seq_start(struct seq_file *m, loff_t *pos)
757{
758 mutex_lock(&probe_lock);
759 return seq_list_start(&probe_list, *pos);
760}
761
762static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
763{
764 return seq_list_next(v, &probe_list, pos);
765}
766
767static void probes_seq_stop(struct seq_file *m, void *v)
768{
769 mutex_unlock(&probe_lock);
770}
771
772static int probes_seq_show(struct seq_file *m, void *v)
773{
774 struct trace_probe *tp = v;
775 int i, ret;
776 char buf[MAX_ARGSTR_LEN + 1];
777
778 seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
779 seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
780
781 if (!tp->symbol)
782 seq_printf(m, " 0x%p", tp->rp.kp.addr);
783 else if (tp->rp.kp.offset)
784 seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
785 else
786 seq_printf(m, " %s", probe_symbol(tp));
787
788 for (i = 0; i < tp->nr_args; i++) {
789 ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
790 if (ret < 0) {
791 pr_warning("Argument%d decoding error(%d).\n", i, ret);
792 return ret;
793 }
794 seq_printf(m, " %s=%s", tp->args[i].name, buf);
795 }
796 seq_printf(m, "\n");
797 return 0;
798}
799
800static const struct seq_operations probes_seq_op = {
801 .start = probes_seq_start,
802 .next = probes_seq_next,
803 .stop = probes_seq_stop,
804 .show = probes_seq_show
805};
806
807static int probes_open(struct inode *inode, struct file *file)
808{
809 if ((file->f_mode & FMODE_WRITE) &&
810 (file->f_flags & O_TRUNC))
811 cleanup_all_probes();
812
813 return seq_open(file, &probes_seq_op);
814}
815
816static int command_trace_probe(const char *buf)
817{
818 char **argv;
819 int argc = 0, ret = 0;
820
821 argv = argv_split(GFP_KERNEL, buf, &argc);
822 if (!argv)
823 return -ENOMEM;
824
825 if (argc)
826 ret = create_trace_probe(argc, argv);
827
828 argv_free(argv);
829 return ret;
830}
831
832#define WRITE_BUFSIZE 128
833
834static ssize_t probes_write(struct file *file, const char __user *buffer,
835 size_t count, loff_t *ppos)
836{
837 char *kbuf, *tmp;
838 int ret;
839 size_t done;
840 size_t size;
841
842 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
843 if (!kbuf)
844 return -ENOMEM;
845
846 ret = done = 0;
847 while (done < count) {
848 size = count - done;
849 if (size >= WRITE_BUFSIZE)
850 size = WRITE_BUFSIZE - 1;
851 if (copy_from_user(kbuf, buffer + done, size)) {
852 ret = -EFAULT;
853 goto out;
854 }
855 kbuf[size] = '\0';
856 tmp = strchr(kbuf, '\n');
857 if (tmp) {
858 *tmp = '\0';
859 size = tmp - kbuf + 1;
860 } else if (done + size < count) {
861 pr_warning("Line length is too long: "
862 "Should be less than %d.", WRITE_BUFSIZE);
863 ret = -EINVAL;
864 goto out;
865 }
866 done += size;
867 /* Remove comments */
868 tmp = strchr(kbuf, '#');
869 if (tmp)
870 *tmp = '\0';
871
872 ret = command_trace_probe(kbuf);
873 if (ret)
874 goto out;
875 }
876 ret = done;
877out:
878 kfree(kbuf);
879 return ret;
880}
881
882static const struct file_operations kprobe_events_ops = {
883 .owner = THIS_MODULE,
884 .open = probes_open,
885 .read = seq_read,
886 .llseek = seq_lseek,
887 .release = seq_release,
888 .write = probes_write,
889};
890
891/* Probes profiling interfaces */
892static int probes_profile_seq_show(struct seq_file *m, void *v)
893{
894 struct trace_probe *tp = v;
895
896 seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
897 tp->rp.kp.nmissed);
898
899 return 0;
900}
901
902static const struct seq_operations profile_seq_op = {
903 .start = probes_seq_start,
904 .next = probes_seq_next,
905 .stop = probes_seq_stop,
906 .show = probes_profile_seq_show
907};
908
909static int profile_open(struct inode *inode, struct file *file)
910{
911 return seq_open(file, &profile_seq_op);
912}
913
914static const struct file_operations kprobe_profile_ops = {
915 .owner = THIS_MODULE,
916 .open = profile_open,
917 .read = seq_read,
918 .llseek = seq_lseek,
919 .release = seq_release,
920};
921
922/* Kprobe handler */
923static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
924{
925 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
926 struct kprobe_trace_entry *entry;
927 struct ring_buffer_event *event;
928 struct ring_buffer *buffer;
929 int size, i, pc;
930 unsigned long irq_flags;
931 struct ftrace_event_call *call = &tp->call;
932
933 tp->nhit++;
934
935 local_save_flags(irq_flags);
936 pc = preempt_count();
937
938 size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
939
940 event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
941 irq_flags, pc);
942 if (!event)
943 return 0;
944
945 entry = ring_buffer_event_data(event);
946 entry->nargs = tp->nr_args;
947 entry->ip = (unsigned long)kp->addr;
948 for (i = 0; i < tp->nr_args; i++)
949 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
950
951 if (!filter_current_check_discard(buffer, call, entry, event))
952 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
953 return 0;
954}
955
956/* Kretprobe handler */
957static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
958 struct pt_regs *regs)
959{
960 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
961 struct kretprobe_trace_entry *entry;
962 struct ring_buffer_event *event;
963 struct ring_buffer *buffer;
964 int size, i, pc;
965 unsigned long irq_flags;
966 struct ftrace_event_call *call = &tp->call;
967
968 local_save_flags(irq_flags);
969 pc = preempt_count();
970
971 size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
972
973 event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
974 irq_flags, pc);
975 if (!event)
976 return 0;
977
978 entry = ring_buffer_event_data(event);
979 entry->nargs = tp->nr_args;
980 entry->func = (unsigned long)tp->rp.kp.addr;
981 entry->ret_ip = (unsigned long)ri->ret_addr;
982 for (i = 0; i < tp->nr_args; i++)
983 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
984
985 if (!filter_current_check_discard(buffer, call, entry, event))
986 trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
987
988 return 0;
989}
990
991/* Event entry printers */
992enum print_line_t
993print_kprobe_event(struct trace_iterator *iter, int flags)
994{
995 struct kprobe_trace_entry *field;
996 struct trace_seq *s = &iter->seq;
997 struct trace_event *event;
998 struct trace_probe *tp;
999 int i;
1000
1001 field = (struct kprobe_trace_entry *)iter->ent;
1002 event = ftrace_find_event(field->ent.type);
1003 tp = container_of(event, struct trace_probe, event);
1004
1005 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1006 goto partial;
1007
1008 if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
1009 goto partial;
1010
1011 if (!trace_seq_puts(s, ")"))
1012 goto partial;
1013
1014 for (i = 0; i < field->nargs; i++)
1015 if (!trace_seq_printf(s, " %s=%lx",
1016 tp->args[i].name, field->args[i]))
1017 goto partial;
1018
1019 if (!trace_seq_puts(s, "\n"))
1020 goto partial;
1021
1022 return TRACE_TYPE_HANDLED;
1023partial:
1024 return TRACE_TYPE_PARTIAL_LINE;
1025}
1026
1027enum print_line_t
1028print_kretprobe_event(struct trace_iterator *iter, int flags)
1029{
1030 struct kretprobe_trace_entry *field;
1031 struct trace_seq *s = &iter->seq;
1032 struct trace_event *event;
1033 struct trace_probe *tp;
1034 int i;
1035
1036 field = (struct kretprobe_trace_entry *)iter->ent;
1037 event = ftrace_find_event(field->ent.type);
1038 tp = container_of(event, struct trace_probe, event);
1039
1040 if (!trace_seq_printf(s, "%s: (", tp->call.name))
1041 goto partial;
1042
1043 if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
1044 goto partial;
1045
1046 if (!trace_seq_puts(s, " <- "))
1047 goto partial;
1048
1049 if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
1050 goto partial;
1051
1052 if (!trace_seq_puts(s, ")"))
1053 goto partial;
1054
1055 for (i = 0; i < field->nargs; i++)
1056 if (!trace_seq_printf(s, " %s=%lx",
1057 tp->args[i].name, field->args[i]))
1058 goto partial;
1059
1060 if (!trace_seq_puts(s, "\n"))
1061 goto partial;
1062
1063 return TRACE_TYPE_HANDLED;
1064partial:
1065 return TRACE_TYPE_PARTIAL_LINE;
1066}
1067
1068static int probe_event_enable(struct ftrace_event_call *call)
1069{
1070 struct trace_probe *tp = (struct trace_probe *)call->data;
1071
1072 tp->flags |= TP_FLAG_TRACE;
1073 if (probe_is_return(tp))
1074 return enable_kretprobe(&tp->rp);
1075 else
1076 return enable_kprobe(&tp->rp.kp);
1077}
1078
1079static void probe_event_disable(struct ftrace_event_call *call)
1080{
1081 struct trace_probe *tp = (struct trace_probe *)call->data;
1082
1083 tp->flags &= ~TP_FLAG_TRACE;
1084 if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
1085 if (probe_is_return(tp))
1086 disable_kretprobe(&tp->rp);
1087 else
1088 disable_kprobe(&tp->rp.kp);
1089 }
1090}
1091
1092static int probe_event_raw_init(struct ftrace_event_call *event_call)
1093{
1094 INIT_LIST_HEAD(&event_call->fields);
1095
1096 return 0;
1097}
1098
1099#undef DEFINE_FIELD
1100#define DEFINE_FIELD(type, item, name, is_signed) \
1101 do { \
1102 ret = trace_define_field(event_call, #type, name, \
1103 offsetof(typeof(field), item), \
1104 sizeof(field.item), is_signed, \
1105 FILTER_OTHER); \
1106 if (ret) \
1107 return ret; \
1108 } while (0)
1109
1110static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
1111{
1112 int ret, i;
1113 struct kprobe_trace_entry field;
1114 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1115
1116 ret = trace_define_common_fields(event_call);
1117	if (ret)
1118 return ret;
1119
1120 DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
1121 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1122 /* Set argument names as fields */
1123 for (i = 0; i < tp->nr_args; i++)
1124 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1125 return 0;
1126}
1127
1128static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
1129{
1130 int ret, i;
1131 struct kretprobe_trace_entry field;
1132 struct trace_probe *tp = (struct trace_probe *)event_call->data;
1133
1134 ret = trace_define_common_fields(event_call);
1135	if (ret)
1136 return ret;
1137
1138 DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
1139 DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
1140 DEFINE_FIELD(int, nargs, FIELD_STRING_NARGS, 1);
1141 /* Set argument names as fields */
1142 for (i = 0; i < tp->nr_args; i++)
1143 DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
1144 return 0;
1145}
1146
1147static int __probe_event_show_format(struct trace_seq *s,
1148 struct trace_probe *tp, const char *fmt,
1149 const char *arg)
1150{
1151 int i;
1152
1153 /* Show format */
1154 if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
1155 return 0;
1156
1157 for (i = 0; i < tp->nr_args; i++)
1158 if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
1159 return 0;
1160
1161 if (!trace_seq_printf(s, "\", %s", arg))
1162 return 0;
1163
1164 for (i = 0; i < tp->nr_args; i++)
1165 if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
1166 return 0;
1167
1168 return trace_seq_puts(s, "\n");
1169}
1170
1171#undef SHOW_FIELD
1172#define SHOW_FIELD(type, item, name) \
1173 do { \
1174 ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \
1175 "offset:%u;\tsize:%u;\n", name, \
1176 (unsigned int)offsetof(typeof(field), item),\
1177 (unsigned int)sizeof(type)); \
1178 if (!ret) \
1179 return 0; \
1180 } while (0)
1181
1182static int kprobe_event_show_format(struct ftrace_event_call *call,
1183 struct trace_seq *s)
1184{
1185 struct kprobe_trace_entry field __attribute__((unused));
1186 int ret, i;
1187 struct trace_probe *tp = (struct trace_probe *)call->data;
1188
1189 SHOW_FIELD(unsigned long, ip, FIELD_STRING_IP);
1190 SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
1191
1192 /* Show fields */
1193 for (i = 0; i < tp->nr_args; i++)
1194 SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
1195 trace_seq_puts(s, "\n");
1196
1197 return __probe_event_show_format(s, tp, "(%lx)",
1198 "REC->" FIELD_STRING_IP);
1199}
1200
1201static int kretprobe_event_show_format(struct ftrace_event_call *call,
1202 struct trace_seq *s)
1203{
1204 struct kretprobe_trace_entry field __attribute__((unused));
1205 int ret, i;
1206 struct trace_probe *tp = (struct trace_probe *)call->data;
1207
1208 SHOW_FIELD(unsigned long, func, FIELD_STRING_FUNC);
1209 SHOW_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP);
1210 SHOW_FIELD(int, nargs, FIELD_STRING_NARGS);
1211
1212 /* Show fields */
1213 for (i = 0; i < tp->nr_args; i++)
1214 SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
1215 trace_seq_puts(s, "\n");
1216
1217 return __probe_event_show_format(s, tp, "(%lx <- %lx)",
1218 "REC->" FIELD_STRING_FUNC
1219 ", REC->" FIELD_STRING_RETIP);
1220}
1221
1222#ifdef CONFIG_EVENT_PROFILE
1223
1224/* Kprobe profile handler */
1225static __kprobes int kprobe_profile_func(struct kprobe *kp,
1226 struct pt_regs *regs)
1227{
1228 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1229 struct ftrace_event_call *call = &tp->call;
1230 struct kprobe_trace_entry *entry;
1231 struct trace_entry *ent;
1232 int size, __size, i, pc, __cpu;
1233 unsigned long irq_flags;
1234 char *trace_buf;
1235 char *raw_data;
1236 int rctx;
1237
1238 pc = preempt_count();
1239 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1240 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1241 size -= sizeof(u32);
1242 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
1243 "profile buffer not large enough"))
1244 return 0;
1245
1246 /*
1247	 * Protect the non-NMI buffer
1248 * This also protects the rcu read side
1249 */
1250 local_irq_save(irq_flags);
1251
1252 rctx = perf_swevent_get_recursion_context();
1253 if (rctx < 0)
1254 goto end_recursion;
1255
1256 __cpu = smp_processor_id();
1257
1258 if (in_nmi())
1259 trace_buf = rcu_dereference(perf_trace_buf_nmi);
1260 else
1261 trace_buf = rcu_dereference(perf_trace_buf);
1262
1263 if (!trace_buf)
1264 goto end;
1265
1266 raw_data = per_cpu_ptr(trace_buf, __cpu);
1267
1268 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1269 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
1270 entry = (struct kprobe_trace_entry *)raw_data;
1271 ent = &entry->ent;
1272
1273 tracing_generic_entry_update(ent, irq_flags, pc);
1274 ent->type = call->id;
1275 entry->nargs = tp->nr_args;
1276 entry->ip = (unsigned long)kp->addr;
1277 for (i = 0; i < tp->nr_args; i++)
1278 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1279 perf_tp_event(call->id, entry->ip, 1, entry, size);
1280
1281end:
1282 perf_swevent_put_recursion_context(rctx);
1283end_recursion:
1284 local_irq_restore(irq_flags);
1285
1286 return 0;
1287}
1288
1289/* Kretprobe profile handler */
1290static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
1291 struct pt_regs *regs)
1292{
1293 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1294 struct ftrace_event_call *call = &tp->call;
1295 struct kretprobe_trace_entry *entry;
1296 struct trace_entry *ent;
1297 int size, __size, i, pc, __cpu;
1298 unsigned long irq_flags;
1299 char *trace_buf;
1300 char *raw_data;
1301 int rctx;
1302
1303 pc = preempt_count();
1304 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1305 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1306 size -= sizeof(u32);
1307 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
1308 "profile buffer not large enough"))
1309 return 0;
1310
1311 /*
1312	 * Protect the non-NMI buffer.
1313	 * This also protects the RCU read side.
1314 */
1315 local_irq_save(irq_flags);
1316
1317 rctx = perf_swevent_get_recursion_context();
1318 if (rctx < 0)
1319 goto end_recursion;
1320
1321 __cpu = smp_processor_id();
1322
1323 if (in_nmi())
1324 trace_buf = rcu_dereference(perf_trace_buf_nmi);
1325 else
1326 trace_buf = rcu_dereference(perf_trace_buf);
1327
1328 if (!trace_buf)
1329 goto end;
1330
1331 raw_data = per_cpu_ptr(trace_buf, __cpu);
1332
1333 /* Zero dead bytes from alignment to avoid buffer leak to userspace */
1334 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
1335 entry = (struct kretprobe_trace_entry *)raw_data;
1336 ent = &entry->ent;
1337
1338 tracing_generic_entry_update(ent, irq_flags, pc);
1339 ent->type = call->id;
1340 entry->nargs = tp->nr_args;
1341 entry->func = (unsigned long)tp->rp.kp.addr;
1342 entry->ret_ip = (unsigned long)ri->ret_addr;
1343 for (i = 0; i < tp->nr_args; i++)
1344 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1345 perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
1346
1347end:
1348 perf_swevent_put_recursion_context(rctx);
1349end_recursion:
1350 local_irq_restore(irq_flags);
1351
1352 return 0;
1353}
1354
1355static int probe_profile_enable(struct ftrace_event_call *call)
1356{
1357 struct trace_probe *tp = (struct trace_probe *)call->data;
1358
1359 tp->flags |= TP_FLAG_PROFILE;
1360
1361 if (probe_is_return(tp))
1362 return enable_kretprobe(&tp->rp);
1363 else
1364 return enable_kprobe(&tp->rp.kp);
1365}
1366
1367static void probe_profile_disable(struct ftrace_event_call *call)
1368{
1369 struct trace_probe *tp = (struct trace_probe *)call->data;
1370
1371 tp->flags &= ~TP_FLAG_PROFILE;
1372
1373 if (!(tp->flags & TP_FLAG_TRACE)) {
1374 if (probe_is_return(tp))
1375 disable_kretprobe(&tp->rp);
1376 else
1377 disable_kprobe(&tp->rp.kp);
1378 }
1379}
1380#endif /* CONFIG_EVENT_PROFILE */
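
Both profile handlers above size their record the same way: the raw entry plus a u32 event header is rounded up to a u64 boundary, and the trailing pad bytes are zeroed before the record is handed to perf so no stack contents leak to userspace. Below is a minimal userspace sketch of that rounding; ALIGN() and the stand-in entry size are reimplemented here for illustration only and are not the kernel definitions.

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for the kernel's ALIGN() */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	size_t nr_args = 3;
	/* assumed stand-in for SIZEOF_KPROBE_TRACE_ENTRY(nr_args), not the real layout */
	size_t raw_size = 16 + sizeof(unsigned long) + sizeof(int) +
			  nr_args * sizeof(unsigned long);
	/* round (entry + u32 header) up to a u64 boundary, as the handlers do */
	size_t size = ALIGN(raw_size + sizeof(uint32_t), sizeof(uint64_t));

	size -= sizeof(uint32_t);
	printf("raw entry: %zu bytes, padded payload: %zu bytes\n", raw_size, size);
	return 0;
}
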
1381
1382
1383static __kprobes
1384int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1385{
1386 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
1387
1388 if (tp->flags & TP_FLAG_TRACE)
1389 kprobe_trace_func(kp, regs);
1390#ifdef CONFIG_EVENT_PROFILE
1391 if (tp->flags & TP_FLAG_PROFILE)
1392 kprobe_profile_func(kp, regs);
1393#endif /* CONFIG_EVENT_PROFILE */
1394	return 0;	/* We don't tweak the kernel, so just return 0 */
1395}
1396
1397static __kprobes
1398int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1399{
1400 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
1401
1402 if (tp->flags & TP_FLAG_TRACE)
1403 kretprobe_trace_func(ri, regs);
1404#ifdef CONFIG_EVENT_PROFILE
1405 if (tp->flags & TP_FLAG_PROFILE)
1406 kretprobe_profile_func(ri, regs);
1407#endif /* CONFIG_EVENT_PROFILE */
1408	return 0;	/* We don't tweak the kernel, so just return 0 */
1409}
1410
1411static int register_probe_event(struct trace_probe *tp)
1412{
1413 struct ftrace_event_call *call = &tp->call;
1414 int ret;
1415
1416 /* Initialize ftrace_event_call */
1417 if (probe_is_return(tp)) {
1418 tp->event.trace = print_kretprobe_event;
1419 call->raw_init = probe_event_raw_init;
1420 call->show_format = kretprobe_event_show_format;
1421 call->define_fields = kretprobe_event_define_fields;
1422 } else {
1423 tp->event.trace = print_kprobe_event;
1424 call->raw_init = probe_event_raw_init;
1425 call->show_format = kprobe_event_show_format;
1426 call->define_fields = kprobe_event_define_fields;
1427 }
1428 call->event = &tp->event;
1429 call->id = register_ftrace_event(&tp->event);
1430 if (!call->id)
1431 return -ENODEV;
1432 call->enabled = 0;
1433 call->regfunc = probe_event_enable;
1434 call->unregfunc = probe_event_disable;
1435
1436#ifdef CONFIG_EVENT_PROFILE
1437 atomic_set(&call->profile_count, -1);
1438 call->profile_enable = probe_profile_enable;
1439 call->profile_disable = probe_profile_disable;
1440#endif
1441 call->data = tp;
1442 ret = trace_add_event_call(call);
1443 if (ret) {
1444 pr_info("Failed to register kprobe event: %s\n", call->name);
1445 unregister_ftrace_event(&tp->event);
1446 }
1447 return ret;
1448}
1449
1450static void unregister_probe_event(struct trace_probe *tp)
1451{
1452 /* tp->event is unregistered in trace_remove_event_call() */
1453 trace_remove_event_call(&tp->call);
1454}
1455
1456/* Make a debugfs interface for controlling probe points */
1457static __init int init_kprobe_trace(void)
1458{
1459 struct dentry *d_tracer;
1460 struct dentry *entry;
1461
1462 d_tracer = tracing_init_dentry();
1463 if (!d_tracer)
1464 return 0;
1465
1466 entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
1467 NULL, &kprobe_events_ops);
1468
1469 /* Event list interface */
1470 if (!entry)
1471 pr_warning("Could not create debugfs "
1472 "'kprobe_events' entry\n");
1473
1474 /* Profile interface */
1475 entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
1476 NULL, &kprobe_profile_ops);
1477
1478 if (!entry)
1479 pr_warning("Could not create debugfs "
1480 "'kprobe_profile' entry\n");
1481 return 0;
1482}
1483fs_initcall(init_kprobe_trace);
1484
1485
1486#ifdef CONFIG_FTRACE_STARTUP_TEST
1487
1488static int kprobe_trace_selftest_target(int a1, int a2, int a3,
1489 int a4, int a5, int a6)
1490{
1491 return a1 + a2 + a3 + a4 + a5 + a6;
1492}
1493
1494static __init int kprobe_trace_self_tests_init(void)
1495{
1496 int ret;
1497 int (*target)(int, int, int, int, int, int);
1498
1499 target = kprobe_trace_selftest_target;
1500
1501 pr_info("Testing kprobe tracing: ");
1502
1503 ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
1504 "$arg1 $arg2 $arg3 $arg4 $stack $stack0");
1505 if (WARN_ON_ONCE(ret))
1506 pr_warning("error enabling function entry\n");
1507
1508 ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
1509 "$retval");
1510 if (WARN_ON_ONCE(ret))
1511 pr_warning("error enabling function return\n");
1512
1513 ret = target(1, 2, 3, 4, 5, 6);
1514
1515 cleanup_all_probes();
1516
1517 pr_cont("OK\n");
1518 return 0;
1519}
1520
1521late_initcall(kprobe_trace_self_tests_init);
1522
1523#endif
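
For reference, the probe definitions the selftest above feeds to command_trace_probe() use the same syntax accepted through the kprobe_events file created by init_kprobe_trace(). A hedged userspace sketch follows; the debugfs mount point, the event name myprobe and the target symbol do_sys_open are assumptions about the running system, not something this patch guarantees.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* assumes debugfs is mounted at /sys/kernel/debug */
	const char *path = "/sys/kernel/debug/tracing/kprobe_events";
	/* "p:EVENT SYMBOL", mirroring the "p:testprobe ..." string in the selftest */
	const char *probe = "p:myprobe do_sys_open\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open kprobe_events");
		return 1;
	}
	if (write(fd, probe, strlen(probe)) != (ssize_t)strlen(probe))
		perror("write probe definition");
	close(fd);
	return 0;
}
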
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
new file mode 100644
index 000000000000..ddfa0fd43bc0
--- /dev/null
+++ b/kernel/trace/trace_ksym.c
@@ -0,0 +1,550 @@
1/*
2 * trace_ksym.c - Kernel Symbol Tracer
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) IBM Corporation, 2009
19 */
20
21#include <linux/kallsyms.h>
22#include <linux/uaccess.h>
23#include <linux/debugfs.h>
24#include <linux/ftrace.h>
25#include <linux/module.h>
26#include <linux/fs.h>
27
28#include "trace_output.h"
29#include "trace_stat.h"
30#include "trace.h"
31
32#include <linux/hw_breakpoint.h>
33#include <asm/hw_breakpoint.h>
34
35/*
 36 * For now, restrict the number of symbols traced simultaneously to the
 37 * number of available hardware breakpoint registers.
38 */
39#define KSYM_TRACER_MAX HBP_NUM
40
41#define KSYM_TRACER_OP_LEN 3 /* rw- */
42
43struct trace_ksym {
44 struct perf_event **ksym_hbp;
45 struct perf_event_attr attr;
46#ifdef CONFIG_PROFILE_KSYM_TRACER
47 unsigned long counter;
48#endif
49 struct hlist_node ksym_hlist;
50};
51
52static struct trace_array *ksym_trace_array;
53
54static unsigned int ksym_filter_entry_count;
55static unsigned int ksym_tracing_enabled;
56
57static HLIST_HEAD(ksym_filter_head);
58
59static DEFINE_MUTEX(ksym_tracer_mutex);
60
61#ifdef CONFIG_PROFILE_KSYM_TRACER
62
63#define MAX_UL_INT 0xffffffff
64
65void ksym_collect_stats(unsigned long hbp_hit_addr)
66{
67 struct hlist_node *node;
68 struct trace_ksym *entry;
69
70 rcu_read_lock();
71 hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) {
72 if ((entry->attr.bp_addr == hbp_hit_addr) &&
73 (entry->counter <= MAX_UL_INT)) {
74 entry->counter++;
75 break;
76 }
77 }
78 rcu_read_unlock();
79}
80#endif /* CONFIG_PROFILE_KSYM_TRACER */
81
82void ksym_hbp_handler(struct perf_event *hbp, void *data)
83{
84 struct ring_buffer_event *event;
85 struct ksym_trace_entry *entry;
86 struct pt_regs *regs = data;
87 struct ring_buffer *buffer;
88 int pc;
89
90 if (!ksym_tracing_enabled)
91 return;
92
93 buffer = ksym_trace_array->buffer;
94
95 pc = preempt_count();
96
97 event = trace_buffer_lock_reserve(buffer, TRACE_KSYM,
98 sizeof(*entry), 0, pc);
99 if (!event)
100 return;
101
102 entry = ring_buffer_event_data(event);
103 entry->ip = instruction_pointer(regs);
104 entry->type = hw_breakpoint_type(hbp);
105 entry->addr = hw_breakpoint_addr(hbp);
106 strlcpy(entry->cmd, current->comm, TASK_COMM_LEN);
107
108#ifdef CONFIG_PROFILE_KSYM_TRACER
109 ksym_collect_stats(hw_breakpoint_addr(hbp));
110#endif /* CONFIG_PROFILE_KSYM_TRACER */
111
112 trace_buffer_unlock_commit(buffer, event, 0, pc);
113}
114
115/* Valid access types are represented as
116 *
117 * rw- : Set Read/Write Access Breakpoint
118 * -w- : Set Write Access Breakpoint
119 * --- : Clear Breakpoints
 120 * --x : Set Execution Breakpoints (not available yet)
121 *
122 */
123static int ksym_trace_get_access_type(char *str)
124{
125 int access = 0;
126
127 if (str[0] == 'r')
128 access |= HW_BREAKPOINT_R;
129
130 if (str[1] == 'w')
131 access |= HW_BREAKPOINT_W;
132
133 if (str[2] == 'x')
134 access |= HW_BREAKPOINT_X;
135
136 switch (access) {
137 case HW_BREAKPOINT_R:
138 case HW_BREAKPOINT_W:
139 case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
140 return access;
141 default:
142 return -EINVAL;
143 }
144}
145
146/*
 147 * There are several possible malformed requests and we attempt to catch
 148 * all of them. The rules are enumerated below:
149 * 1. We will not allow kernel symbols with ':' since it is used as a delimiter.
150 * i.e. multiple ':' symbols disallowed. Possible uses are of the form
151 * <module>:<ksym_name>:<op>.
152 * 2. No delimiter symbol ':' in the input string
153 * 3. Spurious operator symbols or symbols not in their respective positions
154 * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file
155 * 5. Kernel symbol not a part of /proc/kallsyms
156 * 6. Duplicate requests
157 */
158static int parse_ksym_trace_str(char *input_string, char **ksymname,
159 unsigned long *addr)
160{
161 int ret;
162
163 *ksymname = strsep(&input_string, ":");
164 *addr = kallsyms_lookup_name(*ksymname);
165
166 /* Check for malformed request: (2), (1) and (5) */
167 if ((!input_string) ||
168 (strlen(input_string) != KSYM_TRACER_OP_LEN) ||
169 (*addr == 0))
 170		return -EINVAL;
171
172 ret = ksym_trace_get_access_type(input_string);
173
174 return ret;
175}
176
177int process_new_ksym_entry(char *ksymname, int op, unsigned long addr)
178{
179 struct trace_ksym *entry;
180 int ret = -ENOMEM;
181
182 if (ksym_filter_entry_count >= KSYM_TRACER_MAX) {
 183		printk(KERN_ERR "ksym_tracer: Maximum limit (%d) reached, no"
 184		       " new requests for tracing can be accepted now.\n",
185 KSYM_TRACER_MAX);
186 return -ENOSPC;
187 }
188
189 entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL);
190 if (!entry)
191 return -ENOMEM;
192
193 hw_breakpoint_init(&entry->attr);
194
195 entry->attr.bp_type = op;
196 entry->attr.bp_addr = addr;
197 entry->attr.bp_len = HW_BREAKPOINT_LEN_4;
198
199 ret = -EAGAIN;
200 entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr,
201 ksym_hbp_handler);
202
203 if (IS_ERR(entry->ksym_hbp)) {
204 ret = PTR_ERR(entry->ksym_hbp);
205 printk(KERN_INFO "ksym_tracer request failed. Try again"
206 " later!!\n");
207 goto err;
208 }
209
210 hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head);
211 ksym_filter_entry_count++;
212
213 return 0;
214
215err:
216 kfree(entry);
217
218 return ret;
219}
220
221static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf,
222 size_t count, loff_t *ppos)
223{
224 struct trace_ksym *entry;
225 struct hlist_node *node;
226 struct trace_seq *s;
227 ssize_t cnt = 0;
228 int ret;
229
230 s = kmalloc(sizeof(*s), GFP_KERNEL);
231 if (!s)
232 return -ENOMEM;
233 trace_seq_init(s);
234
235 mutex_lock(&ksym_tracer_mutex);
236
237 hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
238 ret = trace_seq_printf(s, "%pS:", (void *)entry->attr.bp_addr);
239 if (entry->attr.bp_type == HW_BREAKPOINT_R)
240 ret = trace_seq_puts(s, "r--\n");
241 else if (entry->attr.bp_type == HW_BREAKPOINT_W)
242 ret = trace_seq_puts(s, "-w-\n");
243 else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R))
244 ret = trace_seq_puts(s, "rw-\n");
245 WARN_ON_ONCE(!ret);
246 }
247
248 cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
249
250 mutex_unlock(&ksym_tracer_mutex);
251
252 kfree(s);
253
254 return cnt;
255}
256
257static void __ksym_trace_reset(void)
258{
259 struct trace_ksym *entry;
260 struct hlist_node *node, *node1;
261
262 mutex_lock(&ksym_tracer_mutex);
263 hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head,
264 ksym_hlist) {
265 unregister_wide_hw_breakpoint(entry->ksym_hbp);
266 ksym_filter_entry_count--;
267 hlist_del_rcu(&(entry->ksym_hlist));
268 synchronize_rcu();
269 kfree(entry);
270 }
271 mutex_unlock(&ksym_tracer_mutex);
272}
273
274static ssize_t ksym_trace_filter_write(struct file *file,
275 const char __user *buffer,
276 size_t count, loff_t *ppos)
277{
278 struct trace_ksym *entry;
279 struct hlist_node *node;
280 char *input_string, *ksymname = NULL;
281 unsigned long ksym_addr = 0;
282 int ret, op, changed = 0;
283
284 input_string = kzalloc(count + 1, GFP_KERNEL);
285 if (!input_string)
286 return -ENOMEM;
287
288 if (copy_from_user(input_string, buffer, count)) {
289 kfree(input_string);
290 return -EFAULT;
291 }
292 input_string[count] = '\0';
293
294 strstrip(input_string);
295
296 /*
297 * Clear all breakpoints if:
298 * 1: echo > ksym_trace_filter
299 * 2: echo 0 > ksym_trace_filter
300 * 3: echo "*:---" > ksym_trace_filter
301 */
302 if (!input_string[0] || !strcmp(input_string, "0") ||
303 !strcmp(input_string, "*:---")) {
304 __ksym_trace_reset();
305 kfree(input_string);
306 return count;
307 }
308
309 ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr);
310 if (ret < 0) {
311 kfree(input_string);
312 return ret;
313 }
314
315 mutex_lock(&ksym_tracer_mutex);
316
317 ret = -EINVAL;
318 hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) {
319 if (entry->attr.bp_addr == ksym_addr) {
320 /* Check for malformed request: (6) */
321 if (entry->attr.bp_type != op)
322 changed = 1;
323 else
324 goto out;
325 break;
326 }
327 }
328 if (changed) {
329 unregister_wide_hw_breakpoint(entry->ksym_hbp);
330 entry->attr.bp_type = op;
331 ret = 0;
332 if (op > 0) {
333 entry->ksym_hbp =
334 register_wide_hw_breakpoint(&entry->attr,
335 ksym_hbp_handler);
336 if (IS_ERR(entry->ksym_hbp))
337 ret = PTR_ERR(entry->ksym_hbp);
338 else
339 goto out;
340 }
341 /* Error or "symbol:---" case: drop it */
342 ksym_filter_entry_count--;
343 hlist_del_rcu(&(entry->ksym_hlist));
344 synchronize_rcu();
345 kfree(entry);
346 goto out;
347 } else {
348 /* Check for malformed request: (4) */
349 if (op == 0)
350 goto out;
351 ret = process_new_ksym_entry(ksymname, op, ksym_addr);
352 }
353out:
354 mutex_unlock(&ksym_tracer_mutex);
355
356 kfree(input_string);
357
358 if (!ret)
359 ret = count;
360 return ret;
361}
362
363static const struct file_operations ksym_tracing_fops = {
364 .open = tracing_open_generic,
365 .read = ksym_trace_filter_read,
366 .write = ksym_trace_filter_write,
367};
368
369static void ksym_trace_reset(struct trace_array *tr)
370{
371 ksym_tracing_enabled = 0;
372 __ksym_trace_reset();
373}
374
375static int ksym_trace_init(struct trace_array *tr)
376{
377 int cpu, ret = 0;
378
379 for_each_online_cpu(cpu)
380 tracing_reset(tr, cpu);
381 ksym_tracing_enabled = 1;
382 ksym_trace_array = tr;
383
384 return ret;
385}
386
387static void ksym_trace_print_header(struct seq_file *m)
388{
389 seq_puts(m,
390 "# TASK-PID CPU# Symbol "
391 "Type Function\n");
392 seq_puts(m,
393 "# | | | "
394 " | |\n");
395}
396
397static enum print_line_t ksym_trace_output(struct trace_iterator *iter)
398{
399 struct trace_entry *entry = iter->ent;
400 struct trace_seq *s = &iter->seq;
401 struct ksym_trace_entry *field;
402 char str[KSYM_SYMBOL_LEN];
403 int ret;
404
405 if (entry->type != TRACE_KSYM)
406 return TRACE_TYPE_UNHANDLED;
407
408 trace_assign_type(field, entry);
409
410 ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd,
411 entry->pid, iter->cpu, (char *)field->addr);
412 if (!ret)
413 return TRACE_TYPE_PARTIAL_LINE;
414
415 switch (field->type) {
416 case HW_BREAKPOINT_R:
417 ret = trace_seq_printf(s, " R ");
418 break;
419 case HW_BREAKPOINT_W:
420 ret = trace_seq_printf(s, " W ");
421 break;
422 case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
423 ret = trace_seq_printf(s, " RW ");
424 break;
425 default:
426 return TRACE_TYPE_PARTIAL_LINE;
427 }
428
429 if (!ret)
430 return TRACE_TYPE_PARTIAL_LINE;
431
432 sprint_symbol(str, field->ip);
433 ret = trace_seq_printf(s, "%s\n", str);
434 if (!ret)
435 return TRACE_TYPE_PARTIAL_LINE;
436
437 return TRACE_TYPE_HANDLED;
438}
439
440struct tracer ksym_tracer __read_mostly =
441{
442 .name = "ksym_tracer",
443 .init = ksym_trace_init,
444 .reset = ksym_trace_reset,
445#ifdef CONFIG_FTRACE_SELFTEST
446 .selftest = trace_selftest_startup_ksym,
447#endif
448 .print_header = ksym_trace_print_header,
449 .print_line = ksym_trace_output
450};
451
452__init static int init_ksym_trace(void)
453{
454 struct dentry *d_tracer;
455 struct dentry *entry;
456
457 d_tracer = tracing_init_dentry();
458 ksym_filter_entry_count = 0;
459
460 entry = debugfs_create_file("ksym_trace_filter", 0644, d_tracer,
461 NULL, &ksym_tracing_fops);
462 if (!entry)
463 pr_warning("Could not create debugfs "
464 "'ksym_trace_filter' file\n");
465
466 return register_tracer(&ksym_tracer);
467}
468device_initcall(init_ksym_trace);
469
470
471#ifdef CONFIG_PROFILE_KSYM_TRACER
472static int ksym_tracer_stat_headers(struct seq_file *m)
473{
474 seq_puts(m, " Access Type ");
475 seq_puts(m, " Symbol Counter\n");
476 seq_puts(m, " ----------- ");
477 seq_puts(m, " ------ -------\n");
478 return 0;
479}
480
481static int ksym_tracer_stat_show(struct seq_file *m, void *v)
482{
483 struct hlist_node *stat = v;
484 struct trace_ksym *entry;
485 int access_type = 0;
486 char fn_name[KSYM_NAME_LEN];
487
488 entry = hlist_entry(stat, struct trace_ksym, ksym_hlist);
489
490 access_type = entry->attr.bp_type;
491
492 switch (access_type) {
493 case HW_BREAKPOINT_R:
494 seq_puts(m, " R ");
495 break;
496 case HW_BREAKPOINT_W:
497 seq_puts(m, " W ");
498 break;
499 case HW_BREAKPOINT_R | HW_BREAKPOINT_W:
500 seq_puts(m, " RW ");
501 break;
502 default:
503 seq_puts(m, " NA ");
504 }
505
506 if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0)
507 seq_printf(m, " %-36s", fn_name);
508 else
509 seq_printf(m, " %-36s", "<NA>");
510 seq_printf(m, " %15lu\n", entry->counter);
511
512 return 0;
513}
514
515static void *ksym_tracer_stat_start(struct tracer_stat *trace)
516{
517 return ksym_filter_head.first;
518}
519
520static void *
521ksym_tracer_stat_next(void *v, int idx)
522{
523 struct hlist_node *stat = v;
524
525 return stat->next;
526}
527
528static struct tracer_stat ksym_tracer_stats = {
529 .name = "ksym_tracer",
530 .stat_start = ksym_tracer_stat_start,
531 .stat_next = ksym_tracer_stat_next,
532 .stat_headers = ksym_tracer_stat_headers,
533 .stat_show = ksym_tracer_stat_show
534};
535
536__init static int ksym_tracer_stat_init(void)
537{
538 int ret;
539
540 ret = register_stat_tracer(&ksym_tracer_stats);
541 if (ret) {
542 printk(KERN_WARNING "Warning: could not register "
543 "ksym tracer stats\n");
544 return 1;
545 }
546
547 return 0;
548}
549fs_initcall(ksym_tracer_stat_init);
550#endif /* CONFIG_PROFILE_KSYM_TRACER */
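
The filter format parsed by parse_ksym_trace_str() above is one "<ksym_name>:<op>" request per write, with <op> one of rw-, -w- or ---. A hedged userspace sketch of arming a read/write breakpoint follows; the debugfs mount point and the choice of pid_max as the watched symbol are assumptions, not part of this patch.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* assumes debugfs is mounted at /sys/kernel/debug */
	const char *path = "/sys/kernel/debug/tracing/ksym_trace_filter";
	/* "<ksym_name>:<op>" as described in the comment block above */
	const char *req = "pid_max:rw-\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open ksym_trace_filter");
		return 1;
	}
	if (write(fd, req, strlen(req)) != (ssize_t)strlen(req))
		perror("write filter request");
	close(fd);
	return 0;
}
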
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index f572f44c6e1e..b6c12c6a1bcd 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -69,6 +69,9 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
69 * @s: trace sequence descriptor 69 * @s: trace sequence descriptor
70 * @fmt: printf format string 70 * @fmt: printf format string
71 * 71 *
 72 * It returns 0 if the trace would exceed the buffer's free
 73 * space, 1 otherwise.
74 *
72 * The tracer may use either sequence operations or its own 75 * The tracer may use either sequence operations or its own
 73 * copy to user routines. To simplify formatting of a trace 76 * copy to user routines. To simplify formatting of a trace
74 * trace_seq_printf is used to store strings into a special 77 * trace_seq_printf is used to store strings into a special
@@ -95,7 +98,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
95 98
96 s->len += ret; 99 s->len += ret;
97 100
98 return len; 101 return 1;
99} 102}
100EXPORT_SYMBOL_GPL(trace_seq_printf); 103EXPORT_SYMBOL_GPL(trace_seq_printf);
101 104
@@ -486,16 +489,18 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
486 hardirq ? 'h' : softirq ? 's' : '.')) 489 hardirq ? 'h' : softirq ? 's' : '.'))
487 return 0; 490 return 0;
488 491
489 if (entry->lock_depth < 0) 492 if (entry->preempt_count)
490 ret = trace_seq_putc(s, '.'); 493 ret = trace_seq_printf(s, "%x", entry->preempt_count);
491 else 494 else
492 ret = trace_seq_printf(s, "%d", entry->lock_depth); 495 ret = trace_seq_putc(s, '.');
496
493 if (!ret) 497 if (!ret)
494 return 0; 498 return 0;
495 499
496 if (entry->preempt_count) 500 if (entry->lock_depth < 0)
497 return trace_seq_printf(s, "%x", entry->preempt_count); 501 return trace_seq_putc(s, '.');
498 return trace_seq_putc(s, '.'); 502
503 return trace_seq_printf(s, "%d", entry->lock_depth);
499} 504}
500 505
501static int 506static int
@@ -883,7 +888,7 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
883 trace_assign_type(field, iter->ent); 888 trace_assign_type(field, iter->ent);
884 889
885 if (!S) 890 if (!S)
886 task_state_char(field->prev_state); 891 S = task_state_char(field->prev_state);
887 T = task_state_char(field->next_state); 892 T = task_state_char(field->next_state);
888 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", 893 if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
889 field->prev_pid, 894 field->prev_pid,
@@ -918,7 +923,7 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
918 trace_assign_type(field, iter->ent); 923 trace_assign_type(field, iter->ent);
919 924
920 if (!S) 925 if (!S)
921 task_state_char(field->prev_state); 926 S = task_state_char(field->prev_state);
922 T = task_state_char(field->next_state); 927 T = task_state_char(field->next_state);
923 928
924 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); 929 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index d2cdbabb4ead..dc98309e839a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -17,6 +17,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
17 case TRACE_GRAPH_ENT: 17 case TRACE_GRAPH_ENT:
18 case TRACE_GRAPH_RET: 18 case TRACE_GRAPH_RET:
19 case TRACE_HW_BRANCHES: 19 case TRACE_HW_BRANCHES:
20 case TRACE_KSYM:
20 return 1; 21 return 1;
21 } 22 }
22 return 0; 23 return 0;
@@ -808,3 +809,57 @@ trace_selftest_startup_hw_branches(struct tracer *trace,
808 return ret; 809 return ret;
809} 810}
810#endif /* CONFIG_HW_BRANCH_TRACER */ 811#endif /* CONFIG_HW_BRANCH_TRACER */
812
813#ifdef CONFIG_KSYM_TRACER
814static int ksym_selftest_dummy;
815
816int
817trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
818{
819 unsigned long count;
820 int ret;
821
822 /* start the tracing */
823 ret = tracer_init(trace, tr);
824 if (ret) {
825 warn_failed_init_tracer(trace, ret);
826 return ret;
827 }
828
829 ksym_selftest_dummy = 0;
830 /* Register the read-write tracing request */
831
832 ret = process_new_ksym_entry("ksym_selftest_dummy",
833 HW_BREAKPOINT_R | HW_BREAKPOINT_W,
834 (unsigned long)(&ksym_selftest_dummy));
835
836 if (ret < 0) {
837 printk(KERN_CONT "ksym_trace read-write startup test failed\n");
838 goto ret_path;
839 }
840 /* Perform a read and a write operation over the dummy variable to
841 * trigger the tracer
842 */
843 if (ksym_selftest_dummy == 0)
844 ksym_selftest_dummy++;
845
846 /* stop the tracing. */
847 tracing_stop();
848 /* check the trace buffer */
849 ret = trace_test_buffer(tr, &count);
850 trace->reset(tr);
851 tracing_start();
852
853 /* read & write operations - one each is performed on the dummy variable
854 * triggering two entries in the trace buffer
855 */
856 if (!ret && count != 2) {
 857		printk(KERN_CONT "Ksym tracer startup test failed\n");
858 ret = -1;
859 }
860
861ret_path:
862 return ret;
863}
864#endif /* CONFIG_KSYM_TRACER */
865
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 9fbce6c9d2e1..57501d90096a 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -14,6 +14,43 @@ static int sys_refcount_exit;
14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); 14static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); 15static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
16 16
17extern unsigned long __start_syscalls_metadata[];
18extern unsigned long __stop_syscalls_metadata[];
19
20static struct syscall_metadata **syscalls_metadata;
21
22static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
23{
24 struct syscall_metadata *start;
25 struct syscall_metadata *stop;
26 char str[KSYM_SYMBOL_LEN];
27
28
29 start = (struct syscall_metadata *)__start_syscalls_metadata;
30 stop = (struct syscall_metadata *)__stop_syscalls_metadata;
31 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
32
33 for ( ; start < stop; start++) {
34 /*
35 * Only compare after the "sys" prefix. Archs that use
36 * syscall wrappers may have syscalls symbols aliases prefixed
37 * with "SyS" instead of "sys", leading to an unwanted
38 * mismatch.
39 */
40 if (start->name && !strcmp(start->name + 3, str + 3))
41 return start;
42 }
43 return NULL;
44}
45
46static struct syscall_metadata *syscall_nr_to_meta(int nr)
47{
48 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
49 return NULL;
50
51 return syscalls_metadata[nr];
52}
53
17enum print_line_t 54enum print_line_t
18print_syscall_enter(struct trace_iterator *iter, int flags) 55print_syscall_enter(struct trace_iterator *iter, int flags)
19{ 56{
@@ -30,7 +67,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
30 if (!entry) 67 if (!entry)
31 goto end; 68 goto end;
32 69
33 if (entry->enter_id != ent->type) { 70 if (entry->enter_event->id != ent->type) {
34 WARN_ON_ONCE(1); 71 WARN_ON_ONCE(1);
35 goto end; 72 goto end;
36 } 73 }
@@ -85,7 +122,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
85 return TRACE_TYPE_HANDLED; 122 return TRACE_TYPE_HANDLED;
86 } 123 }
87 124
88 if (entry->exit_id != ent->type) { 125 if (entry->exit_event->id != ent->type) {
89 WARN_ON_ONCE(1); 126 WARN_ON_ONCE(1);
90 return TRACE_TYPE_UNHANDLED; 127 return TRACE_TYPE_UNHANDLED;
91 } 128 }
@@ -103,24 +140,19 @@ extern char *__bad_type_size(void);
103#define SYSCALL_FIELD(type, name) \ 140#define SYSCALL_FIELD(type, name) \
104 sizeof(type) != sizeof(trace.name) ? \ 141 sizeof(type) != sizeof(trace.name) ? \
105 __bad_type_size() : \ 142 __bad_type_size() : \
106 #type, #name, offsetof(typeof(trace), name), sizeof(trace.name) 143 #type, #name, offsetof(typeof(trace), name), \
144 sizeof(trace.name), is_signed_type(type)
107 145
108int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s) 146int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
109{ 147{
110 int i; 148 int i;
111 int nr;
112 int ret; 149 int ret;
113 struct syscall_metadata *entry; 150 struct syscall_metadata *entry = call->data;
114 struct syscall_trace_enter trace; 151 struct syscall_trace_enter trace;
115 int offset = offsetof(struct syscall_trace_enter, args); 152 int offset = offsetof(struct syscall_trace_enter, args);
116 153
117 nr = syscall_name_to_nr(call->data); 154 ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
118 entry = syscall_nr_to_meta(nr); 155 "\tsigned:%u;\n",
119
120 if (!entry)
121 return 0;
122
123 ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n",
124 SYSCALL_FIELD(int, nr)); 156 SYSCALL_FIELD(int, nr));
125 if (!ret) 157 if (!ret)
126 return 0; 158 return 0;
@@ -130,8 +162,10 @@ int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
130 entry->args[i]); 162 entry->args[i]);
131 if (!ret) 163 if (!ret)
132 return 0; 164 return 0;
133 ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;\n", offset, 165 ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
134 sizeof(unsigned long)); 166 "\tsigned:%u;\n", offset,
167 sizeof(unsigned long),
168 is_signed_type(unsigned long));
135 if (!ret) 169 if (!ret)
136 return 0; 170 return 0;
137 offset += sizeof(unsigned long); 171 offset += sizeof(unsigned long);
@@ -163,10 +197,12 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
163 struct syscall_trace_exit trace; 197 struct syscall_trace_exit trace;
164 198
165 ret = trace_seq_printf(s, 199 ret = trace_seq_printf(s,
166 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n" 200 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
167 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n", 201 "\tsigned:%u;\n"
202 "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
203 "\tsigned:%u;\n",
168 SYSCALL_FIELD(int, nr), 204 SYSCALL_FIELD(int, nr),
169 SYSCALL_FIELD(unsigned long, ret)); 205 SYSCALL_FIELD(long, ret));
170 if (!ret) 206 if (!ret)
171 return 0; 207 return 0;
172 208
@@ -176,22 +212,19 @@ int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
176int syscall_enter_define_fields(struct ftrace_event_call *call) 212int syscall_enter_define_fields(struct ftrace_event_call *call)
177{ 213{
178 struct syscall_trace_enter trace; 214 struct syscall_trace_enter trace;
179 struct syscall_metadata *meta; 215 struct syscall_metadata *meta = call->data;
180 int ret; 216 int ret;
181 int nr;
182 int i; 217 int i;
183 int offset = offsetof(typeof(trace), args); 218 int offset = offsetof(typeof(trace), args);
184 219
185 nr = syscall_name_to_nr(call->data);
186 meta = syscall_nr_to_meta(nr);
187
188 if (!meta)
189 return 0;
190
191 ret = trace_define_common_fields(call); 220 ret = trace_define_common_fields(call);
192 if (ret) 221 if (ret)
193 return ret; 222 return ret;
194 223
224 ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
225 if (ret)
226 return ret;
227
195 for (i = 0; i < meta->nb_args; i++) { 228 for (i = 0; i < meta->nb_args; i++) {
196 ret = trace_define_field(call, meta->types[i], 229 ret = trace_define_field(call, meta->types[i],
197 meta->args[i], offset, 230 meta->args[i], offset,
@@ -212,7 +245,11 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
212 if (ret) 245 if (ret)
213 return ret; 246 return ret;
214 247
215 ret = trace_define_field(call, SYSCALL_FIELD(unsigned long, ret), 0, 248 ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
249 if (ret)
250 return ret;
251
252 ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
216 FILTER_OTHER); 253 FILTER_OTHER);
217 254
218 return ret; 255 return ret;
@@ -239,8 +276,8 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
239 276
240 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; 277 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
241 278
242 event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id, 279 event = trace_current_buffer_lock_reserve(&buffer,
243 size, 0, 0); 280 sys_data->enter_event->id, size, 0, 0);
244 if (!event) 281 if (!event)
245 return; 282 return;
246 283
@@ -271,8 +308,8 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
271 if (!sys_data) 308 if (!sys_data)
272 return; 309 return;
273 310
274 event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id, 311 event = trace_current_buffer_lock_reserve(&buffer,
275 sizeof(*entry), 0, 0); 312 sys_data->exit_event->id, sizeof(*entry), 0, 0);
276 if (!event) 313 if (!event)
277 return; 314 return;
278 315
@@ -285,14 +322,12 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
285 trace_current_buffer_unlock_commit(buffer, event, 0, 0); 322 trace_current_buffer_unlock_commit(buffer, event, 0, 0);
286} 323}
287 324
288int reg_event_syscall_enter(void *ptr) 325int reg_event_syscall_enter(struct ftrace_event_call *call)
289{ 326{
290 int ret = 0; 327 int ret = 0;
291 int num; 328 int num;
292 char *name;
293 329
294 name = (char *)ptr; 330 num = ((struct syscall_metadata *)call->data)->syscall_nr;
295 num = syscall_name_to_nr(name);
296 if (num < 0 || num >= NR_syscalls) 331 if (num < 0 || num >= NR_syscalls)
297 return -ENOSYS; 332 return -ENOSYS;
298 mutex_lock(&syscall_trace_lock); 333 mutex_lock(&syscall_trace_lock);
@@ -309,13 +344,11 @@ int reg_event_syscall_enter(void *ptr)
309 return ret; 344 return ret;
310} 345}
311 346
312void unreg_event_syscall_enter(void *ptr) 347void unreg_event_syscall_enter(struct ftrace_event_call *call)
313{ 348{
314 int num; 349 int num;
315 char *name;
316 350
317 name = (char *)ptr; 351 num = ((struct syscall_metadata *)call->data)->syscall_nr;
318 num = syscall_name_to_nr(name);
319 if (num < 0 || num >= NR_syscalls) 352 if (num < 0 || num >= NR_syscalls)
320 return; 353 return;
321 mutex_lock(&syscall_trace_lock); 354 mutex_lock(&syscall_trace_lock);
@@ -326,14 +359,12 @@ void unreg_event_syscall_enter(void *ptr)
326 mutex_unlock(&syscall_trace_lock); 359 mutex_unlock(&syscall_trace_lock);
327} 360}
328 361
329int reg_event_syscall_exit(void *ptr) 362int reg_event_syscall_exit(struct ftrace_event_call *call)
330{ 363{
331 int ret = 0; 364 int ret = 0;
332 int num; 365 int num;
333 char *name;
334 366
335 name = (char *)ptr; 367 num = ((struct syscall_metadata *)call->data)->syscall_nr;
336 num = syscall_name_to_nr(name);
337 if (num < 0 || num >= NR_syscalls) 368 if (num < 0 || num >= NR_syscalls)
338 return -ENOSYS; 369 return -ENOSYS;
339 mutex_lock(&syscall_trace_lock); 370 mutex_lock(&syscall_trace_lock);
@@ -350,13 +381,11 @@ int reg_event_syscall_exit(void *ptr)
350 return ret; 381 return ret;
351} 382}
352 383
353void unreg_event_syscall_exit(void *ptr) 384void unreg_event_syscall_exit(struct ftrace_event_call *call)
354{ 385{
355 int num; 386 int num;
356 char *name;
357 387
358 name = (char *)ptr; 388 num = ((struct syscall_metadata *)call->data)->syscall_nr;
359 num = syscall_name_to_nr(name);
360 if (num < 0 || num >= NR_syscalls) 389 if (num < 0 || num >= NR_syscalls)
361 return; 390 return;
362 mutex_lock(&syscall_trace_lock); 391 mutex_lock(&syscall_trace_lock);
@@ -367,13 +396,44 @@ void unreg_event_syscall_exit(void *ptr)
367 mutex_unlock(&syscall_trace_lock); 396 mutex_unlock(&syscall_trace_lock);
368} 397}
369 398
370struct trace_event event_syscall_enter = { 399int init_syscall_trace(struct ftrace_event_call *call)
371 .trace = print_syscall_enter, 400{
372}; 401 int id;
402
403 id = register_ftrace_event(call->event);
404 if (!id)
405 return -ENODEV;
406 call->id = id;
407 INIT_LIST_HEAD(&call->fields);
408 return 0;
409}
410
411int __init init_ftrace_syscalls(void)
412{
413 struct syscall_metadata *meta;
414 unsigned long addr;
415 int i;
416
417 syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
418 NR_syscalls, GFP_KERNEL);
419 if (!syscalls_metadata) {
420 WARN_ON(1);
421 return -ENOMEM;
422 }
423
424 for (i = 0; i < NR_syscalls; i++) {
425 addr = arch_syscall_addr(i);
426 meta = find_syscall_meta(addr);
427 if (!meta)
428 continue;
429
430 meta->syscall_nr = i;
431 syscalls_metadata[i] = meta;
432 }
373 433
374struct trace_event event_syscall_exit = { 434 return 0;
375 .trace = print_syscall_exit, 435}
376}; 436core_initcall(init_ftrace_syscalls);
377 437
378#ifdef CONFIG_EVENT_PROFILE 438#ifdef CONFIG_EVENT_PROFILE
379 439
@@ -387,8 +447,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
387 struct syscall_metadata *sys_data; 447 struct syscall_metadata *sys_data;
388 struct syscall_trace_enter *rec; 448 struct syscall_trace_enter *rec;
389 unsigned long flags; 449 unsigned long flags;
450 char *trace_buf;
390 char *raw_data; 451 char *raw_data;
391 int syscall_nr; 452 int syscall_nr;
453 int rctx;
392 int size; 454 int size;
393 int cpu; 455 int cpu;
394 456
@@ -412,41 +474,42 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
412 /* Protect the per cpu buffer, begin the rcu read side */ 474 /* Protect the per cpu buffer, begin the rcu read side */
413 local_irq_save(flags); 475 local_irq_save(flags);
414 476
477 rctx = perf_swevent_get_recursion_context();
478 if (rctx < 0)
479 goto end_recursion;
480
415 cpu = smp_processor_id(); 481 cpu = smp_processor_id();
416 482
417 if (in_nmi()) 483 trace_buf = rcu_dereference(perf_trace_buf);
418 raw_data = rcu_dereference(trace_profile_buf_nmi);
419 else
420 raw_data = rcu_dereference(trace_profile_buf);
421 484
422 if (!raw_data) 485 if (!trace_buf)
423 goto end; 486 goto end;
424 487
425 raw_data = per_cpu_ptr(raw_data, cpu); 488 raw_data = per_cpu_ptr(trace_buf, cpu);
426 489
427 /* zero the dead bytes from align to not leak stack to user */ 490 /* zero the dead bytes from align to not leak stack to user */
428 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 491 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
429 492
430 rec = (struct syscall_trace_enter *) raw_data; 493 rec = (struct syscall_trace_enter *) raw_data;
431 tracing_generic_entry_update(&rec->ent, 0, 0); 494 tracing_generic_entry_update(&rec->ent, 0, 0);
432 rec->ent.type = sys_data->enter_id; 495 rec->ent.type = sys_data->enter_event->id;
433 rec->nr = syscall_nr; 496 rec->nr = syscall_nr;
434 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 497 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
435 (unsigned long *)&rec->args); 498 (unsigned long *)&rec->args);
436 perf_tp_event(sys_data->enter_id, 0, 1, rec, size); 499 perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);
437 500
438end: 501end:
502 perf_swevent_put_recursion_context(rctx);
503end_recursion:
439 local_irq_restore(flags); 504 local_irq_restore(flags);
440} 505}
441 506
442int reg_prof_syscall_enter(char *name) 507int prof_sysenter_enable(struct ftrace_event_call *call)
443{ 508{
444 int ret = 0; 509 int ret = 0;
445 int num; 510 int num;
446 511
447 num = syscall_name_to_nr(name); 512 num = ((struct syscall_metadata *)call->data)->syscall_nr;
448 if (num < 0 || num >= NR_syscalls)
449 return -ENOSYS;
450 513
451 mutex_lock(&syscall_trace_lock); 514 mutex_lock(&syscall_trace_lock);
452 if (!sys_prof_refcount_enter) 515 if (!sys_prof_refcount_enter)
@@ -462,13 +525,11 @@ int reg_prof_syscall_enter(char *name)
462 return ret; 525 return ret;
463} 526}
464 527
465void unreg_prof_syscall_enter(char *name) 528void prof_sysenter_disable(struct ftrace_event_call *call)
466{ 529{
467 int num; 530 int num;
468 531
469 num = syscall_name_to_nr(name); 532 num = ((struct syscall_metadata *)call->data)->syscall_nr;
470 if (num < 0 || num >= NR_syscalls)
471 return;
472 533
473 mutex_lock(&syscall_trace_lock); 534 mutex_lock(&syscall_trace_lock);
474 sys_prof_refcount_enter--; 535 sys_prof_refcount_enter--;
@@ -484,7 +545,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
484 struct syscall_trace_exit *rec; 545 struct syscall_trace_exit *rec;
485 unsigned long flags; 546 unsigned long flags;
486 int syscall_nr; 547 int syscall_nr;
548 char *trace_buf;
487 char *raw_data; 549 char *raw_data;
550 int rctx;
488 int size; 551 int size;
489 int cpu; 552 int cpu;
490 553
@@ -510,17 +573,19 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
510 573
511 /* Protect the per cpu buffer, begin the rcu read side */ 574 /* Protect the per cpu buffer, begin the rcu read side */
512 local_irq_save(flags); 575 local_irq_save(flags);
576
577 rctx = perf_swevent_get_recursion_context();
578 if (rctx < 0)
579 goto end_recursion;
580
513 cpu = smp_processor_id(); 581 cpu = smp_processor_id();
514 582
515 if (in_nmi()) 583 trace_buf = rcu_dereference(perf_trace_buf);
516 raw_data = rcu_dereference(trace_profile_buf_nmi);
517 else
518 raw_data = rcu_dereference(trace_profile_buf);
519 584
520 if (!raw_data) 585 if (!trace_buf)
521 goto end; 586 goto end;
522 587
523 raw_data = per_cpu_ptr(raw_data, cpu); 588 raw_data = per_cpu_ptr(trace_buf, cpu);
524 589
525 /* zero the dead bytes from align to not leak stack to user */ 590 /* zero the dead bytes from align to not leak stack to user */
526 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 591 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -528,24 +593,24 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
528 rec = (struct syscall_trace_exit *)raw_data; 593 rec = (struct syscall_trace_exit *)raw_data;
529 594
530 tracing_generic_entry_update(&rec->ent, 0, 0); 595 tracing_generic_entry_update(&rec->ent, 0, 0);
531 rec->ent.type = sys_data->exit_id; 596 rec->ent.type = sys_data->exit_event->id;
532 rec->nr = syscall_nr; 597 rec->nr = syscall_nr;
533 rec->ret = syscall_get_return_value(current, regs); 598 rec->ret = syscall_get_return_value(current, regs);
534 599
535 perf_tp_event(sys_data->exit_id, 0, 1, rec, size); 600 perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);
536 601
537end: 602end:
603 perf_swevent_put_recursion_context(rctx);
604end_recursion:
538 local_irq_restore(flags); 605 local_irq_restore(flags);
539} 606}
540 607
541int reg_prof_syscall_exit(char *name) 608int prof_sysexit_enable(struct ftrace_event_call *call)
542{ 609{
543 int ret = 0; 610 int ret = 0;
544 int num; 611 int num;
545 612
546 num = syscall_name_to_nr(name); 613 num = ((struct syscall_metadata *)call->data)->syscall_nr;
547 if (num < 0 || num >= NR_syscalls)
548 return -ENOSYS;
549 614
550 mutex_lock(&syscall_trace_lock); 615 mutex_lock(&syscall_trace_lock);
551 if (!sys_prof_refcount_exit) 616 if (!sys_prof_refcount_exit)
@@ -561,13 +626,11 @@ int reg_prof_syscall_exit(char *name)
561 return ret; 626 return ret;
562} 627}
563 628
564void unreg_prof_syscall_exit(char *name) 629void prof_sysexit_disable(struct ftrace_event_call *call)
565{ 630{
566 int num; 631 int num;
567 632
568 num = syscall_name_to_nr(name); 633 num = ((struct syscall_metadata *)call->data)->syscall_nr;
569 if (num < 0 || num >= NR_syscalls)
570 return;
571 634
572 mutex_lock(&syscall_trace_lock); 635 mutex_lock(&syscall_trace_lock);
573 sys_prof_refcount_exit--; 636 sys_prof_refcount_exit--;
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
new file mode 100644
index 000000000000..eb27fd3430a2
--- /dev/null
+++ b/kernel/user-return-notifier.c
@@ -0,0 +1,44 @@
1
2#include <linux/user-return-notifier.h>
3#include <linux/percpu.h>
4#include <linux/sched.h>
5#include <linux/module.h>
6
7static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
8
9/*
10 * Request a notification when the current cpu returns to userspace. Must be
11 * called in atomic context. The notifier will also be called in atomic
12 * context.
13 */
14void user_return_notifier_register(struct user_return_notifier *urn)
15{
16 set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
17 hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
18}
19EXPORT_SYMBOL_GPL(user_return_notifier_register);
20
21/*
 22 * Removes a registered user return notifier. Must be called from atomic
 23 * context, and from the same cpu on which registration occurred.
24 */
25void user_return_notifier_unregister(struct user_return_notifier *urn)
26{
27 hlist_del(&urn->link);
28 if (hlist_empty(&__get_cpu_var(return_notifier_list)))
29 clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
30}
31EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
32
33/* Calls registered user return notifiers */
34void fire_user_return_notifiers(void)
35{
36 struct user_return_notifier *urn;
37 struct hlist_node *tmp1, *tmp2;
38 struct hlist_head *head;
39
40 head = &get_cpu_var(return_notifier_list);
41 hlist_for_each_entry_safe(urn, tmp1, tmp2, head, link)
42 urn->on_user_return(urn);
43 put_cpu_var(return_notifier_list);
44}
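
The file above keeps a per-cpu hlist of callbacks and fires every entry on the way back to user mode. The sketch below is a plain userspace analogue of that register/unregister/fire pattern, using a singly linked list in place of hlist and per-cpu data; it illustrates the data-structure pattern only and is not the kernel user-return-notifier API.

#include <stdio.h>

struct urn_demo {
	void (*on_user_return)(struct urn_demo *urn);
	struct urn_demo *next;
};

static struct urn_demo *notifier_list;

/* mirrors user_return_notifier_register(): push at the head of the list */
static void urn_register(struct urn_demo *urn)
{
	urn->next = notifier_list;
	notifier_list = urn;
}

/* mirrors user_return_notifier_unregister(): unlink the entry */
static void urn_unregister(struct urn_demo *urn)
{
	struct urn_demo **pp;

	for (pp = &notifier_list; *pp; pp = &(*pp)->next) {
		if (*pp == urn) {
			*pp = urn->next;
			break;
		}
	}
}

/* mirrors fire_user_return_notifiers(): call every registered entry */
static void urn_fire_all(void)
{
	struct urn_demo *urn;

	for (urn = notifier_list; urn; urn = urn->next)
		urn->on_user_return(urn);
}

static void say_fired(struct urn_demo *urn)
{
	printf("notifier %p fired\n", (void *)urn);
}

int main(void)
{
	struct urn_demo a = { .on_user_return = say_fired };
	struct urn_demo b = { .on_user_return = say_fired };

	urn_register(&a);
	urn_register(&b);
	urn_fire_all();
	urn_unregister(&a);
	urn_fire_all();
	return 0;
}
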
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
330 */ 330 */
331static void free_user(struct user_struct *up, unsigned long flags) 331static void free_user(struct user_struct *up, unsigned long flags)
332{ 332{
333 spin_unlock_irqrestore(&uidhash_lock, flags);
334 INIT_DELAYED_WORK(&up->work, cleanup_user_struct); 333 INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
335 schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); 334 schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
335 spin_unlock_irqrestore(&uidhash_lock, flags);
336} 336}
337 337
338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ 338#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 69eae358a726..a2cd77e70d4d 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -57,78 +57,47 @@ static int proc_do_uts_string(ctl_table *table, int write,
57#define proc_do_uts_string NULL 57#define proc_do_uts_string NULL
58#endif 58#endif
59 59
60
61#ifdef CONFIG_SYSCTL_SYSCALL
62/* The generic string strategy routine: */
63static int sysctl_uts_string(ctl_table *table,
64 void __user *oldval, size_t __user *oldlenp,
65 void __user *newval, size_t newlen)
66{
67 struct ctl_table uts_table;
68 int r, write;
69 write = newval && newlen;
70 memcpy(&uts_table, table, sizeof(uts_table));
71 uts_table.data = get_uts(table, write);
72 r = sysctl_string(&uts_table, oldval, oldlenp, newval, newlen);
73 put_uts(table, write, uts_table.data);
74 return r;
75}
76#else
77#define sysctl_uts_string NULL
78#endif
79
80static struct ctl_table uts_kern_table[] = { 60static struct ctl_table uts_kern_table[] = {
81 { 61 {
82 .ctl_name = KERN_OSTYPE,
83 .procname = "ostype", 62 .procname = "ostype",
84 .data = init_uts_ns.name.sysname, 63 .data = init_uts_ns.name.sysname,
85 .maxlen = sizeof(init_uts_ns.name.sysname), 64 .maxlen = sizeof(init_uts_ns.name.sysname),
86 .mode = 0444, 65 .mode = 0444,
87 .proc_handler = proc_do_uts_string, 66 .proc_handler = proc_do_uts_string,
88 .strategy = sysctl_uts_string,
89 }, 67 },
90 { 68 {
91 .ctl_name = KERN_OSRELEASE,
92 .procname = "osrelease", 69 .procname = "osrelease",
93 .data = init_uts_ns.name.release, 70 .data = init_uts_ns.name.release,
94 .maxlen = sizeof(init_uts_ns.name.release), 71 .maxlen = sizeof(init_uts_ns.name.release),
95 .mode = 0444, 72 .mode = 0444,
96 .proc_handler = proc_do_uts_string, 73 .proc_handler = proc_do_uts_string,
97 .strategy = sysctl_uts_string,
98 }, 74 },
99 { 75 {
100 .ctl_name = KERN_VERSION,
101 .procname = "version", 76 .procname = "version",
102 .data = init_uts_ns.name.version, 77 .data = init_uts_ns.name.version,
103 .maxlen = sizeof(init_uts_ns.name.version), 78 .maxlen = sizeof(init_uts_ns.name.version),
104 .mode = 0444, 79 .mode = 0444,
105 .proc_handler = proc_do_uts_string, 80 .proc_handler = proc_do_uts_string,
106 .strategy = sysctl_uts_string,
107 }, 81 },
108 { 82 {
109 .ctl_name = KERN_NODENAME,
110 .procname = "hostname", 83 .procname = "hostname",
111 .data = init_uts_ns.name.nodename, 84 .data = init_uts_ns.name.nodename,
112 .maxlen = sizeof(init_uts_ns.name.nodename), 85 .maxlen = sizeof(init_uts_ns.name.nodename),
113 .mode = 0644, 86 .mode = 0644,
114 .proc_handler = proc_do_uts_string, 87 .proc_handler = proc_do_uts_string,
115 .strategy = sysctl_uts_string,
116 }, 88 },
117 { 89 {
118 .ctl_name = KERN_DOMAINNAME,
119 .procname = "domainname", 90 .procname = "domainname",
120 .data = init_uts_ns.name.domainname, 91 .data = init_uts_ns.name.domainname,
121 .maxlen = sizeof(init_uts_ns.name.domainname), 92 .maxlen = sizeof(init_uts_ns.name.domainname),
122 .mode = 0644, 93 .mode = 0644,
123 .proc_handler = proc_do_uts_string, 94 .proc_handler = proc_do_uts_string,
124 .strategy = sysctl_uts_string,
125 }, 95 },
126 {} 96 {}
127}; 97};
128 98
129static struct ctl_table uts_root_table[] = { 99static struct ctl_table uts_root_table[] = {
130 { 100 {
131 .ctl_name = CTL_KERN,
132 .procname = "kernel", 101 .procname = "kernel",
133 .mode = 0555, 102 .mode = 0555,
134 .child = uts_kern_table, 103 .child = uts_kern_table,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
640EXPORT_SYMBOL(schedule_delayed_work); 640EXPORT_SYMBOL(schedule_delayed_work);
641 641
642/** 642/**
 643 * flush_delayed_work - block until a delayed_work's callback has terminated
644 * @dwork: the delayed work which is to be flushed
645 *
646 * Any timeout is cancelled, and any pending work is run immediately.
647 */
648void flush_delayed_work(struct delayed_work *dwork)
649{
650 if (del_timer_sync(&dwork->timer)) {
651 struct cpu_workqueue_struct *cwq;
652 cwq = wq_per_cpu(keventd_wq, get_cpu());
653 __queue_work(cwq, &dwork->work);
654 put_cpu();
655 }
656 flush_work(&dwork->work);
657}
658EXPORT_SYMBOL(flush_delayed_work);
659
660/**
643 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 661 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
644 * @cpu: cpu to use 662 * @cpu: cpu to use
645 * @dwork: job to be done 663 * @dwork: job to be done
@@ -667,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
667int schedule_on_each_cpu(work_func_t func) 685int schedule_on_each_cpu(work_func_t func)
668{ 686{
669 int cpu; 687 int cpu;
688 int orig = -1;
670 struct work_struct *works; 689 struct work_struct *works;
671 690
672 works = alloc_percpu(struct work_struct); 691 works = alloc_percpu(struct work_struct);
@@ -674,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
674 return -ENOMEM; 693 return -ENOMEM;
675 694
676 get_online_cpus(); 695 get_online_cpus();
696
697 /*
698 * When running in keventd don't schedule a work item on
699 * itself. Can just call directly because the work queue is
700 * already bound. This also is faster.
701 */
702 if (current_is_keventd())
703 orig = raw_smp_processor_id();
704
677 for_each_online_cpu(cpu) { 705 for_each_online_cpu(cpu) {
678 struct work_struct *work = per_cpu_ptr(works, cpu); 706 struct work_struct *work = per_cpu_ptr(works, cpu);
679 707
680 INIT_WORK(work, func); 708 INIT_WORK(work, func);
681 schedule_work_on(cpu, work); 709 if (cpu != orig)
710 schedule_work_on(cpu, work);
682 } 711 }
712 if (orig >= 0)
713 func(per_cpu_ptr(works, orig));
714
683 for_each_online_cpu(cpu) 715 for_each_online_cpu(cpu)
684 flush_work(per_cpu_ptr(works, cpu)); 716 flush_work(per_cpu_ptr(works, cpu));
717
685 put_online_cpus(); 718 put_online_cpus();
686 free_percpu(works); 719 free_percpu(works);
687 return 0; 720 return 0;