author     Ingo Molnar <mingo@elte.hu>   2011-02-16 07:27:18 -0500
committer  Ingo Molnar <mingo@elte.hu>   2011-02-16 07:27:23 -0500
commit     b00560f2d4de69bb12f66f9605985b516df98d77
tree       0c92fc994125dc3ddb635842715be29d8b16808b /kernel
parent     bf1af3a809506645b9130755b713b008da14737f
parent     4fe757dd48a9e95e1a071291f15dda5421dacb66
Merge branch 'perf/urgent' into perf/core
Merge reason: we need to queue up a dependent patch
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/capability.c      |  2 +-
 kernel/perf_event.c      | 19 +++++++++++++++----
 kernel/printk.c          | 54 +++++++++++++++++++++++++++++++++++-----------------
 kernel/ptrace.c          |  2 +-
 kernel/time/timer_list.c |  4 ++--
 kernel/timer.c           |  8 +++-----
 kernel/trace/blktrace.c  |  7 +++++++
 7 files changed, 64 insertions(+), 32 deletions(-)
diff --git a/kernel/capability.c b/kernel/capability.c
index 2f05303715a5..9e9385f132c8 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -306,7 +306,7 @@ int capable(int cap)
 		BUG();
 	}
 
-	if (security_capable(cap) == 0) {
+	if (security_capable(current_cred(), cap) == 0) {
 		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
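The change above reflects security_capable() growing an explicit credentials argument, so the LSM hook no longer implicitly assumes the current task. As a rough illustration of why the explicit argument matters, here is a minimal userspace sketch; struct cred, security_capable_model(), and the bitmask layout are simplifications, not the kernel's definitions.

/*
 * Minimal userspace sketch (not kernel code): a capability check that
 * takes the credentials to test explicitly, as security_capable() now
 * does, instead of implicitly using the current task's credentials.
 */
#include <stdio.h>

struct cred { unsigned long cap_effective; };

#define CAP_SYS_ADMIN 21   /* matches the kernel's capability number */

/* 0 == permitted, mirroring security_capable()'s return convention */
static int security_capable_model(const struct cred *cred, int cap)
{
	return (cred->cap_effective & (1UL << cap)) ? 0 : -1;
}

int main(void)
{
	struct cred c = { .cap_effective = 1UL << CAP_SYS_ADMIN };

	/* the caller names whose credentials are checked */
	if (security_capable_model(&c, CAP_SYS_ADMIN) == 0)
		printf("capability granted\n");
	return 0;
}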
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a353a4d6d00d..3d3f282fa50e 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -851,6 +851,10 @@ retry:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_event *event, int enable);
+
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
@@ -863,6 +867,17 @@ event_sched_in(struct perf_event *event,
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
 	event->oncpu = smp_processor_id();
+
+	/*
+	 * Unthrottle events, since we scheduled we might have missed several
+	 * ticks already, also for a heavily scheduling task there is little
+	 * guarantee it'll get a tick in a timely manner.
+	 */
+	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
+		perf_log_throttle(event, 1);
+		event->hw.interrupts = 0;
+	}
+
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -1661,10 +1676,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
 	}
 }
 
-#define MAX_INTERRUPTS (~0ULL)
-
-static void perf_log_throttle(struct perf_event *event, int enable);
-
 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
 {
 	u64 frequency = event->attr.sample_freq;
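The two hunks above move the MAX_INTERRUPTS sentinel and the perf_log_throttle() prototype earlier in the file so that event_sched_in() can unthrottle an event as it is scheduled back in. A minimal userspace sketch of that sentinel logic follows; the *_model names and pared-down structs are illustrative, not the kernel's.

/*
 * Userspace model of the unthrottle-on-sched-in logic added above.
 */
#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)   /* sentinel meaning "throttled" */

struct hw_perf_event { unsigned long long interrupts; };
struct perf_event { struct hw_perf_event hw; };

static void perf_log_throttle_model(struct perf_event *event, int enable)
{
	printf("throttle event logged: enable=%d\n", enable);
}

static void event_sched_in_model(struct perf_event *event)
{
	/*
	 * A throttled event carries the MAX_INTERRUPTS sentinel. Clear it
	 * when the event is scheduled back in, because a heavily scheduling
	 * task may not get a timer tick soon enough to do it for us.
	 */
	if (event->hw.interrupts == MAX_INTERRUPTS) {
		perf_log_throttle_model(event, 1);
		event->hw.interrupts = 0;
	}
}

int main(void)
{
	struct perf_event ev = { .hw = { .interrupts = MAX_INTERRUPTS } };

	event_sched_in_model(&ev);   /* logs the unthrottle, resets the count */
	printf("interrupts now %llu\n", ev.hw.interrupts);
	return 0;
}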
diff --git a/kernel/printk.c b/kernel/printk.c
index 2ddbdc73aade..36231525e22f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -262,25 +262,47 @@ int dmesg_restrict = 1;
 int dmesg_restrict;
 #endif
 
+static int syslog_action_restricted(int type)
+{
+	if (dmesg_restrict)
+		return 1;
+	/* Unless restricted, we allow "read all" and "get buffer size" for everybody */
+	return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+	/*
+	 * If this is from /proc/kmsg and we've already opened it, then we've
+	 * already done the capabilities checks at open time.
+	 */
+	if (from_file && type != SYSLOG_ACTION_OPEN)
+		return 0;
+
+	if (syslog_action_restricted(type)) {
+		if (capable(CAP_SYSLOG))
+			return 0;
+		/* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+		if (capable(CAP_SYS_ADMIN)) {
+			WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+				  "but no CAP_SYSLOG (deprecated).\n");
+			return 0;
+		}
+		return -EPERM;
+	}
+	return 0;
+}
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
 	int do_clear = 0;
 	char c;
-	int error = 0;
+	int error;
 
-	/*
-	 * If this is from /proc/kmsg we only do the capabilities checks
-	 * at open time.
-	 */
-	if (type == SYSLOG_ACTION_OPEN || !from_file) {
-		if (dmesg_restrict && !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-		if ((type != SYSLOG_ACTION_READ_ALL &&
-		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
-		    !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-	}
+	error = check_syslog_permissions(type, from_file);
+	if (error)
+		goto out;
 
 	error = security_syslog(type);
 	if (error)
@@ -423,12 +445,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 	}
 out:
 	return error;
-warn:
-	/* remove after 2.6.39 */
-	if (capable(CAP_SYS_ADMIN))
-		WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-			"but no CAP_SYSLOG (deprecated and denied).\n");
-	return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
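The printk change factors the permission logic into two helpers so the /proc/kmsg and syslog(2) paths share one policy: reading the whole buffer and querying its size stay unprivileged unless dmesg_restrict is set, CAP_SYSLOG grants everything, and CAP_SYS_ADMIN is still accepted with a deprecation warning. A self-contained userspace model of that decision tree follows; capabilities and the sysctl are reduced to booleans, and the *_model names are hypothetical, though the SYSLOG_ACTION_* values match <linux/syslog.h>.

/*
 * Self-contained model of the refactored syslog permission policy.
 */
#include <stdbool.h>
#include <stdio.h>

enum {
	SYSLOG_ACTION_OPEN        = 1,
	SYSLOG_ACTION_READ_ALL    = 3,
	SYSLOG_ACTION_SIZE_BUFFER = 10,
};
#define EPERM 1

static bool dmesg_restrict_model;
static bool has_cap_syslog;
static bool has_cap_sys_admin;

static int syslog_action_restricted(int type)
{
	if (dmesg_restrict_model)
		return 1;
	/* "read all" and "get buffer size" stay unprivileged */
	return type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, bool from_file)
{
	/* /proc/kmsg readers were already checked at open time */
	if (from_file && type != SYSLOG_ACTION_OPEN)
		return 0;

	if (syslog_action_restricted(type)) {
		if (has_cap_syslog)
			return 0;
		if (has_cap_sys_admin) {   /* legacy fallback, warned about */
			fprintf(stderr, "CAP_SYS_ADMIN without CAP_SYSLOG is deprecated\n");
			return 0;
		}
		return -EPERM;
	}
	return 0;
}

int main(void)
{
	printf("READ_ALL, unprivileged:       %d\n",
	       check_syslog_permissions(SYSLOG_ACTION_READ_ALL, false));
	dmesg_restrict_model = true;
	printf("READ_ALL with dmesg_restrict: %d\n",
	       check_syslog_permissions(SYSLOG_ACTION_READ_ALL, false));
	return 0;
}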
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 99bbaa3e5b0d..1708b1e2972d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
 		child->exit_code = data;
 		dead = __ptrace_detach(current, child);
 		if (!child->exit_state)
-			wake_up_process(child);
+			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
 	}
 	write_unlock_irq(&tasklist_lock);
 
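wake_up_process() wakes a task out of any normal sleep state, so on detach it could spuriously wake a child that had simply gone to sleep; wake_up_state() takes an explicit state mask and therefore only wakes the child if it is still stopped or traced. A small sketch of that mask check follows, with illustrative state bit values and a drastically simplified stand-in for try_to_wake_up().

/*
 * Sketch of the state-mask distinction (names and values illustrative).
 */
#include <stdio.h>

#define TASK_RUNNING         0x0000
#define TASK_INTERRUPTIBLE   0x0001
#define TASK_UNINTERRUPTIBLE 0x0002
#define TASK_STOPPED         0x0004
#define TASK_TRACED          0x0008

struct task { unsigned int state; };

/* wake the task only if its current state matches the mask */
static int wake_up_state_model(struct task *t, unsigned int mask)
{
	if (!(t->state & mask))
		return 0;   /* not in a state this wakeup targets */
	t->state = TASK_RUNNING;
	return 1;
}

int main(void)
{
	struct task traced   = { .state = TASK_TRACED };
	struct task sleeping = { .state = TASK_INTERRUPTIBLE };

	printf("traced child woken:   %d\n",
	       wake_up_state_model(&traced, TASK_TRACED | TASK_STOPPED));
	printf("sleeping child woken: %d\n",
	       wake_up_state_model(&sleeping, TASK_TRACED | TASK_STOPPED));
	return 0;
}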
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 32a19f9397fc..3258455549f4 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym)
 	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name((unsigned long)sym, symname) < 0)
-		SEQ_printf(m, "<%p>", sym);
+		SEQ_printf(m, "<%pK>", sym);
 	else
 		SEQ_printf(m, "%s", symname);
 }
@@ -112,7 +112,7 @@ next_one:
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
-	SEQ_printf(m, " .base: %p\n", base);
+	SEQ_printf(m, " .base: %pK\n", base);
 	SEQ_printf(m, " .index: %d\n",
 			base->index);
 	SEQ_printf(m, " .resolution: %Lu nsecs\n",
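%pK is the printk extension that hides kernel pointers from unprivileged readers according to the kptr_restrict sysctl, printing a null pointer instead of the real address. A rough userspace model of that policy decision follows; kptr_restrict_model and reader_is_privileged stand in for the sysctl and the capability check, and plain printf of course has no %pK of its own.

/*
 * Rough userspace model of the %pK policy.
 */
#include <stdbool.h>
#include <stdio.h>

static int kptr_restrict_model = 1;
static bool reader_is_privileged;

static void print_kernel_pointer(const void *p)
{
	if (kptr_restrict_model && !reader_is_privileged)
		printf("%p\n", (void *)0);   /* hide the real address */
	else
		printf("%p\n", p);
}

int main(void)
{
	int x;

	print_kernel_pointer(&x);    /* restricted: prints a null pointer */
	reader_is_privileged = true;
	print_kernel_pointer(&x);    /* privileged: prints the real address */
	return 0;
}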
diff --git a/kernel/timer.c b/kernel/timer.c
index d53ce66daea0..d6459923d245 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  *
  * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * hardirq contexts. The caller must not hold locks which would prevent
+ * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
@@ -971,12 +971,10 @@ int del_timer_sync(struct timer_list *timer)
 #ifdef CONFIG_LOCKDEP
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
-	local_bh_disable();
+	local_irq_save(flags);
 	lock_map_acquire(&timer->lockdep_map);
 	lock_map_release(&timer->lockdep_map);
-	_local_bh_enable();
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 #endif
 	/*
 	 * don't use it in hardirq context, because it
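del_timer_sync()'s lockdep annotation momentarily acquires and releases the timer's virtual lock class so that lockdep records "this caller can wait on the timer handler" on every call, catching handler/caller lock inversions even when the race never fires; the hunk above only changes how interrupts are masked around that annotation. A pthread-based sketch of the annotation pattern itself, with all names illustrative:

/*
 * Pthread sketch of what lock_map_acquire()/lock_map_release() express.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t timer_lockdep_map = PTHREAD_MUTEX_INITIALIZER;

static void del_timer_sync_model(void)
{
	/* record "we may block until the handler finishes" */
	pthread_mutex_lock(&timer_lockdep_map);
	pthread_mutex_unlock(&timer_lockdep_map);
	/* ... the actual wait-for-handler loop would follow ... */
}

int main(void)
{
	del_timer_sync_model();
	printf("annotation recorded\n");
	return 0;
}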
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 153562d0b93c..d95721f33702 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
 		     !blk_tracer_enabled))
 		return;
 
+	/*
+	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+	 * message to the trace.
+	 */
+	if (!(bt->act_mask & BLK_TC_NOTIFY))
+		return;
+
 	local_irq_save(flags);
 	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
 	va_start(args, fmt);
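The added check makes note messages honor the same user-selected action mask as ordinary trace events: unless the BLK_TC_NOTIFY bit was requested, __trace_note_message() now returns early. A compact userspace model of that gate follows; struct blk_trace is pared down, and the BLK_TC_NOTIFY bit value is assumed here to match the blktrace ABI.

/*
 * Compact model of the notify-message gate added above.
 */
#include <stdio.h>

#define BLK_TC_NOTIFY (1 << 10)

struct blk_trace { unsigned int act_mask; };

static void trace_note_message_model(struct blk_trace *bt, const char *msg)
{
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;   /* notify events not requested: drop the note */
	printf("note: %s\n", msg);
}

int main(void)
{
	struct blk_trace bt = { .act_mask = 0 };

	trace_note_message_model(&bt, "dropped");     /* filtered out */
	bt.act_mask |= BLK_TC_NOTIFY;
	trace_note_message_model(&bt, "delivered");   /* printed */
	return 0;
}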