Diffstat (limited to 'kernel')

-rw-r--r--  kernel/trace/Kconfig  10
-rw-r--r--  kernel/trace/trace.c  23
-rw-r--r--  kernel/user.c         67
3 files changed, 55 insertions(+), 45 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4a13e5a01ce3..61071fecc82e 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -147,7 +147,7 @@ config IRQSOFF_TRACER
 	  disabled by default and can be runtime (re-)started
 	  via:
 
-	      echo 0 > /debugfs/tracing/tracing_max_latency
+	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
 	  (Note that kernel size and overhead increases with this option
 	  enabled. This option and the preempt-off timing option can be
@@ -168,7 +168,7 @@ config PREEMPT_TRACER
 	  disabled by default and can be runtime (re-)started
 	  via:
 
-	      echo 0 > /debugfs/tracing/tracing_max_latency
+	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 
 	  (Note that kernel size and overhead increases with this option
 	  enabled. This option and the irqs-off timing option can be
@@ -261,7 +261,7 @@ config PROFILE_ANNOTATED_BRANCHES
 	  This tracer profiles all the the likely and unlikely macros
 	  in the kernel. It will display the results in:
 
-	  /debugfs/tracing/profile_annotated_branch
+	  /sys/kernel/debug/tracing/profile_annotated_branch
 
 	  Note: this will add a significant overhead, only turn this
 	  on if you need to profile the system's use of these macros.
@@ -274,7 +274,7 @@ config PROFILE_ALL_BRANCHES
 	  taken in the kernel is recorded whether it hit or miss.
 	  The results will be displayed in:
 
-	  /debugfs/tracing/profile_branch
+	  /sys/kernel/debug/tracing/profile_branch
 
 	  This option also enables the likely/unlikely profiler.
 
@@ -323,7 +323,7 @@ config STACK_TRACER
 	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
-	  kernel and displays it in debugfs/tracing/stack_trace.
+	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.
 
 	  This tracer works by hooking into every function call that the
 	  kernel executes, and keeping a maximum stack depth value and
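All five hunks make the same documentation fix: the help-text examples move from the old ad-hoc /debugfs mount point to the standard /sys/kernel/debug one. The echo one-liners in the help text map directly onto an open/write of the new path; a minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and the program runs as root:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* reset the saved max latency, as the Kconfig help text describes */
	int fd = open("/sys/kernel/debug/tracing/tracing_max_latency",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "0", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}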
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8acd9b81a5d7..c1878bfb2e1e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -344,7 +344,7 @@ static raw_spinlock_t ftrace_max_lock =
 /*
  * Copy the new maximum trace into the separate maximum-trace
  * structure. (this way the maximum trace is permanently saved,
- * for later retrieval via /debugfs/tracing/latency_trace)
+ * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
  */
 static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -2414,21 +2414,20 @@ static const struct file_operations tracing_iter_fops = {
 
 static const char readme_msg[] =
 	"tracing mini-HOWTO:\n\n"
-	"# mkdir /debug\n"
-	"# mount -t debugfs nodev /debug\n\n"
-	"# cat /debug/tracing/available_tracers\n"
+	"# mount -t debugfs nodev /sys/kernel/debug\n\n"
+	"# cat /sys/kernel/debug/tracing/available_tracers\n"
 	"wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
-	"# cat /debug/tracing/current_tracer\n"
+	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"nop\n"
-	"# echo sched_switch > /debug/tracing/current_tracer\n"
-	"# cat /debug/tracing/current_tracer\n"
+	"# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+	"# cat /sys/kernel/debug/tracing/current_tracer\n"
 	"sched_switch\n"
-	"# cat /debug/tracing/trace_options\n"
+	"# cat /sys/kernel/debug/tracing/trace_options\n"
 	"noprint-parent nosym-offset nosym-addr noverbose\n"
-	"# echo print-parent > /debug/tracing/trace_options\n"
-	"# echo 1 > /debug/tracing/tracing_enabled\n"
-	"# cat /debug/tracing/trace > /tmp/trace.txt\n"
-	"# echo 0 > /debug/tracing/tracing_enabled\n"
+	"# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
+	"# echo 1 > /sys/kernel/debug/tracing/tracing_enabled\n"
+	"# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
+	"# echo 0 > /sys/kernel/debug/tracing/tracing_enabled\n"
 ;
 
 static ssize_t
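The readme_msg hunk rewrites the embedded mini-HOWTO for the same mount-point move and drops the mkdir step, since /sys/kernel/debug already exists on any system with sysfs. The hunk contains only the string itself; as a sketch, such a static help blob is usually served through a trivial read file operation (the real handler and registration are outside this hunk, so the names below are assumptions):

/* assumed context: readme_msg from the hunk above */
static ssize_t
readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	/* copy out whatever portion of the fixed string fits the request */
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations readme_fops = {
	.read = readme_read,
};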
diff --git a/kernel/user.c b/kernel/user.c
index 850e0ba41c1e..2c000e7132ac 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -75,21 +75,6 @@ static void uid_hash_remove(struct user_struct *up)
 	put_user_ns(up->user_ns);
 }
 
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
-{
-	struct user_struct *user;
-	struct hlist_node *h;
-
-	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (user->uid == uid) {
-			atomic_inc(&user->__count);
-			return user;
-		}
-	}
-
-	return NULL;
-}
-
 #ifdef CONFIG_USER_SCHED
 
 static void sched_destroy_user(struct user_struct *up)
@@ -119,6 +104,23 @@ static int sched_create_user(struct user_struct *up) { return 0; }
 
 #if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)
 
+static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+{
+	struct user_struct *user;
+	struct hlist_node *h;
+
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
+		if (user->uid == uid) {
+			/* possibly resurrect an "almost deleted" object */
+			if (atomic_inc_return(&user->__count) == 1)
+				cancel_delayed_work(&user->work);
+			return user;
+		}
+	}
+
+	return NULL;
+}
+
 static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
 static DEFINE_MUTEX(uids_mutex);
 
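The relocated uid_hash_find() is no longer a plain refcount bump. atomic_inc_return() yielding 1 means the count was zero when the lookup raced in: the final put has already run and a deferred teardown is pending, so the lookup resurrects the "almost deleted" object by cancelling that work. The idiom in isolation, as a sketch with hypothetical names (cached_obj, cache_lock and friends are illustrations, not kernel/user.c):

#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct cached_obj {
	atomic_t refcnt;		/* may sit at 0 while still hashed */
	struct delayed_work reaper;	/* queued by the final put */
};

static DEFINE_SPINLOCK(cache_lock);

/* called with cache_lock held, mirroring uid_hash_find() above */
static struct cached_obj *cache_get_locked(struct cached_obj *obj)
{
	/* a 0 -> 1 transition means the final put already queued the
	 * reaper; call it off to resurrect the object */
	if (atomic_inc_return(&obj->refcnt) == 1)
		cancel_delayed_work(&obj->reaper);
	return obj;
}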
| @@ -283,12 +285,12 @@ int __init uids_sysfs_init(void) | |||
| 283 | return uids_user_create(&root_user); | 285 | return uids_user_create(&root_user); |
| 284 | } | 286 | } |
| 285 | 287 | ||
| 286 | /* work function to remove sysfs directory for a user and free up | 288 | /* delayed work function to remove sysfs directory for a user and free up |
| 287 | * corresponding structures. | 289 | * corresponding structures. |
| 288 | */ | 290 | */ |
| 289 | static void cleanup_user_struct(struct work_struct *w) | 291 | static void cleanup_user_struct(struct work_struct *w) |
| 290 | { | 292 | { |
| 291 | struct user_struct *up = container_of(w, struct user_struct, work); | 293 | struct user_struct *up = container_of(w, struct user_struct, work.work); |
| 292 | unsigned long flags; | 294 | unsigned long flags; |
| 293 | int remove_user = 0; | 295 | int remove_user = 0; |
| 294 | 296 | ||
| @@ -297,15 +299,12 @@ static void cleanup_user_struct(struct work_struct *w) | |||
| 297 | */ | 299 | */ |
| 298 | uids_mutex_lock(); | 300 | uids_mutex_lock(); |
| 299 | 301 | ||
| 300 | local_irq_save(flags); | 302 | spin_lock_irqsave(&uidhash_lock, flags); |
| 301 | 303 | if (atomic_read(&up->__count) == 0) { | |
| 302 | if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) { | ||
| 303 | uid_hash_remove(up); | 304 | uid_hash_remove(up); |
| 304 | remove_user = 1; | 305 | remove_user = 1; |
| 305 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
| 306 | } else { | ||
| 307 | local_irq_restore(flags); | ||
| 308 | } | 306 | } |
| 307 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
| 309 | 308 | ||
| 310 | if (!remove_user) | 309 | if (!remove_user) |
| 311 | goto done; | 310 | goto done; |
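With the count now dropped before the work is ever scheduled, cleanup_user_struct() stops decrementing anything itself: it only re-checks the count under uidhash_lock and backs off if a lookup resurrected the object in the meantime, and the container_of() gains a .work step because the callback now lives inside a struct delayed_work. The reaper side of the sketch above:

/* continuing the sketch: runs ~1s after the final put, unless cancelled */
static void cache_reap(struct work_struct *w)
{
	struct cached_obj *obj =
		container_of(w, struct cached_obj, reaper.work);
	bool gone = false;

	spin_lock(&cache_lock);
	if (atomic_read(&obj->refcnt) == 0) {
		/* still dead: detach it from the lookup structure here */
		gone = true;
	}
	spin_unlock(&cache_lock);

	if (gone)
		kfree(obj);	/* free only outside the lock */
}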
| @@ -331,16 +330,28 @@ done: | |||
| 331 | */ | 330 | */ |
| 332 | static void free_user(struct user_struct *up, unsigned long flags) | 331 | static void free_user(struct user_struct *up, unsigned long flags) |
| 333 | { | 332 | { |
| 334 | /* restore back the count */ | ||
| 335 | atomic_inc(&up->__count); | ||
| 336 | spin_unlock_irqrestore(&uidhash_lock, flags); | 333 | spin_unlock_irqrestore(&uidhash_lock, flags); |
| 337 | 334 | INIT_DELAYED_WORK(&up->work, cleanup_user_struct); | |
| 338 | INIT_WORK(&up->work, cleanup_user_struct); | 335 | schedule_delayed_work(&up->work, msecs_to_jiffies(1000)); |
| 339 | schedule_work(&up->work); | ||
| 340 | } | 336 | } |
| 341 | 337 | ||
| 342 | #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ | 338 | #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */ |
| 343 | 339 | ||
| 340 | static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) | ||
| 341 | { | ||
| 342 | struct user_struct *user; | ||
| 343 | struct hlist_node *h; | ||
| 344 | |||
| 345 | hlist_for_each_entry(user, h, hashent, uidhash_node) { | ||
| 346 | if (user->uid == uid) { | ||
| 347 | atomic_inc(&user->__count); | ||
| 348 | return user; | ||
| 349 | } | ||
| 350 | } | ||
| 351 | |||
| 352 | return NULL; | ||
| 353 | } | ||
| 354 | |||
| 344 | int uids_sysfs_init(void) { return 0; } | 355 | int uids_sysfs_init(void) { return 0; } |
| 345 | static inline int uids_user_create(struct user_struct *up) { return 0; } | 356 | static inline int uids_user_create(struct user_struct *up) { return 0; } |
| 346 | static inline void uids_mutex_lock(void) { } | 357 | static inline void uids_mutex_lock(void) { } |
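free_user() completes the scheme. The final 1 -> 0 drop (made by the caller via atomic_dec_and_lock() on uidhash_lock) no longer restores the count or unhashes anything; it just queues the delayed work with a one-second grace period in which uid_hash_find() can win and resurrect the user_struct. The #else branch keeps the plain atomic_inc() lookup because without the sysfs directories there is no deferred teardown to race against. The put side of the sketch, mirroring that flow:

/* completing the sketch: take the lock only on the final 1 -> 0 drop,
 * leave the object in its hash, and defer the real teardown */
static void cache_put(struct cached_obj *obj)
{
	if (!atomic_dec_and_lock(&obj->refcnt, &cache_lock))
		return;
	spin_unlock(&cache_lock);

	INIT_DELAYED_WORK(&obj->reaper, cache_reap);
	schedule_delayed_work(&obj->reaper, msecs_to_jiffies(1000));
}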
