Diffstat (limited to 'kernel')
-rw-r--r--	kernel/compat.c		|  8
-rw-r--r--	kernel/fork.c		| 42
-rw-r--r--	kernel/hrtimer.c	|  2
-rw-r--r--	kernel/irq/proc.c	| 54
-rw-r--r--	kernel/mutex.c		| 25
-rw-r--r--	kernel/posix-timers.c	| 25
-rw-r--r--	kernel/printk.c		| 87
-rw-r--r--	kernel/sysctl.c		|  2
8 files changed, 185 insertions(+), 60 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 9214dcd087b7..fc9eb093acd5 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -293,6 +293,8 @@ asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
 	return compat_jiffies_to_clock_t(jiffies);
 }
 
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+
 /*
  * Assumption: old_sigset_t and compat_old_sigset_t are both
  * types that can be passed to put_user()/get_user().
@@ -312,6 +314,10 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
 	return ret;
 }
 
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+
 asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
 		compat_old_sigset_t __user *oset)
 {
@@ -333,6 +339,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
 	return ret;
 }
 
+#endif
+
 asmlinkage long compat_sys_setrlimit(unsigned int resource,
 		struct compat_rlimit __user *rlim)
 {
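
The guards above compile the legacy compat entry points only on architectures that still provide the corresponding native syscalls. An architecture opts in from its unistd.h; a minimal sketch (the exact header path varies per arch and is illustrative only):

	/* arch/<arch>/include/asm/unistd.h -- illustrative placement */
	#define __ARCH_WANT_SYS_SIGPENDING	/* keep sys_sigpending + its compat wrapper */
	#define __ARCH_WANT_SYS_SIGPROCMASK	/* keep sys_sigprocmask + its compat wrapper */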
diff --git a/kernel/fork.c b/kernel/fork.c
index 2b44d82b8237..8e7e135d0817 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -383,15 +383,14 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-			spin_lock(&mapping->i_mmap_lock);
+			mutex_lock(&mapping->i_mmap_mutex);
 			if (tmp->vm_flags & VM_SHARED)
 				mapping->i_mmap_writable++;
-			tmp->vm_truncate_count = mpnt->vm_truncate_count;
 			flush_dcache_mmap_lock(mapping);
 			/* insert tmp into the share list, just after mpnt */
 			vma_prio_tree_add(tmp, mpnt);
 			flush_dcache_mmap_unlock(mapping);
-			spin_unlock(&mapping->i_mmap_lock);
+			mutex_unlock(&mapping->i_mmap_mutex);
 		}
 
 		/*
@@ -486,6 +485,20 @@ static void mm_init_aio(struct mm_struct *mm)
 #endif
 }
 
+int mm_init_cpumask(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!alloc_cpumask_var(&mm->cpu_vm_mask_var, GFP_KERNEL))
+		return -ENOMEM;
+
+	if (oldmm)
+		cpumask_copy(mm_cpumask(mm), mm_cpumask(oldmm));
+	else
+		memset(mm_cpumask(mm), 0, cpumask_size());
+#endif
+	return 0;
+}
+
 static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 {
 	atomic_set(&mm->mm_users, 1);
@@ -522,10 +535,20 @@ struct mm_struct * mm_alloc(void)
 	struct mm_struct * mm;
 
 	mm = allocate_mm();
-	if (mm) {
-		memset(mm, 0, sizeof(*mm));
-		mm = mm_init(mm, current);
+	if (!mm)
+		return NULL;
+
+	memset(mm, 0, sizeof(*mm));
+	mm = mm_init(mm, current);
+	if (!mm)
+		return NULL;
+
+	if (mm_init_cpumask(mm, NULL)) {
+		mm_free_pgd(mm);
+		free_mm(mm);
+		return NULL;
 	}
+
 	return mm;
 }
 
@@ -537,6 +560,7 @@ struct mm_struct * mm_alloc(void)
 void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
+	free_cpumask_var(mm->cpu_vm_mask_var);
 	mm_free_pgd(mm);
 	destroy_context(mm);
 	mmu_notifier_mm_destroy(mm);
@@ -691,6 +715,9 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
+	if (mm_init_cpumask(mm, oldmm))
+		goto fail_nocpumask;
+
 	if (init_new_context(tsk, mm))
 		goto fail_nocontext;
 
@@ -717,6 +744,9 @@ fail_nomem:
 	return NULL;
 
 fail_nocontext:
+	free_cpumask_var(mm->cpu_vm_mask_var);
+
+fail_nocpumask:
 	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
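
The new failure path exists only because CONFIG_CPUMASK_OFFSTACK turns the per-mm CPU mask into a separately allocated object. A simplified model of the cpumask_var_t type behind alloc_cpumask_var() (the real definitions live in include/linux/cpumask.h):

	/* Simplified model of cpumask_var_t. With very large NR_CPUS configs
	 * the mask is kept off-stack and allocated at runtime, which is the
	 * only case where mm_init_cpumask() above can fail. */
	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;		/* heap pointer; alloc_cpumask_var() may fail */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* embedded array; "allocation" always succeeds */
	#endif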
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c541ee527ecb..a9205e32a059 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -748,7 +748,7 @@ static inline void retrigger_next_event(void *arg) { }
  */
 void clock_was_set(void)
 {
-#ifdef CONFIG_HIGHRES_TIMERS
+#ifdef CONFIG_HIGH_RES_TIMERS
 	/* Retrigger the CPU local events everywhere */
 	on_each_cpu(retrigger_next_event, NULL, 1);
 #endif
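
A one-character-class fix, but worth noting why it never broke the build: the preprocessor treats an unknown CONFIG_ symbol as simply undefined, so the misspelled guard silently compiled the retrigger call out on every configuration. An illustration of the failure mode:

	/* An #ifdef on a misspelled Kconfig symbol is not an error -- the
	 * guarded block is silently dropped on all configs. */
	#ifdef CONFIG_HIGHRES_TIMERS			/* no such option exists */
		on_each_cpu(retrigger_next_event, NULL, 1);	/* dead code before the fix */
	#endif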
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 834899f2500f..64e3df6ab1ef 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,7 @@ static struct proc_dir_entry *root_irq_dir;
 
 #ifdef CONFIG_SMP
 
-static int irq_affinity_proc_show(struct seq_file *m, void *v)
+static int show_irq_affinity(int type, struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
 	const struct cpumask *mask = desc->irq_data.affinity;
@@ -28,7 +28,10 @@ static int irq_affinity_proc_show(struct seq_file *m, void *v)
 	if (irqd_is_setaffinity_pending(&desc->irq_data))
 		mask = desc->pending_mask;
 #endif
-	seq_cpumask(m, mask);
+	if (type)
+		seq_cpumask_list(m, mask);
+	else
+		seq_cpumask(m, mask);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -59,7 +62,18 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
 #endif
 
 int no_irq_affinity;
-static ssize_t irq_affinity_proc_write(struct file *file,
+static int irq_affinity_proc_show(struct seq_file *m, void *v)
+{
+	return show_irq_affinity(0, m, v);
+}
+
+static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
+{
+	return show_irq_affinity(1, m, v);
+}
+
+
+static ssize_t write_irq_affinity(int type, struct file *file,
 		const char __user *buffer, size_t count, loff_t *pos)
 {
 	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
@@ -72,7 +86,10 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
 		return -ENOMEM;
 
-	err = cpumask_parse_user(buffer, count, new_value);
+	if (type)
+		err = cpumask_parselist_user(buffer, count, new_value);
+	else
+		err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
 		goto free_cpumask;
 
@@ -100,11 +117,28 @@ free_cpumask:
 	return err;
 }
 
+static ssize_t irq_affinity_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	return write_irq_affinity(0, file, buffer, count, pos);
+}
+
+static ssize_t irq_affinity_list_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	return write_irq_affinity(1, file, buffer, count, pos);
+}
+
 static int irq_affinity_proc_open(struct inode *inode, struct file *file)
 {
 	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
 }
 
+static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, irq_affinity_list_proc_show, PDE(inode)->data);
+}
+
 static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
 {
 	return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
@@ -125,6 +159,14 @@ static const struct file_operations irq_affinity_hint_proc_fops = {
 	.release	= single_release,
 };
 
+static const struct file_operations irq_affinity_list_proc_fops = {
+	.open		= irq_affinity_list_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= irq_affinity_list_proc_write,
+};
+
 static int default_affinity_show(struct seq_file *m, void *v)
 {
 	seq_cpumask(m, irq_default_affinity);
@@ -289,6 +331,10 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
 	proc_create_data("affinity_hint", 0400, desc->dir,
 			 &irq_affinity_hint_proc_fops, (void *)(long)irq);
 
+	/* create /proc/irq/<irq>/smp_affinity_list */
+	proc_create_data("smp_affinity_list", 0600, desc->dir,
+			 &irq_affinity_list_proc_fops, (void *)(long)irq);
+
 	proc_create_data("node", 0444, desc->dir,
 			 &irq_node_proc_fops, (void *)(long)irq);
 #endif
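
The new smp_affinity_list file speaks the human-readable cpulist format ("0,2,4-6") rather than the hex bitmask of the existing smp_affinity file. A small userspace sketch; the IRQ number 45 is only an example, writing requires root, and the IRQ's chip must allow affinity changes:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/proc/irq/45/smp_affinity_list";
		char buf[64];
		FILE *f;

		f = fopen(path, "w");		/* pin the IRQ to CPUs 2 and 4-6 */
		if (!f || fputs("2,4-6\n", f) == EOF)
			return 1;
		fclose(f);

		f = fopen(path, "r");		/* read the setting back as a cpulist */
		if (f && fgets(buf, sizeof(buf), f))
			printf("smp_affinity_list: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}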
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 2c938e2337cd..d607ed5dd441 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,14 +131,14 @@ EXPORT_SYMBOL(mutex_unlock);
  */
 static inline int __sched
 __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
-		    unsigned long ip)
+		    struct lockdep_map *nest_lock, unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned long flags;
 
 	preempt_disable();
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 	/*
@@ -269,16 +269,25 @@ void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
+void __sched
+_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+	might_sleep();
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
+}
+
+EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
+
 int __sched
 mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
 }
 EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
 
@@ -287,7 +296,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
 	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
-				   subclass, _RET_IP_);
+				   subclass, NULL, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -393,7 +402,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
@@ -401,7 +410,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
 }
 
 static noinline int __sched
@@ -409,7 +418,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
 }
 #endif
 
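
_mutex_lock_nest_lock() feeds lockdep's nest_lock annotation: a group of mutexes that all share one lock class may be taken in bulk without triggering the same-class recursion warning, provided the task holds an agreed-upon outer lock. A hedged usage sketch, assuming the mutex_lock_nest_lock() convenience macro that the companion include/linux/mutex.h change wraps around this function:

	struct parent { struct mutex lock; };
	struct child  { struct mutex lock; };	/* all instances share one class */

	static void lock_all_children(struct parent *p, struct child *c, int n)
	{
		int i;

		mutex_lock(&p->lock);	/* the outer lock that serializes the group */
		for (i = 0; i < n; i++)
			/* tells lockdep: nesting is safe because p->lock is held */
			mutex_lock_nest_lock(&c[i].lock, &p->lock);
	}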
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index a1b5edf1bf92..4556182527f3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -491,6 +491,13 @@ static struct k_itimer * alloc_posix_timer(void)
 	return tmr;
 }
 
+static void k_itimer_rcu_free(struct rcu_head *head)
+{
+	struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
+
+	kmem_cache_free(posix_timers_cache, tmr);
+}
+
 #define IT_ID_SET	1
 #define IT_ID_NOT_SET	0
 static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
@@ -503,7 +510,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 	}
 	put_pid(tmr->it_pid);
 	sigqueue_free(tmr->sigq);
-	kmem_cache_free(posix_timers_cache, tmr);
+	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
 }
 
 static struct k_clock *clockid_to_kclock(const clockid_t id)
@@ -631,22 +638,18 @@ out:
 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
-	/*
-	 * Watch out here. We do a irqsave on the idr_lock and pass the
-	 * flags part over to the timer lock. Must not let interrupts in
-	 * while we are moving the lock.
-	 */
-	spin_lock_irqsave(&idr_lock, *flags);
+
+	rcu_read_lock();
 	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
-		spin_lock(&timr->it_lock);
+		spin_lock_irqsave(&timr->it_lock, *flags);
 		if (timr->it_signal == current->signal) {
-			spin_unlock(&idr_lock);
+			rcu_read_unlock();
 			return timr;
 		}
-		spin_unlock(&timr->it_lock);
+		spin_unlock_irqrestore(&timr->it_lock, *flags);
 	}
-	spin_unlock_irqrestore(&idr_lock, *flags);
+	rcu_read_unlock();
 
 	return NULL;
 }
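
Lookup now runs under rcu_read_lock() instead of idr_lock, so a concurrent release_posix_timer() must not return the k_itimer to the slab until every reader that may have fetched it from idr_find() has finished taking it_lock and revalidating; that is what the call_rcu() deferral guarantees. The generic shape of the pattern, as a sketch:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static void obj_rcu_free(struct rcu_head *head)
	{
		/* runs only after a grace period: no rcu_read_lock() section
		 * that could still see the object remains */
		kfree(container_of(head, struct obj, rcu));
	}

	static void obj_release(struct obj *o)
	{
		/* caller must already have unpublished o (e.g. idr_remove()) */
		call_rcu(&o->rcu, obj_rcu_free);
	}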
diff --git a/kernel/printk.c b/kernel/printk.c
index da8ca817eae3..35185392173f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -31,6 +31,7 @@
 #include <linux/smp.h>
 #include <linux/security.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/syscalls.h>
 #include <linux/kexec.h>
 #include <linux/kdb.h>
@@ -167,46 +168,74 @@ void log_buf_kexec_setup(void)
 }
 #endif
 
+/* requested log_buf_len from kernel cmdline */
+static unsigned long __initdata new_log_buf_len;
+
+/* save requested log_buf_len since it's too early to process it */
 static int __init log_buf_len_setup(char *str)
 {
 	unsigned size = memparse(str, &str);
-	unsigned long flags;
 
 	if (size)
 		size = roundup_pow_of_two(size);
-	if (size > log_buf_len) {
-		unsigned start, dest_idx, offset;
-		char *new_log_buf;
-
-		new_log_buf = alloc_bootmem(size);
-		if (!new_log_buf) {
-			printk(KERN_WARNING "log_buf_len: allocation failed\n");
-			goto out;
-		}
-
-		spin_lock_irqsave(&logbuf_lock, flags);
-		log_buf_len = size;
-		log_buf = new_log_buf;
-
-		offset = start = min(con_start, log_start);
-		dest_idx = 0;
-		while (start != log_end) {
-			log_buf[dest_idx] = __log_buf[start & (__LOG_BUF_LEN - 1)];
-			start++;
-			dest_idx++;
-		}
-		log_start -= offset;
-		con_start -= offset;
-		log_end -= offset;
-		spin_unlock_irqrestore(&logbuf_lock, flags);
-
-		printk(KERN_NOTICE "log_buf_len: %d\n", log_buf_len);
-	}
-out:
-	return 1;
-}
-
-__setup("log_buf_len=", log_buf_len_setup);
+	if (size > log_buf_len)
+		new_log_buf_len = size;
+
+	return 0;
+}
+early_param("log_buf_len", log_buf_len_setup);
+
+void __init setup_log_buf(int early)
+{
+	unsigned long flags;
+	unsigned start, dest_idx, offset;
+	char *new_log_buf;
+	int free;
+
+	if (!new_log_buf_len)
+		return;
+
+	if (early) {
+		unsigned long mem;
+
+		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
+		if (mem == MEMBLOCK_ERROR)
+			return;
+		new_log_buf = __va(mem);
+	} else {
+		new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
+	}
+
+	if (unlikely(!new_log_buf)) {
+		pr_err("log_buf_len: %ld bytes not available\n",
+			new_log_buf_len);
+		return;
+	}
+
+	spin_lock_irqsave(&logbuf_lock, flags);
+	log_buf_len = new_log_buf_len;
+	log_buf = new_log_buf;
+	new_log_buf_len = 0;
+	free = __LOG_BUF_LEN - log_end;
+
+	offset = start = min(con_start, log_start);
+	dest_idx = 0;
+	while (start != log_end) {
+		unsigned log_idx_mask = start & (__LOG_BUF_LEN - 1);
+
+		log_buf[dest_idx] = __log_buf[log_idx_mask];
+		start++;
+		dest_idx++;
+	}
+	log_start -= offset;
+	con_start -= offset;
+	log_end -= offset;
+	spin_unlock_irqrestore(&logbuf_lock, flags);
+
+	pr_info("log_buf_len: %d\n", log_buf_len);
+	pr_info("early log buf free: %d(%d%%)\n",
+		free, (free * 100) / __LOG_BUF_LEN);
+}
 
 #ifdef CONFIG_BOOT_PRINTK_DELAY
 
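
log_buf_len= is now an early_param that merely records the request (memparse() accepts K/M/G suffixes, e.g. "log_buf_len=1M", and the size is rounded up to a power of two); the actual reallocation happens when an architecture calls setup_log_buf(). A hedged call-site sketch; the function name and exact placement are illustrative only, though the early==1 path assumes memblock is already usable:

	void __init example_setup_arch(void)
	{
		parse_early_param();	/* "log_buf_len=1M" lands in new_log_buf_len */
		/* ... memblock initialized from e820 / device tree ... */
		setup_log_buf(1);	/* early: take the new buffer from memblock */
	}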
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4bffd62c2f13..4fc92445a29c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1506,7 +1506,7 @@ static struct ctl_table fs_table[] = {
 
 static struct ctl_table debug_table[] = {
 #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || \
-    defined(CONFIG_S390)
+    defined(CONFIG_S390) || defined(CONFIG_TILE)
 	{
 		.procname	= "exception-trace",
 		.data		= &show_unhandled_signals,