Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile      |   2
-rw-r--r--  kernel/cpuset.c      |  24
-rw-r--r--  kernel/irq/handle.c  |   3
-rw-r--r--  kernel/itimer.c      |   6
-rw-r--r--  kernel/kallsyms.c    |  13
-rw-r--r--  kernel/kprobes.c     | 142
-rw-r--r--  kernel/module.c      |   6
-rw-r--r--  kernel/power/main.c  |   6
-rw-r--r--  kernel/printk.c      |  72
-rw-r--r--  kernel/profile.c     |  16
-rw-r--r--  kernel/sched.c       |   2
-rw-r--r--  kernel/signal.c      |  11
-rw-r--r--  kernel/spinlock.c    |   8
-rw-r--r--  kernel/sys.c         |   2

14 files changed, 240 insertions(+), 73 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index eb88b446c2cc..b01d26fe8db7 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_SYSFS) += ksysfs.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 
-ifneq ($(CONFIG_IA64),y)
+ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
 # needed for x86 only.  Why this used to be enabled for all architectures is beyond
 # me.  I suspect most platforms don't need this, but until we know that for sure
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 961d74044deb..00e8f2575512 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -166,9 +166,8 @@ static struct super_block *cpuset_sb = NULL;
  * The hooks from fork and exit, cpuset_fork() and cpuset_exit(), don't
  * (usually) grab cpuset_sem.  These are the two most performance
  * critical pieces of code here.  The exception occurs on exit(),
- * if the last task using a cpuset exits, and the cpuset was marked
- * notify_on_release.  In that case, the cpuset_sem is taken, the
- * path to the released cpuset calculated, and a usermode call made
+ * when a task in a notify_on_release cpuset exits.  Then cpuset_sem
+ * is taken, and if the cpuset count is zero, a usermode call made
  * to /sbin/cpuset_release_agent with the name of the cpuset (path
  * relative to the root of cpuset file system) as the argument.
  *
@@ -1404,6 +1403,18 @@ void cpuset_fork(struct task_struct *tsk)
  *
  * Description: Detach cpuset from @tsk and release it.
  *
+ * Note that cpusets marked notify_on_release force every task
+ * in them to take the global cpuset_sem semaphore when exiting.
+ * This could impact scaling on very large systems.  Be reluctant
+ * to use notify_on_release cpusets where very high task exit
+ * scaling is required on large systems.
+ *
+ * Don't even think about dereferencing 'cs' after the cpuset use
+ * count goes to zero, except inside a critical section guarded
+ * by the cpuset_sem semaphore.  If you don't hold cpuset_sem,
+ * then a zero cpuset use count is a license to any other task to
+ * nuke the cpuset immediately.
+ *
  **/
 
 void cpuset_exit(struct task_struct *tsk)
@@ -1415,10 +1426,13 @@ void cpuset_exit(struct task_struct *tsk)
        tsk->cpuset = NULL;
        task_unlock(tsk);
 
-       if (atomic_dec_and_test(&cs->count)) {
+       if (notify_on_release(cs)) {
                down(&cpuset_sem);
-               check_for_release(cs);
+               if (atomic_dec_and_test(&cs->count))
+                       check_for_release(cs);
                up(&cpuset_sem);
+       } else {
+               atomic_dec(&cs->count);
        }
 }
 
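For illustration only, here is a minimal userspace analogue (ours, not from the patch; the pthread/stdatomic framing is an assumption) of the rule the new comment spells out: the final reference may only be dropped inside the manager-lock critical section, because a zero count licenses any other thread to free the object.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int count;
        int notify_on_release;
};

static pthread_mutex_t manager_sem = PTHREAD_MUTEX_INITIALIZER;

static void put_obj(struct obj *o)
{
        if (o->notify_on_release) {
                /* slow path: hold the manager lock across the final
                 * decrement so the release check sees a stable object */
                pthread_mutex_lock(&manager_sem);
                if (atomic_fetch_sub(&o->count, 1) == 1)
                        free(o);        /* stands in for check_for_release() */
                pthread_mutex_unlock(&manager_sem);
        } else {
                /* fast path: never dereference 'o' after this decrement;
                 * a zero count lets any other thread free it under us */
                atomic_fetch_sub(&o->count, 1);
        }
}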
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 2fb0e46e11f3..436c7d93c00a 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -30,6 +30,7 @@
  */
 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
+               .status = IRQ_DISABLED,
                .handler = &no_irq_type,
                .lock = SPIN_LOCK_UNLOCKED
        }
@@ -118,8 +119,6 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
         */
        desc->handler->ack(irq);
        action_ret = handle_IRQ_event(irq, regs, desc->action);
-       if (!noirqdebug)
-               note_interrupt(irq, desc, action_ret);
        desc->handler->end(irq);
        return 1;
 }
diff --git a/kernel/itimer.c b/kernel/itimer.c
index e9a40e947e07..1dc988e0d2c7 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -123,7 +123,11 @@ static inline void it_real_arm(struct task_struct *p, unsigned long interval)
                return;
        if (interval > (unsigned long) LONG_MAX)
                interval = LONG_MAX;
-       p->signal->real_timer.expires = jiffies + interval;
+       /* the "+ 1" below makes sure that the timer doesn't go off before
+        * the interval requested. This could happen if
+        * time requested % (usecs per jiffy) is more than the usecs left
+        * in the current jiffy */
+       p->signal->real_timer.expires = jiffies + interval + 1;
        add_timer(&p->signal->real_timer);
 }
 
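A quick userspace illustration (ours, not part of the patch; HZ value assumed) of the rounding the new comment describes: timers fire on jiffy boundaries, so "jiffies + interval" can expire up to almost one full jiffy early, while "+ 1" guarantees at least the requested interval.

#include <stdio.h>

#define HZ 100                          /* assumed tick rate */
#define USEC_PER_JIFFY (1000000 / HZ)

/* worst-case sleep when the timer is armed just before the next tick:
 * nearly all of the current jiffy has already elapsed */
static long min_sleep_usec(long interval_jiffies, int plus_one)
{
        long ticks = interval_jiffies + (plus_one ? 1 : 0);
        return ticks * USEC_PER_JIFFY - (USEC_PER_JIFFY - 1);
}

int main(void)
{
        /* request 2 jiffies (20000 us at HZ=100) */
        printf("jiffies + interval    : >= %ld us\n", min_sleep_usec(2, 0));
        printf("jiffies + interval + 1: >= %ld us\n", min_sleep_usec(2, 1));
        return 0;       /* prints 10001 vs 20001: only the latter is never early */
}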
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 1627f8d6e0cd..13bcec151b57 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -46,6 +46,14 @@ static inline int is_kernel_inittext(unsigned long addr)
        return 0;
 }
 
+static inline int is_kernel_extratext(unsigned long addr)
+{
+       if (addr >= (unsigned long)_sextratext
+           && addr <= (unsigned long)_eextratext)
+               return 1;
+       return 0;
+}
+
 static inline int is_kernel_text(unsigned long addr)
 {
        if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
@@ -169,8 +177,9 @@ const char *kallsyms_lookup(unsigned long addr,
        namebuf[0] = 0;
 
        if ((all_var && is_kernel(addr)) ||
-           (!all_var && (is_kernel_text(addr) || is_kernel_inittext(addr)))) {
-               unsigned long symbol_end=0;
+           (!all_var && (is_kernel_text(addr) || is_kernel_inittext(addr) ||
+            is_kernel_extratext(addr)))) {
+               unsigned long symbol_end = 0;
 
                /* do a binary search on the sorted kallsyms_addresses array */
                low = 0;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1d5dd1337bd1..037142b72a49 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
+static struct kprobe *curr_kprobe;
 
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
@@ -73,22 +74,139 @@ struct kprobe *get_kprobe(void *addr)
        return NULL;
 }
 
+/*
+ * Aggregate handlers for multiple kprobes support - these handlers
+ * take care of invoking the individual kprobe handlers on p->list
+ */
+int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+       struct kprobe *kp;
+
+       list_for_each_entry(kp, &p->list, list) {
+               if (kp->pre_handler) {
+                       curr_kprobe = kp;
+                       kp->pre_handler(kp, regs);
+                       curr_kprobe = NULL;
+               }
+       }
+       return 0;
+}
+
+void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+                      unsigned long flags)
+{
+       struct kprobe *kp;
+
+       list_for_each_entry(kp, &p->list, list) {
+               if (kp->post_handler) {
+                       curr_kprobe = kp;
+                       kp->post_handler(kp, regs, flags);
+                       curr_kprobe = NULL;
+               }
+       }
+       return;
+}
+
+int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+       /*
+        * if we faulted "during" the execution of a user specified
+        * probe handler, invoke just that probe's fault handler
+        */
+       if (curr_kprobe && curr_kprobe->fault_handler) {
+               if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+                       return 1;
+       }
+       return 0;
+}
+
+/*
+ * Fill in the required fields of the "manager kprobe". Replace the
+ * earlier kprobe in the hlist with the manager kprobe
+ */
+static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+{
+       ap->addr = p->addr;
+       ap->opcode = p->opcode;
+       memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
+
+       ap->pre_handler = aggr_pre_handler;
+       ap->post_handler = aggr_post_handler;
+       ap->fault_handler = aggr_fault_handler;
+
+       INIT_LIST_HEAD(&ap->list);
+       list_add(&p->list, &ap->list);
+
+       INIT_HLIST_NODE(&ap->hlist);
+       hlist_del(&p->hlist);
+       hlist_add_head(&ap->hlist,
+                      &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+}
+
+/*
+ * This is the second or subsequent kprobe at the address - handle
+ * the intricacies
+ * TODO: Move kcalloc outside the spinlock
+ */
+static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+       int ret = 0;
+       struct kprobe *ap;
+
+       if (old_p->break_handler || p->break_handler) {
+               ret = -EEXIST;  /* kprobe and jprobe can't (yet) coexist */
+       } else if (old_p->pre_handler == aggr_pre_handler) {
+               list_add(&p->list, &old_p->list);
+       } else {
+               ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+               if (!ap)
+                       return -ENOMEM;
+               add_aggr_kprobe(ap, old_p);
+               list_add(&p->list, &ap->list);
+       }
+       return ret;
+}
+
+/* kprobe removal house-keeping routines */
+static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
+{
+       *p->addr = p->opcode;
+       hlist_del(&p->hlist);
+       flush_icache_range((unsigned long) p->addr,
+                          (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+       spin_unlock_irqrestore(&kprobe_lock, flags);
+       arch_remove_kprobe(p);
+}
+
+static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
+                                      struct kprobe *p, unsigned long flags)
+{
+       list_del(&p->list);
+       if (list_empty(&old_p->list)) {
+               cleanup_kprobe(old_p, flags);
+               kfree(old_p);
+       } else
+               spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 int register_kprobe(struct kprobe *p)
 {
        int ret = 0;
        unsigned long flags = 0;
+       struct kprobe *old_p;
 
        if ((ret = arch_prepare_kprobe(p)) != 0) {
                goto rm_kprobe;
        }
        spin_lock_irqsave(&kprobe_lock, flags);
-       INIT_HLIST_NODE(&p->hlist);
-       if (get_kprobe(p->addr)) {
-               ret = -EEXIST;
+       old_p = get_kprobe(p->addr);
+       if (old_p) {
+               ret = register_aggr_kprobe(old_p, p);
                goto out;
        }
-       arch_copy_kprobe(p);
 
+       arch_copy_kprobe(p);
+       INIT_HLIST_NODE(&p->hlist);
        hlist_add_head(&p->hlist,
                       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
@@ -107,13 +225,17 @@ rm_kprobe:
 void unregister_kprobe(struct kprobe *p)
 {
        unsigned long flags;
-       arch_remove_kprobe(p);
+       struct kprobe *old_p;
+
        spin_lock_irqsave(&kprobe_lock, flags);
-       *p->addr = p->opcode;
-       hlist_del(&p->hlist);
-       flush_icache_range((unsigned long) p->addr,
-                          (unsigned long) p->addr + sizeof(kprobe_opcode_t));
-       spin_unlock_irqrestore(&kprobe_lock, flags);
+       old_p = get_kprobe(p->addr);
+       if (old_p) {
+               if (old_p->pre_handler == aggr_pre_handler)
+                       cleanup_aggr_kprobe(old_p, p, flags);
+               else
+                       cleanup_kprobe(p, flags);
+       } else
+               spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
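To make the effect of the aggregation concrete, here is a sketch (ours, not from the patch) of a module registering two probes at one address. The API follows the 2.6-era kprobes interface; kallsyms_lookup_name() and the probed symbol are placeholders for illustration. Before this change the second register_kprobe() returned -EEXIST; now both pre-handlers run, invoked via aggr_pre_handler().

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int pre_a(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "probe A hit at %p\n", p->addr);
        return 0;
}

static int pre_b(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "probe B hit at %p\n", p->addr);
        return 0;
}

static struct kprobe kp_a = { .pre_handler = pre_a };
static struct kprobe kp_b = { .pre_handler = pre_b };

static int __init aggr_demo_init(void)
{
        kp_a.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
        kp_b.addr = kp_a.addr;
        register_kprobe(&kp_a);
        register_kprobe(&kp_b); /* used to return -EEXIST; now aggregated */
        return 0;
}

static void __exit aggr_demo_exit(void)
{
        unregister_kprobe(&kp_b);
        unregister_kprobe(&kp_a);
}

module_init(aggr_demo_init);
module_exit(aggr_demo_exit);
MODULE_LICENSE("GPL");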
diff --git a/kernel/module.c b/kernel/module.c
index 5734ab09d3f9..83b3d376708c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1758,6 +1758,7 @@ sys_init_module(void __user *umod,
                const char __user *uargs)
 {
        struct module *mod;
+       mm_segment_t old_fs = get_fs();
        int ret = 0;
 
        /* Must have permission */
@@ -1775,6 +1776,9 @@ sys_init_module(void __user *umod,
                return PTR_ERR(mod);
        }
 
+       /* flush the icache in correct context */
+       set_fs(KERNEL_DS);
+
        /* Flush the instruction cache, since we've played with text */
        if (mod->module_init)
                flush_icache_range((unsigned long)mod->module_init,
@@ -1783,6 +1787,8 @@ sys_init_module(void __user *umod,
        flush_icache_range((unsigned long)mod->module_core,
                           (unsigned long)mod->module_core + mod->core_size);
 
+       set_fs(old_fs);
+
        /* Now sew it into the lists.  They won't access us, since
           strong_try_module_get() will fail. */
        stop_machine_run(__link_module, mod, NR_CPUS);
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7960ddf04a57..4cdebc972ff2 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -156,14 +156,14 @@ static int enter_state(suspend_state_t state)
                goto Unlock;
        }
 
-       pr_debug("PM: Preparing system for suspend\n");
+       pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
        if ((error = suspend_prepare(state)))
                goto Unlock;
 
-       pr_debug("PM: Entering state.\n");
+       pr_debug("PM: Entering %s sleep\n", pm_states[state]);
        error = suspend_enter(state);
 
-       pr_debug("PM: Finishing up.\n");
+       pr_debug("PM: Finishing wakeup.\n");
        suspend_finish(state);
  Unlock:
        up(&pm_sem);
diff --git a/kernel/printk.c b/kernel/printk.c
index 290a07ce2c8a..01b58d7d17ff 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -160,42 +160,6 @@ static int __init console_setup(char *str)
 
 __setup("console=", console_setup);
 
-/**
- * add_preferred_console - add a device to the list of preferred consoles.
- *
- * The last preferred console added will be used for kernel messages
- * and stdin/out/err for init.  Normally this is used by console_setup
- * above to handle user-supplied console arguments; however it can also
- * be used by arch-specific code either to override the user or more
- * commonly to provide a default console (ie from PROM variables) when
- * the user has not supplied one.
- */
-int __init add_preferred_console(char *name, int idx, char *options)
-{
-       struct console_cmdline *c;
-       int i;
-
-       /*
-        *      See if this tty is not yet registered, and
-        *      if we have a slot free.
-        */
-       for(i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
-               if (strcmp(console_cmdline[i].name, name) == 0 &&
-                         console_cmdline[i].index == idx) {
-                               selected_console = i;
-                               return 0;
-               }
-       if (i == MAX_CMDLINECONSOLES)
-               return -E2BIG;
-       selected_console = i;
-       c = &console_cmdline[i];
-       memcpy(c->name, name, sizeof(c->name));
-       c->name[sizeof(c->name) - 1] = 0;
-       c->options = options;
-       c->index = idx;
-       return 0;
-}
-
 static int __init log_buf_len_setup(char *str)
 {
        unsigned long size = memparse(str, &str);
@@ -671,6 +635,42 @@ static void call_console_drivers(unsigned long start, unsigned long end) {}
 #endif
 
 /**
+ * add_preferred_console - add a device to the list of preferred consoles.
+ *
+ * The last preferred console added will be used for kernel messages
+ * and stdin/out/err for init.  Normally this is used by console_setup
+ * above to handle user-supplied console arguments; however it can also
+ * be used by arch-specific code either to override the user or more
+ * commonly to provide a default console (ie from PROM variables) when
+ * the user has not supplied one.
+ */
+int __init add_preferred_console(char *name, int idx, char *options)
+{
+       struct console_cmdline *c;
+       int i;
+
+       /*
+        *      See if this tty is not yet registered, and
+        *      if we have a slot free.
+        */
+       for(i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
+               if (strcmp(console_cmdline[i].name, name) == 0 &&
+                         console_cmdline[i].index == idx) {
+                               selected_console = i;
+                               return 0;
+               }
+       if (i == MAX_CMDLINECONSOLES)
+               return -E2BIG;
+       selected_console = i;
+       c = &console_cmdline[i];
+       memcpy(c->name, name, sizeof(c->name));
+       c->name[sizeof(c->name) - 1] = 0;
+       c->options = options;
+       c->index = idx;
+       return 0;
+}
+
+/**
  * acquire_console_sem - lock the console system for exclusive use.
  *
  * Acquires a semaphore which guarantees that the caller has
diff --git a/kernel/profile.c b/kernel/profile.c
index 0221a50ca867..ad8cbb75ffa2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -49,15 +49,19 @@ static DECLARE_MUTEX(profile_flip_mutex);
 
 static int __init profile_setup(char * str)
 {
+       static char __initdata schedstr[] = "schedule";
        int par;
 
-       if (!strncmp(str, "schedule", 8)) {
+       if (!strncmp(str, schedstr, strlen(schedstr))) {
                prof_on = SCHED_PROFILING;
-               printk(KERN_INFO "kernel schedule profiling enabled\n");
-               if (str[7] == ',')
-                       str += 8;
-       }
-       if (get_option(&str,&par)) {
+               if (str[strlen(schedstr)] == ',')
+                       str += strlen(schedstr) + 1;
+               if (get_option(&str, &par))
+                       prof_shift = par;
+               printk(KERN_INFO
+                       "kernel schedule profiling enabled (shift: %ld)\n",
+                       prof_shift);
+       } else if (get_option(&str, &par)) {
                prof_shift = par;
                prof_on = CPU_PROFILING;
                printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
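For reference, the rewritten parser accepts the following boot-parameter forms (our reading of the hunk above; shift values illustrative):

profile=2               CPU profiling, prof_shift = 2
profile=schedule        schedule profiling, prof_shift left at its default
profile=schedule,5      schedule profiling, prof_shift = 5

Note that the schedule branch now reports the shift in its printk, which the old code did not.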
diff --git a/kernel/sched.c b/kernel/sched.c
index 0dc3158667a2..66b2ed784822 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4243,7 +4243,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
 
        /* No more Mr. Nice Guy. */
        if (dest_cpu == NR_CPUS) {
-               tsk->cpus_allowed = cpuset_cpus_allowed(tsk);
+               cpus_setall(tsk->cpus_allowed);
                dest_cpu = any_online_cpu(tsk->cpus_allowed);
 
                /*
diff --git a/kernel/signal.c b/kernel/signal.c
index 8f3debc77c5b..b3c24c732c5a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -522,7 +522,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 {
        int sig = 0;
 
-       sig = next_signal(pending, mask);
+       /* SIGKILL must have priority, otherwise it is quite easy
+        * to create an unkillable process, sending sig < SIGKILL
+        * to self */
+       if (unlikely(sigismember(&pending->signal, SIGKILL))) {
+               if (!sigismember(mask, SIGKILL))
+                       sig = SIGKILL;
+       }
+
+       if (likely(!sig))
+               sig = next_signal(pending, mask);
        if (sig) {
                if (current->notifier) {
                        if (sigismember(current->notifier_mask, sig)) {
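A userspace sketch (ours) of the priority rule being added: next_signal() effectively finds a low-numbered pending signal first, so a flood of, say, SIGHUP (1) could starve SIGKILL (9) indefinitely without the explicit check.

#include <signal.h>

/* pick the signal to deliver, mirroring __dequeue_signal()'s new rule
 * (illustrative only; the kernel scans per-word bitmaps, not a loop) */
static int pick_signal(const sigset_t *pending, const sigset_t *blocked)
{
        int sig;

        /* SIGKILL first: lower-numbered pending signals would otherwise
         * always be found ahead of it */
        if (sigismember(pending, SIGKILL) && !sigismember(blocked, SIGKILL))
                return SIGKILL;

        for (sig = 1; sig < NSIG; sig++)        /* lowest pending otherwise */
                if (sigismember(pending, sig) && !sigismember(blocked, sig))
                        return sig;
        return 0;
}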
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index e15ed17863f1..0c3f9d8bbe17 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -294,7 +294,7 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
        _raw_spin_unlock(lock);
-       preempt_enable();
+       preempt_enable_no_resched();
        local_bh_enable();
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
@@ -318,7 +318,7 @@ EXPORT_SYMBOL(_read_unlock_irq);
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
        _raw_read_unlock(lock);
-       preempt_enable();
+       preempt_enable_no_resched();
        local_bh_enable();
 }
 EXPORT_SYMBOL(_read_unlock_bh);
@@ -342,7 +342,7 @@ EXPORT_SYMBOL(_write_unlock_irq);
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
        _raw_write_unlock(lock);
-       preempt_enable();
+       preempt_enable_no_resched();
        local_bh_enable();
 }
 EXPORT_SYMBOL(_write_unlock_bh);
@@ -354,7 +354,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
        if (_raw_spin_trylock(lock))
                return 1;
 
-       preempt_enable();
+       preempt_enable_no_resched();
        local_bh_enable();
        return 0;
 }
diff --git a/kernel/sys.c b/kernel/sys.c
index f64e97cabe25..f006632c2ba7 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1195,7 +1195,7 @@ static int groups_from_user(struct group_info *group_info,
        return 0;
 }
 
-/* a simple shell-metzner sort */
+/* a simple Shell sort */
 static void groups_sort(struct group_info *group_info)
 {
        int base, max, stride;
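Since the comment now names the algorithm correctly, here is the same Shell sort shape in a standalone userspace form (our sketch; groups_sort() itself operates on group_info blocks rather than a flat array).

#include <stdio.h>

/* gap insertion sort with the 3h+1 gap sequence, as in groups_sort() */
static void shell_sort(int *a, int n)
{
        int base, max, stride;

        for (stride = 1; stride < n; stride = 3 * stride + 1)
                ;       /* find the first gap at or above n */

        while (stride > 1) {
                stride /= 3;
                max = n - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = base + stride;
                        int tmp = a[right];

                        /* shift larger elements up by one gap, then drop
                         * the saved value into the hole */
                        while (left >= 0 && a[left] > tmp) {
                                a[left + stride] = a[left];
                                left -= stride;
                        }
                        a[left + stride] = tmp;
                }
        }
}

int main(void)
{
        int v[] = { 9, 1, 8, 2, 7, 3 };
        int i;

        shell_sort(v, 6);
        for (i = 0; i < 6; i++)
                printf("%d ", v[i]);    /* prints: 1 2 3 7 8 9 */
        printf("\n");
        return 0;
}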
