Diffstat (limited to 'kernel')

 kernel/cpu.c            |  4
 kernel/exit.c           |  2
 kernel/irq/manage.c     | 11
 kernel/power/Kconfig    | 41
 kernel/sched.c          |  1
 kernel/sched_fair.c     | 46
 kernel/signal.c         | 19
 kernel/sys.c            |  3
 kernel/user_namespace.c |  1
 9 files changed, 85 insertions, 43 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 181ae7086029..38033db8d8ec 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -273,7 +273,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	return err;
 }
 
-#ifdef CONFIG_SUSPEND_SMP
+#ifdef CONFIG_PM_SLEEP_SMP
 static cpumask_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
@@ -334,4 +334,4 @@ void enable_nonboot_cpus(void)
 out:
 	mutex_unlock(&cpu_add_remove_lock);
 }
-#endif
+#endif /* CONFIG_PM_SLEEP_SMP */
diff --git a/kernel/exit.c b/kernel/exit.c
index 9578c1ae19ca..06b24b3aa370 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -975,6 +975,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->audit_context))
 		audit_free(tsk);
 
+	tsk->exit_code = code;
 	taskstats_exit(tsk, group_dead);
 
 	exit_mm(tsk);
@@ -996,7 +997,6 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->binfmt)
 		module_put(tsk->binfmt->module);
 
-	tsk->exit_code = code;
 	proc_exit_connector(tsk);
 	exit_task_namespaces(tsk);
 	exit_notify(tsk);
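
The exit.c change moves the tsk->exit_code assignment ahead of taskstats_exit(), so the per-task exit statistics can record the real exit code rather than the zero they previously observed. A minimal user-space sketch of the ordering bug being fixed (struct task and report_exit() are hypothetical stand-ins, not kernel API):

#include <stdio.h>

struct task { long exit_code; };

/* Stand-in for taskstats_exit(): an observer can only report fields
 * that are already filled in by the time it runs. */
static void report_exit(const struct task *t)
{
	printf("observed exit_code=%ld\n", t->exit_code);
}

int main(void)
{
	struct task old_order = { 0 }, new_order = { 0 };

	/* Old ordering: the observer runs first and sees 0. */
	report_exit(&old_order);
	old_order.exit_code = 42;

	/* New ordering: set the field, then notify. */
	new_order.exit_code = 42;
	report_exit(&new_order);	/* observed exit_code=42 */
	return 0;
}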
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 853aefbd184b..7230d914eaa2 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -547,14 +547,11 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	 * We do this before actually registering it, to make sure that
 	 * a 'real' IRQ doesn't run in parallel with our fake
 	 */
-	if (irqflags & IRQF_DISABLED) {
-		unsigned long flags;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		handler(irq, dev_id);
-		local_irq_restore(flags);
-	} else
-		handler(irq, dev_id);
+	local_irq_save(flags);
+	handler(irq, dev_id);
+	local_irq_restore(flags);
 }
 #endif
 
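
This hunk lives in request_irq()'s shared-IRQ debug path (the block closed by the #endif above), which fires one fake invocation of the handler at registration time to catch drivers that cannot survive a spurious call; the fake call now always runs with local interrupts disabled, not only when IRQF_DISABLED was passed. The practical rule for drivers is unchanged: a handler registered with IRQF_SHARED must tolerate being invoked when its device has nothing pending. A self-contained sketch of that shape (the types are minimal stand-ins for the kernel's; my_dev and its pending flag are hypothetical):

#include <stdio.h>

/* Minimal stand-ins for the kernel types, so the shape of a handler
 * that survives the fake invocation can be shown in isolation. */
typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

struct my_dev { int pending; };		/* hypothetical device state */

static irqreturn_t my_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	(void)irq;
	/* Must cope with calls when nothing is pending -- including the
	 * one fake call request_irq() now makes with IRQs disabled. */
	if (!dev->pending)
		return IRQ_NONE;
	dev->pending = 0;		/* ...service the device... */
	return IRQ_HANDLED;
}

int main(void)
{
	struct my_dev dev = { .pending = 0 };

	/* The registration-time probe: called with nothing pending. */
	printf("probe returned %d (0 == IRQ_NONE)\n", my_handler(5, &dev));
	return 0;
}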
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 412859f8d94a..c8580a1e6873 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -72,15 +72,10 @@ config PM_TRACE
 	  CAUTION: this option will cause your machine's real-time clock to be
 	  set to an invalid time after a resume.
 
-config SUSPEND_SMP_POSSIBLE
-	bool
-	depends on (X86 && !X86_VOYAGER) || (PPC64 && (PPC_PSERIES || PPC_PMAC))
-	depends on SMP
-	default y
-
-config SUSPEND_SMP
+config PM_SLEEP_SMP
 	bool
-	depends on SUSPEND_SMP_POSSIBLE && PM_SLEEP
+	depends on SUSPEND_SMP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
+	depends on PM_SLEEP
 	select HOTPLUG_CPU
 	default y
 
@@ -89,20 +84,46 @@ config PM_SLEEP
 	depends on SUSPEND || HIBERNATION
 	default y
 
+config SUSPEND_UP_POSSIBLE
+	bool
+	depends on (X86 && !X86_VOYAGER) || PPC || ARM || BLACKFIN || MIPS \
+		|| SUPERH || FRV
+	depends on !SMP
+	default y
+
+config SUSPEND_SMP_POSSIBLE
+	bool
+	depends on (X86 && !X86_VOYAGER) \
+		|| (PPC && (PPC_PSERIES || PPC_PMAC)) || ARM
+	depends on SMP
+	default y
+
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on PM
-	depends on !SMP || SUSPEND_SMP_POSSIBLE
+	depends on SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
 	  powered and thus its contents are preserved, such as the
 	  suspend-to-RAM state (i.e. the ACPI S3 state).
 
+config HIBERNATION_UP_POSSIBLE
+	bool
+	depends on X86 || PPC64_SWSUSP || FRV || PPC32
+	depends on !SMP
+	default y
+
+config HIBERNATION_SMP_POSSIBLE
+	bool
+	depends on (X86 && !X86_VOYAGER) || PPC64_SWSUSP
+	depends on SMP
+	default y
+
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on PM && SWAP
-	depends on ((X86 || PPC64_SWSUSP || FRV || PPC32) && !SMP) || SUSPEND_SMP_POSSIBLE
+	depends on HIBERNATION_UP_POSSIBLE || HIBERNATION_SMP_POSSIBLE
 	---help---
 	  Enable the suspend to disk (STD) functionality, which is usually
 	  called "hibernation" in user interfaces. STD checkpoints the
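
Beyond renaming SUSPEND_SMP to PM_SLEEP_SMP, the rework splits "can this architecture sleep" into explicit UP and SMP whitelists. One consequence worth noting: the old guard, depends on !SMP || SUSPEND_SMP_POSSIBLE, made SUSPEND available on any uniprocessor build, while the new SUSPEND_UP_POSSIBLE || SUSPEND_SMP_POSSIBLE form requires the architecture to be whitelisted even on UP. A small runnable check of that difference (the flag struct stands in for Kconfig symbols; the PPC/PPC64 terms are elided for brevity):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical flag set standing in for Kconfig symbols. */
struct cfg { bool SMP, X86, X86_VOYAGER, PPC, ARM, BLACKFIN, MIPS, SUPERH, FRV; };

/* Old guard: any UP build could offer SUSPEND. */
static bool suspend_old(struct cfg c)
{
	bool smp_possible = c.SMP && (c.X86 && !c.X86_VOYAGER); /* PPC64 term elided */

	return !c.SMP || smp_possible;
}

/* New guard: UP builds must be on the whitelist too. */
static bool suspend_new(struct cfg c)
{
	bool up_possible = !c.SMP &&
		((c.X86 && !c.X86_VOYAGER) || c.PPC || c.ARM || c.BLACKFIN ||
		 c.MIPS || c.SUPERH || c.FRV);
	bool smp_possible = c.SMP &&
		((c.X86 && !c.X86_VOYAGER) || c.ARM);	/* PPC term elided */

	return up_possible || smp_possible;
}

int main(void)
{
	struct cfg up_unlisted = { .SMP = false };	/* UP build, arch in neither list */

	printf("old: %d, new: %d\n", suspend_old(up_unlisted),
	       suspend_new(up_unlisted));		/* prints "old: 1, new: 0" */
	return 0;
}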
diff --git a/kernel/sched.c b/kernel/sched.c
index 9fe473a190de..b533d6db78aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1587,6 +1587,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.wait_start_fair = 0;
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
+	p->se.prev_sum_exec_runtime = 0;
 	p->se.delta_exec = 0;
 	p->se.delta_fair_run = 0;
 	p->se.delta_fair_sleep = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee3771850aaf..ce39282d9c0d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -354,7 +354,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	delta_fair = calc_delta_fair(delta_exec, lw);
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
-	if (cfs_rq->sleeper_bonus > sysctl_sched_latency) {
+	if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
 		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
 		delta = min(delta, (unsigned long)(
 			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
@@ -489,6 +489,9 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	unsigned long delta_fair;
 
+	if (unlikely(!se->wait_start_fair))
+		return;
+
 	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
 			(u64)(cfs_rq->fair_clock - se->wait_start_fair));
 
@@ -668,7 +671,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void
+static int
 __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			  struct sched_entity *curr, unsigned long granularity)
 {
@@ -679,8 +682,11 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	 * preempt the current task unless the best task has
 	 * a larger than sched_granularity fairness advantage:
 	 */
-	if (__delta > niced_granularity(curr, granularity))
+	if (__delta > niced_granularity(curr, granularity)) {
 		resched_task(rq_of(cfs_rq)->curr);
+		return 1;
+	}
+	return 0;
 }
 
 static inline void
@@ -725,6 +731,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
+	unsigned long gran, ideal_runtime, delta_exec;
 	struct sched_entity *next;
 
 	/*
@@ -741,8 +748,22 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	if (next == curr)
 		return;
 
-	__check_preempt_curr_fair(cfs_rq, next, curr,
-			sched_granularity(cfs_rq));
+	gran = sched_granularity(cfs_rq);
+	ideal_runtime = niced_granularity(curr,
+		max(sysctl_sched_latency / cfs_rq->nr_running,
+			(unsigned long)sysctl_sched_min_granularity));
+	/*
+	 * If we executed more than what the latency constraint suggests,
+	 * reduce the rescheduling granularity. This way the total latency
+	 * of how much a task is not scheduled converges to
+	 * sysctl_sched_latency:
+	 */
+	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
+	if (delta_exec > ideal_runtime)
+		gran = 0;
+
+	if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
+		curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
 }
 
 /**************************************************
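
The entity_tick() rework, paired with the prev_sum_exec_runtime field initialized in the sched.c hunk above, measures how long the current task has run since it last won the CPU: once that delta exceeds the task's share of sysctl_sched_latency, the preemption granularity is forced to 0 so the next eligible task takes over at this tick, and the baseline is reset whenever preemption actually happens. A toy user-space model of the decision (constants and numbers are invented for illustration):

#include <stdio.h>

#define SCHED_LATENCY	20000000UL	/* 20 ms in ns; value invented */
#define MIN_GRANULARITY	 2000000UL	/*  2 ms in ns; value invented */

struct entity { unsigned long sum_exec, prev_sum_exec; };

/* Decide the effective preemption granularity at one tick; the kernel
 * additionally weights "ideal" by nice level (niced_granularity()),
 * which is omitted here. */
static unsigned long tick_gran(const struct entity *curr,
			       unsigned long nr_running, unsigned long gran)
{
	unsigned long share = SCHED_LATENCY / nr_running;
	unsigned long ideal = share > MIN_GRANULARITY ? share : MIN_GRANULARITY;
	unsigned long delta = curr->sum_exec - curr->prev_sum_exec;

	if (delta > ideal)
		gran = 0;	/* overran the latency share: preempt now */
	return gran;
}

int main(void)
{
	/* Ran 12 ms since it last won the CPU. */
	struct entity e = { .sum_exec = 12000000UL, .prev_sum_exec = 0 };

	/* With 4 runnable tasks the ideal slice is 5 ms, so gran drops to 0. */
	printf("gran = %lu\n", tick_gran(&e, 4, MIN_GRANULARITY));
	return 0;
}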
@@ -1076,31 +1097,34 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
 static void task_new_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	struct sched_entity *se = &p->se;
+	struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
 
 	sched_info_queued(p);
 
+	update_curr(cfs_rq);
 	update_stats_enqueue(cfs_rq, se);
 	/*
 	 * Child runs first: we let it run before the parent
 	 * until it reschedules once. We set up the key so that
 	 * it will preempt the parent:
 	 */
-	p->se.fair_key = current->se.fair_key -
-		niced_granularity(&rq->curr->se, sched_granularity(cfs_rq)) - 1;
+	se->fair_key = curr->fair_key -
+		niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
 	/*
 	 * The first wait is dominated by the child-runs-first logic,
 	 * so do not credit it with that waiting time yet:
 	 */
 	if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
-		p->se.wait_start_fair = 0;
+		se->wait_start_fair = 0;
 
 	/*
 	 * The statistical average of wait_runtime is about
 	 * -granularity/2, so initialize the task with that:
 	 */
-	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
-		p->se.wait_runtime = -(sched_granularity(cfs_rq) / 2);
+	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
+		se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
+		schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+	}
 
 	__enqueue_entity(cfs_rq, se);
 }
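
In task_new_fair(), the parent's runtime is now charged via update_curr() before the child's key is computed, the current entity comes from cfs_rq_curr() instead of rq->curr, and the START_DEBIT debit is also booked into the runqueue's schedstats so the per-queue wait_runtime sum stays consistent. The child-runs-first trick itself is unchanged: the child's fair_key is placed just below the parent's so it sorts ahead in the key-ordered runqueue. A toy model of that ordering (plain longs instead of the real rbtree; the granularity value is made up):

#include <stdio.h>

/* The child's key is placed just below the parent's, so it sorts ahead
 * in CFS's key-ordered runqueue and preempts the parent once. The
 * kernel computes the offset with niced_granularity(). */

struct entity { long fair_key; };

int main(void)
{
	struct entity parent = { .fair_key = 1000 };
	struct entity child;
	long granularity = 100;

	child.fair_key = parent.fair_key - granularity - 1;

	printf("child runs first: %s\n",
	       child.fair_key < parent.fair_key ? "yes" : "no");
	return 0;
}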
diff --git a/kernel/signal.c b/kernel/signal.c
index ad63109e413c..3169bed0b4d0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1300,20 +1300,19 @@ struct sigqueue *sigqueue_alloc(void)
 void sigqueue_free(struct sigqueue *q)
 {
 	unsigned long flags;
+	spinlock_t *lock = &current->sighand->siglock;
+
 	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 	/*
 	 * If the signal is still pending remove it from the
-	 * pending queue.
+	 * pending queue. We must hold ->siglock while testing
+	 * q->list to serialize with collect_signal().
 	 */
-	if (unlikely(!list_empty(&q->list))) {
-		spinlock_t *lock = &current->sighand->siglock;
-		read_lock(&tasklist_lock);
-		spin_lock_irqsave(lock, flags);
-		if (!list_empty(&q->list))
-			list_del_init(&q->list);
-		spin_unlock_irqrestore(lock, flags);
-		read_unlock(&tasklist_lock);
-	}
+	spin_lock_irqsave(lock, flags);
+	if (!list_empty(&q->list))
+		list_del_init(&q->list);
+	spin_unlock_irqrestore(lock, flags);
+
 	q->flags &= ~SIGQUEUE_PREALLOC;
 	__sigqueue_free(q);
 }
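
As the new comment in the hunk says, q->list may only be tested under ->siglock, since collect_signal() unlinks entries under that same lock; the old unlocked list_empty() check (and the tasklist_lock it then took) left a window between the test and the unlink. A runnable pthread sketch of the corrected locking shape (build with -lpthread; the names are stand-ins, not kernel API):

#include <pthread.h>
#include <stdio.h>

/* Toy model: a check-then-act on shared state must hold the same lock
 * the consumer uses, or the entry can be dequeued between the check
 * and the action. This shows only the locking shape. */

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int on_queue = 1;	/* stands in for !list_empty(&q->list) */

static void *consumer(void *arg)	/* plays the role of collect_signal() */
{
	(void)arg;
	pthread_mutex_lock(&siglock);
	on_queue = 0;			/* dequeue under the lock */
	pthread_mutex_unlock(&siglock);
	return NULL;
}

static void sigqueue_free_fixed(void)
{
	pthread_mutex_lock(&siglock);	/* test and unlink under one lock */
	if (on_queue)
		on_queue = 0;
	pthread_mutex_unlock(&siglock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, consumer, NULL);
	sigqueue_free_fixed();
	pthread_join(t, NULL);
	printf("on_queue=%d\n", on_queue);
	return 0;
}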
diff --git a/kernel/sys.c b/kernel/sys.c
index 449b81b98b3d..1b33b05d346b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1442,7 +1442,6 @@ asmlinkage long sys_times(struct tms __user * tbuf)
  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
  * LBT 04.03.94
  */
-
 asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 {
 	struct task_struct *p;
@@ -1470,7 +1469,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
 	if (!thread_group_leader(p))
 		goto out;
 
-	if (p->real_parent == group_leader) {
+	if (p->real_parent->tgid == group_leader->tgid) {
 		err = -EPERM;
 		if (task_session(p) != task_session(group_leader))
 			goto out;
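
In sys_setpgid(), the parent check compared task pointers, so a child forked by one of the caller's non-leader threads was not recognized as "ours"; comparing thread-group ids accepts any thread of the caller's group as the real parent. A minimal illustration of the difference (made-up values, not kernel structures):

#include <stdio.h>

/* A child's real_parent may be any thread of the caller's group, so a
 * pointer comparison against the group leader misses children forked
 * by non-leader threads; a tgid comparison catches them all. */

struct task { int tgid; const struct task *real_parent; };

int main(void)
{
	struct task leader = { .tgid = 100, .real_parent = NULL };
	struct task worker = { .tgid = 100, .real_parent = NULL };	/* sibling thread */
	struct task child  = { .tgid = 200, .real_parent = &worker };

	printf("pointer check: %d\n", child.real_parent == &leader);	/* 0 */
	printf("tgid check:    %d\n",
	       child.real_parent->tgid == leader.tgid);			/* 1 */
	return 0;
}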
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d055d987850c..85af9422ea6e 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -81,6 +81,7 @@ void free_user_ns(struct kref *kref)
 	struct user_namespace *ns;
 
 	ns = container_of(kref, struct user_namespace, kref);
+	free_uid(ns->root_user);
 	kfree(ns);
 }
 
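
free_user_ns() now drops the reference on ns->root_user before freeing the namespace; without the free_uid() call, the user_struct acquired when the namespace was cloned would leak. A toy refcount model of the pairing (get_ref/put_ref are stand-ins for get_uid()/free_uid()):

#include <stdio.h>

/* Toy refcount model: an object that takes a reference at creation must
 * drop it on teardown, or the count never returns to its base value and
 * the referenced object can never be freed. */

struct user { int refs; };

static void get_ref(struct user *u) { u->refs++; }
static void put_ref(struct user *u) { u->refs--; }

int main(void)
{
	struct user root_user = { .refs = 1 };

	get_ref(&root_user);		/* taken when the namespace is cloned */

	/* The old free path returned here: refs stuck at 2 -> leak. */
	put_ref(&root_user);		/* the added free_uid() equivalent */

	printf("refs=%d\n", root_user.refs);	/* back to 1 */
	return 0;
}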
