author	KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2010-10-20 19:01:12 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-23 11:56:48 -0400
commit	fe7de49f9d4e53f24ec9ef762a503f70b562341c (patch)
tree	766b012c7199f12625bc909748175d9e37b0b87d /kernel
parent	d4429f608abde89e8bc1e24b43cd503feb95c496 (diff)
sched: Make sched_param argument static in sched_setscheduler() callers
Andrew Morton pointed out that almost all sched_setscheduler() callers pass
fixed parameters, so their sched_param structures can be made static. This
reduces runtime memory use a little.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reported-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: James Morris <jmorris@namei.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
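For readers outside the kernel tree, the pattern this patch touches can be
sketched in user space with the POSIX sched_setscheduler() (which takes a
pid_t rather than a task_struct pointer, and whose sched_param argument is
already const-qualified). This is only an illustrative sketch with made-up
helper names (set_fifo_on_stack, set_fifo_static), not the kernel code
itself: a parameter block that is identical and read-only on every call can
live in static storage instead of being rebuilt on the stack each time.

	#include <sched.h>
	#include <stdio.h>

	/* Before: the initializer runs on every call and the structure
	 * occupies stack space for the duration of the call. */
	static int set_fifo_on_stack(void)
	{
		struct sched_param param = { .sched_priority = 5 };

		return sched_setscheduler(0, SCHED_FIFO, &param);
	}

	/* After: the structure is emitted once into static storage and no
	 * per-call initialization code is generated.  This is safe only
	 * because the callee never writes through the pointer, which is
	 * what the const struct sched_param * signature guarantees. */
	static int set_fifo_static(void)
	{
		static struct sched_param param = { .sched_priority = 5 };

		return sched_setscheduler(0, SCHED_FIFO, &param);
	}

	int main(void)
	{
		/* pid 0 means "the calling process"; both calls usually
		 * require CAP_SYS_NICE, so expect EPERM when unprivileged. */
		if (set_fifo_on_stack() == -1 || set_fifo_static() == -1)
			perror("sched_setscheduler");
		return 0;
	}

The same reasoning drives the hunks below: the sched_param values are
compile-time constants, and the kernel/sched.c part of the patch
const-qualifies the parameter so that handing in a shared static structure
is clearly legitimate.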
Diffstat (limited to 'kernel')
 kernel/irq/manage.c           | 4 +++-
 kernel/kthread.c              | 2 +-
 kernel/sched.c                | 6 +++---
 kernel/softirq.c              | 4 +++-
 kernel/trace/trace_selftest.c | 2 +-
 kernel/watchdog.c             | 2 +-
 6 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 644e8d5fa367..850f030fa0c2 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -573,7 +573,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  */
 static int irq_thread(void *data)
 {
-	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+	static struct sched_param param = {
+		.sched_priority = MAX_USER_RT_PRIO/2,
+	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	int wake, oneshot = desc->status & IRQ_ONESHOT;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 2dc3786349d1..74cf6f5e7ade 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -148,7 +148,7 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 	wait_for_completion(&create.done);
 
 	if (!IS_ERR(create.result)) {
-		struct sched_param param = { .sched_priority = 0 };
+		static struct sched_param param = { .sched_priority = 0 };
 		va_list args;
 
 		va_start(args, namefmt);
diff --git a/kernel/sched.c b/kernel/sched.c
index d42992bccdfa..51944e8c38a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4701,7 +4701,7 @@ static bool check_same_owner(struct task_struct *p)
 }
 
 static int __sched_setscheduler(struct task_struct *p, int policy,
-				struct sched_param *param, bool user)
+				const struct sched_param *param, bool user)
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
@@ -4856,7 +4856,7 @@ recheck:
  * NOTE that the task may be already dead.
  */
 int sched_setscheduler(struct task_struct *p, int policy,
-		       struct sched_param *param)
+		       const struct sched_param *param)
 {
 	return __sched_setscheduler(p, policy, param, true);
 }
@@ -4874,7 +4874,7 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
  * but our caller might not have that capability.
  */
 int sched_setscheduler_nocheck(struct task_struct *p, int policy,
-			       struct sched_param *param)
+			       const struct sched_param *param)
 {
 	return __sched_setscheduler(p, policy, param, false);
 }
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fc978889b194..081869ed3a9f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -851,7 +851,9 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 				  cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN: {
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+		static struct sched_param param = {
+			.sched_priority = MAX_RT_PRIO-1
+		};
 
 		p = per_cpu(ksoftirqd, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = NULL;
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 155a415b3209..562c56e048fd 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -558,7 +558,7 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
 static int trace_wakeup_test_thread(void *data)
 {
 	/* Make this a RT thread, doesn't need to be too high */
-	struct sched_param param = { .sched_priority = 5 };
+	static struct sched_param param = { .sched_priority = 5 };
 	struct completion *x = data;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bafba687a6d8..94ca779aa9c2 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -307,7 +307,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
  */
 static int watchdog(void *unused)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
 	sched_setscheduler(current, SCHED_FIFO, &param);