about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
author	Arjan van de Ven <arjan@infradead.org>	2006-01-14 16:20:43 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-14 21:27:06 -0500
commit	858119e159384308a5dde67776691a2ebf70df0f (patch)
tree	f360768f999d51edc0863917ce0bf79e88c0ec4c /kernel
parent	b0a9499c3dd50d333e2aedb7e894873c58da3785 (diff)
[PATCH] Unlinline a bunch of other functions
Remove the "inline" keyword from a bunch of big functions in the kernel with
the goal of shrinking it by 30kb to 40kb.

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeff Garzik <jgarzik@pobox.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c	2
-rw-r--r--	kernel/exit.c	10
-rw-r--r--	kernel/posix-timers.c	8
-rw-r--r--	kernel/sched.c	16
-rw-r--r--	kernel/signal.c	4
-rw-r--r--	kernel/workqueue.c	2
6 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2a75e44e1a41..d4b6bd7d74e5 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1554,7 +1554,7 @@ struct ctr_struct {
  * when reading out p->cpuset, as we don't really care if it changes
  * on the next cycle, and we are not going to try to dereference it.
  */
-static inline int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
+static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
 {
 	int n = 0;
 	struct task_struct *g, *p;
diff --git a/kernel/exit.c b/kernel/exit.c
index 7fb541cb8d69..93cee3671332 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -193,7 +193,7 @@ int is_orphaned_pgrp(int pgrp)
 	return retval;
 }
 
-static inline int has_stopped_jobs(int pgrp)
+static int has_stopped_jobs(int pgrp)
 {
 	int retval = 0;
 	struct task_struct *p;
@@ -230,7 +230,7 @@ static inline int has_stopped_jobs(int pgrp)
  *
  * NOTE that reparent_to_init() gives the caller full capabilities.
  */
-static inline void reparent_to_init(void)
+static void reparent_to_init(void)
 {
 	write_lock_irq(&tasklist_lock);
 
@@ -369,7 +369,7 @@ void daemonize(const char *name, ...)
 
 EXPORT_SYMBOL(daemonize);
 
-static inline void close_files(struct files_struct * files)
+static void close_files(struct files_struct * files)
 {
 	int i, j;
 	struct fdtable *fdt;
@@ -543,7 +543,7 @@ static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_re
 	p->real_parent = reaper;
 }
 
-static inline void reparent_thread(task_t *p, task_t *father, int traced)
+static void reparent_thread(task_t *p, task_t *father, int traced)
 {
 	/* We don't want people slaying init. */
 	if (p->exit_signal != -1)
@@ -607,7 +607,7 @@ static inline void reparent_thread(task_t *p, task_t *father, int traced)
  * group, and if no such member exists, give it to
  * the global child reaper process (ie "init")
  */
-static inline void forget_original_parent(struct task_struct * father,
+static void forget_original_parent(struct task_struct * father,
 					  struct list_head *to_release)
 {
 	struct task_struct *p, *reaper = father;
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 9e66e614862a..197208b3aa2a 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -192,7 +192,7 @@ static inline int common_clock_set(const clockid_t which_clock,
 	return do_sys_settimeofday(tp, NULL);
 }
 
-static inline int common_timer_create(struct k_itimer *new_timer)
+static int common_timer_create(struct k_itimer *new_timer)
 {
 	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock);
 	new_timer->it.real.timer.data = new_timer;
@@ -361,7 +361,7 @@ static int posix_timer_fn(void *data)
 	return ret;
 }
 
-static inline struct task_struct * good_sigevent(sigevent_t * event)
+static struct task_struct * good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
 
@@ -687,7 +687,7 @@ sys_timer_getoverrun(timer_t timer_id)
 
 /* Set a POSIX.1b interval timer. */
 /* timr->it_lock is taken. */
-static inline int
+static int
 common_timer_set(struct k_itimer *timr, int flags,
 		 struct itimerspec *new_setting, struct itimerspec *old_setting)
 {
@@ -829,7 +829,7 @@ retry_delete:
 /*
  * return timer owned by the process, used by exit_itimers
  */
-static inline void itimer_delete(struct k_itimer *timer)
+static void itimer_delete(struct k_itimer *timer)
 {
 	unsigned long flags;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index e1dc903d5a75..788ecce1e0e4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -521,7 +521,7 @@ static inline void sched_info_dequeued(task_t *t)
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static inline void sched_info_arrive(task_t *t)
+static void sched_info_arrive(task_t *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
@@ -1007,7 +1007,7 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long running = rq->nr_running;
@@ -1870,7 +1870,7 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static inline
+static
 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
@@ -1892,7 +1892,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
-static inline
+static
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
@@ -2378,7 +2378,7 @@ out_balanced:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2762,7 +2762,7 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
 		resched_task(rq->idle);
 }
 
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2816,7 +2816,7 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
 	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
 
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -6008,7 +6008,7 @@ next_sg:
  * Detach sched domains from a group of cpus specified in cpu_map
  * These cpus will now be attached to the NULL domain
  */
-static inline void detach_destroy_domains(const cpumask_t *cpu_map)
+static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
 	int i;
 
diff --git a/kernel/signal.c b/kernel/signal.c
index 1da2e74beb97..5dafbd36d62e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -476,7 +476,7 @@ unblock_all_signals(void)
 	spin_unlock_irqrestore(&current->sighand->siglock, flags);
 }
 
-static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
 {
 	struct sigqueue *q, *first = NULL;
 	int still_pending = 0;
@@ -1881,7 +1881,7 @@ do_signal_stop(int signr)
  * We return zero if we still hold the siglock and should look
  * for another signal without checking group_stop_count again.
  */
-static inline int handle_group_stop(void)
+static int handle_group_stop(void)
 {
 	int stop_count;
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82c4fa70595c..b052e2c4c710 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -147,7 +147,7 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	return ret;
 }
 
-static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
+static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	unsigned long flags;
 