path: root/kernel
author     Harvey Harrison <harvey.harrison@gmail.com>    2008-02-08 07:19:53 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-08 12:22:31 -0500
commit     7ad5b3a505e68cfdc342933d6e0fc0eaa5e0a4f7 (patch)
tree       6715ffd8df509d3d53dea581bb97418a21bc7cbc /kernel
parent     fc9b52cd8f5f459b88adcf67c47668425ae31a78 (diff)
kernel: remove fastcall in kernel/*
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
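Context note (not part of the original commit message): fastcall was a calling-convention annotation that, on i386, asked GCC to pass the first few arguments in registers; once the whole i386 build switched to register argument passing by default, the macro expanded to nothing everywhere, so deleting it changes no generated code. The sketch below shows roughly how the macro was wired up in the headers of this era; the exact spellings and header paths varied by architecture and kernel version, so treat it as illustrative only.

    /* Illustrative approximation of the old definitions
     * (roughly include/asm-i386/linkage.h and include/linux/linkage.h). */

    /* On i386, fastcall asked GCC to pass up to three arguments in
     * %eax/%edx/%ecx instead of on the stack: */
    #define fastcall __attribute__((regparm(3)))

    /* Every other architecture fell back to a no-op definition: */
    #ifndef fastcall
    #define fastcall
    #endif

    /* With -mregparm=3 as the default for the whole i386 build,
     * "void fastcall put_files_struct(...)" and "void put_files_struct(...)"
     * use the same calling convention, which is why this patch can strip
     * the keyword without any functional change. */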
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/exit.c        |  4
-rw-r--r--   kernel/fork.c        |  2
-rw-r--r--   kernel/irq/chip.c    | 10
-rw-r--r--   kernel/irq/handle.c  |  4
-rw-r--r--   kernel/mutex-debug.c |  2
-rw-r--r--   kernel/mutex.c       | 29
-rw-r--r--   kernel/pid.c         | 18
-rw-r--r--   kernel/sched.c       | 16
-rw-r--r--   kernel/softirq.c     |  8
-rw-r--r--   kernel/timer.c       |  6
-rw-r--r--   kernel/wait.c        | 26
-rw-r--r--   kernel/workqueue.c   | 10
12 files changed, 67 insertions(+), 68 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 81345ba4b253..3b893e78ce61 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -458,7 +458,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
 	return files;
 }
 
-void fastcall put_files_struct(struct files_struct *files)
+void put_files_struct(struct files_struct *files)
 {
 	struct fdtable *fdt;
 
@@ -887,7 +887,7 @@ static inline void exit_child_reaper(struct task_struct *tsk)
 	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
 }
 
-fastcall NORET_TYPE void do_exit(long code)
+NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
 	int group_dead;
diff --git a/kernel/fork.c b/kernel/fork.c
index 31a2bad63a08..4363a4eb84e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -390,7 +390,7 @@ struct mm_struct * mm_alloc(void)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-void fastcall __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 44019ce30a14..10e006643c8c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -286,7 +286,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
  * Note: The caller is expected to handle the ack, clear, mask and
  * unmask issues if necessary.
  */
-void fastcall
+void
 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irqaction *action;
@@ -327,7 +327,7 @@ out_unlock:
  * it after the associated handler has acknowledged the device, so the
  * interrupt line is back to inactive.
  */
-void fastcall
+void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int cpu = smp_processor_id();
@@ -375,7 +375,7 @@ out_unlock:
  * for modern forms of interrupt handlers, which handle the flow
  * details in hardware, transparently.
  */
-void fastcall
+void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
 	unsigned int cpu = smp_processor_id();
@@ -434,7 +434,7 @@ out:
  * the handler was running. If all pending interrupts are handled, the
  * loop is left.
  */
-void fastcall
+void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
 	const unsigned int cpu = smp_processor_id();
@@ -505,7 +505,7 @@ out_unlock:
  *
  * Per CPU interrupts on SMP machines without locking requirements
  */
-void fastcall
+void
 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index dc335ad27525..5fa6198e9139 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -25,7 +25,7 @@
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void fastcall
+void
 handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
@@ -163,7 +163,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
  * This is the original x86 implementation which is used for every
  * interrupt type.
  */
-fastcall unsigned int __do_IRQ(unsigned int irq)
+unsigned int __do_IRQ(unsigned int irq)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *action;
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index d17436cdea1b..3aaa06c561de 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -107,7 +107,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
  * use of the mutex is forbidden. The mutex must not be locked when
  * this function is called.
  */
-void fastcall mutex_destroy(struct mutex *lock)
+void mutex_destroy(struct mutex *lock)
 {
 	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
 	lock->magic = NULL;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d9ec9b666250..d046a345d365 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void fastcall noinline __sched
+static void noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -82,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -95,8 +95,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -109,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) up().
  */
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
 {
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
@@ -234,7 +233,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 /*
  * Release the lock, slowpath:
  */
-static fastcall inline void
+static inline void
 __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -271,7 +270,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
+static noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -282,10 +281,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
-static int fastcall noinline __sched
+static noinline int __sched
 __mutex_lock_killable_slowpath(atomic_t *lock_count);
 
-static noinline int fastcall __sched
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -299,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down_interruptible().
  */
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -308,7 +307,7 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
-int fastcall __sched mutex_lock_killable(struct mutex *lock)
+int __sched mutex_lock_killable(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -316,7 +315,7 @@ int fastcall __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static void fastcall noinline __sched
+static noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -324,7 +323,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
 }
 
-static int fastcall noinline __sched
+static noinline int __sched
 __mutex_lock_killable_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -332,7 +331,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
 }
 
-static noinline int fastcall __sched
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -381,7 +380,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 					__mutex_trylock_slowpath);
diff --git a/kernel/pid.c b/kernel/pid.c
index a32859c4a3cd..477691576b33 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(is_container_init);
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct pid_namespace *pid_ns, int pid)
 {
 	struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
 	int offset = pid & BITS_PER_PAGE_MASK;
@@ -198,7 +198,7 @@ int next_pidmap(struct pid_namespace *pid_ns, int last)
 	return -1;
 }
 
-fastcall void put_pid(struct pid *pid)
+void put_pid(struct pid *pid)
 {
 	struct pid_namespace *ns;
 
@@ -220,7 +220,7 @@ static void delayed_put_pid(struct rcu_head *rhp)
 	put_pid(pid);
 }
 
-fastcall void free_pid(struct pid *pid)
+void free_pid(struct pid *pid)
 {
 	/* We can be called with write_lock_irq(&tasklist_lock) held */
 	int i;
@@ -286,7 +286,7 @@ out_free:
 	goto out;
 }
 
-struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns)
+struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
 	struct hlist_node *elem;
 	struct upid *pnr;
@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(find_pid);
 /*
  * attach_pid() must be called with the tasklist_lock write-held.
  */
-int fastcall attach_pid(struct task_struct *task, enum pid_type type,
+int attach_pid(struct task_struct *task, enum pid_type type,
 		struct pid *pid)
 {
 	struct pid_link *link;
@@ -328,7 +328,7 @@ int fastcall attach_pid(struct task_struct *task, enum pid_type type,
 	return 0;
 }
 
-void fastcall detach_pid(struct task_struct *task, enum pid_type type)
+void detach_pid(struct task_struct *task, enum pid_type type)
 {
 	struct pid_link *link;
 	struct pid *pid;
@@ -348,7 +348,7 @@ void fastcall detach_pid(struct task_struct *task, enum pid_type type)
 }
 
 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
-void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
+void transfer_pid(struct task_struct *old, struct task_struct *new,
 			   enum pid_type type)
 {
 	new->pids[type].pid = old->pids[type].pid;
@@ -356,7 +356,7 @@ void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
 	old->pids[type].pid = NULL;
 }
 
-struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 {
 	struct task_struct *result = NULL;
 	if (pid) {
@@ -408,7 +408,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 	return pid;
 }
 
-struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type)
+struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
 {
 	struct task_struct *result;
 	rcu_read_lock();
diff --git a/kernel/sched.c b/kernel/sched.c
index 9474b23c28bf..3eedd5260907 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1893,13 +1893,13 @@ out:
 	return success;
 }
 
-int fastcall wake_up_process(struct task_struct *p)
+int wake_up_process(struct task_struct *p)
 {
 	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
 
-int fastcall wake_up_state(struct task_struct *p, unsigned int state)
+int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
 }
@@ -1986,7 +1986,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
@@ -3753,7 +3753,7 @@ void scheduler_tick(void)
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
-void fastcall add_preempt_count(int val)
+void add_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -3769,7 +3769,7 @@ void fastcall add_preempt_count(int val)
 }
 EXPORT_SYMBOL(add_preempt_count);
 
-void fastcall sub_preempt_count(int val)
+void sub_preempt_count(int val)
 {
 	/*
 	 * Underflow?
@@ -4067,7 +4067,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
  */
-void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, void *key)
 {
 	unsigned long flags;
@@ -4081,7 +4081,7 @@ EXPORT_SYMBOL(__wake_up);
 /*
  * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  */
-void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
 {
 	__wake_up_common(q, mode, 1, 0, NULL);
 }
@@ -4099,7 +4099,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall
+void
 __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7837d45419e..5b3aea5f471e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -320,7 +320,7 @@ void irq_exit(void)
 /*
  * This function must run with irqs disabled!
  */
-inline fastcall void raise_softirq_irqoff(unsigned int nr)
+inline void raise_softirq_irqoff(unsigned int nr)
 {
 	__raise_softirq_irqoff(nr);
 
@@ -337,7 +337,7 @@ inline fastcall void raise_softirq_irqoff(unsigned int nr)
 		wakeup_softirqd();
 }
 
-void fastcall raise_softirq(unsigned int nr)
+void raise_softirq(unsigned int nr)
 {
 	unsigned long flags;
 
@@ -363,7 +363,7 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
 
-void fastcall __tasklet_schedule(struct tasklet_struct *t)
+void __tasklet_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 
@@ -376,7 +376,7 @@ void fastcall __tasklet_schedule(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(__tasklet_schedule);
 
-void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
+void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
 	unsigned long flags;
 
diff --git a/kernel/timer.c b/kernel/timer.c
index 1c4183cd8bdb..99b00a25f88b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -327,7 +327,7 @@ static void timer_stats_account_timer(struct timer_list *timer) {}
  * init_timer() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void fastcall init_timer(struct timer_list *timer)
+void init_timer(struct timer_list *timer)
 {
 	timer->entry.next = NULL;
 	timer->base = __raw_get_cpu_var(tvec_bases);
@@ -339,7 +339,7 @@ void fastcall init_timer(struct timer_list *timer)
 }
 EXPORT_SYMBOL(init_timer);
 
-void fastcall init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable(struct timer_list *timer)
 {
 	init_timer(timer);
 	timer_set_deferrable(timer);
@@ -1042,7 +1042,7 @@ static void process_timeout(unsigned long __data)
  *
  * In all cases the return value is guaranteed to be non-negative.
  */
-fastcall signed long __sched schedule_timeout(signed long timeout)
+signed long __sched schedule_timeout(signed long timeout)
 {
 	struct timer_list timer;
 	unsigned long expire;
diff --git a/kernel/wait.c b/kernel/wait.c
index f9876888a569..c275c56cf2d3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -18,7 +18,7 @@ void init_waitqueue_head(wait_queue_head_t *q)
 
 EXPORT_SYMBOL(init_waitqueue_head);
 
-void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -29,7 +29,7 @@ void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue);
 
-void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
+void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -40,7 +40,7 @@ void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);
 
-void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
+void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(remove_wait_queue);
  * stops them from bleeding out - it would still allow subsequent
  * loads to move into the critical region).
  */
-void fastcall
+void
 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -82,7 +82,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait);
 
-void fastcall
+void
 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
@@ -101,7 +101,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
 
-void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
+void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	unsigned long flags;
 
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(wake_bit_function);
  * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
  * permitted return codes. Nonzero return codes halt waiting and return.
  */
-int __sched fastcall
+int __sched
 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 	      int (*action)(void *), unsigned mode)
 {
@@ -173,7 +173,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 }
 EXPORT_SYMBOL(__wait_on_bit);
 
-int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
+int __sched out_of_line_wait_on_bit(void *word, int bit,
 					int (*action)(void *), unsigned mode)
 {
 	wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -183,7 +183,7 @@ int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit);
 
-int __sched fastcall
+int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 			int (*action)(void *), unsigned mode)
 {
@@ -201,7 +201,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 }
 EXPORT_SYMBOL(__wait_on_bit_lock);
 
-int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
 					int (*action)(void *), unsigned mode)
 {
 	wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -211,7 +211,7 @@ int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
 }
 EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
 
-void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
+void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 	if (waitqueue_active(wq))
@@ -236,13 +236,13 @@ EXPORT_SYMBOL(__wake_up_bit);
  * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
  * because spin_unlock() does not guarantee a memory barrier.
  */
-void fastcall wake_up_bit(void *word, int bit)
+void wake_up_bit(void *word, int bit)
 {
 	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
 }
 EXPORT_SYMBOL(wake_up_bit);
 
-fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
+wait_queue_head_t *bit_waitqueue(void *word, int bit)
 {
 	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
 	const struct zone *zone = page_zone(virt_to_page(word));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52db48e7f6e7..3f168e00ce5b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -161,7 +161,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
  */
-int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	int ret = 0;
 
@@ -192,7 +192,7 @@ void delayed_work_timer_fn(unsigned long __data)
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  */
-int fastcall queue_delayed_work(struct workqueue_struct *wq,
+int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
 	timer_stats_timer_set_start_info(&dwork->timer);
@@ -388,7 +388,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  * This function used to run the workqueues itself. Now we just wait for the
  * helper threads to do it.
  */
-void fastcall flush_workqueue(struct workqueue_struct *wq)
+void flush_workqueue(struct workqueue_struct *wq)
 {
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
@@ -546,7 +546,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
  *
  * This puts a job in the kernel-global workqueue.
  */
-int fastcall schedule_work(struct work_struct *work)
+int schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
@@ -560,7 +560,7 @@ EXPORT_SYMBOL(schedule_work);
  * After waiting for a given time this puts a job in the kernel-global
  * workqueue.
  */
-int fastcall schedule_delayed_work(struct delayed_work *dwork,
+int schedule_delayed_work(struct delayed_work *dwork,
 					unsigned long delay)
 {
 	timer_stats_timer_set_start_info(&dwork->timer);