author		Harvey Harrison <harvey.harrison@gmail.com>	2008-02-08 07:19:53 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2008-02-08 12:22:31 -0500
commit		7ad5b3a505e68cfdc342933d6e0fc0eaa5e0a4f7 (patch)
tree		6715ffd8df509d3d53dea581bb97418a21bc7cbc /kernel/mutex.c
parent		fc9b52cd8f5f459b88adcf67c47668425ae31a78 (diff)
kernel: remove fastcall in kernel/*
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
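For reference, a rough illustration of what the removed annotation amounted to. This is a hand-written userspace sketch, not code from this commit; the macro definition mirrors the old i386 one (regparm(3) on 32-bit x86, empty elsewhere) as best I recall it, and the stated rationale (the kernel building 32-bit x86 with -mregparm=3 globally, making the per-function marker redundant) is my understanding of why this series is a no-op for generated code:

/*
 * Sketch of the old 'fastcall' annotation.  On 32-bit x86 it expanded to
 * __attribute__((regparm(3))), asking GCC to pass the first three
 * arguments in registers; on every other architecture it expanded to
 * nothing.  Once the whole 32-bit x86 kernel was compiled with
 * -mregparm=3, the per-function annotation no longer changed anything,
 * which is why it can be deleted without affecting calling conventions.
 */
#include <stdio.h>

#ifdef __i386__
#define fastcall __attribute__((regparm(3)))
#else
#define fastcall
#endif

/* With -mregparm=3 (or on any non-x86-32 target) these two functions
 * end up with identical calling conventions. */
static int fastcall add_annotated(int a, int b) { return a + b; }
static int add_plain(int a, int b) { return a + b; }

int main(void)
{
	printf("%d %d\n", add_annotated(1, 2), add_plain(1, 2));
	return 0;
}

In that situation, dropping 'fastcall' throughout kernel/* is a mechanical, behavior-preserving cleanup, which is what the diff below does for kernel/mutex.c.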
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c	29
1 file changed, 14 insertions, 15 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d9ec9b666250..d046a345d365 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void fastcall noinline __sched
+static void noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -82,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void inline fastcall __sched mutex_lock(struct mutex *lock)
+void inline __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -95,8 +95,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count);
+static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -109,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) up().
  */
-void fastcall __sched mutex_unlock(struct mutex *lock)
+void __sched mutex_unlock(struct mutex *lock)
 {
 	/*
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
@@ -234,7 +233,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 /*
  * Release the lock, slowpath:
  */
-static fastcall inline void
+static inline void
 __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -271,7 +270,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
+static noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -282,10 +281,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
-static int fastcall noinline __sched
+static noinline int __sched
 __mutex_lock_killable_slowpath(atomic_t *lock_count);
 
-static noinline int fastcall __sched
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -299,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
  *
  * This function is similar to (but not equivalent to) down_interruptible().
  */
-int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
+int __sched mutex_lock_interruptible(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -308,7 +307,7 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
-int fastcall __sched mutex_lock_killable(struct mutex *lock)
+int __sched mutex_lock_killable(struct mutex *lock)
 {
 	might_sleep();
 	return __mutex_fastpath_lock_retval
@@ -316,7 +315,7 @@ int fastcall __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static void fastcall noinline __sched
+static noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -324,7 +323,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
 }
 
-static int fastcall noinline __sched
+static noinline int __sched
 __mutex_lock_killable_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -332,7 +331,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
 	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
 }
 
-static noinline int fastcall __sched
+static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -381,7 +380,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
  * This function must not be used in interrupt context. The
  * mutex must be released by the same task that acquired it.
  */
-int fastcall __sched mutex_trylock(struct mutex *lock)
+int __sched mutex_trylock(struct mutex *lock)
 {
 	return __mutex_fastpath_trylock(&lock->count,
 					__mutex_trylock_slowpath);