author	Paul Mundt <lethal@linux-sh.org>	2007-11-21 03:55:12 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2008-01-27 23:18:53 -0500
commit	fb8e569c1d4f44a4632e2db95a27ed45734d4705 (patch)
tree	3f1f4f7abe68bac6362843af909f0cd923937ba7 /arch/sh64
parent	5db141a9469c8446a179696bc7d374f4cd9b207a (diff)
sh: Fix up user_fpu_struct typo for SH-5.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh64')
-rw-r--r--	arch/sh64/kernel/asm-offsets.c	33
-rw-r--r--	arch/sh64/kernel/init_task.c	46
-rw-r--r--	arch/sh64/kernel/semaphore.c	140
3 files changed, 0 insertions, 219 deletions
diff --git a/arch/sh64/kernel/asm-offsets.c b/arch/sh64/kernel/asm-offsets.c
deleted file mode 100644
index ca76537c16c0..000000000000
--- a/arch/sh64/kernel/asm-offsets.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- */
-
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <asm/thread_info.h>
-
-#define DEFINE(sym, val) \
-	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-int main(void)
-{
-	/* offsets into the thread_info struct */
-	DEFINE(TI_TASK,		offsetof(struct thread_info, task));
-	DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
-	DEFINE(TI_FLAGS,	offsetof(struct thread_info, flags));
-	DEFINE(TI_PRE_COUNT,	offsetof(struct thread_info, preempt_count));
-	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
-	DEFINE(TI_ADDR_LIMIT,	offsetof(struct thread_info, addr_limit));
-	DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
-
-	return 0;
-}
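For context on the file above: the DEFINE() macro plants "->SYM value" markers in the compiler's assembly output, and the build then scrapes those markers into a generated header that assembly sources can include, so the program is compiled but never linked or run. A minimal standalone sketch of the same technique follows; the struct layout, symbol names, and the described post-processing step are illustrative, not the exact Kbuild rule.

/* Illustrative, self-contained version of the asm-offsets trick.
 * Build with "cc -S demo.c" and inspect the resulting .s file. */
#include <stddef.h>

struct thread_info_demo {	/* hypothetical stand-in layout */
	void *task;
	unsigned long flags;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(TI_TASK_DEMO, offsetof(struct thread_info_demo, task));
	DEFINE(TI_FLAGS_DEMO, offsetof(struct thread_info_demo, flags));
	return 0;
}

/* Because of the "i" (immediate) constraint, the assembly output
 * contains marker lines such as (x86 shown):
 *
 *   ->TI_TASK_DEMO $0 offsetof(struct thread_info_demo, task)
 *
 * which a sed/awk pass rewrites into "#define TI_TASK_DEMO 0" in a
 * generated header like asm-offsets.h. */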
diff --git a/arch/sh64/kernel/init_task.c b/arch/sh64/kernel/init_task.c
deleted file mode 100644
index deee8bfd3270..000000000000
--- a/arch/sh64/kernel/init_task.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/kernel/init_task.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- *
- */
-#include <linux/rwsem.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/init_task.h>
-#include <linux/mqueue.h>
-#include <linux/fs.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-struct pt_regs fake_swapper_regs;
-
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is THREAD_SIZE-byte aligned due
- * to the way process stacks are handled. This is done by having a
- * special "init_task" linker map entry..
- */
-union thread_union init_thread_union
-	__attribute__((__section__(".data.init_task"))) =
-		{ INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
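The THREAD_SIZE alignment requirement called out in the comment above exists because kernels of this era located the current thread_info by masking the stack pointer, so every task stack, including the init task's, had to start on a THREAD_SIZE boundary; the dedicated .data.init_task section is what lets the linker script give init_thread_union that alignment. A rough sketch of the masking idea follows; the names and constants are hypothetical, not the actual sh64 implementation.

/* Sketch: deriving thread_info from the stack pointer by masking.
 * Assumes the stack and thread_info share one THREAD_SIZE-aligned
 * block and that THREAD_SIZE is a power of two. */
#include <stdint.h>

#define THREAD_SIZE_DEMO 8192	/* hypothetical: two 4K pages */

struct thread_info_demo { int cpu; unsigned long flags; };

static inline struct thread_info_demo *
current_thread_info_demo(uintptr_t stack_pointer)
{
	/* Round the stack pointer down to the base of its
	 * THREAD_SIZE-sized block; thread_info lives at that base. */
	return (struct thread_info_demo *)
		(stack_pointer & ~((uintptr_t)THREAD_SIZE_DEMO - 1));
}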
diff --git a/arch/sh64/kernel/semaphore.c b/arch/sh64/kernel/semaphore.c
deleted file mode 100644
index 72c16533436e..000000000000
--- a/arch/sh64/kernel/semaphore.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Just taken from alpha implementation.
- * This can't work well, perhaps.
- */
-/*
- * Generic semaphore code. Buyer beware. Do your own
- * specific changes in <asm/semaphore-helper.h>
- */
-
-#include <linux/errno.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/init.h>
-#include <asm/semaphore.h>
-#include <asm/semaphore-helper.h>
-
-spinlock_t semaphore_wake_lock;
-
-/*
- * Semaphores are implemented using a two-way counter:
- * The "count" variable is decremented for each process
- * that tries to sleep, while the "waking" variable is
- * incremented when the "up()" code goes to wake up waiting
- * processes.
- *
- * Notably, the inline "up()" and "down()" functions can
- * efficiently test if they need to do any extra work (up
- * needs to do something only if count was negative before
- * the increment operation.
- *
- * waking_non_zero() (from asm/semaphore.h) must execute
- * atomically.
- *
- * When __up() is called, the count was negative before
- * incrementing it, and we need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to
- * wake up and exit.  ALL waiting processes actually wake up but
- * only the one that gets to the "waking" field first will gate
- * through and acquire the semaphore.  The others will go back
- * to sleep.
- *
- * Note that these functions are only called when there is
- * contention on the lock, and as such all this is the
- * "non-critical" part of the whole semaphore business. The
- * critical part is the inline stuff in <asm/semaphore.h>
- * where we want to avoid any extra jumps and calls.
- */
-void __up(struct semaphore *sem)
-{
-	wake_one_more(sem);
-	wake_up(&sem->wait);
-}
-
-/*
- * Perform the "down" function.  Return zero for semaphore acquired,
- * return negative for signalled out of the function.
- *
- * If called from __down, the return is ignored and the wait loop is
- * not interruptible.  This means that a task waiting on a semaphore
- * using "down()" cannot be killed until someone does an "up()" on
- * the semaphore.
- *
- * If called from __down_interruptible, the return value gets checked
- * upon return.  If the return value is negative then the task continues
- * with the negative value in the return register (it can be tested by
- * the caller).
- *
- * Either form may be used in conjunction with "up()".
- *
- */
-
-#define DOWN_VAR \
-	struct task_struct *tsk = current; \
-	wait_queue_t wait; \
-	init_waitqueue_entry(&wait, tsk);
-
-#define DOWN_HEAD(task_state) \
- \
- \
-	tsk->state = (task_state); \
-	add_wait_queue(&sem->wait, &wait); \
- \
-	/* \
-	 * Ok, we're set up.  sem->count is known to be less than zero \
-	 * so we must wait. \
-	 * \
-	 * We can let go the lock for purposes of waiting. \
-	 * We re-acquire it after awaking so as to protect \
-	 * all semaphore operations. \
-	 * \
-	 * If "up()" is called before we call waking_non_zero() then \
-	 * we will catch it right away.  If it is called later then \
-	 * we will have to go through a wakeup cycle to catch it. \
-	 * \
-	 * Multiple waiters contend for the semaphore lock to see \
-	 * who gets to gate through and who has to wait some more. \
-	 */ \
-	for (;;) {
-
-#define DOWN_TAIL(task_state) \
-		tsk->state = (task_state); \
-	} \
-	tsk->state = TASK_RUNNING; \
-	remove_wait_queue(&sem->wait, &wait);
-
-void __sched __down(struct semaphore * sem)
-{
-	DOWN_VAR
-	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
-	if (waking_non_zero(sem))
-		break;
-	schedule();
-	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
-}
-
-int __sched __down_interruptible(struct semaphore * sem)
-{
-	int ret = 0;
-	DOWN_VAR
-	DOWN_HEAD(TASK_INTERRUPTIBLE)
-
-	ret = waking_non_zero_interruptible(sem, tsk);
-	if (ret)
-	{
-		if (ret == 1)
-			/* ret != 0 only if we get interrupted -arca */
-			ret = 0;
-		break;
-	}
-	schedule();
-	DOWN_TAIL(TASK_INTERRUPTIBLE)
-	return ret;
-}
-
-int __down_trylock(struct semaphore * sem)
-{
-	return waking_non_zero_trylock(sem);
-}
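Two notes on the file above. First, the bare break statements in __down() and __down_interruptible() compile only because DOWN_HEAD() opens a "for (;;) {" loop that DOWN_TAIL() later closes. Second, to make the two-way counter described in the comments concrete, here is a userspace model of the protocol: "count" goes negative as sleepers queue up, and "waking" is the token that lets exactly one awakened task claim the semaphore. This is an illustration under pthreads, not the real implementation; the kernel version uses atomic operations and the per-architecture <asm/semaphore-helper.h> helpers rather than a mutex and condition variable.

/* Userspace model of the two-way counter (illustration only). */
#include <pthread.h>

struct sem_model {
	int count;		/* > 0: free slots; < 0: sleepers queued */
	int waking;		/* wakeup tokens granted by up()          */
	pthread_mutex_t lock;	/* stands in for semaphore_wake_lock      */
	pthread_cond_t wait;	/* stands in for sem->wait                */
};
/* Initialize statically with { .count = 1, .waking = 0,
 * .lock = PTHREAD_MUTEX_INITIALIZER, .wait = PTHREAD_COND_INITIALIZER } */

static void model_down(struct sem_model *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->count < 0) {
		/* Contended: sleep until up() grants a waking token. */
		while (s->waking == 0)
			pthread_cond_wait(&s->wait, &s->lock);
		s->waking--;	/* waking_non_zero(): claim the wakeup */
	}
	pthread_mutex_unlock(&s->lock);
}

static void model_up(struct sem_model *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->count++ < 0) {
		s->waking++;			   /* wake_one_more() */
		pthread_cond_broadcast(&s->wait);  /* wake_up(): wake all */
	}
	pthread_mutex_unlock(&s->lock);
}

Running several threads through model_down()/model_up() reproduces the behavior the comment describes: pthread_cond_broadcast() wakes every sleeper, only the first to reach "s->waking--" gates through, and the rest observe waking == 0 and go back to sleep.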